        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s>" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)

class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?
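
    # A resumed pack lives in the upload directory while in the 'resumed'
    # state and is renamed into the packs directory when finish() succeeds;
    # access_tuple() below points readers at whichever location is current.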

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'
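
    # finish() relies on _check_references() (defined on the base Pack
    # class), which uses _get_external_refs() below to ensure every
    # compression parent referenced by this pack is actually available.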

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this index,
        which are not contained *in* this index. They may be present elsewhere.
        """
        return index.external_references(1)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not atomic.
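        # (A NewPack is normally created via
        # RepositoryPackCollection._start_write_group(), which instantiates
        # the pack_factory attribute, rather than being constructed directly.)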


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = NewPack
    resumed_pack_factory = ResumedPack

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are
            written while they're being created.
        :param pack_transport: Addresses the directory of existing complete
            packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        # XXX: This should call self.reset()
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        self._resumed_packs = []

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)
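
    # Each AggregateIndex above wraps a CombinedGraphIndex over the matching
    # per-pack indices; add_pack_to_memory() and _remove_pack_from_memory()
    # keep those combined views in sync with self.packs.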

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has."""
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
                                      reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result
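
    # Packers raise errors.RetryWithNewPacks if a pack they were reading
    # vanishes underneath them (e.g. another process repacked the repository);
    # the reload_func supplied here lets them refresh the pack list and retry.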

    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = _packer_class(self, packs, '.autopack',
                                   reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs.
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
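
    # Formats whose packer recompresses content (pack_compresses is True, as
    # for the GC-based formats) can still benefit from pack() when only one
    # pack file exists, so they are never reported as already packed.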

    def pack(self, hint=None):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes)
            disk_nodes      The final set of nodes that should be referenced
            deleted_nodes   Nodes which have been removed from when we started
            new_nodes       Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))
        orig_disk_nodes = set(disk_nodes)

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when we
        # locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
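
    # Each node in these sets is ((name,), 'size size ...'): the pack name as
    # a 1-tuple key, plus the index sizes serialised the same way they are
    # stored in the pack-names index.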

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced; forget
                    # the cached Pack object and re-read it with the new sizes.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = None
                if obsolete_packs:
                    to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]
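
    # reload_pack_names() is also the reload callback handed to each
    # AggregateIndex in __init__, so a failed index lookup can trigger a
    # re-read of pack-names automatically.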

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)
        if self.chk_index is not None:
            self.chk_index.add_writable_index(self._new_pack.chk_index,
                self._new_pack)
            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone. But if they're not there we shouldn't fail in this
            # case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found. If the list is
            empty no problems were found.
        """
        # The base implementation does no checks. GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph | HPSS | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    KnitVersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      particular pack file's contents.
    ===================================================
    """

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        KnitRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        index_transport = self._transport.clone('indices')
        self._pack_collection = RepositoryPackCollection(self, self._transport,
            index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class,
            use_chk_index=self._format.supports_chks,
            )
        self.inventories = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.inventory_index.data_access,
            max_delta_chain=200)
        self.revisions = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                deltas=False, parents=True, is_locked=self.is_locked,
                track_external_parent_refs=True),
            data_access=self._pack_collection.revision_index.data_access,
            max_delta_chain=0)
        self.signatures = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                deltas=False, parents=False, is_locked=self.is_locked),
            data_access=self._pack_collection.signature_index.data_access,
            max_delta_chain=0)
        self.texts = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        if _format.supports_chks:
            # No graph, no compression:- references from chks are between
            # different objects not temporal versions of the same; and without
            # some sort of temporal structure knit compression will just fail.
            self.chk_bytes = KnitVersionedFiles(
                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
                    add_callback=self._pack_collection.chk_index.add_callback,
                    deltas=False, parents=False, is_locked=self.is_locked),
                data_access=self._pack_collection.chk_index.data_access,
                max_delta_chain=0)
        else:
            self.chk_bytes = None
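        # max_delta_chain=200 permits delta compression for texts and
        # inventories; revisions, signatures and chk records use 0 and are
        # stored as fulltexts.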
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _warn_if_deprecated(self, branch=None):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            super(KnitPackRepository, self)._warn_if_deprecated(branch)

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _get_source(self, to_format):
        if to_format.network_name() == self._format.network_name():
            return KnitPackStreamSource(self, to_format)
        return super(KnitPackRepository, self)._get_source(to_format)

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()
            self.control_files.unlock()

        if not self.is_locked():
            for repo in self._fallback_repositories:
                repo.unlock()


class KnitPackStreamSource(StreamSource):
    """A StreamSource used to transfer data between same-format KnitPack repos.

    This source assumes:
        1) Same serialization format for all objects
        2) Same root information
        3) XML format inventories
        4) Atomic inserts (so we can stream inventory texts before text
           content)
        5) No chk_bytes
    """

    def __init__(self, from_repository, to_format):
        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
        self._text_keys = None
        self._text_fetch_order = 'unordered'

    def _get_filtered_inv_stream(self, revision_ids):
        from_repo = self.from_repository
        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
        parent_keys = [(p,) for p in parent_ids]
        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
        parent_text_keys = set(find_text_keys(
            from_repo._inventory_xml_lines_for_keys(parent_keys)))
        content_text_keys = set()
        knit = KnitVersionedFiles(None, None)
        factory = KnitPlainFactory()
        def find_text_keys_from_content(record):
            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
                raise ValueError("Unknown content storage kind for"
                    " inventory text: %s" % (record.storage_kind,))
            # It's a knit record, it has a _raw_record field (even if it was
            # reconstituted from a network stream).
            raw_data = record._raw_record
            # read the entire thing
            revision_id = record.key[-1]
            content, _ = knit._parse_record(revision_id, raw_data)
            if record.storage_kind == 'knit-delta-gz':
                line_iterator = factory.get_linedelta_content(content)
            elif record.storage_kind == 'knit-ft-gz':
                line_iterator = factory.get_fulltext_content(content)
            content_text_keys.update(find_text_keys(
                [(line, revision_id) for line in line_iterator]))
        revision_keys = [(r,) for r in revision_ids]
        def _filtered_inv_stream():
            source_vf = from_repo.inventories
            stream = source_vf.get_record_stream(revision_keys,
                                                 'unordered', True)
            for record in stream:
                if record.storage_kind == 'absent':
                    raise errors.NoSuchRevision(from_repo, record.key)
                find_text_keys_from_content(record)
                yield record
            self._text_keys = content_text_keys - parent_text_keys
        return ('inventories', _filtered_inv_stream())
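
    # _filtered_inv_stream() populates self._text_keys as a side effect while
    # its records are consumed, so the inventory stream must be drained before
    # _get_text_stream() runs; get_stream() below yields them in that order.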

    def _get_text_stream(self):
        # Note: We know we don't have to handle adding root keys, because both
        # the source and target are the identical network name.
        text_stream = self.from_repository.texts.get_record_stream(
            self._text_keys, self._text_fetch_order, False)
        return ('texts', text_stream)

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_filtered_inv_stream(revision_ids)
        yield self._get_text_stream()


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.
    """


class RepositoryFormatKnitPack5(RepositoryFormatPack):
    """Repository that supports external references to allow stacking.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('1.6')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 5 (adds stacking support, requires bzr 1.6)"


class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
    """A repository with rich roots and stacking.

    New in release 1.6.1.

    Supports stacking on other repositories, allowing data to be accessed
    without being stored locally.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"

    def get_format_description(self):
        return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"


class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
    """A repository with rich roots and external references.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.

    This format was deprecated because the serializer it uses accidentally
    supported subtrees, when the format was not intended to. This meant that
    someone could accidentally fetch from an incorrect repository.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        matching = bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')
        matching.repository_format = self
        return matching

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"

    def get_format_description(self):
        return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
                " (deprecated)")
class RepositoryFormatKnitPack6(RepositoryFormatPack):
2813
"""A repository with stacking and btree indexes,
2814
without rich roots or subtrees.
2816
This is equivalent to pack-1.6 with B+Tree indices.
2819
repository_class = KnitPackRepository
2820
_commit_builder_class = PackCommitBuilder
2821
supports_external_lookups = True
2822
# What index classes to use
2823
index_builder_class = BTreeBuilder
2824
index_class = BTreeGraphIndex
2827
def _serializer(self):
2828
return xml5.serializer_v5
2830
def _get_matching_bzrdir(self):
2831
return bzrdir.format_registry.make_bzrdir('1.9')
2833
def _ignore_setting_bzrdir(self, format):
2836
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2838
def get_format_string(self):
2839
"""See RepositoryFormat.get_format_string()."""
2840
return "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n"
2842
def get_format_description(self):
2843
"""See RepositoryFormat.get_format_description()."""
2844
return "Packs 6 (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
    """A repository with rich roots, no subtrees, stacking and btree indexes.

    1.6-rich-root with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.9-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"

    def get_format_description(self):
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    1.6.1-subtree[as it might have been] with B+Tree indices.

    This is [now] retained until we have a CHK based subtree format in
    development.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    experimental = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 2 with subtree support "
            "(needs bzr.dev from before 1.8)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1-subtree with B+Tree indices.\n")