            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()
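    # Usage sketch (assuming the standard bzrlib Transport.get() API; the
    # 'a_pack' name below is purely illustrative):
    #
    #     transport, name = a_pack.access_tuple()
    #     pack_bytes = transport.get(name).read()
    #
    # i.e. the tuple is everything a reader needs to locate the .pack file.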
    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))
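    # A minimal sketch of the reference check above, using hypothetical
    # (file_id, revision_id) keys in plain sets rather than real indices:
    #
    #     external_refs = set([('file-id', 'rev-base')])
    #     present_in_collection = set([('file-id', 'rev-base')])
    #     missing = external_refs - present_in_collection
    #     # empty 'missing' -> no BzrCheckError is raised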
    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)
    def _replace_index_with_readonly(self, index_type):
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)]))
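    # The read-only index opened above is found purely by naming convention:
    # pack name plus a per-type suffix. A sketch with an illustrative name:
    #
    #     suffixes = {'revision': '.rix', 'inventory': '.iix',
    #                 'text': '.tix', 'signature': '.six'}
    #     name = 'd41d8cd98f00b204e9800998ecf8427e'
    #     index_file = name + suffixes['text']    # -> '<name>.tix'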

class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""
    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)
263
class ResumedPack(ExistingPack):
265
def __init__(self, name, revision_index, inventory_index, text_index,
266
signature_index, upload_transport, pack_transport, index_transport,
267
pack_collection, chk_index=None):
268
"""Create a ResumedPack object."""
269
ExistingPack.__init__(self, pack_transport, name, revision_index,
270
inventory_index, text_index, signature_index,
272
self.upload_transport = upload_transport
273
self.index_transport = index_transport
274
self.index_sizes = [None, None, None, None]
276
('revision', revision_index),
277
('inventory', inventory_index),
278
('text', text_index),
279
('signature', signature_index),
281
if chk_index is not None:
282
indices.append(('chk', chk_index))
283
self.index_sizes.append(None)
284
for index_type, index in indices:
285
offset = self.index_offset(index_type)
286
self.index_sizes[offset] = index._size
287
self.index_class = pack_collection._index_class
288
self._pack_collection = pack_collection
289
self._state = 'resumed'
290
# XXX: perhaps check that the .pack file exists?
292
def access_tuple(self):
293
if self._state == 'finished':
294
return Pack.access_tuple(self)
295
elif self._state == 'resumed':
296
return self.upload_transport, self.file_name()
298
raise AssertionError(self._state)
301
self.upload_transport.delete(self.file_name())
302
indices = [self.revision_index, self.inventory_index, self.text_index,
303
self.signature_index]
304
if self.chk_index is not None:
305
indices.append(self.chk_index)
306
for index in indices:
307
index._transport.delete(index._name)
310
self._check_references()
311
index_types = ['revision', 'inventory', 'text', 'signature']
312
if self.chk_index is not None:
313
index_types.append('chk')
314
for index_type in index_types:
315
old_name = self.index_name(index_type, self.name)
316
new_name = '../indices/' + old_name
317
self.upload_transport.rename(old_name, new_name)
318
self._replace_index_with_readonly(index_type)
319
new_name = '../packs/' + self.file_name()
320
self.upload_transport.rename(self.file_name(), new_name)
321
self._state = 'finished'
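        # Sketch of the renames performed above, relative to the upload
        # transport (the pack name shown is illustrative only):
        #
        #   <name>.rix, <name>.iix, ...  ->  ../indices/<name>.<suffix>
        #   <name>.pack                  ->  ../packs/<name>.pack
        #
        # after which each in-memory index is swapped for a read-only one.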
323
def _get_external_refs(self, index):
324
"""Return compression parents for this index that are not present.
326
This returns any compression parents that are referenced by this index,
327
which are not contained *in* this index. They may be present elsewhere.
329
return index.external_references(1)

class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""
    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()
        if not suspend:
            self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller. On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision', suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
            upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack
    def _update_pack_order(self, entries, index_to_pack_map):
        """Determine how we want our packs to be ordered.

        This changes the sort order of the self.packs list so that packs unused
        by 'entries' will be at the end of the list, so that future requests
        can avoid probing them. Used packs will be at the front of the
        self.packs list, in the order of their first use in 'entries'.

        :param entries: A list of (index, ...) tuples
        :param index_to_pack_map: A mapping from index objects to pack objects.
        """
        packs = []
        seen_indexes = set()
        for entry in entries:
            index = entry[0]
            if index not in seen_indexes:
                packs.append(index_to_pack_map[index])
                seen_indexes.add(index)
        if len(packs) == len(self.packs):
            if 'pack' in debug.debug_flags:
                mutter('Not changing pack list, all packs used.')
            return
        seen_packs = set(packs)
        for pack in self.packs:
            if pack not in seen_packs:
                packs.append(pack)
                seen_packs.add(pack)
        if 'pack' in debug.debug_flags:
            old_names = [p.access_tuple()[1] for p in self.packs]
            new_names = [p.access_tuple()[1] for p in packs]
            mutter('Reordering packs\nfrom: %s\n to: %s',
                   old_names, new_names)
        self.packs = packs
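    # A standalone sketch of the reordering above over plain pack names
    # (hypothetical data): packs keep their first-use order, unused packs
    # move to the back.
    #
    #     used_order = ['p2', 'p1']            # first use seen in 'entries'
    #     all_packs = ['p1', 'p2', 'p3']
    #     reordered = used_order + [p for p in all_packs if p not in used_order]
    #     # reordered == ['p2', 'p1', 'p3']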
801
597
def _copy_revision_texts(self):
802
598
"""Copy revision data to the new pack."""
945
740
self._pack_collection.allocate(new_pack)
948
def _copy_chks(self, refs=None):
949
# XXX: Todo, recursive follow-pointers facility when fetching some
951
chk_index_map, chk_indices = self._pack_map_and_index_list(
953
chk_nodes = self._index_contents(chk_indices, refs)
955
# TODO: This isn't strictly tasteful as we are accessing some private
956
# variables (_serializer). Perhaps a better way would be to have
957
# Repository._deserialise_chk_node()
958
search_key_func = chk_map.search_key_registry.get(
959
self._pack_collection.repo._serializer.search_key_name)
960
def accumlate_refs(lines):
961
# XXX: move to a generic location
963
bytes = ''.join(lines)
964
node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
965
new_refs.update(node.refs())
966
self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
967
self.new_pack.chk_index, output_lines=accumlate_refs)
970
def _copy_nodes(self, nodes, index_map, writer, write_index,
972
"""Copy knit nodes between packs with no graph references.
974
:param output_lines: Output full texts of copied items.
976
745
pb = ui.ui_factory.nested_progress_bar()
978
747
return self._do_copy_nodes(nodes, index_map, writer,
979
write_index, pb, output_lines=output_lines)
983
def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
985
753
# for record verification
986
754
knit = KnitVersionedFiles(None, None)
987
755
# plan a readv on each source pack:
1008
776
# linear scan up the pack
1009
777
pack_readv_requests.sort()
1011
pack_obj = index_map[index]
1012
transport, path = pack_obj.access_tuple()
1014
reader = pack.make_readv_reader(transport, path,
1015
[offset[0:2] for offset in pack_readv_requests])
1016
except errors.NoSuchFile:
1017
if self._reload_func is not None:
1020
782
for (names, read_func), (_1, _2, (key, eol_flag)) in \
1021
783
izip(reader.iter_records(), pack_readv_requests):
1022
784
raw_data = read_func(None)
1023
785
# check the header only
1024
if output_lines is not None:
1025
output_lines(knit._parse_record(key[-1], raw_data)[0])
1027
df, _ = knit._parse_record_header(key, raw_data)
1029
788
pos, size = writer.add_bytes_record(raw_data, names)
1030
789
write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
1031
790
pb.update("Copied record", record_index)
1358
1101
class RepositoryPackCollection(object):
1359
1102
"""Management of packs within a repository.
1361
1104
:ivar _names: map of {pack_name: (index_size,)}
1364
pack_factory = NewPack
1365
resumed_pack_factory = ResumedPack
1367
1107
def __init__(self, repo, transport, index_transport, upload_transport,
1368
pack_transport, index_builder_class, index_class,
1370
1109
"""Create a new RepositoryPackCollection.
1372
:param transport: Addresses the repository base directory
1373
1112
(typically .bzr/repository/).
1374
1113
:param index_transport: Addresses the directory containing indices.
1375
1114
:param upload_transport: Addresses the directory into which packs are written
1376
1115
while they're being created.
1377
1116
:param pack_transport: Addresses the directory of existing complete packs.
1378
:param index_builder_class: The index builder class to use.
1379
:param index_class: The index class to use.
1380
:param use_chk_index: Whether to setup and manage a CHK index.
1382
# XXX: This should call self.reset()
1383
1118
self.repo = repo
1384
1119
self.transport = transport
1385
1120
self._index_transport = index_transport
1386
1121
self._upload_transport = upload_transport
1387
1122
self._pack_transport = pack_transport
1388
self._index_builder_class = index_builder_class
1389
self._index_class = index_class
1390
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
1392
1124
self.packs = []
1393
1125
# name:Pack mapping
1395
1126
self._packs_by_name = {}
1396
1127
# the previous pack-names content
1397
1128
self._packs_at_load = None
1398
1129
# when a pack is being created by this object, the state of that pack.
1399
1130
self._new_pack = None
1400
1131
# aggregated revision index data
1401
flush = self._flush_new_pack
1402
self.revision_index = AggregateIndex(self.reload_pack_names, flush)
1403
self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
1404
self.text_index = AggregateIndex(self.reload_pack_names, flush)
1405
self.signature_index = AggregateIndex(self.reload_pack_names, flush)
1407
self.chk_index = AggregateIndex(self.reload_pack_names, flush)
1409
# used to determine if we're using a chk_index elsewhere.
1410
self.chk_index = None
1412
self._resumed_packs = []
1414
1137
def add_pack_to_memory(self, pack):
1415
1138
"""Make a Pack object available to the repository to satisfy queries.
1417
1140
:param pack: A Pack object.
1419
1142
if pack.name in self._packs_by_name:
1420
raise AssertionError(
1421
'pack %s already in _packs_by_name' % (pack.name,))
1422
1144
self.packs.append(pack)
1423
1145
self._packs_by_name[pack.name] = pack
1424
1146
self.revision_index.add_index(pack.revision_index, pack)
1425
1147
self.inventory_index.add_index(pack.inventory_index, pack)
1426
1148
self.text_index.add_index(pack.text_index, pack)
1427
1149
self.signature_index.add_index(pack.signature_index, pack)
1428
if self.chk_index is not None:
1429
self.chk_index.add_index(pack.chk_index, pack)
1431
1151
def all_packs(self):
1432
1152
"""Return a list of all the Pack objects this repository has.
1483
1199
# group their data with the relevant commit, and that may
1484
1200
# involve rewriting ancient history - which autopack tries to
1485
1201
# avoid. Alternatively we could not group the data but treat
1486
# each of these as having a single revision, and thus add
1487
1203
# one revision for each to the total revision count, to get
1488
1204
# a matching distribution.
1490
1206
existing_packs.append((revision_count, pack))
1491
1207
pack_operations = self.plan_autopack_combinations(
1492
1208
existing_packs, pack_distribution)
1493
num_new_packs = len(pack_operations)
1494
num_old_packs = sum([len(po[1]) for po in pack_operations])
1495
num_revs_affected = sum([po[0] for po in pack_operations])
1496
mutter('Auto-packing repository %s, which has %d pack files, '
1497
'containing %d revisions. Packing %d files into %d affecting %d'
1498
' revisions', self, total_packs, total_revisions, num_old_packs,
1499
num_new_packs, num_revs_affected)
1500
result = self._execute_pack_operations(pack_operations,
1501
reload_func=self._restart_autopack)
1502
mutter('Auto-packing repository %s completed', self)
1505
def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
1507
1213
"""Execute a series of pack operations.
1509
1215
:param pack_operations: A list of [revision_count, packs_to_combine].
1510
1216
:param _packer_class: The class of packer to use (default: Packer).
1511
:return: The new pack names.
1513
1219
for revision_count, packs in pack_operations:
1514
1220
# we may have no-ops from the setup logic
1515
1221
if len(packs) == 0:
1517
packer = _packer_class(self, packs, '.autopack',
1518
reload_func=reload_func)
1521
except errors.RetryWithNewPacks:
1522
# An exception is propagating out of this context, make sure
1523
# this packer has cleaned up. Packer() doesn't set its new_pack
1524
# state into the RepositoryPackCollection object, so we only
1525
# have access to it directly here.
1526
if packer.new_pack is not None:
1527
packer.new_pack.abort()
1529
1224
for pack in packs:
1530
1225
self._remove_pack_from_memory(pack)
1531
1226
# record the newly available packs and stop advertising the old
1533
result = self._save_pack_names(clear_obsolete_packs=True)
1534
1229
# Move the old packs out of the way now they are no longer referenced.
1535
1230
for revision_count, packs in pack_operations:
1536
1231
self._obsolete_packs(packs)
1539
def _flush_new_pack(self):
1540
if self._new_pack is not None:
1541
self._new_pack.flush()
1543
1233
def lock_names(self):
1544
1234
"""Acquire the mutex around the pack-names index.
1546
1236
This cannot be used in the middle of a read-only transaction on the
1549
1239
self.repo.control_files.lock_write()
1551
def _already_packed(self):
1552
"""Is the collection already packed?"""
1553
return not (self.repo._format.pack_compresses or (len(self._names) > 1))
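    # In other words a repack is a no-op when there is a single pack file and
    # the format's pack() cannot compress content further. With hypothetical
    # values:
    #
    #     pack_compresses = False
    #     num_packs = 1
    #     already_packed = not (pack_compresses or num_packs > 1)    # True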
1555
def pack(self, hint=None):
1556
1242
"""Pack the pack collection totally."""
1557
1243
self.ensure_loaded()
1558
1244
total_packs = len(self._names)
1559
if self._already_packed():
1246
# This is arguably wrong because we might not be optimal, but for
1247
# now lets leave it in. (e.g. reconcile -> one pack. But not
1561
1250
total_revisions = self.revision_index.combined_index.key_count()
1562
1251
# XXX: the following may want to be a class, to pack with a given
1564
1253
mutter('Packing repository %s, which has %d pack files, '
1565
'containing %d revisions with hint %r.', self, total_packs,
1566
total_revisions, hint)
1567
1256
# determine which packs need changing
1568
1258
pack_operations = [[0, []]]
1569
1259
for pack in self.all_packs():
1570
if not hint or pack.name in hint:
1571
pack_operations[-1][0] += pack.get_revision_count()
1572
pack_operations[-1][1].append(pack)
1573
1262
self._execute_pack_operations(pack_operations, OptimisingPacker)
1575
1264
def plan_autopack_combinations(self, existing_packs, pack_distribution):
1664
1335
inv_index = self._make_index(name, '.iix')
1665
1336
txt_index = self._make_index(name, '.tix')
1666
1337
sig_index = self._make_index(name, '.six')
1667
if self.chk_index is not None:
1668
chk_index = self._make_index(name, '.cix')
1671
1338
result = ExistingPack(self._pack_transport, name, rev_index,
1672
inv_index, txt_index, sig_index, chk_index)
1673
1340
self.add_pack_to_memory(result)
1676
def _resume_pack(self, name):
1677
"""Get a suspended Pack object by name.
1679
:param name: The name of the pack - e.g. '123456'
1680
:return: A Pack object.
1682
if not re.match('[a-f0-9]{32}', name):
1683
# Tokens should be md5sums of the suspended pack file, i.e. 32 hex
1685
raise errors.UnresumableWriteGroup(
1686
self.repo, [name], 'Malformed write group token')
1688
rev_index = self._make_index(name, '.rix', resume=True)
1689
inv_index = self._make_index(name, '.iix', resume=True)
1690
txt_index = self._make_index(name, '.tix', resume=True)
1691
sig_index = self._make_index(name, '.six', resume=True)
1692
if self.chk_index is not None:
1693
chk_index = self._make_index(name, '.cix', resume=True)
1696
result = self.resumed_pack_factory(name, rev_index, inv_index,
1697
txt_index, sig_index, self._upload_transport,
1698
self._pack_transport, self._index_transport, self,
1699
chk_index=chk_index)
1700
except errors.NoSuchFile, e:
1701
raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
1702
self.add_pack_to_memory(result)
1703
self._resumed_packs.append(result)
1706
1343
def allocate(self, a_new_pack):
1707
1344
"""Allocate name in the list of packs.
1841
1468
self._packs_by_name = {}
1842
1469
self._packs_at_load = None
1471
def _make_index_map(self, index_suffix):
1472
"""Return information on existing indices.
1474
:param suffix: Index suffix added to pack name.
1476
:returns: (pack_map, indices) where indices is a list of GraphIndex
1477
objects, and pack_map is a mapping from those objects to the
1478
pack tuple they describe.
1480
# TODO: stop using this; it creates new indices unnecessarily.
1481
self.ensure_loaded()
1482
suffix_map = {'.rix': 'revision_index',
1483
'.six': 'signature_index',
1484
'.iix': 'inventory_index',
1485
'.tix': 'text_index',
1487
return self._packs_list_to_pack_map_and_index_list(self.all_packs(),
1488
suffix_map[index_suffix])
1490
def _packs_list_to_pack_map_and_index_list(self, packs, index_attribute):
1491
"""Convert a list of packs to an index pack map and index list.
1493
:param packs: The packs list to process.
1494
:param index_attribute: The attribute that the desired index is found
1496
:return: A tuple (map, list) where map contains the dict from
1497
index:pack_tuple, and lsit contains the indices in the same order
1503
index = getattr(pack, index_attribute)
1504
indices.append(index)
1505
pack_map[index] = (pack.pack_transport, pack.file_name())
1506
return pack_map, indices
1508
def _index_contents(self, pack_map, key_filter=None):
1509
"""Get an iterable of the index contents from a pack_map.
1511
:param pack_map: A map from indices to pack details.
1512
:param key_filter: An optional filter to limit the
1515
indices = [index for index in pack_map.iterkeys()]
1516
all_index = CombinedGraphIndex(indices)
1517
if key_filter is None:
1518
return all_index.iter_all_entries()
1520
return all_index.iter_entries(key_filter)
1844
1522
def _unlock_names(self):
1845
1523
"""Release the mutex around the pack-names index."""
1846
1524
self.repo.control_files.unlock()
1848
def _diff_pack_names(self):
1849
"""Read the pack names from disk, and compare it to the one in memory.
1851
:return: (disk_nodes, deleted_nodes, new_nodes)
1852
disk_nodes The final set of nodes that should be referenced
1853
deleted_nodes Nodes which have been removed from when we started
1854
new_nodes Nodes that are newly introduced
1856
# load the disk nodes across
1858
for index, key, value in self._iter_disk_pack_index():
1859
disk_nodes.add((key, value))
1861
# do a two-way diff against our original content
1862
current_nodes = set()
1863
for name, sizes in self._names.iteritems():
1865
((name, ), ' '.join(str(size) for size in sizes)))
1867
# Packs no longer present in the repository, which were present when we
1868
# locked the repository
1869
deleted_nodes = self._packs_at_load - current_nodes
1870
# Packs which this process is adding
1871
new_nodes = current_nodes - self._packs_at_load
1873
# Update the disk_nodes set to include the ones we are adding, and
1874
# remove the ones which were removed by someone else
1875
disk_nodes.difference_update(deleted_nodes)
1876
disk_nodes.update(new_nodes)
1878
return disk_nodes, deleted_nodes, new_nodes
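    # A self-contained sketch of the merge above with hypothetical pack
    # names: our in-memory adds/removes since load are replayed on top of
    # whatever the pack-names file says right now.
    #
    #     at_load = set(['a', 'b'])      # pack-names content when we locked
    #     ours = set(['a', 'c'])         # our current in-memory list
    #     disk = set(['a', 'b', 'd'])    # pack-names content on disk now
    #     deleted = at_load - ours       # {'b'}: packs we removed
    #     new = ours - at_load           # {'c'}: packs we added
    #     disk = (disk - deleted) | new  # {'a', 'c', 'd'}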
1880
def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
1881
"""Given the correct set of pack files, update our saved info.
1883
:return: (removed, added, modified)
1884
removed pack names removed from self._names
1885
added pack names added to self._names
1886
modified pack names that had changed value
1891
## self._packs_at_load = disk_nodes
1892
1567
new_names = dict(disk_nodes)
1893
1568
# drop no longer present nodes
1894
1569
for pack in self.all_packs():
1895
1570
if (pack.name,) not in new_names:
1896
removed.append(pack.name)
1897
1571
self._remove_pack_from_memory(pack)
1898
1572
# add new nodes/refresh existing ones
1899
1573
for key, value in disk_nodes:
1913
1587
self._remove_pack_from_memory(self.get_pack_by_name(name))
1914
1588
self._names[name] = sizes
1915
1589
self.get_pack_by_name(name)
1916
modified.append(name)
1919
1592
self._names[name] = sizes
1920
1593
self.get_pack_by_name(name)
1922
return removed, added, modified
1924
def _save_pack_names(self, clear_obsolete_packs=False):
1925
"""Save the list of packs.
1927
This will take out the mutex around the pack names list for the
1928
duration of the method call. If concurrent updates have been made, a
1929
three-way merge between the current list and the current in memory list
1932
:param clear_obsolete_packs: If True, clear out the contents of the
1933
obsolete_packs directory.
1934
:return: A list of the names saved that were not previously on disk.
1938
builder = self._index_builder_class()
1939
disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
1940
# TODO: handle same-name, index-size-changes here -
1941
# e.g. use the value from disk, not ours, *unless* we're the one
1943
for key, value in disk_nodes:
1944
builder.add_node(key, value)
1945
self.transport.put_file('pack-names', builder.finish(),
1946
mode=self.repo.bzrdir._get_file_mode())
1947
# move the baseline forward
1948
self._packs_at_load = disk_nodes
1949
if clear_obsolete_packs:
1950
self._clear_obsolete_packs()
1952
self._unlock_names()
1953
# synchronise the memory packs list with what we just wrote:
1954
self._syncronize_pack_names_from_disk_nodes(disk_nodes)
1955
return [new_node[0][0] for new_node in new_nodes]
1957
def reload_pack_names(self):
1958
"""Sync our pack listing with what is present in the repository.
1960
This should be called when we find out that something we thought was
1961
present is now missing. This happens when another process re-packs the
1964
:return: True if the in-memory list of packs has been altered at all.
1966
# The ensure_loaded call is to handle the case where the first call
1967
# made involving the collection was to reload_pack_names, where we
1968
# don't have a view of disk contents. Its a bit of a bandaid, and
1969
# causes two reads of pack-names, but its a rare corner case not struck
1970
# with regular push/pull etc.
1971
first_read = self.ensure_loaded()
1974
# out the new value.
1975
disk_nodes, _, _ = self._diff_pack_names()
1976
self._packs_at_load = disk_nodes
1978
modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
1979
if removed or added or modified:
1983
def _restart_autopack(self):
1984
"""Reload the pack names list, and restart the autopack code."""
1985
if not self.reload_pack_names():
1986
# Re-raise the original exception, because something went missing
1987
# and a restart didn't find it
1989
raise errors.RetryAutopack(self.repo, False, sys.exc_info())
1991
1595
def _clear_obsolete_packs(self):
1992
1596
"""Delete everything from the obsolete-packs directory.
2029
1628
# FIXME: just drop the transient index.
2030
1629
# forget what names there are
2031
1630
if self._new_pack is not None:
2033
self._new_pack.abort()
2035
# XXX: If we aborted while in the middle of finishing the write
2036
# group, _remove_pack_indices can fail because the indexes are
2037
# already gone. If they're not there we shouldn't fail in this
2038
# case. -- mbp 20081113
2039
self._remove_pack_indices(self._new_pack)
2040
self._new_pack = None
2041
for resumed_pack in self._resumed_packs:
2043
resumed_pack.abort()
2045
# See comment in previous finally block.
2047
self._remove_pack_indices(resumed_pack)
2050
del self._resumed_packs[:]
2052
def _remove_resumed_pack_indices(self):
2053
for resumed_pack in self._resumed_packs:
2054
self._remove_pack_indices(resumed_pack)
2055
del self._resumed_packs[:]
2057
1636
def _commit_write_group(self):
2059
for prefix, versioned_file in (
2060
('revisions', self.repo.revisions),
2061
('inventories', self.repo.inventories),
2062
('texts', self.repo.texts),
2063
('signatures', self.repo.signatures),
2065
missing = versioned_file.get_missing_compression_parent_keys()
2066
all_missing.update([(prefix,) + key for key in missing])
2068
raise errors.BzrCheckError(
2069
"Repository %s has missing compression parent(s) %r "
2070
% (self.repo, sorted(all_missing)))
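        # The keys reported above are versioned-file keys with their prefix
        # prepended, so a missing text delta base would surface as something
        # like ('texts', 'a-file-id', 'a-revision-id') (illustrative values).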
2071
1637
self._remove_pack_indices(self._new_pack)
2072
should_autopack = False
2073
1638
if self._new_pack.data_inserted():
2074
1639
# get all the data to disk and read to use
2075
1640
self._new_pack.finish()
2076
1641
self.allocate(self._new_pack)
2077
1642
self._new_pack = None
2078
should_autopack = True
2080
self._new_pack.abort()
2081
self._new_pack = None
2082
for resumed_pack in self._resumed_packs:
2083
# XXX: this is a pretty ugly way to turn the resumed pack into a
2084
# properly committed pack.
2085
self._names[resumed_pack.name] = None
2086
self._remove_pack_from_memory(resumed_pack)
2087
resumed_pack.finish()
2088
self.allocate(resumed_pack)
2089
should_autopack = True
2090
del self._resumed_packs[:]
2092
1643
if not self.autopack():
2093
1644
# when autopack takes no steps, the names list is still
2095
return self._save_pack_names()
2097
def _suspend_write_group(self):
2098
tokens = [pack.name for pack in self._resumed_packs]
2099
self._remove_pack_indices(self._new_pack)
2100
if self._new_pack.data_inserted():
2101
# get all the data to disk and read to use
2102
self._new_pack.finish(suspend=True)
2103
tokens.append(self._new_pack.name)
2104
self._new_pack = None
2106
1648
self._new_pack.abort()
2107
1649
self._new_pack = None
2108
self._remove_resumed_pack_indices()
2111
def _resume_write_group(self, tokens):
2112
for token in tokens:
2113
self._resume_pack(token)
2116
1653
class KnitPackRepository(KnitRepository):
2117
1654
"""Repository with knit objects stored inside pack containers.
2119
1656
The layering for a KnitPackRepository is:
2121
1658
Graph | HPSS | Repository public layer |
2229
1742
revision_nodes = self._pack_collection.revision_index \
2230
1743
.combined_index.iter_all_entries()
2231
1744
index_positions = []
2232
# Get the cached index values for all revisions, and also the
2233
# location in each index of the revision text so we can perform
2235
1747
for index, key, value, refs in revision_nodes:
2236
node = (index, key, value, refs)
2237
index_memo = self.revisions._index._node_to_position(node)
2238
if index_memo[0] != index:
2239
raise AssertionError('%r != %r' % (index_memo[0], index))
2240
index_positions.append((index_memo, key[0],
2241
tuple(parent[0] for parent in refs[0])))
2242
pb.update("Reading revision index", 0, 0)
2243
1752
index_positions.sort()
2245
pb.update("Checking cached revision graph", 0,
2246
len(index_positions))
2247
for offset in xrange(0, len(index_positions), 1000):
2248
pb.update("Checking cached revision graph", offset)
2249
to_query = index_positions[offset:offset + batch_size]
2250
1758
if not to_query:
2252
rev_ids = [item[1] for item in to_query]
2253
1761
revs = self.get_revisions(rev_ids)
2254
1762
for revision, item in zip(revs, to_query):
2255
index_parents = item[2]
2256
1764
rev_parents = tuple(revision.parent_ids)
2257
1765
if index_parents != rev_parents:
                result.append((revision.revision_id, index_parents,
                    rev_parents))
2264
def _get_source(self, to_format):
2265
if to_format.network_name() == self._format.network_name():
2266
return KnitPackStreamSource(self, to_format)
2267
return super(KnitPackRepository, self)._get_source(to_format)
1771
@symbol_versioning.deprecated_method(symbol_versioning.one_one)
1772
def get_parents(self, revision_ids):
1773
"""See graph._StackedParentsProvider.get_parents."""
1774
parent_map = self.get_parent_map(revision_ids)
1775
return [parent_map.get(r, None) for r in revision_ids]
2269
1777
def _make_parents_provider(self):
2270
1778
return graph.CachingParentsProvider(self)
2272
1780
def _refresh_data(self):
2273
if not self.is_locked():
2275
self._pack_collection.reload_pack_names()
2277
1790
def _start_write_group(self):
2278
1791
self._pack_collection._start_write_group()
2280
1793
def _commit_write_group(self):
2281
self.revisions._index._key_dependencies.refs.clear()
2282
1794
return self._pack_collection._commit_write_group()
2284
def suspend_write_group(self):
2285
# XXX check self._write_group is self.get_transaction()?
2286
tokens = self._pack_collection._suspend_write_group()
2287
self.revisions._index._key_dependencies.refs.clear()
2288
self._write_group = None
2291
def _resume_write_group(self, tokens):
2292
self._start_write_group()
2294
self._pack_collection._resume_write_group(tokens)
2295
except errors.UnresumableWriteGroup:
2296
self._abort_write_group()
2298
for pack in self._pack_collection._resumed_packs:
2299
self.revisions._index.scan_unvalidated_index(pack.revision_index)
2301
1796
def get_transaction(self):
2302
1797
if self._write_lock_count:
2303
1798
return self._transaction
2377
1865
transaction = self._transaction
2378
1866
self._transaction = None
2379
1867
transaction.finish()
1868
for repo in self._fallback_repositories:
2381
1871
self.control_files.unlock()
2383
if not self.is_locked():
2384
1872
for repo in self._fallback_repositories:
2388
class KnitPackStreamSource(StreamSource):
2389
"""A StreamSource used to transfer data between same-format KnitPack repos.
2391
This source assumes:
2392
1) Same serialization format for all objects
2393
2) Same root information
2394
3) XML format inventories
2395
4) Atomic inserts (so we can stream inventory texts before text
2400
def __init__(self, from_repository, to_format):
2401
super(KnitPackStreamSource, self).__init__(from_repository, to_format)
2402
self._text_keys = None
2403
self._text_fetch_order = 'unordered'
2405
def _get_filtered_inv_stream(self, revision_ids):
2406
from_repo = self.from_repository
2407
parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
2408
parent_keys = [(p,) for p in parent_ids]
2409
find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
2410
parent_text_keys = set(find_text_keys(
2411
from_repo._inventory_xml_lines_for_keys(parent_keys)))
2412
content_text_keys = set()
2413
knit = KnitVersionedFiles(None, None)
2414
factory = KnitPlainFactory()
2415
def find_text_keys_from_content(record):
2416
if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
2417
raise ValueError("Unknown content storage kind for"
2418
" inventory text: %s" % (record.storage_kind,))
2419
# It's a knit record, it has a _raw_record field (even if it was
2420
# reconstituted from a network stream).
2421
raw_data = record._raw_record
2422
# read the entire thing
2423
revision_id = record.key[-1]
2424
content, _ = knit._parse_record(revision_id, raw_data)
2425
if record.storage_kind == 'knit-delta-gz':
2426
line_iterator = factory.get_linedelta_content(content)
2427
elif record.storage_kind == 'knit-ft-gz':
2428
line_iterator = factory.get_fulltext_content(content)
2429
content_text_keys.update(find_text_keys(
2430
[(line, revision_id) for line in line_iterator]))
2431
revision_keys = [(r,) for r in revision_ids]
2432
def _filtered_inv_stream():
2433
source_vf = from_repo.inventories
2434
stream = source_vf.get_record_stream(revision_keys,
2436
for record in stream:
2437
if record.storage_kind == 'absent':
2438
raise errors.NoSuchRevision(from_repo, record.key)
2439
find_text_keys_from_content(record)
2441
self._text_keys = content_text_keys - parent_text_keys
2442
return ('inventories', _filtered_inv_stream())
2444
def _get_text_stream(self):
2445
# Note: We know we don't have to handle adding root keys, because both
2446
# the source and target are the identical network name.
2447
text_stream = self.from_repository.texts.get_record_stream(
2448
self._text_keys, self._text_fetch_order, False)
2449
return ('texts', text_stream)
2451
def get_stream(self, search):
2452
revision_ids = search.get_keys()
2453
for stream_info in self._fetch_revision_texts(revision_ids):
2455
self._revision_keys = [(rev_id,) for rev_id in revision_ids]
2456
yield self._get_filtered_inv_stream(revision_ids)
2457
yield self._get_text_stream()
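    # Consumers therefore see the substreams in dependency order, roughly:
    #
    #     for name, records in source.get_stream(search):
    #         ...   # revision substreams first, then 'inventories', then 'texts'
    #
    # 'texts' must come last because its keys are only collected while the
    # inventory records are parsed ('source' here is illustrative).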
2461
1876
class RepositoryFormatPack(MetaDirRepositoryFormat):
2462
1877
"""Format logic for pack structured repositories.
2703
2088
class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
2704
"""A repository with rich roots and stacking.
2706
New in release 1.6.1.
2708
Supports stacking on other repositories, allowing data to be accessed
2709
without being stored locally.
2712
repository_class = KnitPackRepository
2713
_commit_builder_class = PackRootCommitBuilder
2714
rich_root_data = True
2715
supports_tree_reference = False # no subtrees
2716
supports_external_lookups = True
2717
# What index classes to use
2718
index_builder_class = InMemoryGraphIndex
2719
index_class = GraphIndex
2722
def _serializer(self):
2723
return xml6.serializer_v6
2725
def _get_matching_bzrdir(self):
2726
return bzrdir.format_registry.make_bzrdir(
2729
def _ignore_setting_bzrdir(self, format):
2732
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2734
def check_conversion_target(self, target_format):
2735
if not target_format.rich_root_data:
2736
raise errors.BadConversionTarget(
2737
'Does not support rich root data.', target_format)
2739
def get_format_string(self):
2740
"""See RepositoryFormat.get_format_string()."""
2741
return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
2743
def get_format_description(self):
2744
return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"
2747
class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
2748
2089
"""A repository with rich roots and external references.
2750
2091
New in release 1.6.
2752
2093
Supports external lookups, which results in non-truncated ghosts after
2753
2094
reconcile compared to pack-0.92 formats.
2755
This format was deprecated because the serializer it uses accidentally
2756
supported subtrees, when the format was not intended to. This meant that
2757
someone could accidentally fetch from an incorrect repository.
2760
2097
repository_class = KnitPackRepository
2761
2098
_commit_builder_class = PackRootCommitBuilder
2762
2099
rich_root_data = True
2763
2100
supports_tree_reference = False # no subtrees
2765
2103
supports_external_lookups = True
2766
# What index classes to use
2767
index_builder_class = InMemoryGraphIndex
2768
index_class = GraphIndex
2771
def _serializer(self):
2772
return xml7.serializer_v7
2774
2105
def _get_matching_bzrdir(self):
2775
matching = bzrdir.format_registry.make_bzrdir(
2777
matching.repository_format = self
2780
2109
def _ignore_setting_bzrdir(self, format):
2786
2115
if not target_format.rich_root_data:
2787
2116
raise errors.BadConversionTarget(
2788
2117
'Does not support rich root data.', target_format)
2790
2119
def get_format_string(self):
2791
2120
"""See RepositoryFormat.get_format_string()."""
2792
2121
return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
2794
2123
def get_format_description(self):
2795
return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
2799
class RepositoryFormatKnitPack6(RepositoryFormatPack):
2800
"""A repository with stacking and btree indexes,
2801
without rich roots or subtrees.
2803
This is equivalent to pack-1.6 with B+Tree indices.
2806
2135
repository_class = KnitPackRepository
2807
2136
_commit_builder_class = PackCommitBuilder
2808
supports_external_lookups = True
2809
# What index classes to use
2810
index_builder_class = BTreeBuilder
2811
index_class = BTreeGraphIndex
2814
def _serializer(self):
2815
return xml5.serializer_v5
2817
2139
def _get_matching_bzrdir(self):
2818
return bzrdir.format_registry.make_bzrdir('1.9')
2820
2142
def _ignore_setting_bzrdir(self, format):
2825
2147
def get_format_string(self):
2826
2148
"""See RepositoryFormat.get_format_string()."""
2827
return "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n"
2829
2151
def get_format_description(self):
2830
2152
"""See RepositoryFormat.get_format_description()."""
2831
return "Packs 6 (uses btree indexes, requires bzr 1.9)"
2833
def check_conversion_target(self, target_format):
2837
class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
2838
"""A repository with rich roots, no subtrees, stacking and btree indexes.
2840
1.6-rich-root with B+Tree indices.
2843
repository_class = KnitPackRepository
2844
_commit_builder_class = PackRootCommitBuilder
2845
rich_root_data = True
2846
supports_tree_reference = False # no subtrees
2847
supports_external_lookups = True
2848
# What index classes to use
2849
index_builder_class = BTreeBuilder
2850
index_class = BTreeGraphIndex
2853
def _serializer(self):
2854
return xml6.serializer_v6
2856
def _get_matching_bzrdir(self):
2857
return bzrdir.format_registry.make_bzrdir(
2860
def _ignore_setting_bzrdir(self, format):
2863
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2865
def check_conversion_target(self, target_format):
2866
if not target_format.rich_root_data:
2867
raise errors.BadConversionTarget(
2868
'Does not support rich root data.', target_format)
2870
def get_format_string(self):
2871
"""See RepositoryFormat.get_format_string()."""
2872
return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
2874
def get_format_description(self):
2875
return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"
2878
class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
2879
2161
"""A subtrees development repository.
2881
This format should be retained until the second release after bzr 1.7.
2883
1.6.1-subtree[as it might have been] with B+Tree indices.
2885
This is [now] retained until we have a CHK based subtree format in
2889
2168
repository_class = KnitPackRepository
2890
2169
_commit_builder_class = PackRootCommitBuilder
2891
2170
rich_root_data = True
2892
2171
supports_tree_reference = True
2893
supports_external_lookups = True
2894
# What index classes to use
2895
index_builder_class = BTreeBuilder
2896
index_class = BTreeGraphIndex
2899
def _serializer(self):
2900
return xml7.serializer_v7
2902
def _get_matching_bzrdir(self):
2903
return bzrdir.format_registry.make_bzrdir(
2904
'development-subtree')
2906
def _ignore_setting_bzrdir(self, format):
2909
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2911
def check_conversion_target(self, target_format):
2912
if not target_format.rich_root_data:
2913
raise errors.BadConversionTarget(
2914
'Does not support rich root data.', target_format)
2915
if not getattr(target_format, 'supports_tree_reference', False):
2916
raise errors.BadConversionTarget(
2917
'Does not support nested trees', target_format)
2919
def get_format_string(self):
2920
"""See RepositoryFormat.get_format_string()."""
2921
return ("Bazaar development format 2 with subtree support "
2922
"(needs bzr.dev from before 1.8)\n")
2924
def get_format_description(self):
2925
"""See RepositoryFormat.get_format_description()."""
2926
return ("Development repository format, currently the same as "
2927
"1.6.1-subtree with B+Tree indices.\n")
2202
class RepositoryFormatPackDevelopment1(RepositoryFormatPackDevelopment0):
2203
"""A no-subtrees development repository.
2205
This format should be retained until the second release after bzr 1.5.
2207
Supports external lookups, which results in non-truncated ghosts after
2208
reconcile compared to pack-0.92 formats.
2211
supports_external_lookups = True
2213
def _get_matching_bzrdir(self):
2214
return bzrdir.format_registry.make_bzrdir('development1')
2216
def _ignore_setting_bzrdir(self, format):
2219
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2221
def get_format_string(self):
2222
"""See RepositoryFormat.get_format_string()."""
2223
return "Bazaar development format 1 (needs bzr.dev from before 1.6)\n"
2225
def get_format_description(self):
2226
"""See RepositoryFormat.get_format_description()."""
2227
return ("Development repository format, currently the same as "
2228
"pack-0.92 with external reference support.\n")
2230
def check_conversion_target(self, target_format):
2234
class RepositoryFormatPackDevelopment1Subtree(RepositoryFormatPackDevelopment0Subtree):
2235
"""A subtrees development repository.
2237
This format should be retained until the second release after bzr 1.5.
2239
Supports external lookups, which results in non-truncated ghosts after
2240
reconcile compared to pack-0.92 formats.
2243
supports_external_lookups = True
2245
def _get_matching_bzrdir(self):
2246
return bzrdir.format_registry.make_bzrdir(
2247
'development1-subtree')
2249
def _ignore_setting_bzrdir(self, format):
2252
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2254
def check_conversion_target(self, target_format):
2255
if not target_format.rich_root_data:
2256
raise errors.BadConversionTarget(
2257
'Does not support rich root data.', target_format)
2258
if not getattr(target_format, 'supports_tree_reference', False):
2259
raise errors.BadConversionTarget(
2260
'Does not support nested trees', target_format)
2262
def get_format_string(self):
2263
"""See RepositoryFormat.get_format_string()."""
2264
return ("Bazaar development format 1 with subtree support "
2265
"(needs bzr.dev from before 1.6)\n")
2267
def get_format_description(self):
2268
"""See RepositoryFormat.get_format_description()."""
2269
return ("Development repository format, currently the same as "
2270
"pack-0.92-subtree with external reference support.\n")