             texts/deltas (via (fileid, revisionid) tuples).
         :param signature_index: A GraphIndex for determining what signatures are
             present in the Pack and accessing the locations of their texts.
+        :param chk_index: A GraphIndex for accessing content by CHK, if the
         self.revision_index = revision_index
         self.inventory_index = inventory_index
         self.text_index = text_index
         self.signature_index = signature_index
+        self.chk_index = chk_index

     def access_tuple(self):
         """Return a tuple (transport, name) for the pack content."""
         return self.pack_transport, self.file_name()

+    def _check_references(self):
+        """Make sure our external references are present.
+        Packs are allowed to have deltas whose base is not in the pack, but it
+        must be present somewhere in this collection. It is not allowed to
+        have deltas based on a fallback repository.
+        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
+        for (index_name, external_refs, index) in [
+                self._get_external_refs(self.text_index),
+                self._pack_collection.text_index.combined_index),
+                self._get_external_refs(self.inventory_index),
+                self._pack_collection.inventory_index.combined_index),
+            missing = external_refs.difference(
+                k for (idx, k, v, r) in
+                index.iter_entries(external_refs))
+                missing_items[index_name] = sorted(list(missing))
+            from pprint import pformat
+            raise errors.BzrCheckError(
+                "Newly created pack file %r has delta references to "
+                "items not in its repository:\n%s"
+                % (self, pformat(missing_items)))
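The check above amounts to a set difference: every compression parent referenced by the pack's text and inventory indices must resolve somewhere in the collection, never only in a fallback repository. A minimal standalone sketch of that rule (plain Python with illustrative names, not the bzrlib API):

def find_missing_references(external_refs, collection_keys):
    # Delta bases referenced by the new pack, minus keys the whole
    # collection can actually resolve.
    return sorted(set(external_refs) - set(collection_keys))

# A delta based on ('rev-2',) is fine if the collection has it; a base that
# only exists in a fallback repository would show up here as missing.
assert find_missing_references([('rev-2',), ('rev-9',)],
                               [('rev-1',), ('rev-2',)]) == [('rev-9',)]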
     def file_name(self):
         """Get the file name for the pack on disk."""
         return self.name + '.pack'

         """The text index is the name + .tix."""
         return self.index_name('text', name)

+    def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
+        setattr(self, index_type + '_index',
+            self.index_class(self.index_transport,
+                self.index_name(index_type, self.name),
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))

 class ExistingPack(Pack):
     """An in memory proxy for an existing .pack and its disk indices."""

     def __init__(self, pack_transport, name, revision_index, inventory_index,
-        text_index, signature_index):
+        text_index, signature_index, chk_index=None):
         """Create an ExistingPack object.

         :param pack_transport: The transport where the pack file resides.
         :param name: The name of the pack on disk in the pack_transport.
         Pack.__init__(self, revision_index, inventory_index, text_index,
+            signature_index, chk_index)
         self.pack_transport = pack_transport
         if None in (revision_index, inventory_index, text_index,

         return not self.__eq__(other)

     def __repr__(self):
-        return "<bzrlib.repofmt.pack_repo.Pack object at 0x%x, %s, %s" % (
-            id(self), self.pack_transport, self.name)
+        return "<%s.%s object at 0x%x, %s, %s" % (
+            self.__class__.__module__, self.__class__.__name__, id(self),
+            self.pack_transport, self.name)
+class ResumedPack(ExistingPack):
+
+    def __init__(self, name, revision_index, inventory_index, text_index,
+        signature_index, upload_transport, pack_transport, index_transport,
+        pack_collection, chk_index=None):
+        """Create a ResumedPack object."""
+        ExistingPack.__init__(self, pack_transport, name, revision_index,
+            inventory_index, text_index, signature_index,
+        self.upload_transport = upload_transport
+        self.index_transport = index_transport
+        self.index_sizes = [None, None, None, None]
+            ('revision', revision_index),
+            ('inventory', inventory_index),
+            ('text', text_index),
+            ('signature', signature_index),
+        if chk_index is not None:
+            indices.append(('chk', chk_index))
+            self.index_sizes.append(None)
+        for index_type, index in indices:
+            offset = self.index_offset(index_type)
+            self.index_sizes[offset] = index._size
+        self.index_class = pack_collection._index_class
+        self._pack_collection = pack_collection
+        self._state = 'resumed'
+        # XXX: perhaps check that the .pack file exists?
+
+    def access_tuple(self):
+        if self._state == 'finished':
+            return Pack.access_tuple(self)
+        elif self._state == 'resumed':
+            return self.upload_transport, self.file_name()
+            raise AssertionError(self._state)

+        self.upload_transport.delete(self.file_name())
+        indices = [self.revision_index, self.inventory_index, self.text_index,
+            self.signature_index]
+        if self.chk_index is not None:
+            indices.append(self.chk_index)
+        for index in indices:
+            index._transport.delete(index._name)

+        self._check_references()
+        index_types = ['revision', 'inventory', 'text', 'signature']
+        if self.chk_index is not None:
+            index_types.append('chk')
+        for index_type in index_types:
+            old_name = self.index_name(index_type, self.name)
+            new_name = '../indices/' + old_name
+            self.upload_transport.rename(old_name, new_name)
+            self._replace_index_with_readonly(index_type)
+        new_name = '../packs/' + self.file_name()
+        self.upload_transport.rename(self.file_name(), new_name)
+        self._state = 'finished'

+    def _get_external_refs(self, index):
+        """Return compression parents for this index that are not present.
+        This returns any compression parents that are referenced by this index,
+        which are not contained *in* this index. They may be present elsewhere.
+        return index.external_references(1)
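ResumedPack models a simple two-state lifecycle: it starts as 'resumed' (content still sitting in the upload directory) and becomes 'finished' once its indices and .pack file have been renamed into place. A rough standalone sketch of that state machine (illustrative only, not the bzrlib class):

class _PackState(object):
    # Minimal stand-in: 'resumed' reads from the upload area,
    # 'finished' reads from the packs area.
    def __init__(self):
        self.state = 'resumed'

    def access_dir(self):
        if self.state == 'finished':
            return 'packs'
        elif self.state == 'resumed':
            return 'upload'
        else:
            raise AssertionError(self.state)

    def finish(self):
        # the renames of indices and the pack file would happen here, then:
        self.state = 'finished'

p = _PackState()
assert p.access_dir() == 'upload'
p.finish()
assert p.access_dir() == 'packs'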
 class NewPack(Pack):
     """An in memory proxy for a pack which is being created."""

-    # A map of index 'type' to the file extension and position in the
-    index_definitions = {
-        'revision': ('.rix', 0),
-        'inventory': ('.iix', 1),
-        'signature': ('.six', 3),

     def __init__(self, pack_collection, upload_suffix='', file_mode=None):
         """Create a NewPack instance.

             raise AssertionError(self._state)

-    def _check_references(self):
-        """Make sure our external references are present.
-        Packs are allowed to have deltas whose base is not in the pack, but it
-        must be present somewhere in this collection. It is not allowed to
-        have deltas based on a fallback repository.
-        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
-        for (index_name, external_refs, index) in [
-                self.text_index._external_references(),
-                self._pack_collection.text_index.combined_index),
-                self.inventory_index._external_references(),
-                self._pack_collection.inventory_index.combined_index),
-            missing = external_refs.difference(
-                k for (idx, k, v, r) in
-                index.iter_entries(external_refs))
-                missing_items[index_name] = sorted(list(missing))
-            from pprint import pformat
-            raise errors.BzrCheckError(
-                "Newly created pack file %r has delta references to "
-                "items not in its repository:\n%s"
-                % (self, pformat(missing_items)))

     def data_inserted(self):
         """True if data has been added to this pack."""
         return bool(self.get_revision_count() or
             self.inventory_index.key_count() or
             self.text_index.key_count() or
-            self.signature_index.key_count())
+            self.signature_index.key_count() or
+            (self.chk_index is not None and self.chk_index.key_count()))
+    def finish_content(self):
+        if self.name is not None:
+        self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()

+    def finish(self, suspend=False):
         """Finish the new pack.

         - stores the index size tuple for the pack in the index_sizes
-        self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
-        self._check_references()
+        self.finish_content()
+        self._check_references()
         # XXX: It'd be better to write them all to temporary names, then
         # rename them all into place, so that the window when only some are
         # visible is smaller. On the other hand none will be seen until
         # they're in the names list.
         self.index_sizes = [None, None, None, None]
-        self._write_index('revision', self.revision_index, 'revision')
-        self._write_index('inventory', self.inventory_index, 'inventory')
-        self._write_index('text', self.text_index, 'file texts')
+        self._write_index('revision', self.revision_index, 'revision', suspend)
+        self._write_index('inventory', self.inventory_index, 'inventory',
+        self._write_index('text', self.text_index, 'file texts', suspend)
         self._write_index('signature', self.signature_index,
-            'revision signatures')
+            'revision signatures', suspend)
+        if self.chk_index is not None:
+            self.index_sizes.append(None)
+            self._write_index('chk', self.chk_index,
+                'content hash bytes', suspend)
         self.write_stream.close()
         # Note that this will clobber an existing pack with the same name,
         # without checking for hash collisions. While this is undesirable this
         # - try for HASH.pack
         # - try for temporary-name
         # - refresh the pack-list to see if the pack is now absent
-        self.upload_transport.rename(self.random_name,
-            '../packs/' + self.name + '.pack')
+        new_name = self.name + '.pack'
+            new_name = '../packs/' + new_name
+        self.upload_transport.rename(self.random_name, new_name)
         self._state = 'finished'
         if 'pack' in debug.debug_flags:
             # XXX: size might be interesting?
-            mutter('%s: create_pack: pack renamed into place: %s%s->%s%s t+%6.3fs',
+            mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
                 time.ctime(), self.upload_transport.base, self.random_name,
-                self.pack_transport, self.name,
-                time.time() - self.start_time)
+                new_name, time.time() - self.start_time)
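A finished pack is named after the hash accumulated while its bytes were written, so the rename target is simply '<hexdigest>.pack'. A tiny illustration of that naming scheme (hashlib stands in here for the internal write-time hash; illustrative only):

import hashlib

def pack_file_name(content_bytes):
    # Mirrors the idea of self._hash.hexdigest() + '.pack': the pack's
    # on-disk name is derived from its own content.
    return hashlib.md5(content_bytes).hexdigest() + '.pack'

name = pack_file_name(b'example pack content')
assert name.endswith('.pack') and len(name) == 32 + len('.pack')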
         """Flush any current data."""
             self._hash.update(bytes)
             self._buffer[:] = [[], 0]

-    def index_name(self, index_type, name):
-        """Get the disk name of an index type for pack name 'name'."""
-        return name + NewPack.index_definitions[index_type][0]
-
-    def index_offset(self, index_type):
-        """Get the position in a index_size array for a given index type."""
-        return NewPack.index_definitions[index_type][1]
-
-    def _replace_index_with_readonly(self, index_type):
-        setattr(self, index_type + '_index',
-            self.index_class(self.index_transport,
-                self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))

+    def _get_external_refs(self, index):
+        return index._external_references()

     def set_write_cache_size(self, size):
         self._cache_limit = size

-    def _write_index(self, index_type, index, label):
+    def _write_index(self, index_type, index, label, suspend=False):
         """Write out an index.

         :param index_type: The type of index to write - e.g. 'revision'.
         :param label: What label to give the index e.g. 'revision'.
         index_name = self.index_name(index_type, self.name)
-        self.index_sizes[self.index_offset(index_type)] = \
-            self.index_transport.put_file(index_name, index.finish(),
-            mode=self._file_mode)
+            transport = self.upload_transport
+            transport = self.index_transport
+        self.index_sizes[self.index_offset(index_type)] = transport.put_file(
+            index_name, index.finish(), mode=self._file_mode)
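When a write group is suspended, the freshly written indices stay next to the suspended pack in the upload directory rather than being published under the indices directory; picking the destination is a one-line decision. A trivial standalone sketch of that routing (illustrative names only):

def index_destination(suspend):
    # Suspended packs keep their indices beside the pack in 'upload/';
    # finished packs publish them under 'indices/'.
    return 'upload' if suspend else 'indices'

assert index_destination(True) == 'upload'
assert index_destination(False) == 'indices'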
         if 'pack' in debug.debug_flags:
             # XXX: size might be interesting?
             mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                 time.ctime(), label, self.upload_transport.base,
                 self.random_name, time.time() - self.start_time)
-        # Replace the writable index on this object with a readonly,
+        # Replace the writable index on this object with a readonly,
         # presently unloaded index. We should alter
         # the index layer to make its finish() error if add_node is
         # subsequently used. RBC

         self.index_to_pack = {}
         self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
         self.data_access = _DirectPackAccess(self.index_to_pack,
-            reload_func=reload_func)
-        self.add_callback = None
-
-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
+            reload_func=reload_func,
+            flush_func=flush_func)
         self.add_callback = None

     def add_index(self, index, pack):
         Future searches on the aggregate index will search this new index
         before all previously inserted indices.

         :param index: An Index for the pack.
         :param pack: A Pack instance.
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)

     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.

         There can be at most one writable index at any time. Any
         modifications made to the knit are put into this index.

         :param index: An index from the pack parameter.
         :param pack: A Pack instance.
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None

-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.

         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
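remove_index now has to keep two parallel lists consistent, the index objects and their pack names, so removal is done by position rather than by value in a single list. A small standalone sketch of that pattern (illustrative values, not the CombinedGraphIndex API):

indices = ['idx-a', 'idx-b', 'idx-c']
index_names = ['pack-a', 'pack-b', 'pack-c']

def remove_index(index):
    # Find the position once, then delete from both lists so they stay aligned.
    pos = indices.index(index)
    del indices[pos]
    del index_names[pos]

remove_index('idx-b')
assert indices == ['idx-a', 'idx-c']
assert index_names == ['pack-a', 'pack-c']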
     def open_pack(self):
         """Open a pack for the pack we are creating."""
-        return NewPack(self._pack_collection, upload_suffix=self.suffix,
+        new_pack = self._pack_collection.pack_factory(self._pack_collection,
+            upload_suffix=self.suffix,
             file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
+        # We know that we will process all nodes in order, and don't need to
+        # query, so don't combine any indices spilled to disk until we are done
+        new_pack.revision_index.set_optimize(combine_backing_indices=False)
+        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
+        new_pack.text_index.set_optimize(combine_backing_indices=False)
+        new_pack.signature_index.set_optimize(combine_backing_indices=False)

     def _update_pack_order(self, entries, index_to_pack_map):
         """Determine how we want our packs to be ordered.

                 time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                 new_pack.signature_index.key_count(),
                 time.time() - new_pack.start_time)
+        # NB XXX: how to check CHK references are present? perhaps by yielding
+        # the items? How should that interact with stacked repos?
+        if new_pack.chk_index is not None:
+            if 'pack' in debug.debug_flags:
+                mutter('%s: create_pack: chk content copied: %s%s %d items t+%6.3fs',
+                    time.ctime(), self._pack_collection._upload_transport.base,
+                    new_pack.random_name,
+                    new_pack.chk_index.key_count(),
+                    time.time() - new_pack.start_time)
         new_pack._check_references()
         if not self._use_pack(new_pack):
         self._pack_collection.allocate(new_pack)

-    def _copy_nodes(self, nodes, index_map, writer, write_index):
-        """Copy knit nodes between packs with no graph references."""
+    def _copy_chks(self, refs=None):
+        # XXX: Todo, recursive follow-pointers facility when fetching some
+        chk_index_map, chk_indices = self._pack_map_and_index_list(
+        chk_nodes = self._index_contents(chk_indices, refs)
+        # TODO: This isn't strictly tasteful as we are accessing some private
+        #       variables (_serializer). Perhaps a better way would be to have
+        #       Repository._deserialise_chk_node()
+        search_key_func = chk_map.search_key_registry.get(
+            self._pack_collection.repo._serializer.search_key_name)
+        def accumlate_refs(lines):
+            # XXX: move to a generic location
+            bytes = ''.join(lines)
+            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
+            new_refs.update(node.refs())
+        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
+            self.new_pack.chk_index, output_lines=accumlate_refs)

+    def _copy_nodes(self, nodes, index_map, writer, write_index,
+        """Copy knit nodes between packs with no graph references.

+        :param output_lines: Output full texts of copied items.
         pb = ui.ui_factory.nested_progress_bar()
             return self._do_copy_nodes(nodes, index_map, writer,
+                write_index, pb, output_lines=output_lines)

-    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
+    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
         # for record verification
         knit = KnitVersionedFiles(None, None)
         # plan a readv on each source pack:
 class RepositoryPackCollection(object):
     """Management of packs within a repository.

     :ivar _names: map of {pack_name: (index_size,)}

+    pack_factory = NewPack
+    resumed_pack_factory = ResumedPack

     def __init__(self, repo, transport, index_transport, upload_transport,
-            pack_transport, index_builder_class, index_class):
+            pack_transport, index_builder_class, index_class,
         """Create a new RepositoryPackCollection.

-        :param transport: Addresses the repository base directory
+        :param transport: Addresses the repository base directory
             (typically .bzr/repository/).
         :param index_transport: Addresses the directory containing indices.
         :param upload_transport: Addresses the directory into which packs are written
         self._pack_transport = pack_transport
         self._index_builder_class = index_builder_class
         self._index_class = index_class
-        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3}
+        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
         self.packs = []
         # name:Pack mapping
         self._packs_by_name = {}
         # the previous pack-names content
         self._packs_at_load = None
         # when a pack is being created by this object, the state of that pack.
         self._new_pack = None
         # aggregated revision index data
-        self.revision_index = AggregateIndex(self.reload_pack_names)
-        self.inventory_index = AggregateIndex(self.reload_pack_names)
-        self.text_index = AggregateIndex(self.reload_pack_names)
-        self.signature_index = AggregateIndex(self.reload_pack_names)
+        flush = self._flush_new_pack
+        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
+        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
+        self.text_index = AggregateIndex(self.reload_pack_names, flush)
+        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+            self.text_index, self.signature_index]
+            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
+            # used to determine if we're using a chk_index elsewhere.
+            self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
+        self._resumed_packs = []

+        return '%s(%r)' % (self.__class__.__name__, self.repo)

     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.

         :param pack: A Pack object.
         if pack.name in self._packs_by_name:
-            raise AssertionError()
+            raise AssertionError(
+                'pack %s already in _packs_by_name' % (pack.name,))
         self.packs.append(pack)
         self._packs_by_name[pack.name] = pack
         self.revision_index.add_index(pack.revision_index, pack)
         self.inventory_index.add_index(pack.inventory_index, pack)
         self.text_index.add_index(pack.text_index, pack)
         self.signature_index.add_index(pack.signature_index, pack)
+        if self.chk_index is not None:
+            self.chk_index.add_index(pack.chk_index, pack)

     def all_packs(self):
         """Return a list of all the Pack objects this repository has.

         self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
-        self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+            obsolete_packs=to_be_obsoleted)

+    def _flush_new_pack(self):
+        if self._new_pack is not None:
+            self._new_pack.flush()

     def lock_names(self):
         """Acquire the mutex around the pack-names index.

         This cannot be used in the middle of a read-only transaction on the
         self.repo.control_files.lock_write()

+    def _already_packed(self):
+        """Is the collection already packed?"""
+        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
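The _already_packed predicate says a collection needs no work only when it holds at most one pack and the format gains nothing from re-compressing on repack. Expressed as plain Python over those two inputs (illustrative):

def already_packed(pack_compresses, pack_count):
    # Repacking is pointless only if the format gains nothing from it
    # and there is at most one pack on disk.
    return not (pack_compresses or pack_count > 1)

assert already_packed(False, 1) is True     # one pack, nothing to gain
assert already_packed(False, 3) is False    # several packs: combine them
assert already_packed(True, 1) is False     # format compresses on repack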
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
-        # This is arguably wrong because we might not be optimal, but for
-        # now lets leave it in. (e.g. reconcile -> one pack. But not
+        if self._already_packed():
         total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         mutter('Packing repository %s, which has %d pack files, '
-            'containing %d revisions into 1 packs.', self, total_packs,
+            'containing %d revisions with hint %r.', self, total_packs,
+            total_revisions, hint)
         # determine which packs need changing
-        pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            pack_operations[-1][0] += pack.get_revision_count()
-            pack_operations[-1][1].append(pack)
+            if hint is None or pack.name in hint:
+                # Either no hint was provided (so we are packing everything),
+                # or this pack was included in the hint.
+                pack_operations[-1][0] += pack.get_revision_count()
+                pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()
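With a hint, pack() only repacks the named packs; without one it gathers everything. The selection rule reduces to a simple filter (plain Python, illustrative names):

def select_packs(all_pack_names, hint=None):
    # hint is None -> repack everything; otherwise only packs named in it.
    return [name for name in all_pack_names
            if hint is None or name in hint]

names = ['aaa', 'bbb', 'ccc']
assert select_packs(names) == ['aaa', 'bbb', 'ccc']
assert select_packs(names, hint=['bbb']) == ['bbb']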
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.

         inv_index = self._make_index(name, '.iix')
         txt_index = self._make_index(name, '.tix')
         sig_index = self._make_index(name, '.six')
+        if self.chk_index is not None:
+            chk_index = self._make_index(name, '.cix', unlimited_cache=True)
         result = ExistingPack(self._pack_transport, name, rev_index,
-            inv_index, txt_index, sig_index)
+            inv_index, txt_index, sig_index, chk_index)
         self.add_pack_to_memory(result)

+    def _resume_pack(self, name):
+        """Get a suspended Pack object by name.

+        :param name: The name of the pack - e.g. '123456'
+        :return: A Pack object.
+        if not re.match('[a-f0-9]{32}', name):
+            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
+            raise errors.UnresumableWriteGroup(
+                self.repo, [name], 'Malformed write group token')
+            rev_index = self._make_index(name, '.rix', resume=True)
+            inv_index = self._make_index(name, '.iix', resume=True)
+            txt_index = self._make_index(name, '.tix', resume=True)
+            sig_index = self._make_index(name, '.six', resume=True)
+            if self.chk_index is not None:
+                chk_index = self._make_index(name, '.cix', resume=True,
+                    unlimited_cache=True)
+            result = self.resumed_pack_factory(name, rev_index, inv_index,
+                txt_index, sig_index, self._upload_transport,
+                self._pack_transport, self._index_transport, self,
+                chk_index=chk_index)
+        except errors.NoSuchFile, e:
+            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
+        self.add_pack_to_memory(result)
+        self._resumed_packs.append(result)
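Resume tokens are simply the md5-derived pack names, so a well-formed token starts with 32 lowercase hex digits; anything else is rejected before the transport is touched. A runnable check mirroring that validation (illustrative wrapper, same pattern as the source):

import re

def looks_like_resume_token(name):
    # Mirrors the source check: the first 32 characters must be hex digits
    # (pack names come from an md5 hexdigest).
    return re.match('[a-f0-9]{32}', name) is not None

assert looks_like_resume_token('0123456789abcdef0123456789abcdef')
assert not looks_like_resume_token('not-a-token')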
     def allocate(self, a_new_pack):
         """Allocate name in the list of packs.

         return self._index_class(self.transport, 'pack-names', None
             ).iter_all_entries()

-    def _make_index(self, name, suffix):
+    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
-        index_size = self._names[name][size_offset]
-        return self._index_class(
-            self._index_transport, index_name, index_size)
+            transport = self._upload_transport
+            index_size = transport.stat(index_name).st_size
+            transport = self._index_transport
+            index_size = self._names[name][size_offset]
+        return self._index_class(transport, index_name, index_size,
+            unlimited_cache=unlimited_cache)

     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.

         :param total_revisions: The total number of revisions in the

         :param return: None.
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
-            for suffix in ('.iix', '.six', '.tix', '.rix'):
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+            suffixes = ['.iix', '.six', '.tix', '.rix']
+            if self.chk_index is not None:
+                suffixes.append('.cix')
+            for suffix in suffixes:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"

     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.

         self._remove_pack_indices(pack)
         self.packs.remove(pack)

-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.

+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                    aggregate_index.remove_index(pack_index)
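Instead of naming each index type by hand, the rewritten _remove_pack_indices walks the index_definitions keys and derives the attribute name for each ('revision' becomes 'revision_index', and so on), which automatically covers the optional chk index. A standalone sketch of that lookup pattern (illustrative objects, not the real classes):

class _FakePack(object):
    revision_index = 'rev-idx'
    inventory_index = 'inv-idx'
    text_index = 'txt-idx'
    signature_index = 'sig-idx'
    chk_index = None

index_definitions = {'revision': ('.rix', 0), 'inventory': ('.iix', 1),
                     'text': ('.tix', 2), 'signature': ('.six', 3),
                     'chk': ('.cix', 4)}

pack = _FakePack()
found = dict((index_type, getattr(pack, index_type + '_index'))
             for index_type in index_definitions)
assert found['revision'] == 'rev-idx'
assert found['chk'] is None    # a format without a chk index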
     def reset(self):
         """Clear all cached data."""
         # cached revision data
-        self.repo._revision_knit = None
         self.revision_index.clear()
         # cached signature data
-        self.repo._signature_knit = None
         self.signature_index.clear()
         # cached file text data
         self.text_index.clear()
-        self.repo._text_knit = None
         # cached inventory data
         self.inventory_index.clear()
+        if self.chk_index is not None:
+            self.chk_index.clear()
         # remove the open pack
         self._new_pack = None
         # information about packs.

         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
+        :return: A list of the names saved that were not previously on disk.
+        already_obsolete = []
         self.lock_names()
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
-            # TODO: handle same-name, index-size-changes here -
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
+            # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             for key, value in disk_nodes:
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+            # TODO: We could add one more condition here. "if o.name not in
+            #       orig_disk_nodes and o != the new_pack we haven't written to
+            #       disk yet. However, the new pack object is not easily
+            #       accessible here (it would have to be passed through the
+            #       autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
+        return [new_node[0][0] for new_node in new_nodes]
     def reload_pack_names(self):
         """Sync our pack listing with what is present in the repository.

         This should be called when we find out that something we thought was
         present is now missing. This happens when another process re-packs the
         repository, etc.
+        :return: True if the in-memory list of packs has been altered at all.
-        # This is functionally similar to _save_pack_names, but we don't write
+        # The ensure_loaded call is to handle the case where the first call
+        # made involving the collection was to reload_pack_names, where we
+        # don't have a view of disk contents. It's a bit of a bandaid, and
+        # causes two reads of pack-names, but it's a rare corner case not struck
+        # with regular push/pull etc.
+        first_read = self.ensure_loaded()
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at the start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:

         raise errors.RetryAutopack(self.repo, False, sys.exc_info())

-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if name in preserve:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"

     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
         if not self.repo.is_write_locked():
             raise errors.NotWriteLocked(self)
-        self._new_pack = NewPack(self, upload_suffix='.pack',
+        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
             file_mode=self.repo.bzrdir._get_file_mode())
         # allow writing: queue writes to a new index
         self.revision_index.add_writable_index(self._new_pack.revision_index,

         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            self._new_pack.abort()
-            # XXX: If we aborted while in the middle of finishing the write
-            # group, _remove_pack_indices can fail because the indexes are
-            # already gone. If they're not there we shouldn't fail in this
-            # case. -- mbp 20081113
-            self._remove_pack_indices(self._new_pack)
-            self._new_pack = None
-        self.repo._text_knit = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone. But they're not there we shouldn't fail in this
+            # case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
+        for resumed_pack in self._resumed_packs:
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
+        del self._resumed_packs[:]
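The abort path now funnels every teardown step through a cleanup operation, so a failure in one step cannot silently skip the rest: run the main callable, then run each registered cleanup even if something raised along the way. A rough pure-Python sketch of that idea (this is the concept only, not the bzrlib.cleanup API itself):

def run_with_cleanups(func, cleanups):
    # Run func, then every cleanup; remember the first error and re-raise it
    # only after all cleanups have had their chance to run.
    first_error = None
    try:
        func()
    except Exception as e:
        first_error = e
    for cleanup in cleanups:
        try:
            cleanup()
        except Exception as e:
            if first_error is None:
                first_error = e
    if first_error is not None:
        raise first_error

log = []
run_with_cleanups(lambda: log.append('abort'),
                  [lambda: log.append('drop indices'),
                   lambda: log.append('forget new pack')])
assert log == ['abort', 'drop indices', 'forget new pack']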
+    def _remove_resumed_pack_indices(self):
+        for resumed_pack in self._resumed_packs:
+            self._remove_pack_indices(resumed_pack)
+        del self._resumed_packs[:]

+    def _check_new_inventories(self):
+        """Detect missing inventories in this write group.

+        :returns: list of strs, summarising any problems found. If the list is
+            empty no problems were found.
+        # The base implementation does no checks. GCRepositoryPackCollection

     def _commit_write_group(self):
+        for prefix, versioned_file in (
+                ('revisions', self.repo.revisions),
+                ('inventories', self.repo.inventories),
+                ('texts', self.repo.texts),
+                ('signatures', self.repo.signatures),
+            missing = versioned_file.get_missing_compression_parent_keys()
+            all_missing.update([(prefix,) + key for key in missing])
+            raise errors.BzrCheckError(
+                "Repository %s has missing compression parent(s) %r "
+                % (self.repo, sorted(all_missing)))
+        problems = self._check_new_inventories()
+            problems_summary = '\n'.join(problems)
+            raise errors.BzrCheckError(
+                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
+        any_new_content = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            if not self.autopack():
+            any_new_content = True
+            self._new_pack.abort()
+            self._new_pack = None
+        for resumed_pack in self._resumed_packs:
+            # XXX: this is a pretty ugly way to turn the resumed pack into a
+            # properly committed pack.
+            self._names[resumed_pack.name] = None
+            self._remove_pack_from_memory(resumed_pack)
+            resumed_pack.finish()
+            self.allocate(resumed_pack)
+            any_new_content = True
+        del self._resumed_packs[:]
+            result = self.autopack()
                 # when autopack takes no steps, the names list is still
-                self._save_pack_names()
+                return self._save_pack_names()
+    def _suspend_write_group(self):
+        tokens = [pack.name for pack in self._resumed_packs]
+        self._remove_pack_indices(self._new_pack)
+        if self._new_pack.data_inserted():
+            # get all the data to disk and read to use
+            self._new_pack.finish(suspend=True)
+            tokens.append(self._new_pack.name)
+            self._new_pack = None
             self._new_pack.abort()
             self._new_pack = None
-        self.repo._text_knit = None
+        self._remove_resumed_pack_indices()

+    def _resume_write_group(self, tokens):
+        for token in tokens:
+            self._resume_pack(token)

 class KnitPackRepository(KnitRepository):
     """Repository with knit objects stored inside pack containers.

     The layering for a KnitPackRepository is:

     Graph | HPSS | Repository public layer |

             deltas=True, parents=True, is_locked=self.is_locked),
             data_access=self._pack_collection.text_index.data_access,
             max_delta_chain=200)
+        if _format.supports_chks:
+            # No graph, no compression:- references from chks are between
+            # different objects not temporal versions of the same; and without
+            # some sort of temporal structure knit compression will just fail.
+            self.chk_bytes = KnitVersionedFiles(
+                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
+                    add_callback=self._pack_collection.chk_index.add_callback,
+                    deltas=False, parents=False, is_locked=self.is_locked),
+                data_access=self._pack_collection.chk_index.data_access,
+            self.chk_bytes = None
         # True when the repository object is 'write locked' (as opposed to the
-        # physical lock only taken out around changes to the pack-names list.)
+        # physical lock only taken out around changes to the pack-names list.)
         # Another way to represent this would be a decorator around the control
         # files object that presents logical locks as physical ones - if this
         # gets ugly consider that alternative design. RBC 20071011
         self._reconcile_does_inventory_gc = True
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
-        self._fetch_order = 'unordered'

-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                " 'bzr upgrade --1.6.1-rich-root'"
-                % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)
     def _abort_write_group(self):
+        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()

-    def _find_inconsistent_revision_parents(self):
-        """Find revisions with incorrectly cached parents.
-
-        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
-            parents-in-revision).
-        if not self.is_locked():
-            raise errors.ObjectNotLocked(self)
-        pb = ui.ui_factory.nested_progress_bar()
-            revision_nodes = self._pack_collection.revision_index \
-                .combined_index.iter_all_entries()
-            index_positions = []
-            # Get the cached index values for all revisions, and also the location
-            # in each index of the revision text so we can perform linear IO.
-            for index, key, value, refs in revision_nodes:
-                pos, length = value[1:].split(' ')
-                index_positions.append((index, int(pos), key[0],
-                    tuple(parent[0] for parent in refs[0])))
-                pb.update("Reading revision index.", 0, 0)
-            index_positions.sort()
-            batch_count = len(index_positions) / 1000 + 1
-            pb.update("Checking cached revision graph.", 0, batch_count)
-            for offset in xrange(batch_count):
-                pb.update("Checking cached revision graph.", offset)
-                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
-                rev_ids = [item[2] for item in to_query]
-                revs = self.get_revisions(rev_ids)
-                for revision, item in zip(revs, to_query):
-                    index_parents = item[3]
-                    rev_parents = tuple(revision.parent_ids)
-                    if index_parents != rev_parents:
-                        result.append((revision.revision_id, index_parents, rev_parents))
-
-    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
-    def get_parents(self, revision_ids):
-        """See graph._StackedParentsProvider.get_parents."""
-        parent_map = self.get_parent_map(revision_ids)
-        return [parent_map.get(r, None) for r in revision_ids]

+    def _get_source(self, to_format):
+        if to_format.network_name() == self._format.network_name():
+            return KnitPackStreamSource(self, to_format)
+        return super(KnitPackRepository, self)._get_source(to_format)

     def _make_parents_provider(self):
         return graph.CachingParentsProvider(self)

     def _refresh_data(self):
-        if self._write_lock_count == 1 or (
-            self.control_files._lock_count == 1 and
-            self.control_files._lock_mode == 'r'):
-            # forget what names there are
-            self._pack_collection.reset()
-            # XXX: Better to do an in-memory merge when acquiring a new lock -
-            # factor out code from _save_pack_names.
-            self._pack_collection.ensure_loaded()
+        if not self.is_locked():
+        self._pack_collection.reload_pack_names()

     def _start_write_group(self):
         self._pack_collection._start_write_group()

     def _commit_write_group(self):
-        return self._pack_collection._commit_write_group()
+        hint = self._pack_collection._commit_write_group()
+        self.revisions._index._key_dependencies.clear()

+    def suspend_write_group(self):
+        # XXX check self._write_group is self.get_transaction()?
+        tokens = self._pack_collection._suspend_write_group()
+        self.revisions._index._key_dependencies.clear()
+        self._write_group = None

+    def _resume_write_group(self, tokens):
+        self._start_write_group()
+            self._pack_collection._resume_write_group(tokens)
+        except errors.UnresumableWriteGroup:
+            self._abort_write_group()
+        for pack in self._pack_collection._resumed_packs:
+            self.revisions._index.scan_unvalidated_index(pack.revision_index)

     def get_transaction(self):
         if self._write_lock_count:

         return self._write_lock_count

     def lock_write(self, token=None):
-        if not self._write_lock_count and self.is_locked():
+        """Lock the repository for writes.

+        :return: A bzrlib.repository.RepositoryWriteLockResult.
+        locked = self.is_locked()
+        if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)
         self._write_lock_count += 1
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
+            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+                note('%r was write locked again', self)
+            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
-        self._refresh_data()
+            self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)

     def lock_read(self):
+        """Lock the repository for reads.

+        :return: A bzrlib.lock.LogicalLockResult.
+        locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
             self.control_files.lock_read()
+            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+                note('%r was read locked again', self)
+            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
-                # Writes don't affect fallback repos
                 repo.lock_read()
-        self._refresh_data()
+            self._refresh_data()
+        return LogicalLockResult(self.unlock)
     def leave_lock_in_place(self):
         # not supported - raise an error

             transaction = self._transaction
             self._transaction = None
             transaction.finish()
-            for repo in self._fallback_repositories:

             self.control_files.unlock()
+        if not self.is_locked():
             for repo in self._fallback_repositories:

+class KnitPackStreamSource(StreamSource):
+    """A StreamSource used to transfer data between same-format KnitPack repos.

+    This source assumes:
+        1) Same serialization format for all objects
+        2) Same root information
+        3) XML format inventories
+        4) Atomic inserts (so we can stream inventory texts before text

+    def __init__(self, from_repository, to_format):
+        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
+        self._text_keys = None
+        self._text_fetch_order = 'unordered'

+    def _get_filtered_inv_stream(self, revision_ids):
+        from_repo = self.from_repository
+        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
+        parent_keys = [(p,) for p in parent_ids]
+        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
+        parent_text_keys = set(find_text_keys(
+            from_repo._inventory_xml_lines_for_keys(parent_keys)))
+        content_text_keys = set()
+        knit = KnitVersionedFiles(None, None)
+        factory = KnitPlainFactory()
+        def find_text_keys_from_content(record):
+            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
+                raise ValueError("Unknown content storage kind for"
+                    " inventory text: %s" % (record.storage_kind,))
+            # It's a knit record, it has a _raw_record field (even if it was
+            # reconstituted from a network stream).
+            raw_data = record._raw_record
+            # read the entire thing
+            revision_id = record.key[-1]
+            content, _ = knit._parse_record(revision_id, raw_data)
+            if record.storage_kind == 'knit-delta-gz':
+                line_iterator = factory.get_linedelta_content(content)
+            elif record.storage_kind == 'knit-ft-gz':
+                line_iterator = factory.get_fulltext_content(content)
+            content_text_keys.update(find_text_keys(
+                [(line, revision_id) for line in line_iterator]))
+        revision_keys = [(r,) for r in revision_ids]
+        def _filtered_inv_stream():
+            source_vf = from_repo.inventories
+            stream = source_vf.get_record_stream(revision_keys,
+            for record in stream:
+                if record.storage_kind == 'absent':
+                    raise errors.NoSuchRevision(from_repo, record.key)
+                find_text_keys_from_content(record)
+            self._text_keys = content_text_keys - parent_text_keys
+        return ('inventories', _filtered_inv_stream())
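The inventory filter computes which file-text keys the streamed revisions actually introduce: the keys referenced by the requested inventories minus the keys their parent inventories already referenced. A plain-Python sketch of that set arithmetic (illustrative keys):

parent_text_keys = set([('file-a', 'rev-1'), ('file-b', 'rev-1')])
content_text_keys = set([('file-a', 'rev-1'),   # unchanged, already in a parent
                         ('file-b', 'rev-2'),   # modified in the new revision
                         ('file-c', 'rev-2')])  # added in the new revision
text_keys_to_stream = content_text_keys - parent_text_keys
assert text_keys_to_stream == set([('file-b', 'rev-2'), ('file-c', 'rev-2')])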
+    def _get_text_stream(self):
+        # Note: We know we don't have to handle adding root keys, because both
+        # the source and target are the identical network name.
+        text_stream = self.from_repository.texts.get_record_stream(
+            self._text_keys, self._text_fetch_order, False)
+        return ('texts', text_stream)

+    def get_stream(self, search):
+        revision_ids = search.get_keys()
+        for stream_info in self._fetch_revision_texts(revision_ids):
+        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
+        yield self._get_filtered_inv_stream(revision_ids)
+        yield self._get_text_stream()

 class RepositoryFormatPack(MetaDirRepositoryFormat):
     """Format logic for pack structured repositories.

         builder = self.index_builder_class()
         files = [('pack-names', builder.finish())]
         utf8_files = [('format', self.get_format_string())]

         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)

     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().

         :param _override_transport: INTERNAL USE ONLY. Allows opening the
             repository at a slightly different url
             than normal. I.e. during 'upgrade'.

         return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"

-class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
-    """A no-subtrees development repository.
-
-    This format should be retained until the second release after bzr 1.7.
-
-    This is pack-1.6.1 with B+Tree indices.
-
-    repository_class = KnitPackRepository
-    _commit_builder_class = PackCommitBuilder
-    supports_external_lookups = True
-    # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
-
-    def _serializer(self):
-        return xml5.serializer_v5
-
-    def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('development2')
-
-    def _ignore_setting_bzrdir(self, format):
-
-    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
-
-    def get_format_string(self):
-        """See RepositoryFormat.get_format_string()."""
-        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"
-
-    def get_format_description(self):
-        """See RepositoryFormat.get_format_description()."""
-        return ("Development repository format, currently the same as "
-            "1.6.1 with B+Trees.\n")
-
-    def check_conversion_target(self, target_format):

 class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
     """A subtrees development repository.

     This format should be retained until the second release after bzr 1.7.

     1.6.1-subtree[as it might have been] with B+Tree indices.
+    This is [now] retained until we have a CHK based subtree format in

     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use

     def _get_matching_bzrdir(self):
         return bzrdir.format_registry.make_bzrdir(
-            'development2-subtree')
+            'development-subtree')

     def _ignore_setting_bzrdir(self, format):

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)

     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "