        """Build a VersionedFiles instance on top of this group of packs."""
        index_name = index_name + '_index'
        index_to_pack = {}
        access = _DirectPackAccess(index_to_pack,
            reload_func=self._reload_func)
        if self.new_pack is None:
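            # Body elided in this excerpt. For illustration only (assumed
            # wiring, not verbatim bzrlib code): index_to_pack is what lets
            # _DirectPackAccess route a read against a given index object to
            # the .pack file that holds the bytes, filled in roughly as:
            #
            #   for pack in self.packs:
            #       index = getattr(pack, index_name)
            #       index_to_pack[index] = pack.access_tuple()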

    # ...

        inventory_keys = source_vf.keys()
        missing_inventories = set(self.revision_keys).difference(inventory_keys)
        if missing_inventories:
            # Go back to the original repo, to see if these are really missing
            # https://bugs.launchpad.net/bzr/+bug/437003
            # If we are packing a subset of the repo, it is fine to just have
            # the data in another Pack file, which is not included in this pack
            # operation.
            inv_index = self._pack_collection.repo.inventories._index
            pmap = inv_index.get_parent_map(missing_inventories)
            really_missing = missing_inventories.difference(pmap)
            if really_missing:
                missing_inventories = sorted(really_missing)
                raise ValueError('We are missing inventories for revisions: %s'
                    % (missing_inventories,))
        self._copy_stream(source_vf, target_vf, inventory_keys,
            'inventories', self._get_filtered_inv_stream, 2)
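        # Worked illustration with assumed values (not part of bzrlib): when
        # repacking only pack A while r2's inventory lives in pack B, the
        # whole-repository index still resolves it:
        #
        #   missing_inventories = set([('r2',)])
        #   pmap = inv_index.get_parent_map(missing_inventories)
        #   # -> {('r2',): (('r1',),)} : found elsewhere, not really missing
        #   missing_inventories.difference(pmap)
        #   # -> set() : nothing to complain about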

# ...


class GCCHKCanonicalizingPacker(GCCHKPacker):

    def __init__(self, *args, **kwargs):
        super(GCCHKCanonicalizingPacker, self).__init__(*args, **kwargs)
        self._data_changed = False

    def _exhaust_stream(self, source_vf, keys, message, vf_to_stream, pb_offset):
        """Create and exhaust a stream, but don't insert it.

        This is useful to get the side-effects of generating a stream.
        """
        self.pb.update('scanning %s' % (message,), pb_offset)
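        # Remainder elided in this excerpt. The "exhaust" part is assumed to
        # be the usual drain of a record stream for its side-effects, roughly:
        #
        #   child_pb = ui.ui_factory.nested_progress_bar()
        #   try:
        #       for _record in vf_to_stream(source_vf, keys, message, child_pb):
        #           pass
        #   finally:
        #       child_pb.finished()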

# ...

                % (sorted(missing_text_keys),))

    # ...

    def _execute_pack_operations(self, pack_operations,
            _packer_class=GCCHKPacker,
            reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: GCCHKPacker).
        """
        # XXX: Copied across from RepositoryPackCollection simply because we
        #      want to override the _packer_class ... :(
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = GCCHKPacker(self, packs, '.autopack',
                reload_func=reload_func)
            try:
                result = packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
            obsolete_packs=to_be_obsoleted)
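
    # Shape of pack_operations, as given by the docstring above (values
    # assumed for illustration only):
    #
    #   pack_operations = [[10, [pack_a, pack_b]], [0, []]]
    #
    # The second entry is the kind of no-op the loop skips.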


class CHKInventoryRepository(PackRepository):
    """Subclass of PackRepository that uses CHK based inventories."""

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
            _serializer):
        """Overridden to change pack collection class."""
        super(CHKInventoryRepository, self).__init__(_format, a_bzrdir,
            control_files, _commit_builder_class, _serializer)
        index_transport = self._transport.clone('indices')
        self._pack_collection = GCRepositoryPackCollection(self,
            self._transport, index_transport,
            # (remaining constructor arguments elided in this excerpt)

    # ...
            return GroupCHKStreamSource(self, to_format)
        return super(CHKInventoryRepository, self)._get_source(to_format)
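
    # For illustration only: the first return above is the fast path taken
    # when source and target speak the same on-the-wire format (assumed
    # condition, elided from this excerpt), e.g.
    #
    #   source = repo_a._get_source(repo_b._format)
    #   # -> a GroupCHKStreamSource when both repositories are GroupCHK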

    def _find_inconsistent_revision_parents(self, revisions_iterator=None):
        """Find revisions with different parent lists in the revision object
        and in the index graph.

        :param revisions_iterator: None, or an iterator of (revid,
            Revision-or-None). This iterator controls the revisions checked.
        :returns: an iterator yielding tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise AssertionError()
        vf = self.revisions
        if revisions_iterator is None:
            revisions_iterator = self._iter_revisions(None)
        for revid, revision in revisions_iterator:
            if revision is None:
                continue
            parent_map = vf.get_parent_map([(revid,)])
            parents_according_to_index = tuple(parent[-1] for parent in
                parent_map[(revid,)])
            parents_according_to_revision = tuple(revision.parent_ids)
            if parents_according_to_index != parents_according_to_revision:
                yield (revid, parents_according_to_index,
                    parents_according_to_revision)

    def _check_for_inconsistent_revision_parents(self):
        inconsistencies = list(self._find_inconsistent_revision_parents())
        if inconsistencies:
            raise errors.BzrCheckError(
                "Revision index has inconsistent parents.")


class GroupCHKStreamSource(StreamSource):
    """Used when both the source and target repo are GroupCHK repos."""

    def __init__(self, from_repository, to_format):

        # ...

            self._chk_p_id_roots = None
        yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()

    def _get_text_stream(self):
        # Note: We know we don't have to handle adding root keys, because
        # both the source and target have the same network name.
        text_stream = self.from_repository.texts.get_record_stream(
            self._text_keys, self._text_fetch_order, False)
        return ('texts', text_stream)
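
    # Sketch of the consuming side, assumed for illustration: the target can
    # insert the records as-is because the network names match, roughly
    #
    #   kind, stream = source._get_text_stream()
    #   target_repo.texts.insert_record_stream(stream)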

    def get_stream(self, search):
        def wrap_and_count(pb, rc, stream):
            """Yield records from stream while showing progress."""

        # ... (setup of revision_ids, pb and rc elided in this excerpt)
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield (stream_info[0],
                wrap_and_count(pb, rc, stream_info[1]))
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        # TODO: The keys to exclude might be part of the search recipe
        # For now, exclude all parents that are at the edge of ancestry, for
        # which we have inventories
        from_repo = self.from_repository
        parent_keys = from_repo._find_parent_keys_of_revisions(
            self._revision_keys)
        self.from_repository.revisions.clear_cache()
        self.from_repository.signatures.clear_cache()
        # Clear the repo's get_parent_map cache too.
        self.from_repository._unstacked_provider.disable_cache()
        self.from_repository._unstacked_provider.enable_cache()
        s = self._get_inventory_stream(self._revision_keys)
        yield (s[0], wrap_and_count(pb, rc, s[1]))
        self.from_repository.inventories.clear_cache()
        for stream_info in self._get_filtered_chk_streams(parent_keys):
            yield (stream_info[0], wrap_and_count(pb, rc, stream_info[1]))
        self.from_repository.chk_bytes.clear_cache()
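
    # Illustrative consumer, assumed for this sketch (not bzrlib code):
    # get_stream yields (substream_kind, substream) pairs that a fetch would
    # drain and insert into the target, e.g. roughly
    #
    #   for kind, substream in source.get_stream(search):
    #       for record in substream:
    #           pass  # the target repository inserts each record here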