        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s>" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)

class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?
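        # Note: index_offset() is expected to resolve through
        # Pack.index_definitions (roughly 'revision' -> 0, 'inventory' -> 1,
        # 'text' -> 2, 'signature' -> 3, 'chk' -> 4), so index_sizes ends up
        # ordered as (rev, inv, text, sigs[, chk]).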

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this
        index, which are not contained *in* this index. They may be present
        elsewhere.
        """
        return index.external_references(1)
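    # Reference list 1 on these indices is assumed to hold the compression
    # parents (list 0 holds the graph parents), so external_references(1)
    # yields exactly the compression bases that live outside this pack.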

class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created files.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the
        # pack under creation.
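        # (Sketch of the consequence, assuming the buffering described above:
        # a reader of the partially written .pack cannot see buffered bytes
        # until flush() is called - which is what the flush_func plumbing in
        # _DirectPackAccess further down exists for.)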

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        # select inventory keys
        inv_keys = self._revision_keys # currently the same keyspace, and note that
        # querying for keys here could introduce a bug where an inventory item
        # is missed, so do not change it to query separately without cross
        # checking like the text key check below.
        inventory_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'inventory_index')[0]
        inv_nodes = self._pack_collection._index_contents(inventory_index_map, inv_keys)
        # copy inventory keys and adjust values
        # XXX: Should be a helper function to allow different inv representation
        # at this point.
        self.pb.update("Copying inventory texts", 2)
        total_items, readv_group_iter = self._least_readv_node_readv(inv_nodes)
        # Only grab the output lines if we will be processing them
        output_lines = bool(self.revision_ids)
        inv_lines = self._copy_nodes_graph(inventory_index_map,
            self.new_pack._writer, self.new_pack.inventory_index,
            readv_group_iter, total_items, output_lines=output_lines)
        if self.revision_ids:
            self._process_inventory_lines(inv_lines)
        else:
            # eat the iterator to cause it to execute.
            list(inv_lines)
            self._text_filter = None
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: inventories copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.inventory_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _copy_text_texts(self):
        text_index_map, text_nodes = self._get_text_nodes()
        if self._text_filter is not None:
            # We could return the keys copied as part of the return value from
            # _copy_nodes_graph but this doesn't work all that well with the
            # need to get line output too, so we check separately, and as we're
            # going to buffer everything anyway, we check beforehand, which
            # saves reading knit data over the wire when we know there are
            # missing records.
            text_nodes = set(text_nodes)
            present_text_keys = set(_node[1] for _node in text_nodes)
            missing_text_keys = set(self._text_filter) - present_text_keys
            if missing_text_keys:
                # TODO: raise a specific error that can handle many missing
                # keys.
                a_missing_key = missing_text_keys.pop()
                raise errors.RevisionNotPresent(a_missing_key[1],
                    a_missing_key[0])
        # copy text keys and adjust values
        self.pb.update("Copying content texts", 3)
        total_items, readv_group_iter = self._least_readv_node_readv(text_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        self._log_copied_texts()

    def _check_references(self):
        """Make sure our external references are present."""
        external_refs = self.new_pack._external_compression_parents_of_texts()
        if external_refs:
            index = self._pack_collection.text_index.combined_index
            found_items = list(index.iter_entries(external_refs))
            if len(found_items) != len(external_refs):
                found_keys = set(k for idx, k, refs, value in found_items)
                missing_items = external_refs - found_keys
                missing_file_id, missing_revision_id = missing_items.pop()
                raise errors.RevisionNotPresent(missing_revision_id,
                    missing_file_id)

    def _create_pack_from_packs(self):
        self.pb.update("Opening pack", 0, 5)
        self.new_pack = self.open_pack()
        new_pack = self.new_pack
        # buffer data - we won't be reading-back during the pack creation and
        # this makes a significant difference on sftp pushes.
        new_pack.set_write_cache_size(1024*1024)
        if 'pack' in debug.debug_flags:
            plain_pack_list = ['%s%s' % (a_pack.pack_transport.base, a_pack.name)
                for a_pack in self.packs]
            if self.revision_ids is not None:
                rev_count = len(self.revision_ids)
            else:
                rev_count = 'all'
            mutter('%s: create_pack: creating pack from source packs: '
                '%s%s %s revisions wanted %s t=0',
                time.ctime(), self._pack_collection._upload_transport.base,
                new_pack.random_name,
                plain_pack_list, rev_count)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_text_texts()
        # select signature keys
        signature_filter = self._revision_keys # same keyspace
        signature_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'signature_index')[0]
        signature_nodes = self._pack_collection._index_contents(signature_index_map,
            signature_filter)
        # copy signature keys and adjust values
        self.pb.update("Copying signature texts", 4)
        self._copy_nodes(signature_nodes, signature_index_map, new_pack._writer,
            new_pack.signature_index)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                new_pack.random_name,
                new_pack.signature_index.key_count(),
                time.time() - new_pack.start_time)
        self._check_references()
        if not self._use_pack(new_pack):
            new_pack.abort()
            return None
        self.pb.update("Finishing pack", 5)
        new_pack.finish()
        self._pack_collection.allocate(new_pack)
        return new_pack

    def _copy_nodes(self, nodes, index_map, writer, write_index):
        """Copy knit nodes between packs with no graph references."""
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb)
        finally:
            pb.finished()
    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:
        # group by pack
        nodes = sorted(nodes)
        # how to map this into knit.py - or knit.py into this?
        # we don't want the typical knit logic, we want grouping by pack
        # at this point - perhaps a helper library for the following code
        # duplication points?
        request_groups = {}
        for index, key, value in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value))
        record_index = 0
        pb.update("Copied record", record_index, len(nodes))
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append((offset, length, (key, value[0])))
            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            transport, path = index_map[index]
            reader = pack.make_readv_reader(transport, path,
                [offset[0:2] for offset in pack_readv_requests])
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                df, _ = knit._parse_record_header(key, raw_data)
                df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1

    def _copy_nodes_graph(self, index_map, writer, write_index,
        readv_group_iter, total_items, output_lines=False):
        """Copy knit nodes between packs.

        :param output_lines: Return lines present in the copied data as
            an iterator of line,version_id.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for result in self._do_copy_nodes_graph(index_map, writer,
                write_index, output_lines, pb, readv_group_iter, total_items):
                yield result
        except Exception:
            # Python 2.4 does not permit try:finally: in a generator.
            pb.finished()
            raise
        else:
            pb.finished()
def _do_copy_nodes_graph(self, index_map, writer, write_index,
821
output_lines, pb, readv_group_iter, total_items):
822
# for record verification
823
knit = KnitVersionedFiles(None, None)
824
# for line extraction when requested (inventories only)
826
factory = KnitPlainFactory()
828
pb.update("Copied record", record_index, total_items)
829
for index, readv_vector, node_vector in readv_group_iter:
831
transport, path = index_map[index]
832
reader = pack.make_readv_reader(transport, path, readv_vector)
833
for (names, read_func), (key, eol_flag, references) in \
834
izip(reader.iter_records(), node_vector):
835
raw_data = read_func(None)
837
# read the entire thing
838
content, _ = knit._parse_record(key[-1], raw_data)
839
if len(references[-1]) == 0:
840
line_iterator = factory.get_fulltext_content(content)
842
line_iterator = factory.get_linedelta_content(content)
843
for line in line_iterator:
846
# check the header only
847
df, _ = knit._parse_record_header(key, raw_data)
849
pos, size = writer.add_bytes_record(raw_data, names)
850
write_index.add_node(key, eol_flag + "%d %d" % (pos, size), references)
851
pb.update("Copied record", record_index)

    def _get_text_nodes(self):
        text_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'text_index')[0]
        return text_index_map, self._pack_collection._index_contents(text_index_map,
            self._text_filter)

    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to perform
            readvs to obtain the data for nodes. Each item yielded by the
            iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack
        nodes = sorted(nodes)
        total = len(nodes)
        request_groups = {}
        for index, key, value, references in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value, references))
        result = []
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value, references in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append(
                    ((offset, length), (key, value[0], references)))
            # linear scan up the pack to maximum range combining.
            pack_readv_requests.sort()
            # split out the readv and the node data.
            pack_readv = [readv for readv, node in pack_readv_requests]
            node_vector = [node for readv, node in pack_readv_requests]
            result.append((index, pack_readv, node_vector))
        return total, result
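    # A worked example: nodes spread across two packs collapse into two
    # entries, e.g.
    #   [(index_a, [(0, 100), (100, 60)], [node1, node2]),
    #    (index_b, [(0, 42)], [node3])]
    # i.e. one readv (and thus one request round-trip) per source pack.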

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class OptimisingPacker(Packer):
    """A packer which spends more time to create better disk layouts."""

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        This sort places revisions in topological order with the ancestors
        after the children.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        # build an ancestors dict
        ancestors = {}
        by_key = {}
        for index, key, value, references in revision_nodes:
            ancestors[key] = references[0]
            by_key[key] = (index, value, references)
        order = tsort.topo_sort(ancestors)
        total = len(order)
        # Single IO is pathological, but it will work as a starting point.
        requests = []
        for key in reversed(order):
            index, value, references = by_key[key]
            # ---- KnitGraphIndex.get_position
            bits = value[1:].split(' ')
            offset, length = int(bits[0]), int(bits[1])
            requests.append(
                (index, [(offset, length)], [(key, value[0], references)]))
        # TODO: combine requests in the same index that are in ascending order.
        return total, requests
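    # tsort.topo_sort lists ancestors before descendants, so iterating
    # reversed(order) writes each child before its parents; a reader walking
    # backwards from a branch tip should then touch the pack roughly
    # front-to-back.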


class ReconcilePacker(Packer):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    regenerated.
    """

    def _extra_init(self):
        self._data_changed = False

    def _process_inventory_lines(self, inv_lines):
        """Generate a text key reference map for reconciling with."""
        repo = self._pack_collection.repo
        refs = repo._find_text_key_references_from_xml_inventory_lines(
            inv_lines)
        self._text_refs = refs
        # during reconcile we:
        #  - convert unreferenced texts to full texts
        #  - correct texts which reference a text not copied to be full texts
        #  - copy all others as-is but with corrected parents.
        #  - so at this point we don't know enough to decide what becomes a full
        #    text.
        self._text_filter = None

    def _copy_text_texts(self):
        """Generate what texts we should have and then copy."""
        self.pb.update("Copying content texts", 3)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for
            _1, key, _2, refs in
            self.new_pack.revision_index.iter_all_entries()])
        ideal_index = repo._generate_text_key_index(self._text_refs, ancestors)
        # 2) generate a text_nodes list that contains all the deltas that can
        #    be used as-is, with corrected parents.
        ok_nodes = []
        bad_texts = []
        discarded_nodes = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        text_index_map, text_nodes = self._get_text_nodes()
        for node in text_nodes:
            # 0 - index
            # 1 - key
            # 2 - value
            # 3 - refs
            try:
                ideal_parents = tuple(ideal_index[node[1]])
            except KeyError:
                discarded_nodes.append(node)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                if ideal_parents == node[3][0]:
                    # no change needed.
                    ok_nodes.append(node)
                elif ideal_parents[0:1] == node[3][0][0:1]:
                    # the left most parent is the same, or there are no parents
                    # today. Either way, we can preserve the representation as
                    # long as we change the refs to be inserted.
                    self._data_changed = True
                    ok_nodes.append((node[0], node[1], node[2],
                        (ideal_parents, node[3][1])))
                    self._data_changed = True
                else:
                    # Reinsert this text completely
                    bad_texts.append((node[1], ideal_parents))
                    self._data_changed = True
        # we're finished with some data.
        del ideal_index
        del text_nodes
        # 3) bulk copy the ok data
        total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        # 4) adhoc copy all the other texts.
        # We have to topologically insert all texts otherwise we can fail to
        # reconcile when parts of a single delta chain are preserved intact,
        # and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
        # reinserted, and if d3 has incorrect parents it will also be
        # reinserted. If we insert d3 first, d2 is present (as it was bulk
        # copied), so we will try to delta, but d2 is not currently able to be
        # extracted because its basis d1 is not present. Topologically sorting
        # addresses this. The following generates a sort for all the texts that
        # are being inserted without having to reference the entire text key
        # space (we only topo sort the revisions, which is smaller).
        topo_order = tsort.topo_sort(ancestors)
        rev_order = dict(zip(topo_order, range(len(topo_order))))
        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
        transaction = repo.get_transaction()
        file_id_index = GraphIndexPrefixAdapter(
            self.new_pack.text_index,
            ('blank', ), 1,
            add_nodes_callback=self.new_pack.text_index.add_nodes)
        data_access = _DirectPackAccess(
            {self.new_pack.text_index:self.new_pack.access_tuple()})
        data_access.set_writer(self.new_pack._writer, self.new_pack.text_index,
            self.new_pack.access_tuple())
        output_texts = KnitVersionedFiles(
            _KnitGraphIndex(self.new_pack.text_index,
                add_callback=self.new_pack.text_index.add_nodes,
                deltas=True, parents=True, is_locked=repo.is_locked),
            data_access=data_access, max_delta_chain=200)
        for key, parent_keys in bad_texts:
            # We refer to the new pack to delta data being output.
            # A possible improvement would be to catch errors on short reads
            # and only flush then.
            self.new_pack.flush()
            parents = []
            for parent_key in parent_keys:
                if parent_key[0] != key[0]:
                    # Graph parents must match the fileid
                    raise errors.BzrError('Mismatched key parent %r:%r' %
                        (key, parent_keys))
                parents.append(parent_key[1])
            text_lines = split_lines(repo.texts.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext'))
            output_texts.add_lines(key, parent_keys, text_lines,
                random_id=True, check_content=False)
        # 5) check that nothing inserted has a reference outside the keyspace.
        missing_text_keys = self.new_pack._external_compression_parents_of_texts()
        if missing_text_keys:
            raise errors.BzrError('Reference to missing compression parents %r'
                % (missing_text_keys,))
        self._log_copied_texts()

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        # XXX: we might be better checking this at the copy time.
        original_inventory_keys = set()
        inv_index = self._pack_collection.inventory_index.combined_index
        for entry in inv_index.iter_all_entries():
            original_inventory_keys.add(entry[1])
        new_inventory_keys = set()
        for entry in new_pack.inventory_index.iter_all_entries():
            new_inventory_keys.add(entry[1])
        if new_inventory_keys != original_inventory_keys:
            self._data_changed = True
        return new_pack.data_inserted() and self._data_changed


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = None
    resumed_pack_factory = None
    normal_packer_class = None
    optimising_packer_class = None

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are
            written while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        # XXX: This should call self.reset()
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        # resumed packs
        self._resumed_packs = []

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)
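    # AggregateIndex is assumed to take two callbacks: the reload function is
    # invoked when a query hits a pack that has vanished (so pack-names can be
    # re-read and the query retried), and the flush function is invoked before
    # reads so pending writes to the current NewPack become visible.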

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
            packer_class=self.normal_packer_class,
            reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result

    def _execute_pack_operations(self, pack_operations, packer_class,
            reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param packer_class: The class of packer to use.
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = packer_class(self, packs, '.autopack',
                reload_func=reload_func)
            try:
                result = packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
            obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            # This is arguably wrong because we might not be optimal, but for
            # now lets leave it in. (e.g. reconcile -> one pack. But not
            # doing anything where there's only one pack seems fine.)
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        while True:
            try:
                self._try_pack_operations(hint)
            except RetryPackOperations:
                continue
            break
        if clean_obsolete_packs:
            self._clear_obsolete_packs()

    def _try_pack_operations(self, hint):
        """Calculate the pack operations based on the hint (if any), and
        execute them.
        """
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations,
            packer_class=self.optimising_packer_class,
            reload_func=self._restart_pack_operations)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', is_chk=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                    is_chk=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)
        self._packs_by_name = {}
        self._packs_at_load = None

    def _make_index_map(self, index_suffix):
        """Return information on existing indices.

        :param index_suffix: Index suffix added to pack name.

        :returns: (pack_map, indices) where indices is a list of GraphIndex
            objects, and pack_map is a mapping from those objects to the
            pack tuple they describe.
        """
        # TODO: stop using this; it creates new indices unnecessarily.
        self.ensure_loaded()
        suffix_map = {'.rix': 'revision_index',
            '.six': 'signature_index',
            '.iix': 'inventory_index',
            '.tix': 'text_index',
            }
        return self._packs_list_to_pack_map_and_index_list(self.all_packs(),
            suffix_map[index_suffix])

    def _packs_list_to_pack_map_and_index_list(self, packs, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param packs: The packs list to process.
        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the same order
            as the packs list.
        """
        pack_map = {}
        indices = []
        for pack in packs:
            index = getattr(pack, index_attribute)
            indices.append(index)
            pack_map[index] = (pack.pack_transport, pack.file_name())
        return pack_map, indices

    def _index_contents(self, pack_map, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param pack_map: A map from indices to pack details.
        :param key_filter: An optional filter to limit the entries returned.
        """
        indices = [index for index in pack_map.iterkeys()]
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes)
            disk_nodes      The final set of nodes that should be referenced
            deleted_nodes   Nodes which have been removed from when we started
            new_nodes       Nodes that are newly introduced
            orig_disk_nodes The set of nodes as read from the pack-names file
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))
        orig_disk_nodes = set(disk_nodes)

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when we
        # locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
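    # Example: pack-names held {A, B} when we locked; we repacked A and B into
    # C (so our current set is {C}) while another process added D. Reading
    # disk gives {A, B, D}; deleted_nodes is {A, B}, new_nodes is {C}, and the
    # merged disk_nodes is {C, D}. orig_disk_nodes keeps the raw on-disk set.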

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            sizes = self._parse_index_sizes(value)
            name = key[0]
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never happen today
                    # because the three-way merge code above does not handle it
                    # - you may end up adding the same key twice to the new
                    # disk index because the set values are the same, unless
                    # the only index shows up as deleted by the set difference
                    # - which it may. Until there is a specific test for this,
                    # assume it's broken. RBC 20071017.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            # move the baseline forward
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = None
                if obsolete_packs:
                    to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _restart_pack_operations(self):
        """Reload the pack names list, and restart the pack operations code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise RetryPackOperations(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                    % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone. But if they're not there we shouldn't fail in
            # this case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found. If the list is
            empty no problems were found.
        """
        # The base implementation does no checks. GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []
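    # The names returned above are used as the 'pack hint': callers that run
    # several write groups can accumulate them and pass the list back to
    # pack(hint=...) so that only the packs created here get repacked.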

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    KnitVersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats

      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic value.
    ===================================================
    """

    # These attributes are inherited from the Repository base class. Setting
    # them to None ensures that if the constructor is changed to not initialize
    # them, or a subclass fails to call the constructor, that an error will
    # occur rather than the system working but generating incorrect data.
    _commit_builder_class = None
    _serializer = None

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        KnitRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        index_transport = self._transport.clone('indices')
        self._pack_collection = RepositoryPackCollection(self, self._transport,
            index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class,
            use_chk_index=self._format.supports_chks,
            )
        self.inventories = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.inventory_index.data_access,
            max_delta_chain=200)
        self.revisions = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                deltas=False, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.revision_index.data_access,
            max_delta_chain=0)
        self.signatures = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                deltas=False, parents=False, is_locked=self.is_locked),
            data_access=self._pack_collection.signature_index.data_access,
            max_delta_chain=0)
        self.texts = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _all_revision_ids(self):
        """See Repository.all_revision_ids()."""
        return [key[0] for key in self.revisions.keys()]

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: a list of tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the location
            # in each index of the revision text so we can perform linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,
                            rev_parents))
        finally:
            pb.finished()
        return result

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, keys):
        """See graph._StackedParentsProvider.get_parent_map

        This implementation accesses the combined revision index to provide
        answers.
        """
        self._pack_collection.ensure_loaded()
        index = self._pack_collection.revision_index.combined_index
        keys = set(keys)
        if None in keys:
            raise ValueError('get_parent_map(None) is not valid')
        if _mod_revision.NULL_REVISION in keys:
            keys.discard(_mod_revision.NULL_REVISION)
            found_parents = {_mod_revision.NULL_REVISION:()}
        else:
            found_parents = {}
        search_keys = set((revision_id,) for revision_id in keys)
        for index, key, value, refs in index.iter_entries(search_keys):
            parents = refs[0]
            if not parents:
                parents = (_mod_revision.NULL_REVISION,)
            else:
                parents = tuple(parent[0] for parent in parents)
            found_parents[key[0]] = parents
        return found_parents

    def has_revisions(self, revision_ids):
        """See Repository.has_revisions()."""
        revision_ids = set(revision_ids)
        result = revision_ids.intersection(
            set([None, _mod_revision.NULL_REVISION]))
        revision_ids.difference_update(result)
        index = self._pack_collection.revision_index.combined_index
        keys = [(revision_id,) for revision_id in revision_ids]
        result.update(node[1][0] for node in index.iter_entries(keys))
        return result

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()
    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)
def get_transaction(self):
1707
1834
if self._write_lock_count:
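
    # Illustrative sketch (not part of the original module) of the
    # suspend/resume cycle the write-group methods above support; `repo` is
    # a hypothetical write-locked KnitPackRepository:
    #
    #   repo.start_write_group()
    #   ...insert some records...
    #   tokens = repo.suspend_write_group()
    #   ...later, perhaps after reopening the repository...
    #   repo.resume_write_group(tokens)
    #   repo.commit_write_group()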


class RetryPackOperations(errors.RetryWithNewPacks):
    """Raised when we are packing and we find a missing file.

    Meant as a signaling exception, to tell the RepositoryPackCollection.pack
    code it should try again.
    """

    internal_error = True

    _fmt = ("Pack files have changed, reload and try pack again."
            " context: %(context)s %(orig_error)s")


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repofmt.pack_repo.AggregateIndex for more details.
        :param flush_func: A function to call to flush any buffered writes
            before reading from a pack (see the flush method).
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func
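
    # Illustrative construction sketch (not part of the original module);
    # the index, transport, and collection names here are hypothetical:
    #
    #   access = _DirectPackAccess(
    #       {a_pack.revision_index: a_pack.access_tuple()},
    #       reload_func=pack_collection.reload_pack_names)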

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size
            of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result
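
    # Illustrative usage sketch (not part of the original module), assuming
    # set_writer has already been called; key names are hypothetical:
    #
    #   memos = access.add_raw_records(
    #       [(('rev-1',), 5), (('rev-2',), 7)], '12345abcdefg')
    #   # => [(write_index, pos1, len1), (write_index, pos2, len2)]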

    def flush(self):
        """Flush pending writes on this access object.

        This will flush any buffered writes to a NewPack.
        """
        if self._flush_func is not None:
            self._flush_func()
def get_raw_records(self, memos_for_retrieval):
1956
"""Get the raw bytes for a records.
1958
:param memos_for_retrieval: An iterable containing the (index, pos,
1959
length) memo for retrieving the bytes. The Pack access method
1960
looks up the pack to use for a given record in its index_to_pack
1962
:return: An iterator over the bytes of the records.
1964
# first pass, group into same-index requests
1966
current_index = None
1967
for (index, offset, length) in memos_for_retrieval:
1968
if current_index == index:
1969
current_list.append((offset, length))
1971
if current_index is not None:
1972
request_lists.append((current_index, current_list))
1973
current_index = index
1974
current_list = [(offset, length)]
1975
# handle the last entry
1976
if current_index is not None:
1977
request_lists.append((current_index, current_list))
1978
for index, offsets in request_lists:
1980
transport, path = self._indices[index]
1982
# A KeyError here indicates that someone has triggered an index
1983
# reload, and this index has gone missing, we need to start
1985
if self._reload_func is None:
1986
# If we don't have a _reload_func there is nothing that can
1989
raise errors.RetryWithNewPacks(index,
1990
reload_occurred=True,
1991
exc_info=sys.exc_info())
1993
reader = pack.make_readv_reader(transport, path, offsets)
1994
for names, read_func in reader.iter_records():
1995
yield read_func(None)
1996
except errors.NoSuchFile:
1997
# A NoSuchFile error indicates that a pack file has gone
1998
# missing on disk, we need to trigger a reload, and start over.
1999
if self._reload_func is None:
2001
raise errors.RetryWithNewPacks(transport.abspath(path),
2002
reload_occurred=False,
2003
exc_info=sys.exc_info())
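
    # Illustrative note (not part of the original source): the grouping pass
    # above only coalesces adjacent memos for the same index, so callers get
    # one readv per pack when they order memos pack-by-pack, e.g.
    #
    #   for raw_bytes in access.get_raw_records(
    #           [(idx_a, 0, 10), (idx_a, 10, 20), (idx_b, 0, 5)]):
    #       ...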

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index
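
    # Illustrative usage sketch (not part of the original module); the
    # writer would typically be a ContainerWriter spooling to a NewPack,
    # and the names here are hypothetical:
    #
    #   access.set_writer(writer, new_pack_index,
    #       (new_pack.upload_transport, new_pack.file_name()))
    #   ...add records...
    #   access.set_writer(None, None, (None, None))  # detach when finished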

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is
                # a hard error.
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
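
    # Illustrative retry-loop sketch (not part of the original module)
    # showing how a caller pairs get_raw_records with reload_or_raise:
    #
    #   while True:
    #       try:
    #           return list(access.get_raw_records(memos))
    #       except errors.RetryWithNewPacks, e:
    #           # Returns normally only when a retry is worthwhile;
    #           # otherwise re-raises the original error.
    #           access.reload_or_raise(e)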


class RepositoryFormatKnitPack1(RepositoryFormatPack):
    """A no-subtrees parameterized Pack repository.

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('pack-0.92')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits without subtree support"

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatKnitPack3(RepositoryFormatPack):
    """A subtrees parameterized Pack repository.

    This repository format uses the xml7 serializer to get:
     - support for recording full info about the tree root
     - support for recording tree-references

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    _serializer = xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'pack-0.92-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar pack repository format 1 with subtree support "
                "(needs bzr 0.92)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with subtree support\n"


class RepositoryFormatKnitPack4(RepositoryFormatPack):
    """A rich-root, no subtrees parameterized Pack repository.

    This repository format uses the xml6 serializer to get:
     - support for recording full info about the tree root

    This format was introduced in 1.0.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False
    _serializer = xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'rich-root-pack')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar pack repository format 1 with rich root"
                " (needs bzr 1.0)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with rich root support\n"


class RepositoryFormatPackDevelopment0(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.0.

    No changes to the disk behaviour from pack-0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development0')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 0 (needs bzr.dev from before 1.3)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
                "pack-0.92\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment0Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.0.

    No changes to the disk behaviour from pack-0.92-subtree.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    _serializer = xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development0-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 0 with subtree support "
                "(needs bzr.dev from before 1.3)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
                "pack-0.92-subtree\n")