        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s>" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)
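
    # Illustrative only (pack name and transport are hypothetical): repr()
    # of a pack yields something like
    #   <bzrlib.repofmt.pack_repo.Pack object at 0x..., <transport>, a1b2c3>
    # identifying the concrete class, the transport holding the .pack file,
    # and the pack's name.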


class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.move(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.move(self.file_name(), new_name)
        self._state = 'finished'

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this
        index, which are not contained *in* this index. They may be present
        elsewhere.
        """
        return index.external_references(1)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # pack-names file.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        # select inventory keys
        inv_keys = self._revision_keys # currently the same keyspace, and note that
        # querying for keys here could introduce a bug where an inventory item
        # is missed, so do not change it to query separately without cross
        # checking like the text key check below.
        inventory_index_map, inventory_indices = self._pack_map_and_index_list(
            'inventory_index')
        inv_nodes = self._index_contents(inventory_indices, inv_keys)
        # copy inventory keys and adjust values
        # XXX: Should be a helper function to allow different inv representation
        # at this point.
        self.pb.update("Copying inventory texts", 2)
        total_items, readv_group_iter = self._least_readv_node_readv(inv_nodes)
        # Only grab the output lines if we will be processing them
        output_lines = bool(self.revision_ids)
        inv_lines = self._copy_nodes_graph(inventory_index_map,
            self.new_pack._writer, self.new_pack.inventory_index,
            readv_group_iter, total_items, output_lines=output_lines)
        if self.revision_ids:
            self._process_inventory_lines(inv_lines)
        else:
            # eat the iterator to cause it to execute.
            list(inv_lines)
            self._text_filter = None
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: inventories copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.inventory_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _copy_text_texts(self):
        text_index_map, text_nodes = self._get_text_nodes()
        if self._text_filter is not None:
            # We could return the keys copied as part of the return value from
            # _copy_nodes_graph but this doesn't work all that well with the
            # need to get line output too, so we check separately, and as we're
            # going to buffer everything anyway, we check beforehand, which
            # saves reading knit data over the wire when we know there are
            # missing records.
            text_nodes = set(text_nodes)
            present_text_keys = set(_node[1] for _node in text_nodes)
            missing_text_keys = set(self._text_filter) - present_text_keys
            if missing_text_keys:
                # TODO: raise a specific error that can handle many missing
                # keys.
                a_missing_key = missing_text_keys.pop()
                raise errors.RevisionNotPresent(a_missing_key[1],
                    a_missing_key[0])
        # copy text keys and adjust values
        self.pb.update("Copying content texts", 3)
        total_items, readv_group_iter = self._least_readv_node_readv(text_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        self._log_copied_texts()

    def _create_pack_from_packs(self):
        self.pb.update("Opening pack", 0, 5)
        self.new_pack = self.open_pack()
        new_pack = self.new_pack
        # buffer data - we won't be reading-back during the pack creation and
        # this makes a significant difference on sftp pushes.
        new_pack.set_write_cache_size(1024*1024)
        if 'pack' in debug.debug_flags:
            plain_pack_list = ['%s%s' % (a_pack.pack_transport.base, a_pack.name)
                for a_pack in self.packs]
            if self.revision_ids is not None:
                rev_count = len(self.revision_ids)
            else:
                rev_count = 'all'
            mutter('%s: create_pack: creating pack from source packs: '
                '%s%s %s revisions wanted %s t=0',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                plain_pack_list, rev_count)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_text_texts()
        # select signature keys
        signature_filter = self._revision_keys # same keyspace
        signature_index_map, signature_indices = self._pack_map_and_index_list(
            'signature_index')
        signature_nodes = self._index_contents(signature_indices,
            signature_filter)
        # copy signature keys and adjust values
        self.pb.update("Copying signature texts", 4)
        self._copy_nodes(signature_nodes, signature_index_map, new_pack._writer,
            new_pack.signature_index)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                new_pack.signature_index.key_count(),
                time.time() - new_pack.start_time)
        new_pack._check_references()
        if not self._use_pack(new_pack):
            new_pack.abort()
            return None
        self.pb.update("Finishing pack", 5)
        new_pack.finish()
        self._pack_collection.allocate(new_pack)
        return new_pack

    def _copy_nodes(self, nodes, index_map, writer, write_index):
        """Copy knit nodes between packs with no graph references."""
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb)
        finally:
            pb.finished()
def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
844
# for record verification
845
knit = KnitVersionedFiles(None, None)
846
# plan a readv on each source pack:
848
nodes = sorted(nodes)
849
# how to map this into knit.py - or knit.py into this?
850
# we don't want the typical knit logic, we want grouping by pack
851
# at this point - perhaps a helper library for the following code
852
# duplication points?
854
for index, key, value in nodes:
855
if index not in request_groups:
856
request_groups[index] = []
857
request_groups[index].append((key, value))
859
pb.update("Copied record", record_index, len(nodes))
860
for index, items in request_groups.iteritems():
861
pack_readv_requests = []
862
for key, value in items:
863
# ---- KnitGraphIndex.get_position
864
bits = value[1:].split(' ')
865
offset, length = int(bits[0]), int(bits[1])
866
pack_readv_requests.append((offset, length, (key, value[0])))
867
# linear scan up the pack
868
pack_readv_requests.sort()
870
pack_obj = index_map[index]
871
transport, path = pack_obj.access_tuple()
873
reader = pack.make_readv_reader(transport, path,
874
[offset[0:2] for offset in pack_readv_requests])
875
except errors.NoSuchFile:
876
if self._reload_func is not None:
879
for (names, read_func), (_1, _2, (key, eol_flag)) in \
880
izip(reader.iter_records(), pack_readv_requests):
881
raw_data = read_func(None)
882
# check the header only
883
df, _ = knit._parse_record_header(key, raw_data)
885
pos, size = writer.add_bytes_record(raw_data, names)
886
write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
887
pb.update("Copied record", record_index)

    def _copy_nodes_graph(self, index_map, writer, write_index,
        readv_group_iter, total_items, output_lines=False):
        """Copy knit nodes between packs.

        :param output_lines: Return lines present in the copied data as
            an iterator of line,version_id.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for result in self._do_copy_nodes_graph(index_map, writer,
                write_index, output_lines, pb, readv_group_iter, total_items):
                yield result
        except Exception:
            # Python 2.4 does not permit try:finally: in a generator.
            pb.finished()
            raise
        else:
            pb.finished()

    def _do_copy_nodes_graph(self, index_map, writer, write_index,
        output_lines, pb, readv_group_iter, total_items):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # for line extraction when requested (inventories only)
        if output_lines:
            factory = KnitPlainFactory()
        record_index = 0
        pb.update("Copied record", record_index, total_items)
        for index, readv_vector, node_vector in readv_group_iter:
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path, readv_vector)
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (key, eol_flag, references) in \
                izip(reader.iter_records(), node_vector):
                raw_data = read_func(None)
                if output_lines:
                    # read the entire thing
                    content, _ = knit._parse_record(key[-1], raw_data)
                    if len(references[-1]) == 0:
                        line_iterator = factory.get_fulltext_content(content)
                    else:
                        line_iterator = factory.get_linedelta_content(content)
                    for line in line_iterator:
                        yield line, key
                else:
                    # check the header only
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size), references)
                pb.update("Copied record", record_index)
                record_index += 1
949
def _get_text_nodes(self):
950
text_index_map, text_indices = self._pack_map_and_index_list(
952
return text_index_map, self._index_contents(text_indices,

    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to perform
            readvs to obtain the data for nodes. Each item yielded by the
            iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack
        nodes = sorted(nodes)
        total = len(nodes)
        request_groups = {}
        for index, key, value, references in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value, references))
        result = []
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value, references in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append(
                    ((offset, length), (key, value[0], references)))
            # linear scan up the pack to maximum range combining.
            pack_readv_requests.sort()
            # split out the readv and the node data.
            pack_readv = [readv for readv, node in pack_readv_requests]
            node_vector = [node for readv, node in pack_readv_requests]
            result.append((index, pack_readv, node_vector))
        return total, result
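
    # Illustrative sketch of the return shape (keys and values hypothetical):
    # two nodes in one pack with values ' 0 10' and 'N40 12' produce
    #     (2, [(index, [(0, 10), (40, 12)],
    #           [(key1, ' ', refs1), (key2, 'N', refs2)])])
    # i.e. a single readv per source pack, with offsets pre-sorted so the
    # transport can scan the pack linearly.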

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class OptimisingPacker(Packer):
    """A packer which spends more time to create better disk layouts."""

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        This sort places revisions in topological order with the ancestors
        after the children.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        # build an ancestors dict
        ancestors = {}
        by_key = {}
        for index, key, value, references in revision_nodes:
            ancestors[key] = references[0]
            by_key[key] = (index, value, references)
        order = tsort.topo_sort(ancestors)
        total = len(order)
        # Single IO is pathological, but it will work as a starting point.
        requests = []
        for key in reversed(order):
            index, value, references = by_key[key]
            # ---- KnitGraphIndex.get_position
            bits = value[1:].split(' ')
            offset, length = int(bits[0]), int(bits[1])
            requests.append(
                (index, [(offset, length)], [(key, value[0], references)]))
        # TODO: combine requests in the same index that are in ascending order.
        return total, requests

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = super(OptimisingPacker, self).open_pack()
        # Turn on the optimization flags for all the index builders.
        new_pack.revision_index.set_optimize(for_size=True)
        new_pack.inventory_index.set_optimize(for_size=True)
        new_pack.text_index.set_optimize(for_size=True)
        new_pack.signature_index.set_optimize(for_size=True)
        return new_pack
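
    # A worked note on _revision_node_readv above (graph hypothetical): for
    # ancestors {'A': (), 'B': ('A',)}, tsort.topo_sort yields ['A', 'B'],
    # so reversed(order) emits 'B' first and children are written before
    # their ancestors - the newest-first order readers typically want.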


class ReconcilePacker(Packer):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    regenerated.
    """

    def _extra_init(self):
        self._data_changed = False

    def _process_inventory_lines(self, inv_lines):
        """Generate a text key reference map for reconciling with."""
        repo = self._pack_collection.repo
        refs = repo._find_text_key_references_from_xml_inventory_lines(
            inv_lines)
        self._text_refs = refs
        # during reconcile we:
        #  - convert unreferenced texts to full texts
        #  - correct texts which reference a text not copied to be full texts
        #  - copy all others as-is but with corrected parents.
        #  - so at this point we don't know enough to decide what becomes a full
        #    text.
        self._text_filter = None

    def _copy_text_texts(self):
        """generate what texts we should have and then copy."""
        self.pb.update("Copying content texts", 3)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for
            _1, key, _2, refs in
            self.new_pack.revision_index.iter_all_entries()])
        ideal_index = repo._generate_text_key_index(self._text_refs, ancestors)
        # 2) generate a text_nodes list that contains all the deltas that can
        #    be used as-is, with corrected parents.
        ok_nodes = []
        bad_texts = []
        discarded_nodes = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        text_index_map, text_nodes = self._get_text_nodes()
        for node in text_nodes:
            # node is (index, key, value, refs)
            try:
                ideal_parents = tuple(ideal_index[node[1]])
            except KeyError:
                discarded_nodes.append(node)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                if ideal_parents == node[3][0]:
                    # no change needed.
                    ok_nodes.append(node)
                elif ideal_parents[0:1] == node[3][0][0:1]:
                    # the left most parent is the same, or there are no parents
                    # today. Either way, we can preserve the representation as
                    # long as we change the refs to be inserted.
                    self._data_changed = True
                    ok_nodes.append((node[0], node[1], node[2],
                        (ideal_parents, node[3][1])))
                else:
                    # Reinsert this text completely
                    bad_texts.append((node[1], ideal_parents))
                    self._data_changed = True
        # we're finished with some data.
        del ideal_index
        del text_nodes
        # 3) bulk copy the ok data
        total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        # 4) adhoc copy all the other texts.
        # We have to topologically insert all texts otherwise we can fail to
        # reconcile when parts of a single delta chain are preserved intact,
        # and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
        # reinserted, and if d3 has incorrect parents it will also be
        # reinserted. If we insert d3 first, d2 is present (as it was bulk
        # copied), so we will try to delta, but d2 is not currently able to be
        # extracted because its basis d1 is not present. Topologically sorting
        # addresses this. The following generates a sort for all the texts that
        # are being inserted without having to reference the entire text key
        # space (we only topo sort the revisions, which is smaller).
        topo_order = tsort.topo_sort(ancestors)
        rev_order = dict(zip(topo_order, range(len(topo_order))))
        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
        transaction = repo.get_transaction()
        file_id_index = GraphIndexPrefixAdapter(
            self.new_pack.text_index,
            ('blank', ), 1,
            add_nodes_callback=self.new_pack.text_index.add_nodes)
        data_access = _DirectPackAccess(
            {self.new_pack.text_index:self.new_pack.access_tuple()})
        data_access.set_writer(self.new_pack._writer, self.new_pack.text_index,
            self.new_pack.access_tuple())
        output_texts = KnitVersionedFiles(
            _KnitGraphIndex(self.new_pack.text_index,
                add_callback=self.new_pack.text_index.add_nodes,
                deltas=True, parents=True, is_locked=repo.is_locked),
            data_access=data_access, max_delta_chain=200)
        for key, parent_keys in bad_texts:
            # We refer to the new pack to delta the data being output, so
            # flush it first. A possible improvement would be to catch errors
            # on short reads and only flush then.
            self.new_pack.flush()
            parents = []
            for parent_key in parent_keys:
                if parent_key[0] != key[0]:
                    # Graph parents must match the fileid
                    raise errors.BzrError('Mismatched key parent %r:%r' %
                        (key, parent_keys))
                parents.append(parent_key[1])
            text_lines = osutils.split_lines(repo.texts.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext'))
            output_texts.add_lines(key, parent_keys, text_lines,
                random_id=True, check_content=False)
        # 5) check that nothing inserted has a reference outside the keyspace.
        missing_text_keys = self.new_pack.text_index._external_references()
        if missing_text_keys:
            raise errors.BzrCheckError('Reference to missing compression parents %r'
                % (missing_text_keys,))
        self._log_copied_texts()
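
    # A note on the bad_texts sort in _copy_text_texts above: entries are
    # (text_key, ideal_parents) pairs where text_key is (file_id, rev_id),
    # so key[0][1] is the revision id; ordering by the revision topo-sort
    # guarantees each delta's basis text is inserted before any delta that
    # needs it.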

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        # XXX: we might be better checking this at the copy time.
        original_inventory_keys = set()
        inv_index = self._pack_collection.inventory_index.combined_index
        for entry in inv_index.iter_all_entries():
            original_inventory_keys.add(entry[1])
        new_inventory_keys = set()
        for entry in new_pack.inventory_index.iter_all_entries():
            new_inventory_keys.add(entry[1])
        if new_inventory_keys != original_inventory_keys:
            self._data_changed = True
        return new_pack.data_inserted() and self._data_changed


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    resumed_pack_factory = None
    normal_packer_class = None
    optimising_packer_class = None

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are
            written until they are finished / packed.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        """

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone.  But if they're not there we shouldn't fail in
            # this case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment above.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found.  If the list is
            empty no problems were found.
        """
        # The base implementation does no checks.  GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)
class PackRepository(MetaDirVersionedFileRepository):
1825
1641
"""Repository with knit objects stored inside pack containers.
1827
1643
The layering for a KnitPackRepository is:
1829
1645
Graph | HPSS | Repository public layer |
1830
1646
===================================================
1831
1647
Tuple based apis below, string based, and key based apis above
1832
1648
---------------------------------------------------
1834
1650
Provides .texts, .revisions etc
1835
1651
This adapts the N-tuple keys to physical knit records which only have a
1836
1652
single string identifier (for historical reasons), which in older formats
1843
1659
pack file. The GraphIndex layer works in N-tuples and is unaware of any
1844
1660
semantic value.
1845
1661
===================================================
1665
# These attributes are inherited from the Repository base class. Setting
1666
# them to None ensures that if the constructor is changed to not initialize
1667
# them, or a subclass fails to call the constructor, that an error will
1668
# occur rather than the system working but generating incorrect data.
1669
_commit_builder_class = None

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        MetaDirRepository.__init__(self, _format, a_bzrdir, control_files)
        self._commit_builder_class = _commit_builder_class
        self._serializer = _serializer
        index_transport = self._transport.clone('indices')
        self._pack_collection = RepositoryPackCollection(self, self._transport,
            index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class)
        self.inventories = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.inventory_index.data_access,
            max_delta_chain=200)
        self.revisions = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                deltas=False, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.revision_index.data_access,
            max_delta_chain=0)
        self.signatures = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                deltas=False, parents=False, is_locked=self.is_locked),
            data_access=self._pack_collection.signature_index.data_access,
            max_delta_chain=0)
        self.texts = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False
        self._fetch_order = 'unordered'
        if self._format.supports_external_lookups:
            self._unstacked_provider = graph.CachingParentsProvider(
                self._make_parents_provider_unstacked())
        else:
            self._unstacked_provider = graph.CachingParentsProvider(self)
        self._unstacked_provider.disable_cache()
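
        # Design note (rationale assumed from the values above): revisions
        # and signatures are stored as fulltexts (max_delta_chain=0), while
        # inventories and file texts compress well against each other and so
        # allow delta chains of up to 200 records before a fresh fulltext.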

    def _warn_if_deprecated(self):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            from bzrlib import repository
            if repository._deprecation_warning_done:
                return
            repository._deprecation_warning_done = True
            warning("Format %s for %s is deprecated - please use"
                    " 'bzr upgrade --1.6.1-rich-root'"
                    % (self._format, self.bzrdir.transport.base))

    def _all_revision_ids(self):
        """See Repository.all_revision_ids()."""
        return [key[0] for key in self.revisions.keys()]

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: an iterator yielding tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the location
            # in each index of the revision text so we can perform linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,
                            rev_parents))
        finally:
            pb.finished()
        return result
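
    # Batching note for _find_inconsistent_revision_parents above (Python 2
    # integer division): 2500 index entries give 2500 / 1000 + 1 == 3
    # batches of 1000, 1000 and 500 revisions, while the "if not to_query"
    # guard skips the empty batch produced by an exact multiple of 1000.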

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def _make_parents_provider(self):
        if not self._format.supports_external_lookups:
            return self._unstacked_provider
        return graph.StackedParentsProvider(_LazyListJoin(
            [self._unstacked_provider], self._fallback_repositories))

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()
        self._unstacked_provider.disable_cache()
        self._unstacked_provider.enable_cache()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        # The commit may have added keys that were previously cached as
        # missing, so reset the cache.
        self._unstacked_provider.disable_cache()
        self._unstacked_provider.enable_cache()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()


class RepositoryFormatKnitPack1(RepositoryFormatPack):
    """A no-subtrees parameterized Pack repository.

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    @property
    def _serializer(self):
        return xml5.serializer_v5
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('pack-0.92')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits without subtree support"

    def check_conversion_target(self, target_format):
        pass
class RepositoryFormatKnitPack3(RepositoryFormatPack):
2162
"""A subtrees parameterized Pack repository.
2164
This repository format uses the xml7 serializer to get:
2165
- support for recording full info about the tree root
2166
- support for recording tree-references
2168
This format was introduced in 0.92.
2171
repository_class = KnitPackRepository
2172
_commit_builder_class = PackRootCommitBuilder
2173
rich_root_data = True
2174
supports_tree_reference = True
2176
def _serializer(self):
2177
return xml7.serializer_v7
2178
# What index classes to use
2179
index_builder_class = InMemoryGraphIndex
2180
index_class = GraphIndex
2182
def _get_matching_bzrdir(self):
2183
return bzrdir.format_registry.make_bzrdir(
2184
'pack-0.92-subtree')
2186
def _ignore_setting_bzrdir(self, format):
2189
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2191
def check_conversion_target(self, target_format):
2192
if not target_format.rich_root_data:
2193
raise errors.BadConversionTarget(
2194
'Does not support rich root data.', target_format)
2195
if not getattr(target_format, 'supports_tree_reference', False):
2196
raise errors.BadConversionTarget(
2197
'Does not support nested trees', target_format)
2199
def get_format_string(self):
2200
"""See RepositoryFormat.get_format_string()."""
2201
return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
2203
def get_format_description(self):
2204
"""See RepositoryFormat.get_format_description()."""
2205
return "Packs containing knits with subtree support\n"


class RepositoryFormatKnitPack4(RepositoryFormatPack):
    """A rich-root, no subtrees parameterized Pack repository.

    This repository format uses the xml6 serializer to get:
     - support for recording full info about the tree root

    This format was introduced in 1.0.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False
    @property
    def _serializer(self):
        return xml6.serializer_v6
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'rich-root-pack')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar pack repository format 1 with rich root"
                " (needs bzr 1.0)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with rich root support\n"


class RepositoryFormatKnitPack5(RepositoryFormatPack):
    """Repository that supports external references to allow stacking.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('1.6')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 5 (adds stacking support, requires bzr 1.6)"

    def check_conversion_target(self, target_format):
        pass
class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
2293
"""A repository with rich roots and stacking.
2295
New in release 1.6.1.
2297
Supports stacking on other repositories, allowing data to be accessed
2298
without being stored locally.
2301
repository_class = KnitPackRepository
2302
_commit_builder_class = PackRootCommitBuilder
2303
rich_root_data = True
2304
supports_tree_reference = False # no subtrees
2305
supports_external_lookups = True
2306
# What index classes to use
2307
index_builder_class = InMemoryGraphIndex
2308
index_class = GraphIndex
2311
def _serializer(self):
2312
return xml6.serializer_v6
2314
def _get_matching_bzrdir(self):
2315
return bzrdir.format_registry.make_bzrdir(
2318
def _ignore_setting_bzrdir(self, format):
2321
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2323
def check_conversion_target(self, target_format):
2324
if not target_format.rich_root_data:
2325
raise errors.BadConversionTarget(
2326
'Does not support rich root data.', target_format)
2328
def get_format_string(self):
2329
"""See RepositoryFormat.get_format_string()."""
2330
return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
2332
def get_format_description(self):
2333
return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"
2336
class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
2337
"""A repository with rich roots and external references.
2341
Supports external lookups, which results in non-truncated ghosts after
2342
reconcile compared to pack-0.92 formats.
2344
This format was deprecated because the serializer it uses accidentally
2345
supported subtrees, when the format was not intended to. This meant that
2346
someone could accidentally fetch from an incorrect repository.
2349
repository_class = KnitPackRepository
2350
_commit_builder_class = PackRootCommitBuilder
2351
rich_root_data = True
2352
supports_tree_reference = False # no subtrees
2354
supports_external_lookups = True
2355
# What index classes to use
2356
index_builder_class = InMemoryGraphIndex
2357
index_class = GraphIndex
2360
def _serializer(self):
2361
return xml7.serializer_v7
2363
def _get_matching_bzrdir(self):
2364
matching = bzrdir.format_registry.make_bzrdir(
2366
matching.repository_format = self
2369
def _ignore_setting_bzrdir(self, format):
2372
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2374
def check_conversion_target(self, target_format):
2375
if not target_format.rich_root_data:
2376
raise errors.BadConversionTarget(
2377
'Does not support rich root data.', target_format)
2379
def get_format_string(self):
2380
"""See RepositoryFormat.get_format_string()."""
2381
return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
2383
def get_format_description(self):
2384
return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
2388
class RepositoryFormatKnitPack6(RepositoryFormatPack):
2389
"""A repository with stacking and btree indexes,
2390
without rich roots or subtrees.
2392
This is equivalent to pack-1.6 with B+Tree indices.
2395
repository_class = KnitPackRepository
2396
_commit_builder_class = PackCommitBuilder
2397
supports_external_lookups = True
2398
# What index classes to use
2399
index_builder_class = BTreeBuilder
2400
index_class = BTreeGraphIndex
2403
def _serializer(self):
2404
return xml5.serializer_v5
2406
def _get_matching_bzrdir(self):
2407
return bzrdir.format_registry.make_bzrdir('1.9')
2409
def _ignore_setting_bzrdir(self, format):
2412
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2414
def get_format_string(self):
2415
"""See RepositoryFormat.get_format_string()."""
2416
return "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n"
2418
def get_format_description(self):
2419
"""See RepositoryFormat.get_format_description()."""
2420
return "Packs 6 (uses btree indexes, requires bzr 1.9)"
2422
def check_conversion_target(self, target_format):
2426
class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
2427
"""A repository with rich roots, no subtrees, stacking and btree indexes.
2429
1.6-rich-root with B+Tree indices.
2432
repository_class = KnitPackRepository
2433
_commit_builder_class = PackRootCommitBuilder
2434
rich_root_data = True
2435
supports_tree_reference = False # no subtrees
2436
supports_external_lookups = True
2437
# What index classes to use
2438
index_builder_class = BTreeBuilder
2439
index_class = BTreeGraphIndex
2442
def _serializer(self):
2443
return xml6.serializer_v6
2445
def _get_matching_bzrdir(self):
2446
return bzrdir.format_registry.make_bzrdir(
2449
def _ignore_setting_bzrdir(self, format):
2452
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2454
def check_conversion_target(self, target_format):
2455
if not target_format.rich_root_data:
2456
raise errors.BadConversionTarget(
2457
'Does not support rich root data.', target_format)
2459
def get_format_string(self):
2460
"""See RepositoryFormat.get_format_string()."""
2461
return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
2463
def get_format_description(self):
2464
return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    This is pack-1.6.1 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development2')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1 with B+Trees.\n")

    def check_conversion_target(self, target_format):
        pass
class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
2508
"""A subtrees development repository.
2510
This format should be retained until the second release after bzr 1.7.
2512
1.6.1-subtree[as it might have been] with B+Tree indices.
2515
repository_class = KnitPackRepository
2516
_commit_builder_class = PackRootCommitBuilder
2517
rich_root_data = True
2518
supports_tree_reference = True
2519
supports_external_lookups = True
2520
# What index classes to use
2521
index_builder_class = BTreeBuilder
2522
index_class = BTreeGraphIndex
2525
def _serializer(self):
2526
return xml7.serializer_v7
2528
def _get_matching_bzrdir(self):
2529
return bzrdir.format_registry.make_bzrdir(
2530
'development2-subtree')
2532
def _ignore_setting_bzrdir(self, format):
2535
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
2537
def check_conversion_target(self, target_format):
2538
if not target_format.rich_root_data:
2539
raise errors.BadConversionTarget(
2540
'Does not support rich root data.', target_format)
2541
if not getattr(target_format, 'supports_tree_reference', False):
2542
raise errors.BadConversionTarget(
2543
'Does not support nested trees', target_format)
2545
def get_format_string(self):
2546
"""See RepositoryFormat.get_format_string()."""
2547
return ("Bazaar development format 2 with subtree support "
2548
"(needs bzr.dev from before 1.8)\n")
2550
def get_format_description(self):
2551
"""See RepositoryFormat.get_format_description()."""
2552
return ("Development repository format, currently the same as "
2553
"1.6.1-subtree with B+Tree indices.\n")


class RetryPackOperations(errors.RetryWithNewPacks):
    """Raised when we are packing and we find a missing file.

    Meant as a signaling exception, to tell the RepositoryPackCollection.pack
    code it should try again.
    """

    internal_error = True

    _fmt = ("Pack files have changed, reload and try pack again."
            " context: %(context)s %(orig_error)s")


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repofmt.pack_repo.AggregateIndex for more details.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result
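
    # Illustrative sketch (keys and sizes hypothetical): with key_sizes
    # [(('k1',), 10), (('k2',), 20)] and 30 bytes of raw_data, the two
    # records are spooled back-to-back and the result is
    #     [(write_index, off1, len1), (write_index, off2, len2)]
    # - memos in exactly the form get_raw_records() consumes.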

    def flush(self):
        """Flush pending writes on this access object.

        This will flush any buffered writes to a NewPack.
        """
        if self._flush_func is not None:
            self._flush_func()
def get_raw_records(self, memos_for_retrieval):
1989
"""Get the raw bytes for a records.
1991
:param memos_for_retrieval: An iterable containing the (index, pos,
1992
length) memo for retrieving the bytes. The Pack access method
1993
looks up the pack to use for a given record in its index_to_pack
1995
:return: An iterator over the bytes of the records.
1997
# first pass, group into same-index requests
1999
current_index = None
2000
for (index, offset, length) in memos_for_retrieval:
2001
if current_index == index:
2002
current_list.append((offset, length))
2004
if current_index is not None:
2005
request_lists.append((current_index, current_list))
2006
current_index = index
2007
current_list = [(offset, length)]
2008
# handle the last entry
2009
if current_index is not None:
2010
request_lists.append((current_index, current_list))
2011
for index, offsets in request_lists:
2013
transport, path = self._indices[index]
2015
# A KeyError here indicates that someone has triggered an index
2016
# reload, and this index has gone missing, we need to start
2018
if self._reload_func is None:
2019
# If we don't have a _reload_func there is nothing that can
2022
raise errors.RetryWithNewPacks(index,
2023
reload_occurred=True,
2024
exc_info=sys.exc_info())
2026
reader = pack.make_readv_reader(transport, path, offsets)
2027
for names, read_func in reader.iter_records():
2028
yield read_func(None)
2029
except errors.NoSuchFile:
2030
# A NoSuchFile error indicates that a pack file has gone
2031
# missing on disk, we need to trigger a reload, and start over.
2032
if self._reload_func is None:
2034
raise errors.RetryWithNewPacks(transport.abspath(path),
2035
reload_occurred=False,
2036
exc_info=sys.exc_info())

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
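

# A typical call-site sketch for reload_or_raise (caller hypothetical):
# code reading from packs loops along the lines of
#     while True:
#         try:
#             return do_read()
#         except errors.RetryWithNewPacks, e:
#             access.reload_or_raise(e)
# retrying after a successful reload, or propagating the original error.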