            out.extend(lines)
        return out

    def annotate(self, knit, key):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate_flat(key)

def make_file_factory(annotated, mapper):
    """Create a factory for creating a file based KnitVersionedFiles.

    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.

    :param annotated: knit annotations are wanted.
    :param mapper: The mapper from keys to paths.
    """
    def factory(transport):
        index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
        access = _KnitKeyAccess(transport, mapper)
        return KnitVersionedFiles(index, access, annotated=annotated)
    return factory
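
# An illustrative usage sketch (assumed collaborators: ConstantMapper from
# bzrlib.versionedfile and get_transport from bzrlib.transport; the key and
# text values are hypothetical):
#
#   from bzrlib.transport import get_transport
#   from bzrlib.versionedfile import ConstantMapper
#   factory = make_file_factory(annotated=True, mapper=ConstantMapper('name'))
#   vf = factory(get_transport('.'))
#   vf.add_lines(('rev-1',), (), ['line one\n', 'line two\n'])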

def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based VersionedFiles.

    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph or delta
        ref_length = 0
        if graph:
            ref_length += 1
        if delta:
            ref_length += 1
            max_delta_chain = 200
        else:
            max_delta_chain = 0
        graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
            deltas=delta, add_callback=graph_index.add_nodes)
        access = _DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = KnitVersionedFiles(index, access,
            max_delta_chain=max_delta_chain)
        result.stream = stream
        result.writer = writer
        return result
    return factory

def cleanup_pack_knit(versioned_files):
    versioned_files.stream.close()
    versioned_files.writer.end()
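
# An illustrative round-trip for the pack-based factory (MemoryTransport
# comes from bzrlib.transport.memory; keys and texts are hypothetical):
#
#   from bzrlib.transport.memory import MemoryTransport
#   factory = make_pack_factory(graph=True, delta=True, keylength=1)
#   vf = factory(MemoryTransport())
#   vf.add_lines(('rev-1',), (), ['hello\n'])
#   cleanup_pack_knit(vf)  # finish the container writer and close the stream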

def _get_total_build_size(self, keys, positions):
    """Determine the total bytes to build these keys.

    (helper function because _KnitGraphIndex and _KndxIndex work the same, but
    don't inherit from a common base.)

    :param keys: Keys that we want to build
    :param positions: dict of {key: (info, index_memo, comp_parent)} (such
        as returned by _get_components_positions)
    :return: Number of bytes to build those keys
    """
    all_build_index_memos = {}
    build_keys = keys
    while build_keys:
        next_keys = set()
        for key in build_keys:
            # This is mostly for the 'stacked' case
            # Where we will be getting the data from a fallback
            if key not in positions:
                continue
            _, index_memo, compression_parent = positions[key]
            all_build_index_memos[key] = index_memo
            if compression_parent not in all_build_index_memos:
                next_keys.add(compression_parent)
        build_keys = next_keys
    return sum([index_memo[2] for index_memo
                in all_build_index_memos.itervalues()])
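
# Illustrative shapes (hypothetical values): positions maps
#   key -> (record_details, index_memo, compression_parent)
# and index_memo[2] is the stored byte size, so with
#   positions = {('r2',): (None, (0, 10, 700), ('r1',)),
#                ('r1',): (None, (0, 0, 10), None)}
# building [('r2',)] pulls in its compression parent and totals 710 bytes.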

class KnitVersionedFiles(VersionedFiles):
    """Storage for many versioned files using knit compression.

    Backend storage is managed by indices and data objects.

    :ivar _index: A _KnitGraphIndex or similar that can describe the
        parents, graph, compression and data location of entries in this
        KnitVersionedFiles.  Note that this is only the index for
        *this* vfs; if there are fallbacks they must be queried separately.
    """

    def __init__(self, index, data_access, max_delta_chain=200,
                 annotated=False, reload_func=None):
        """Create a KnitVersionedFiles with index and data_access.

        :param index: The index for the knit data.
        :param data_access: The access object to store and retrieve knit
            records.
        :param max_delta_chain: The maximum number of deltas to permit during
            insertion. Set to 0 to prohibit the use of deltas.
        :param annotated: Set to True to cause annotations to be calculated and
            stored during insertion.
        :param reload_func: A function that can be called if we think we need
            to reload the pack listing and try again. See
            'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
        """
        self._index = index
        self._access = data_access
        self._max_delta_chain = max_delta_chain
        if annotated:
            self._factory = KnitAnnotateFactory()
        else:
            self._factory = KnitPlainFactory()
        self._fallback_vfs = []
        self._reload_func = reload_func

    def __repr__(self):
        return "%s(%r, %r)" % (
            self.__class__.__name__,
            self._index,
            self._access)

    def annotate(self, knit, version_id):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate(version_id)

def make_empty_knit(transport, relpath):
    """Construct an empty knit at the specified location."""
    k = make_file_knit(transport, relpath, 'w', KnitPlainFactory)


def make_file_knit(name, transport, file_mode=None, access_mode='w',
        factory=None, delta=True, create=False, create_parent_dir=False,
        delay_create=False, dir_mode=None, get_scope=None):
    """Factory to create a KnitVersionedFile for a .knit/.kndx file pair."""
    if factory is None:
        factory = KnitAnnotateFactory()
    if get_scope is None:
        get_scope = lambda:None
    index = _KnitIndex(transport, name + INDEX_SUFFIX,
        access_mode, create=create, file_mode=file_mode,
        create_parent_dir=create_parent_dir, delay_create=delay_create,
        dir_mode=dir_mode, get_scope=get_scope)
    access = _KnitAccess(transport, name + DATA_SUFFIX, file_mode,
        dir_mode, ((create and not len(index)) and delay_create),
        create_parent_dir)
    return KnitVersionedFile(name, transport, factory=factory,
        create=create, delay_create=delay_create, index=index,
        access_method=access)


def get_suffixes():
    """Return the suffixes used by file based knits."""
    return [DATA_SUFFIX, INDEX_SUFFIX]
make_file_knit.get_suffixes = get_suffixes

class KnitVersionedFile(VersionedFile):
    """Weave-like structure with faster random access.

    A knit stores a number of texts and a summary of the relationships
    between them.  Texts are identified by a string version-id.  Texts
    are normally stored and retrieved as a series of lines, but can
    also be passed as single strings.

    Lines are stored with the trailing newline (if any) included, to
    avoid special cases for files with no final newline.  Lines are
    composed of 8-bit characters, not unicode.  The combination of
    these approaches should mean any 'binary' file can be safely
    stored and retrieved.
    """

    def __init__(self, relpath, transport, file_mode=None,
        factory=None, delta=True, create=False, create_parent_dir=False,
        delay_create=False, dir_mode=None, index=None, access_method=None):
        """Construct a knit at location specified by relpath.

        :param create: If not True, only open an existing knit.
        :param create_parent_dir: If True, create the parent directory if
            creating the file fails. (This is used for stores with
            hash-prefixes that may not exist yet)
        :param delay_create: The calling code is aware that the knit won't
            actually be created until the first data is stored.
        :param index: An index to use for the knit.
        """
        super(KnitVersionedFile, self).__init__()
        self.transport = transport
        self.filename = relpath
        self.factory = factory or KnitAnnotateFactory()
        self.delta = delta
        self._max_delta_chain = 200

        if None in (access_method, index):
            raise ValueError("No default access_method or index any more")
        self._index = index
        _access = access_method
        if create and not len(self) and not delay_create:
            _access.create()
        self._data = _KnitData(_access)

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._fallback_vfs.append(a_versioned_files)

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """See VersionedFiles.add_lines()."""
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        line_bytes = ''.join(lines)
        return self._add(key, lines, parents,
            parent_texts, left_matching_blocks, nostore_sha, random_id,
            line_bytes=line_bytes)
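
    # Illustrative call (hypothetical key and parents): the return value is a
    # (sha1_digest, text_length, content) triple:
    #
    #   sha1, length, _ = kvf.add_lines(('rev-2',), [('rev-1',)],
    #                                   ['new line\n'])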

    def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
        """See VersionedFiles._add_text()."""
        self._index._check_write_ok()
        self._check_add(key, None, random_id, check_content=False)
        if text.__class__ is not str:
            raise errors.BzrBadParameterUnicode("text")
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        return self._add(key, None, parents,
            None, None, nostore_sha, random_id,
            line_bytes=text)

    def _add(self, key, lines, parents, parent_texts,
        left_matching_blocks, nostore_sha, random_id,
        line_bytes):
        """Add a set of lines on top of version specified by parents.

        Any versions not present will be converted into ghosts.

        :param lines: A list of strings where each one is a single line (has a
            single newline at the end of the string). This is now optional
            (callers can pass None). It is left in its location for backwards
            compatibility. ''.join(lines) must equal line_bytes.
        :param line_bytes: A single string containing the content

        We pass both lines and line_bytes because different routes bring the
        values to this function. And for memory efficiency, we don't want to
        have to split/join on-demand.
        """
        # first thing, if the content is something we don't need to store, find
        # that out.
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent

        present_parents = []
        if parent_texts is None:
            parent_texts = {}
        # Do a single query to ascertain parent presence; we only compress
        # against parents in the same kvf.
        present_parent_map = self._index.get_parent_map(parents)
        for parent in parents:
            if parent in present_parent_map:
                present_parents.append(parent)

        # Currently we can only compress against the left most present parent.
        if (len(present_parents) == 0 or
            present_parents[0] != parents[0]):
            delta = False
        else:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas.  This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents[0])

        text_length = len(line_bytes)
        options = []
        no_eol = False
        # Note: line_bytes is not modified to add a newline, that is tracked
        #       via the no_eol flag. 'lines' *is* modified, because that is the
        #       general values needed by the Content code.
        if line_bytes and line_bytes[-1] != '\n':
            options.append('no-eol')
            no_eol = True
            # Copy the existing list, or create a new one
            if lines is None:
                lines = osutils.split_lines(line_bytes)
            else:
                lines = lines[:]
            # Replace the last line with one that ends in a final newline
            lines[-1] = lines[-1] + '\n'
        if lines is None:
            lines = osutils.split_lines(line_bytes)

        for element in key[:-1]:
            if type(element) is not str:
                raise TypeError("key contains non-strings: %r" % (key,))
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + digest,)
        elif type(key[-1]) is not str:
            raise TypeError("key contains non-strings: %r" % (key,))
        # Knit hunks are still last-element only
        version_id = key[-1]
        content = self._factory.make(lines, version_id)
        if no_eol:
            # Hint to the content object that its text() call should strip the
            # EOL.
            content._should_strip_eol = True
        if delta or (self._factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(content, present_parents,
                parent_texts, delta, self._factory.annotated,
                left_matching_blocks)

        if delta:
            options.append('line-delta')
            store_lines = self._factory.lower_line_delta(delta_hunks)
            size, bytes = self._record_to_data(key, digest,
                store_lines)
        else:
            options.append('fulltext')
            # isinstance is slower and we have no hierarchy.
            if self._factory.__class__ is KnitPlainFactory:
                # Use the already joined bytes saving iteration time in
                # _record_to_data.
                dense_lines = [line_bytes]
                if no_eol:
                    dense_lines.append('\n')
                size, bytes = self._record_to_data(key, digest,
                    lines, dense_lines)
            else:
                # get mixed annotation + content and feed it into the
                # serialiser.
                store_lines = self._factory.lower_fulltext(content)
                size, bytes = self._record_to_data(key, digest,
                    store_lines)

        access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
        self._index.add_records(
            ((key, options, access_memo, parents),),
            random_id=random_id)
        return digest, text_length, content

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        return self._factory.annotate(self, key)

    def get_annotator(self):
        return _KnitAnnotator(self)
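
    # Illustrative result shape (hypothetical keys): annotate yields the text
    # of `key` as (origin_key, line) pairs, e.g.
    #   kvf.annotate(('rev-2',)) => [(('rev-1',), 'old line\n'),
    #                                (('rev-2',), 'new line\n')]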

    def check(self, progress_bar=None, keys=None):
        """See VersionedFiles.check()."""
        if keys is None:
            return self._logical_check()
        else:
            # At the moment, check does no extra work over get_record_stream
            return self.get_record_stream(keys, 'unordered', True)

    def _logical_check(self):
        # This doesn't actually test extraction of everything, but that will
        # impact 'bzr check' substantially, and needs to be integrated with
        # care. However, it does check for the obvious problem of a delta with
        # no basis.
        keys = self._index.keys()
        parent_map = self.get_parent_map(keys)
        for key in keys:
            if self._index.get_method(key) != 'fulltext':
                compression_parent = parent_map[key][0]
                if compression_parent not in parent_map:
                    raise errors.KnitCorrupt(self,
                        "Missing basis parent %s for %s" % (
                        compression_parent, key))
        for fallback_vfs in self._fallback_vfs:
            fallback_vfs.check()

    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if version_id is not None:
            if contains_whitespace(version_id):
                raise InvalidRevisionId(version_id, self)
            self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception.  This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)

    def _check_header(self, key, line):
        rec = self._split_header(line)
        self._check_header_version(rec, key[-1])
        return rec

    def _check_header_version(self, rec, version_id):
        """Checks the header version on original format knit records.

        These have the last component of the key embedded in the record.
        """
        if rec[1] != version_id:
            raise KnitCorrupt(self,
                'unexpected version, wanted %r, got %r' % (version_id, rec[1]))

    def _check_should_delta(self, parent):
        """Iterate back through the parent listing, looking for a fulltext.

        This is used when we want to decide whether to add a delta or a new
        fulltext. It searches for _max_delta_chain parents. When it finds a
        fulltext parent, it sees if the total size of the deltas leading up to
        it is large enough to indicate that we want a new full text anyway.

        Return True if we should create a new delta, False if we should use a
        full text.
        """
        delta_size = 0
        fulltext_size = None
        for count in xrange(self._max_delta_chain):
            try:
                # Note that this only looks in the index of this particular
                # KnitVersionedFiles, not in the fallbacks.  This ensures that
                # we won't store a delta spanning physical repository
                # boundaries.
                build_details = self._index.get_build_details([parent])
                parent_details = build_details[parent]
            except (RevisionNotPresent, KeyError), e:
                # Some basis is not locally present: always fulltext
                return False
            index_memo, compression_parent, _, _ = parent_details
            _, _, size = index_memo
            if compression_parent is None:
                fulltext_size = size
                break
            delta_size += size
            # We don't explicitly check for presence because this is in an
            # inner loop, and if it's missing it'll fail anyhow.
            parent = compression_parent
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False
        # Simple heuristic - if the total I/O would be greater as a delta than
        # the originally installed fulltext, we create a new fulltext.
        return fulltext_size > delta_size

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
                           self.transport.abspath(self.filename))

    def _check_should_delta(self, first_parents):
        """Iterate back through the parent listing, looking for a fulltext.

        Return True if we should create a new delta, False if we should use a
        full text.
        """
        delta_size = 0
        fulltext_size = None
        delta_parents = first_parents
        for count in xrange(self._max_delta_chain):
            parent = delta_parents[0]
            method = self._index.get_method(parent)
            index, pos, size = self._index.get_position(parent)
            if method == 'fulltext':
                fulltext_size = size
                break
            delta_size += size
            delta_parents = self._index.get_parent_map([parent])[parent]
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False
        # Simple heuristic - if the total I/O would be greater as a delta than
        # the originally installed fulltext, we create a new fulltext.
        return fulltext_size > delta_size
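
    # Worked example of the heuristic above (hypothetical sizes): with a
    # chain fulltext(1000) <- delta(50) <- delta(60), a new delta costs
    # delta_size = 110 < fulltext_size = 1000, so True (use a delta) is
    # returned; once the accumulated delta bytes exceed the basis fulltext's
    # size the method returns False and a fresh fulltext is stored instead.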

    def _build_details_to_components(self, build_details):
        """Convert a build_details tuple to a position tuple."""
        # record_details, access_memo, compression_parent
        return build_details[3], build_details[0], build_details[1]

    def _get_components_positions(self, keys, allow_missing=False):
        """Produce a map of position data for the components of keys.

        This data is intended to be used for retrieving the knit records.

        A dict of key to (record_details, index_memo, next, parents) is
        returned.

        method is the way referenced data should be applied.
        index_memo is the handle to pass to the data access to actually get the
        data
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version

        :param allow_missing: If True do not raise an error on a missing
            component, just ignore it.
        """
        component_data = {}
        pending_components = keys
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[key] = self._build_details_to_components(details)
            missing = current_components.difference(build_details)
            if missing and not allow_missing:
                raise errors.RevisionNotPresent(missing.pop(), self)
        return component_data

    def _get_content(self, key, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        cached_version = parent_texts.get(key, None)
        if cached_version is not None:
            # Ensure the cache dict is valid.
            if not self.get_parent_map([key]):
                raise RevisionNotPresent(key, self)
            return cached_version
        generator = _VFContentMapGenerator(self, [key])
        return generator._get_content(key)

    def get_known_graph_ancestry(self, keys):
        """Get a KnownGraph instance with the ancestry of keys."""
        parent_map, missing_keys = self._index.find_ancestry(keys)
        for fallback in self._fallback_vfs:
            if not missing_keys:
                break
            (f_parent_map, f_missing_keys) = fallback._index.find_ancestry(
                                                missing_keys)
            parent_map.update(f_parent_map)
            missing_keys = f_missing_keys
        kg = _mod_graph.KnownGraph(parent_map)
        return kg

    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results
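
    # Illustrative stacked lookup (hypothetical keys): with the local index
    # knowing ('b',) -> (('a',),) and one fallback knowing ('a',) -> (),
    # _get_parent_map_with_sources([('a',), ('b',)]) returns
    #   ({('a',): (), ('b',): (('a',),)},
    #    [{('b',): (('a',),)}, {('a',): ()}])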

    def _get_record_map(self, keys, allow_missing=False):
        """Produce a dictionary of knit records.

        :return: {key:(record, record_details, digest, next)}
            record
                data returned from read_records (a KnitContent object)
            record_details
                opaque information to pass to parse_record
            digest
                SHA1 digest of the full text after all steps are done
            next
                build-parent of the version, i.e. the leftmost ancestor.
                Will be None if the record is not a delta.

        :param keys: The keys to build a map for
        :param allow_missing: If some records are missing, rather than
            error, just return the data that could be generated.
        """
        raw_map = self._get_record_map_unparsed(keys,
            allow_missing=allow_missing)
        return self._raw_map_to_record_map(raw_map)

    def _raw_map_to_record_map(self, raw_map):
        """Parse the contents of _get_record_map_unparsed.

        :return: see _get_record_map.
        """
        result = {}
        for key in raw_map:
            data, record_details, next = raw_map[key]
            content, digest = self._parse_record(key[-1], data)
            result[key] = content, record_details, digest, next
        return result

    def _get_record_map_unparsed(self, keys, allow_missing=False):
        """Get the raw data for reconstructing keys without parsing it.

        :return: A dict suitable for parsing via _raw_map_to_record_map.
            key-> raw_bytes, (method, noeol), compression_parent
        """
        # This retries the whole request if anything fails. Potentially we
        # could be a bit more selective. We could track the keys whose records
        # we have successfully found, and then only request the new records
        # from there. However, _get_components_positions grabs the whole build
        # chain, which means we'll likely try to grab the same records again
        # anyway. Also, can the build chains change as part of a pack
        # operation? We wouldn't want to end up with a broken chain.
        while True:
            try:
                position_map = self._get_components_positions(keys,
                    allow_missing=allow_missing)
                # key = component_id, r = record_details, i_m = index_memo,
                # n = next
                records = [(key, i_m) for key, (r, i_m, n)
                                       in position_map.iteritems()]
                # Sort by the index memo, so that we request records from the
                # same pack file together, and in forward-sorted order
                records.sort(key=operator.itemgetter(1))
                raw_record_map = {}
                for key, data in self._read_records_iter_unchecked(records):
                    (record_details, index_memo, next) = position_map[key]
                    raw_record_map[key] = data, record_details, next
                return raw_record_map
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)

    @classmethod
    def _split_by_prefix(cls, keys):
        """For the given keys, split them up based on their prefix.

        To keep memory pressure somewhat under control, split the
        requests back into per-file-id requests, otherwise "bzr co"
        extracts the full tree into memory before writing it to disk.
        This should be revisited if _get_content_maps() can ever cross
        file-id boundaries.

        The keys for a given file_id are kept in the same relative order.
        Ordering between file_ids is not, though prefix_order will return the
        order that the key was first seen.

        :param keys: An iterable of key tuples
        :return: (split_map, prefix_order)
            split_map       A dictionary mapping prefix => keys
            prefix_order    The order that we saw the various prefixes
        """
        split_by_prefix = {}
        prefix_order = []
        for key in keys:
            if len(key) == 1:
                prefix = ''
            else:
                prefix = key[0]

            if prefix in split_by_prefix:
                split_by_prefix[prefix].append(key)
            else:
                split_by_prefix[prefix] = [key]
                prefix_order.append(prefix)
        return split_by_prefix, prefix_order
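
    # Illustrative split (hypothetical keys):
    #   _split_by_prefix([('f1', 'a'), ('f2', 'b'), ('f1', 'c')])
    # returns ({'f1': [('f1', 'a'), ('f1', 'c')], 'f2': [('f2', 'b')]},
    #          ['f1', 'f2'])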

    def _group_keys_for_io(self, keys, non_local_keys, positions,
                           _min_buffer_size=_STREAM_MIN_BUFFER_SIZE):
        """For the given keys, group them into 'best-sized' requests.

        The idea is to avoid making 1 request per file, but to never try to
        unpack an entire 1.5GB source tree in a single pass. Also when
        possible, we should try to group requests to the same pack file
        together.

        :return: list of (keys, non_local) tuples that indicate what keys
            should be fetched next.
        """
        # TODO: Ideally we would group on 2 factors. We want to extract texts
        #       from the same pack file together, and we want to extract all
        #       the texts for a given build-chain together. Ultimately it
        #       probably needs a better global view.
        total_keys = len(keys)
        prefix_split_keys, prefix_order = self._split_by_prefix(keys)
        prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys)
        cur_keys = []
        cur_non_local = set()
        cur_size = 0
        result = []
        sizes = []
        for prefix in prefix_order:
            keys = prefix_split_keys[prefix]
            non_local = prefix_split_non_local_keys.get(prefix, [])

            this_size = self._index._get_total_build_size(keys, positions)
            cur_size += this_size
            cur_keys.extend(keys)
            cur_non_local.update(non_local)
            if cur_size > _min_buffer_size:
                result.append((cur_keys, cur_non_local))
                sizes.append(cur_size)
                cur_keys = []
                cur_non_local = set()
                cur_size = 0
        if cur_keys:
            result.append((cur_keys, cur_non_local))
            sizes.append(cur_size)
        return result

    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """

    def _check_write_ok(self):
        return self._index._check_write_ok()

    def _add_raw_records(self, records, data):
        """Add all the records 'records' with data pre-joined in 'data'.

        :param records: A list of tuples (version_id, options, parents, size).
        :param data: The data for the records. When it is written, the records
            are adjusted to have pos pointing into data by the sum of
            the preceding records sizes.
        """
        # write all the data
        raw_record_sizes = [record[3] for record in records]
        positions = self._data.add_raw_records(raw_record_sizes, data)
        index_entries = []
        for (version_id, options, parents, _), access_memo in zip(
            records, positions):
            index_entries.append((version_id, options, access_memo, parents))
        self._index.add_versions(index_entries)

    def copy_to(self, name, transport):
        """See VersionedFile.copy_to()."""
        # copy the current index to a temp index to avoid racing with local
        # writes
        transport.put_file_non_atomic(name + INDEX_SUFFIX + '.tmp',
            self.transport.get(self._index._filename))
        # copy the data file
        f = self._data._open_file()
        try:
            transport.put_file(name + DATA_SUFFIX, f)
        finally:
            f.close()
        # move the copied index into place
        transport.move(name + INDEX_SUFFIX + '.tmp', name + INDEX_SUFFIX)

    def get_data_stream(self, required_versions):
        """Get a data stream for the specified versions.

        Versions may be returned in any order, not necessarily the order
        specified.  They are returned in a partial order by compression
        parent, so that the deltas can be applied as the data stream is
        inserted; however note that compression parents will not be sent
        unless they were specifically requested, as the client may already
        have them.

        :param required_versions: The exact set of versions to be extracted.
            Unlike some other knit methods, this is not used to generate a
            transitive closure, rather it is used precisely as given.

        :returns: format_signature, list of (version, options, length, parents),
            reader_callable.
        """
        required_version_set = frozenset(required_versions)
        version_index = {}
        # list of revisions that can just be sent without waiting for their
        # compression parent
        ready_to_send = []
        # map from revision to the children based on it
        deferred = {}
        # first, read all relevant index data, enough to sort into the right
        # order to return
        for version_id in required_versions:
            options = self._index.get_options(version_id)
            parents = self._index.get_parents_with_ghosts(version_id)
            index_memo = self._index.get_position(version_id)
            version_index[version_id] = (index_memo, options, parents)
            if ('line-delta' in options
                and parents[0] in required_version_set):
                # must wait until the parent has been sent
                deferred.setdefault(parents[0], []). \
                    append(version_id)
            else:
                # either a fulltext, or a delta whose parent the client did
                # not ask for and presumably already has
                ready_to_send.append(version_id)
        # build a list of results to return, plus instructions for data to
        # read from the file
        copy_queue_records = []
        temp_version_list = []
        while ready_to_send:
            # XXX: pushing and popping lists may be a bit inefficient
            version_id = ready_to_send.pop(0)
            (index_memo, options, parents) = version_index[version_id]
            copy_queue_records.append((version_id, index_memo))
            none, data_pos, data_size = index_memo
            temp_version_list.append((version_id, options, data_size,
                parents))
            if version_id in deferred:
                # now we can send all the children of this revision - we could
                # put them in anywhere, but we hope that sending them soon
                # after the fulltext will give good locality in the receiver
                ready_to_send[:0] = deferred.pop(version_id)
        if not (len(deferred) == 0):
            raise AssertionError("Still have compressed child versions waiting to be sent")
        # XXX: The stream format is such that we cannot stream it - we have to
        # know the length of all the data a-priori.
        raw_datum = []
        result_version_list = []
        for (version_id, raw_data, _), \
            (version_id2, options, _, parents) in \
            izip(self._data.read_records_iter_raw(copy_queue_records),
                 temp_version_list):
            if not (version_id == version_id2):
                raise AssertionError('logic error, inconsistent results')
            raw_datum.append(raw_data)
            result_version_list.append(
                (version_id, options, len(raw_data), parents))
        # provide a callback to get data incrementally.
        pseudo_file = StringIO(''.join(raw_datum))
        def read(length):
            if length is None:
                return pseudo_file.read()
            else:
                return pseudo_file.read(length)
        return (self.get_format_signature(), result_version_list, read)
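
    # Illustrative pairing with insert_data_stream (sketch; 'source' and
    # 'target' are assumed KnitVersionedFile objects of matching format):
    #
    #   stream = source.get_data_stream(['rev-1', 'rev-2'])
    #   target.insert_data_stream(stream)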

    def get_record_stream(self, versions, ordering, include_delta_closure):
        """Get a stream of records for versions.

        :param versions: The versions to include. Each version is a tuple
            (version,).
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        if include_delta_closure:
            # Nb: what we should do is plan the data to stream to allow
            # reconstruction of all the texts without excessive buffering,
            # including re-sending common bases as needed. This makes the most
            # sense when we start serialising these streams though, so for now
            # we just fallback to individual text construction behind the
            # abstraction barrier.
            knit = self
        else:
            knit = None
        # We end up doing multiple index lookups here for parents details and
        # disk layout details - we need a unified api ?
        parent_map = self.get_parent_map(versions)
        absent_versions = set(versions) - set(parent_map)
        if ordering == 'topological':
            present_versions = topo_sort(parent_map)
        else:
            # List comprehension to keep the requested order (as that seems
            # marginally useful, at least until we start doing IO optimising
            # here).
            present_versions = [version for version in versions if version in
                parent_map]
        position_map = self._get_components_positions(present_versions)
        records = [(version, position_map[version][1]) for version in
            present_versions]
        for version in absent_versions:
            yield AbsentContentFactory((version,))
        for version, raw_data, sha1 in \
                self._data.read_records_iter_raw(records):
            (record_details, index_memo, _) = position_map[version]
            yield KnitContentFactory(version, parent_map[version],
                record_details, sha1, raw_data, self.factory.annotated, knit)

    def _extract_blocks(self, version_id, source, target):
        if self._index.get_method(version_id) != 'line-delta':
            return None
        parent, sha1, noeol, delta = self.get_delta(version_id)
        return KnitContent.get_line_delta_blocks(delta, source, target)

    def get_delta(self, version_id):
        """Get a delta for constructing version from some other version."""
        self.check_not_reserved_id(version_id)
        parents = self.get_parent_map([version_id])[version_id]
        if len(parents):
            parent = parents[0]
        else:
            parent = None
        index_memo = self._index.get_position(version_id)
        data, sha1 = self._data.read_records(((version_id, index_memo),))[version_id]
        noeol = 'no-eol' in self._index.get_options(version_id)
        if 'fulltext' == self._index.get_method(version_id):
            new_content = self.factory.parse_fulltext(data, version_id)
            if parent is not None:
                reference_content = self._get_content(parent)
                old_texts = reference_content.text()
            else:
                old_texts = []
            new_texts = new_content.text()
            delta_seq = patiencediff.PatienceSequenceMatcher(None, old_texts,
                new_texts)
            return parent, sha1, noeol, self._make_line_delta(delta_seq, new_content)
        else:
            delta = self.factory.parse_line_delta(data, version_id)
            return parent, sha1, noeol, delta

    def get_format_signature(self):
        """See VersionedFile.get_format_signature()."""
        if self.factory.annotated:
            annotated_part = "annotated"
        else:
            annotated_part = "plain"
        return "knit-%s" % (annotated_part,)

    def get_sha1s(self, version_ids):
        """See VersionedFile.get_sha1s()."""
        record_map = self._get_record_map(version_ids)
        # record entry 2 is the 'digest'.
        return [record_map[v][2] for v in version_ids]

    def insert_data_stream(self, (format, data_list, reader_callable)):
        """Insert knit records from a data stream into this knit.

        If a version in the stream is already present in this knit, it will not
        be inserted a second time. It will be checked for consistency with the
        stored version however, and may cause a KnitCorrupt error to be raised
        if the data in the stream disagrees with the already stored data.

        :seealso: get_data_stream
        """
        if format != self.get_format_signature():
            if 'knit' in debug.debug_flags:
                trace.mutter(
                    'incompatible format signature inserting to %r', self)
            source = self._knit_from_datastream(
                (format, data_list, reader_callable))
            stream = source.get_record_stream(source.versions(), 'unordered', False)
            self.insert_record_stream(stream)
            return
        # keys might be a generator
        keys = set(keys)
        if not keys:
            return
        if not self._index.has_graph:
            # Cannot sort when no graph has been stored.
            ordering = 'unordered'

        remaining_keys = keys
        while True:
            try:
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                                            ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
                return
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)

    def _get_remaining_record_stream(self, keys, ordering,
                                     include_delta_closure):
        """This function is the 'retry' portion for get_record_stream."""
        if include_delta_closure:
            positions = self._get_components_positions(keys, allow_missing=True)
        else:
            build_details = self._index.get_build_details(keys)
            # map from key to
            # (record_details, access_memo, compression_parent_key)
            positions = dict((key, self._build_details_to_components(details))
                for key, details in build_details.iteritems())
        absent_keys = keys.difference(set(positions))
        # There may be more absent keys : if we're missing the basis component
        # and are trying to include the delta closure.
        # XXX: We should not ever need to examine remote sources because we do
        #      not permit deltas across versioned files boundaries.
        if include_delta_closure:
            needed_from_fallback = set()
            # Build up reconstructable_keys dict.  key:True in this dict means
            # the key can be reconstructed.
            reconstructable_keys = {}
            for key in keys:
                # the delta chain
                try:
                    chain = [key, positions[key][2]]
                except KeyError:
                    needed_from_fallback.add(key)
                    continue
                result = True
                while chain[-1] is not None:
                    if chain[-1] in reconstructable_keys:
                        result = reconstructable_keys[chain[-1]]
                        break
                    else:
                        try:
                            chain.append(positions[chain[-1]][2])
                        except KeyError:
                            # missing basis component
                            needed_from_fallback.add(chain[-1])
                            result = True
                            break
                for chain_key in chain[:-1]:
                    reconstructable_keys[chain_key] = result
                if not result:
                    needed_from_fallback.add(key)
        # Double index lookups here : need a unified api ?
        global_map, parent_maps = self._get_parent_map_with_sources(keys)
        if ordering in ('topological', 'groupcompress'):
            if ordering == 'topological':
                # Global topological sort
                present_keys = tsort.topo_sort(global_map)
        for version_id, options, length, parents in data_list:
            if self.has_version(version_id):
                # First check: the list of parents.
                my_parents = self.get_parents_with_ghosts(version_id)
                if tuple(my_parents) != tuple(parents):
                    # XXX: KnitCorrupt is not quite the right exception here.
                    raise KnitCorrupt(
                        self.filename,
                        'parents list %r from data stream does not match '
                        'already recorded parents %r for %s'
                        % (parents, my_parents, version_id))

                # Also check the SHA-1 of the fulltext this content will
                # produce.
                raw_data = reader_callable(length)
                my_fulltext_sha1 = self.get_sha1s([version_id])[0]
                df, rec = self._data._parse_record_header(version_id, raw_data)
                stream_fulltext_sha1 = rec[3]
                if my_fulltext_sha1 != stream_fulltext_sha1:
                    # Actually, we don't know if it's this knit that's corrupt,
                    # or the data stream we're trying to insert.
                    raise KnitCorrupt(
                        self.filename, 'sha-1 does not match %s' % version_id)
            else:
                present_keys = sort_groupcompress(global_map)
            # Now group by source:
            source_keys = []
            current_source = None
            for key in present_keys:
                for parent_map in parent_maps:
                    if key in parent_map:
                        key_source = parent_map
                        break
                if current_source is not key_source:
                    source_keys.append((key_source, []))
                    current_source = key_source
                source_keys[-1][1].append(key)
        else:
            if ordering != 'unordered':
                raise AssertionError('valid values for ordering are:'
                    ' "unordered", "groupcompress" or "topological" not: %r'
                    % (ordering,))
            # Just group by source; remote sources first.
            present_keys = []
            source_keys = []
            for parent_map in reversed(parent_maps):
                source_keys.append((parent_map, []))
                for key in parent_map:
                    present_keys.append(key)
                    source_keys[-1][1].append(key)
        # We have been requested to return these records in an order that
        # suits us. So we ask the index to give us an optimally sorted
        # order.
        for source, sub_keys in source_keys:
            if source is parent_maps[0]:
                # Only sort the keys for this VF
                self._index._sort_keys_by_io(sub_keys, positions)
        absent_keys = keys - set(global_map)
        for key in absent_keys:
            yield AbsentContentFactory(key)
        # restrict our view to the keys we can answer.
        # XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
        # XXX: At that point we need to consider the impact of double reads by
        #      utilising components multiple times.
        if include_delta_closure:
            # XXX: get_content_maps performs its own index queries; allow state
            #      to be passed in.
            non_local_keys = needed_from_fallback - absent_keys
            for keys, non_local_keys in self._group_keys_for_io(present_keys,
                                                                non_local_keys,
                                                                positions):
                generator = _VFContentMapGenerator(self, keys, non_local_keys,
                                                   global_map, ordering=ordering)
                for record in generator.get_record_stream():
                    yield record
        else:
            for source, keys in source_keys:
                if source is parent_maps[0]:
                    # this KnitVersionedFiles
                    records = [(key, positions[key][1]) for key in keys]
                    for key, raw_data in self._read_records_iter_unchecked(records):
                        (record_details, index_memo, _) = positions[key]
                        yield KnitContentFactory(key, global_map[key],
                            record_details, None, raw_data, self._factory.annotated, None)
                else:
                    vf = self._fallback_vfs[parent_maps.index(source) - 1]
                    for record in vf.get_record_stream(keys, ordering,
                        include_delta_closure):
                        yield record

    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        missing = set(keys)
        record_map = self._get_record_map(missing, allow_missing=True)
        result = {}
        for key, details in record_map.iteritems():
            if key not in missing:
                continue
            # record entry 2 is the 'digest'.
            result[key] = details[2]
        missing.difference_update(set(result))
        for source in self._fallback_vfs:
            if not missing:
                break
            new_result = source.get_sha1s(missing)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result
            else:
                if 'line-delta' in options:
                    # Make sure that this knit record is actually useful: a
                    # line-delta is no use unless we have its parent.
                    # Fetching from a broken repository with this problem
                    # shouldn't break the target repository.
                    #
                    # See https://bugs.launchpad.net/bzr/+bug/164443
                    if not self._index.has_version(parents[0]):
                        raise KnitCorrupt(
                            self.filename,
                            'line-delta from stream '
                            'for version %s '
                            'references '
                            'missing parent %s\n'
                            'Try running "bzr check" '
                            'on the source repository, and "bzr reconcile" '
                            'if necessary.' %
                            (version_id, parents[0]))
                    if not self.delta:
                        # We received a line-delta record for a non-delta knit.
                        # Convert it to a fulltext.
                        gzip_bytes = reader_callable(length)
                        self._convert_line_delta_to_fulltext(
                            gzip_bytes, version_id, parents)
                        continue

                self._add_raw_records(
                    [(version_id, options, parents, length)],
                    reader_callable(length))

    def _convert_line_delta_to_fulltext(self, gzip_bytes, version_id, parents):
        lines, sha1 = self._data._parse_record(version_id, gzip_bytes)
        delta = self.factory.parse_line_delta(lines, version_id)
        content = self.factory.make(self.get_lines(parents[0]), parents[0])
        content.apply_delta(delta, version_id)
        digest, _, content = self.add_lines(
            version_id, parents, content.text())
        if digest != sha1:
            raise errors.VersionedFileInvalidChecksum(version_id)

    def _knit_from_datastream(self, (format, data_list, reader_callable)):
        """Create a knit object from a data stream.

        This method exists to allow conversion of data streams that do not
        match the signature of this knit. Generally it will be slower and use
        more memory to use this method to insert data, but it will work.

        :seealso: get_data_stream for details on datastreams.
        :return: A knit versioned file which can be used to join the datastream
            into self.
        """
        if format == "knit-plain":
            factory = KnitPlainFactory()
        elif format == "knit-annotated":
            factory = KnitAnnotateFactory()
        else:
            raise errors.KnitDataStreamUnknown(format)
        index = _StreamIndex(data_list, self._index)
        access = _StreamAccess(reader_callable, index, self, factory)
        return KnitVersionedFile(self.filename, self.transport,
            factory=factory, index=index, access_method=access)

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        adapters = {}
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
                # deprecated format this is tolerable. It can be fixed if
                # needed by in the kndx index support raising on a duplicate
                # add with identical parents and options.
                access_memo = self._access.add_raw_records(
                    [(record.key, len(bytes))], bytes)[0]
                index_entry = (record.key, options, access_memo, parents)
                if 'fulltext' not in options:
                    # Not a fulltext, so we need to make sure the compression
                    # parent will also be present.
                    # Note that pack backed knits don't need to buffer here
                    # because they buffer all writes to the transaction level,
                    # but we don't expose that difference at the index level. If
                    # the query here has sufficient cost to show up in
                    # profiling we should do that.
                    #
                    # They're required to be physically in this
                    # KnitVersionedFiles, not in a fallback.
                    if not self._index.has_key(compression_parent):
                        pending = buffered_index_entries.setdefault(
                            compression_parent, [])
                        pending.append(index_entry)
                        buffered = True
                if not buffered:
                    self._index.add_records([index_entry])
            elif record.storage_kind == 'chunked':
                self.add_lines(record.key, parents,
                    osutils.chunks_to_lines(record.get_bytes_as('chunked')))
            else:
                # Not suitable for direct insertion as a
                # delta, either because it's not the right format, or this
                # KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
                # 0) or because it depends on a base only present in the
                # fallback kvfs.
                self._access.flush()
                try:
                    # Try getting a fulltext directly from the record.
                    bytes = record.get_bytes_as('fulltext')
                except errors.UnavailableRepresentation:
                    adapter_key = record.storage_kind, 'fulltext'
                    adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(record)
                lines = split_lines(bytes)
                try:
                    self.add_lines(record.key, parents, lines)
                except errors.RevisionAlreadyPresent:
                    pass
            # Add any records whose basis parent is now available.
            if not buffered:
                added_keys = [record.key]
                while added_keys:
                    key = added_keys.pop(0)
                    if key in buffered_index_entries:
                        index_entries = buffered_index_entries[key]
                        self._index.add_records(index_entries)
                        added_keys.extend(
                            [index_entry[0] for index_entry in index_entries])
                        del buffered_index_entries[key]
        # If there were any deltas which had a missing basis parent, error.
        if buffered_index_entries:
            # There were index entries buffered at the end of the stream,
            # So these need to be added (if the index supports holding such
            # entries for later insertion)
            all_entries = []
            for key in buffered_index_entries:
                index_entries = buffered_index_entries[key]
                all_entries.extend(index_entries)
            self._index.add_records(
                all_entries, missing_compression_parents=True)

    def get_missing_compression_parent_keys(self):
        """Return an iterable of keys of missing compression parents.

        Check this after calling insert_record_stream to find out if there are
        any missing compression parents.  If there are, the records that
        depend on them are not able to be inserted safely. For atomic
        KnitVersionedFiles built on packs, the transaction should be aborted or
        suspended - commit will fail at this point. Nonatomic knits will error
        earlier because they have no staging area to put pending entries into.
        """
        return self._index.get_missing_compression_parents()
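
    # Typical calling pattern (sketch; 'source' and 'target' are assumed
    # KnitVersionedFiles instances):
    #
    #   target.insert_record_stream(
    #       source.get_record_stream(keys, 'unordered', False))
    #   if target.get_missing_compression_parent_keys():
    #       # some deltas lacked their basis; abort or suspend the
    #       # enclosing transaction instead of committing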

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \\n
           terminators.
         * Lines are returned in arbitrary order.
         * If a requested key did not change any lines (or didn't have any
           lines), it may not be mentioned at all in the result.

        :param pb: Progress bar supplied by caller.
        :return: An iterator over (line, key).
        """
        if pb is None:
            pb = ui.ui_factory.nested_progress_bar()
        keys = set(keys)
        total = len(keys)
        done = False
        while not done:
            try:
                # we don't care about inclusions, the caller cares.
                # but we need to setup a list of records to visit.
                # we need key, position, length
                key_records = []
                build_details = self._index.get_build_details(keys)
                for key, details in build_details.iteritems():
                    if key in keys:
                        key_records.append((key, details[0]))
                records_iter = enumerate(self._read_records_iter(key_records))
                for (key_idx, (key, data, sha_value)) in records_iter:
                    pb.update('Walking content', key_idx, total)
                    compression_parent = build_details[key][1]
                    if compression_parent is None:
                        # fulltext
                        line_iterator = self._factory.get_fulltext_content(data)
                    else:
                        # Delta
                        line_iterator = self._factory.get_linedelta_content(data)
                    # Now that we are yielding the data for this key, remove it
                    # from the list
                    keys.remove(key)
                    # XXX: It might be more efficient to yield (key,
                    # line_iterator) in the future. However for now, this is a
                    # simpler change to integrate into the rest of the
                    # codebase. RBC 20071110
                    for line in line_iterator:
                        yield line, key
                done = True
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)
        # If there are still keys we've not yet found, we look in the fallback
        # vfs, and hope to find them there.  Note that if the keys are found
        # but had no changes or no content, the fallback may not return
        # anything.
        if keys and not self._fallback_vfs:
            # XXX: strictly the second parameter is meant to be the file id
            # but it's not easily accessible here.
            raise RevisionNotPresent(keys, repr(self))
        for source in self._fallback_vfs:
            if not keys:
                break
            source_keys = set()
            for line, key in source.iter_lines_added_or_present_in_keys(keys):
                source_keys.add(key)
                yield line, key
            keys.difference_update(source_keys)
        pb.update('Walking content', total, total)
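
    # Illustrative use (hypothetical keys): collect every line of the
    # requested texts without reconstructing a full text per key:
    #
    #   for line, key in kvf.iter_lines_added_or_present_in_keys(
    #           [('rev-1',), ('rev-2',)]):
    #       ...  # lines arrive in storage order, each tagged with a key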

    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3],
                               new_content._lines[op[3]:op[4]]))
        return diff_hunks
            raise errors.RevisionNotPresent(buffered_index_entries.keys()[0],
                self)

    def versions(self):
        """See VersionedFile.versions."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "versions scales with size of history")
        return self._index.get_versions()

    def has_version(self, version_id):
        """See VersionedFile.has_version."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "has_version is a LBYL scenario")
        return self._index.has_version(version_id)

    __contains__ = has_version

    def _merge_annotations(self, content, parents, parent_texts={},
                           delta=None, annotated=None,
                           left_matching_blocks=None):
        """Merge annotations for content and generate deltas.

        This is done by comparing the annotations based on changes to the text
        and generating a delta on the resulting full texts. If annotations are
        not being created then a simple delta is created.
        """
        if left_matching_blocks is not None:
            delta_seq = diff._PrematchedMatcher(left_matching_blocks)
        else:
            delta_seq = None
        if annotated:
            for parent_key in parents:
                merge_content = self._get_content(parent_key, parent_texts)
                if (parent_key == parents[0] and delta_seq is not None):
                    seq = delta_seq
                else:
                    seq = patiencediff.PatienceSequenceMatcher(
                        None, merge_content.text(), content.text())
                for i, j, n in seq.get_matching_blocks():
                    if n == 0:
                        continue
                    # this copies (origin, text) pairs across to the new
                    # content for any line that matches the last-checked
                    # parent.
                    content._lines[j:j+n] = merge_content._lines[i:i+n]
        if delta:
            if delta_seq is None:
                reference_content = self._get_content(parents[0], parent_texts)
                new_texts = content.text()
                old_texts = reference_content.text()
                delta_seq = patiencediff.PatienceSequenceMatcher(
                    None, old_texts, new_texts)
            return self._make_line_delta(delta_seq, content)

    def _parse_record(self, version_id, data):
        """Parse an original format knit record.

        These have the last element of the key only present in the stored data.
        """
        rec, record_contents = self._parse_record_unchecked(data)
        self._check_header_version(rec, version_id)
        return record_contents, rec[3]

    def _parse_record_header(self, key, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream.
            as (stream, header_record)
        """
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            # Current serialise
            rec = self._check_header(key, df.readline())
        except Exception, e:
            raise KnitCorrupt(self,
                "While reading {%s} got %s(%s)"
                % (key, e.__class__.__name__, str(e)))
        return df, rec

    def _parse_record_unchecked(self, data):
        # profiling notes:
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self,
                'incorrect number of lines %s != %s'
                ' for version {%s} %s'
                % (len(record_contents), int(rec[2]),
                   rec[1], record_contents))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self,
                'unexpected version end line %r, wanted %r'
                % (last_line, rec[1]))
        df.close()
        return rec, record_contents

    def _read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read.
        Not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.

        :param records: A list of (key, access_memo) entries
        :return: Yields (key, contents, digest) in the order
            read, not the order requested
        """
        if not records:
            return

        # XXX: This smells wrong, IO may not be getting ordered right.
        needed_records = sorted(set(records), key=operator.itemgetter(1))
        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (ie, reads continuous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for key, index_memo in needed_records])

        for (key, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(key[-1], data)
            yield key, content, digest

    def _read_records_iter_raw(self, records):
        """Read text records from data file and yield raw data.

        This unpacks enough of the text record to validate the id is
        as expected, but that's all.

        Each item the iterator yields is (key, bytes,
            expected_sha1_of_full_text).
        """
        for key, data in self._read_records_iter_unchecked(records):
            # validate the header (note that we can only use the suffix in
            # current knit records).
            df, rec = self._parse_record_header(key, data)
            df.close()
            yield key, data, rec[3]

    def _read_records_iter_unchecked(self, records):
        """Read text records from data file and yield raw data.

        No validation is done.

        Yields tuples of (key, data).
        """
        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(records):
            # grab the disk data needed.
            needed_offsets = [index_memo for key, index_memo
                                           in records]
            raw_records = self._access.get_raw_records(needed_offsets)

        for key, index_memo in records:
            data = raw_records.next()
            yield key, data

    def _record_to_data(self, key, digest, lines, dense_lines=None):
        """Convert key, digest, lines into a raw data block.

        :param key: The key of the record. Currently keys are always serialised
            using just the trailing component.
        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in
            \\n, dense_lines may be a list with one line in it, containing all
            1000 lines and their \\n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len, a StringIO instance with the raw data ready to read.)
        """
        chunks = ["version %s %d %s\n" % (key[-1], len(lines), digest)]
        chunks.extend(dense_lines or lines)
        chunks.append("end %s\n" % key[-1])
        for chunk in chunks:
            if type(chunk) is not str:
                raise AssertionError(
                    'data must be plain bytes was %s' % type(chunk))
        if lines and lines[-1][-1] != '\n':
            raise ValueError('corrupt lines value %r' % lines)
        compressed_bytes = tuned_gzip.chunks_to_gzip(chunks)
        return len(compressed_bytes), compressed_bytes
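
    # The uncompressed record layout produced above, for a two-line text
    # (hypothetical values), before gzip compression:
    #
    #   version rev-1 2 <sha1 hex digest>
    #   line one
    #   line two
    #   end rev-1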

    def _split_header(self, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self,
                'unexpected number of elements in record header')
        return rec

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result

class _ContentMapGenerator(object):
    """Generate texts or expose raw deltas for a set of texts."""

    def __init__(self, ordering='unordered'):
        self._ordering = ordering

    def _get_content(self, key):
        """Get the content object for key."""
        # Note that _get_content is only called when the _ContentMapGenerator
        # has been constructed with just one key requested for reconstruction.
        if key in self.nonlocal_keys:
            record = self.get_record_stream().next()
            # Create a content object on the fly
            lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
            return PlainKnitContent(lines, record.key)
        else:
            # local keys we can ask for directly
            return self._get_one_work(key)
1209
    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks
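    # Illustrative sketch (assumption, not original source): a
    # SequenceMatcher opcode such as ('replace', 10, 12, 10, 11) becomes the
    # hunk (10, 12, 1, [<the single replacement line>]), i.e. "replace old
    # lines 10-12 with this 1 line". 'equal' runs are skipped because the
    # delta only records regions that changed.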
    def _get_components_positions(self, version_ids):
        """Produce a map of position data for the components of versions.

        This data is intended to be used for retrieving the knit records.

        A dict of version_id to (record_details, index_memo, next, parents) is
        returned.
        method is the way referenced data should be applied.
        index_memo is the handle to pass to the data access to actually get the
            data.
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version.
        """
        component_data = {}
        pending_components = version_ids
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for version_id, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[version_id] = (record_details, index_memo,
                                              compression_parent)
            missing = current_components.difference(build_details)
            if missing:
                raise errors.RevisionNotPresent(missing.pop(), self.filename)
        return component_data
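    # Illustrative walk (assumption): asking for ['C'] where C is a
    # line-delta against B, and B is a line-delta against fulltext A, loops
    # three times, queueing B and then A via their compression parents, and
    # returns position data for all of A, B and C - everything needed to
    # rebuild C's text.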
    def _get_content(self, version_id, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        cached_version = parent_texts.get(version_id, None)
        if cached_version is not None:
            # Ensure the cache dict is valid.
            if not self.has_version(version_id):
                raise RevisionNotPresent(version_id, self.filename)
            return cached_version
        text_map, contents_map = self._get_content_maps([version_id])
        return contents_map[version_id]
    def _check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        self._index.check_versions_present(version_ids)
    def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts,
        nostore_sha, random_id, check_content, left_matching_blocks):
        """See VersionedFile.add_lines_with_ghosts()."""
        self._check_add(version_id, lines, random_id, check_content)
        return self._add(version_id, lines, parents, self.delta,
            parent_texts, left_matching_blocks, nostore_sha, random_id)

    def _add_lines(self, version_id, parents, lines, parent_texts,
        left_matching_blocks, nostore_sha, random_id, check_content):
        """See VersionedFile.add_lines."""
        self._check_add(version_id, lines, random_id, check_content)
        self._check_versions_present(parents)
        return self._add(version_id, lines[:], parents, self.delta,
            parent_texts, left_matching_blocks, nostore_sha, random_id)
    def _check_add(self, version_id, lines, random_id, check_content):
        """Check that version_id and lines are safe to add."""
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id, self.filename)
        self.check_not_reserved_id(version_id)
        # Technically this could be avoided if we are happy to allow duplicate
        # id insertion when other things than bzr core insert texts, but it
        # seems useful for folk using the knit api directly to have some safety
        # blanket that we can disable.
        if not random_id and self.has_version(version_id):
            raise RevisionAlreadyPresent(version_id, self.filename)
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)
    def _add(self, version_id, lines, parents, delta, parent_texts,
             left_matching_blocks, nostore_sha, random_id):
        """Add a set of lines on top of version specified by parents.

        If delta is true, compress the text as a line-delta against
        the first parent.

        Any versions not present will be converted into ghosts.
        """
        # first thing, if the content is something we don't need to store, find
        # that out.
        line_bytes = ''.join(lines)
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent

        present_parents = []
        if parent_texts is None:
            parent_texts = {}
        for parent in parents:
            if self.has_version(parent):
                present_parents.append(parent)

        # can only compress against the left most present parent.
        if (delta and
            (len(present_parents) == 0 or
             present_parents[0] != parents[0])):
            delta = False
        text_length = len(line_bytes)

        options = []
        if lines:
            if lines[-1][-1] != '\n':
                # copy the contents of lines.
                lines = lines[:]
                options.append('no-eol')
                lines[-1] = lines[-1] + '\n'
                line_bytes += '\n'

        if delta:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas. This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents)

        content = self.factory.make(lines, version_id)
        if delta or (self.factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(content, present_parents,
                parent_texts, delta, self.factory.annotated,
                left_matching_blocks)

        if delta:
            options.append('line-delta')
            store_lines = self.factory.lower_line_delta(delta_hunks)
            size, bytes = self._data._record_to_data(version_id, digest,
                store_lines)
        else:
            # local keys we can ask for directly
            return self._get_one_work(key)

    def get_record_stream(self):
        """Get a record stream for the keys requested during __init__."""
        for record in self._work():
            yield record
        else:
            options.append('fulltext')
            # isinstance is slower and we have no hierarchy.
            if self.factory.__class__ == KnitPlainFactory:
                # Use the already joined bytes saving iteration time in
                # _record_to_data.
                size, bytes = self._data._record_to_data(version_id, digest,
                    lines, [line_bytes])
            else:
                # get mixed annotation + content and feed it into the
                # serialiser.
                store_lines = self.factory.lower_fulltext(content)
                size, bytes = self._data._record_to_data(version_id, digest,
                    store_lines)

        access_memo = self._data.add_raw_records([size], bytes)[0]
        self._index.add_versions(
            ((version_id, options, access_memo, parents),),
            random_id=random_id)
        return digest, text_length, content
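    # Illustrative flow (assumption): adding 'rev-2' whose parent 'rev-1' is
    # present typically stores a gzipped 'line-delta' record; a parentless
    # text, or one whose delta chain has grown too long, is stored as a
    # 'fulltext' record instead. Either way, add_raw_records() returns the
    # access memo that the index associates with the version.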
    def check(self, progress_bar=None):
        """See VersionedFile.check()."""
        # This doesn't actually test extraction of everything, but that will
        # impact 'bzr check' substantially, and needs to be integrated with
        # care. However, it does check for the obvious problem of a delta with
        # no basis.
        versions = self.versions()
        parent_map = self.get_parent_map(versions)
        for version in versions:
            if self._index.get_method(version) != 'fulltext':
                compression_parent = parent_map[version][0]
                if compression_parent not in parent_map:
                    raise errors.KnitCorrupt(self,
                        "Missing basis parent %s for %s" % (
                        compression_parent, version))
    def get_lines(self, version_id):
        """See VersionedFile.get_lines()."""
        return self.get_line_list([version_id])[0]
    def _get_record_map(self, version_ids):
        """Produce a dictionary of knit records.

        :return: {version_id:(record, record_details, digest, next)}
            record
                data returned from read_records
            record_details
                opaque information to pass to parse_record
            digest
                SHA1 digest of the full text after all steps are done
            next
                build-parent of the version, i.e. the leftmost ancestor.
                Will be None if the record is not a delta.
        """
        position_map = self._get_components_positions(version_ids)
        # c = component_id, r = record_details, i_m = index_memo, n = next
        records = [(c, i_m) for c, (r, i_m, n)
                   in position_map.iteritems()]
        record_map = {}
        for component_id, record, digest in \
                self._data.read_records_iter(records):
            (record_details, index_memo, next) = position_map[component_id]
            record_map[component_id] = record, record_details, digest, next
        return record_map
    def get_text(self, version_id):
        """See VersionedFile.get_text"""
        return self.get_texts([version_id])[0]

    def get_texts(self, version_ids):
        return [''.join(l) for l in self.get_line_list(version_ids)]

    def get_line_list(self, version_ids):
        """Return the texts of listed versions as a list of strings."""
        for version_id in version_ids:
            self.check_not_reserved_id(version_id)
        text_map, content_map = self._get_content_maps(version_ids)
        return [text_map[v] for v in version_ids]

    _get_lf_split_line_list = get_line_list
    def _get_content_maps(self, version_ids):
        """Produce maps of text and KnitContents

        :return: (text_map, content_map) where text_map contains the texts for
            the requested versions and content_map contains the KnitContents.
            Both dicts take version_ids as their keys.
        """
        # NB: By definition we never need to read remote sources unless texts
        # are requested from them: we don't delta across stores - and we
        # explicitly do not want to, to prevent data loss situations.
        if self.global_map is None:
            self.global_map = self.vf.get_parent_map(self.keys)
        nonlocal_keys = self.nonlocal_keys

        missing_keys = set(nonlocal_keys)
        # Read from remote versioned file instances and provide to our caller.
        for source in self.vf._fallback_vfs:
            if not missing_keys:
                break
            # Loop over fallback repositories asking them for texts - ignore
            # any missing from a particular fallback.
            for record in source.get_record_stream(missing_keys,
                                                   self._ordering, True):
                if record.storage_kind == 'absent':
                    # Not in this particular stream, may be in one of the
                    # other fallback vfs objects.
                    continue
                missing_keys.remove(record.key)
                yield record

        if self._raw_record_map is None:
            raise AssertionError('_raw_record_map should have been filled')
        first = True
        for key in self.keys:
            if key in self.nonlocal_keys:
                continue
            yield LazyKnitContentFactory(key, self.global_map[key], self, first)
            first = False
    def _get_one_work(self, requested_key):
        # Now, if we have calculated everything already, just return the
        # requested key.
        if requested_key in self._contents_map:
            return self._contents_map[requested_key]
        # To simplify things, parse everything at once - code that wants one
        # text probably wants them all.
        # FUTURE: This function could be improved for the 'extract many' case
        # by tracking each component and only doing the copy when the number of
        # children that need to apply deltas to it is > 1 or it is part of the
        # final output.
        multiple_versions = len(self.keys) != 1
        if self._record_map is None:
            self._record_map = self.vf._raw_map_to_record_map(
                self._raw_record_map)
        record_map = self._record_map
        # Have read and parsed records at this point.
        for key in self.keys:
            if key in self.nonlocal_keys:
                continue
            components = []
            cursor = key
            while cursor is not None:
                try:
                    record, record_details, digest, next = record_map[cursor]
                except KeyError:
                    raise RevisionNotPresent(cursor, self)
                components.append((cursor, record, record_details, digest))
                cursor = next
                if cursor in self._contents_map:
                    # no need to plan further back
                    components.append((cursor, None, None, None))
                    break
            content = None
            for (component_id, record, record_details,
                 digest) in reversed(components):
                if component_id in self._contents_map:
                    content = self._contents_map[component_id]
                else:
                    content, delta = self._factory.parse_record(key[-1],
                        record, record_details, content,
                        copy_base_content=multiple_versions)
                    if multiple_versions:
                        self._contents_map[component_id] = content

            # digest here is the digest from the last applied component.
            text = content.text()
            actual_sha = sha_strings(text)
            if actual_sha != digest:
                raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
        if multiple_versions:
            return self._contents_map[requested_key]
        else:
            return content
    def _wire_bytes(self):
        """Get the bytes to put on the wire for 'key'.

        The first collection of bytes asked for returns the serialised
        raw_record_map and the additional details (key, parent) for key.
        Subsequent calls return just the additional details (key, parent).
        The wire storage_kind given for the first key is 'knit-delta-closure',
        for subsequent keys it is 'knit-delta-closure-ref'.

        :param key: A key from the content generator.
        :return: Bytes to put on the wire.
        """
        lines = []
        # kind marker for dispatch on the far side,
        lines.append('knit-delta-closure')
        # annotated or not
        if self.vf._factory.annotated:
            lines.append('annotated')
        else:
            lines.append('')
        # then the list of keys
        lines.append('\t'.join(['\x00'.join(key) for key in self.keys
            if key not in self.nonlocal_keys]))
        # then the _raw_record_map in serialised form:
        map_byte_list = []
        # for each item in the map:
        # 1 line with key
        # 1 line with parents if the key is to be yielded (None: for None, '' for ())
        # one line with method
        # one line with noeol
        # one line with next ('' for None)
        # one line with byte count of the record bytes
        # the record bytes
        for key, (record_bytes, (method, noeol), next) in \
                self._raw_record_map.iteritems():
            key_bytes = '\x00'.join(key)
            parents = self.global_map.get(key, None)
            if parents is None:
                parent_bytes = 'None:'
            else:
                parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
            method_bytes = method
            if noeol:
                noeol_bytes = "T"
            else:
                noeol_bytes = "F"
            if next:
                next_bytes = '\x00'.join(next)
            else:
                next_bytes = ''
            map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
                key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
                len(record_bytes), record_bytes))
        map_bytes = ''.join(map_byte_list)
        lines.append(map_bytes)
        bytes = '\n'.join(lines)
        return bytes
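    # Illustrative wire layout (assumption, following the serialisation
    # above) for one annotated key ('rev-1',) stored as a fulltext with no
    # recorded parents:
    #
    #   knit-delta-closure
    #   annotated
    #   rev-1
    #   rev-1
    #   None:
    #   fulltext
    #   F
    #
    #   <byte count>
    #   <record bytes>
    #
    # _NetworkContentMapGenerator below parses exactly this layout back.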
class _VFContentMapGenerator(_ContentMapGenerator):
    """Content map generator reading from a VersionedFiles object."""

    def __init__(self, versioned_files, keys, nonlocal_keys=None,
                 global_map=None, raw_record_map=None, ordering='unordered'):
        """Create a _ContentMapGenerator.

        :param versioned_files: The versioned files that the texts are being
            extracted from.
        :param keys: The keys to produce content maps for.
        :param nonlocal_keys: An iterable of keys (possibly intersecting keys)
            which are known to not be in this knit, but rather in one of the
            fallback knits.
        :param global_map: The result of get_parent_map(keys) (or a supermap).
            This is required if get_record_stream() is to be used.
        :param raw_record_map: An unparsed raw record map to use for answering
            contents.
        """
        _ContentMapGenerator.__init__(self, ordering=ordering)
        # The vf to source data from
        self.vf = versioned_files
        # The keys desired
        self.keys = list(keys)
        # Keys known to be in fallback vfs objects
        if nonlocal_keys is None:
            self.nonlocal_keys = set()
        else:
            self.nonlocal_keys = frozenset(nonlocal_keys)
        # Parents data for keys to be returned in get_record_stream
        self.global_map = global_map
        # The chunked lists for self.keys in text form
        self._text_map = {}
        # A cache of KnitContent objects used in extracting texts.
        self._contents_map = {}
        # All the knit records needed to assemble the requested keys as full
        # texts.
        self._record_map = None
        if raw_record_map is None:
            self._raw_record_map = self.vf._get_record_map_unparsed(keys,
                allow_missing=True)
        else:
            self._raw_record_map = raw_record_map
        # the factory for parsing records
        self._factory = self.vf._factory
class _NetworkContentMapGenerator(_ContentMapGenerator):
    """Content map generator sourced from a network stream."""

    def __init__(self, bytes, line_end):
        """Construct a _NetworkContentMapGenerator from a bytes block."""
        self._bytes = bytes
        self.global_map = {}
        self._raw_record_map = {}
        self._contents_map = {}
        self._record_map = None
        self.nonlocal_keys = []
        # Get access to record parsing facilities
        self.vf = KnitVersionedFiles(None, None)
        start = line_end
        # Annotated or not
        line_end = bytes.find('\n', start)
        line = bytes[start:line_end]
        start = line_end + 1
        if line == 'annotated':
            self._factory = KnitAnnotateFactory()
        else:
            self._factory = KnitPlainFactory()
        # list of keys to emit in get_record_stream
        line_end = bytes.find('\n', start)
        line = bytes[start:line_end]
        start = line_end + 1
        self.keys = [
            tuple(segment.split('\x00')) for segment in line.split('\t')
            if segment]
        # now a loop until the end. XXX: It would be nice if this was just a
        # bunch of the same records as get_record_stream(..., False) gives, but
        # there is a decent sized gap stopping that at the moment.
        end = len(bytes)
        while start < end:
            # 1 line with key
            line_end = bytes.find('\n', start)
            key = tuple(bytes[start:line_end].split('\x00'))
            start = line_end + 1
            # 1 line with parents (None: for None, '' for ())
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            if line == 'None:':
                parents = None
            else:
                parents = tuple(
                    [tuple(segment.split('\x00')) for segment in line.split('\t')
                     if segment])
            self.global_map[key] = parents
            start = line_end + 1
            # one line with method
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            method = line
            start = line_end + 1
            # one line with noeol
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            noeol = line == "T"
            start = line_end + 1
            # one line with next ('' for None)
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            if not line:
                next = None
            else:
                next = tuple(bytes[start:line_end].split('\x00'))
            start = line_end + 1
            # one line with byte count of the record bytes
            line_end = bytes.find('\n', start)
            line = bytes[start:line_end]
            count = int(line)
            start = line_end + 1
            # the record bytes
            record_bytes = bytes[start:start+count]
            start = start + count
            # put it in the map
            self._raw_record_map[key] = (record_bytes, (method, noeol), next)
    def get_record_stream(self):
        """Get a record stream for the keys requested by the bytestream."""
        first = True
        for key in self.keys:
            yield LazyKnitContentFactory(key, self.global_map[key], self, first)
            first = False
    def _wire_bytes(self):
        raise NotImplementedError(self._wire_bytes)


class _KndxIndex(object):
    """Manages knit index files.

    The index is kept in memory and read on startup, to enable
    fast lookups of revision information.
    """
            if actual_sha != digest:
                raise KnitCorrupt(self.filename,
                    '\n  sha-1 %s'
                    '\n  of reconstructed text does not match'
                    '\n  expected sha-1 %s'
                    '\n  for version %s' %
                    (actual_sha, digest, version_id))
            text_map[version_id] = text
        return text_map, final_content
    def iter_lines_added_or_present_in_versions(self, version_ids=None,
                                                pb=None):
        """See VersionedFile.iter_lines_added_or_present_in_versions()."""
        if version_ids is None:
            version_ids = self.versions()
        if pb is None:
            pb = progress.DummyProgress()
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need version_id, position, length
        version_id_records = []
        requested_versions = set(version_ids)
        # filter for available versions
        for version_id in requested_versions:
            if not self.has_version(version_id):
                raise RevisionNotPresent(version_id, self.filename)
        # get an in-component-order queue:
        for version_id in self.versions():
            if version_id in requested_versions:
                index_memo = self._index.get_position(version_id)
                version_id_records.append((version_id, index_memo))

        total = len(version_id_records)
        for version_idx, (version_id, data, sha_value) in \
                enumerate(self._data.read_records_iter(version_id_records)):
            pb.update('Walking content.', version_idx, total)
            method = self._index.get_method(version_id)
            if method == 'fulltext':
                line_iterator = self.factory.get_fulltext_content(data)
            elif method == 'line-delta':
                line_iterator = self.factory.get_linedelta_content(data)
            else:
                raise ValueError('invalid method %r' % (method,))
            # XXX: It might be more efficient to yield (version_id,
            # line_iterator) in the future. However for now, this is a simpler
            # change to integrate into the rest of the codebase. RBC 20071110
            for line in line_iterator:
                yield line, version_id

        pb.update('Walking content.', total, total)
    def num_versions(self):
        """See VersionedFile.num_versions()."""
        return self._index.num_versions()

    __len__ = num_versions

    def annotate(self, version_id):
        """See VersionedFile.annotate."""
        return self.factory.annotate(self, version_id)

    def get_parent_map(self, version_ids):
        """See VersionedFile.get_parent_map."""
        return self._index.get_parent_map(version_ids)

    def get_ancestry(self, versions, topo_sorted=True):
        """See VersionedFile.get_ancestry."""
        if isinstance(versions, basestring):
            versions = [versions]
        if not versions:
            return []
        return self._index.get_ancestry(versions, topo_sorted)

    def get_ancestry_with_ghosts(self, versions):
        """See VersionedFile.get_ancestry_with_ghosts."""
        if isinstance(versions, basestring):
            versions = [versions]
        if not versions:
            return []
        return self._index.get_ancestry_with_ghosts(versions)

    def plan_merge(self, ver_a, ver_b):
        """See VersionedFile.plan_merge."""
        ancestors_b = set(self.get_ancestry(ver_b, topo_sorted=False))
        ancestors_a = set(self.get_ancestry(ver_a, topo_sorted=False))
        annotated_a = self.annotate(ver_a)
        annotated_b = self.annotate(ver_b)
        return merge._plan_annotate_merge(annotated_a, annotated_b,
                                          ancestors_a, ancestors_b)
class _KnitComponentFile(object):
    """One of the files used to implement a knit database"""

    def __init__(self, transport, filename, mode, file_mode=None,
                 create_parent_dir=False, dir_mode=None):
        self._transport = transport
        self._filename = filename
        self._mode = mode
        self._file_mode = file_mode
        self._dir_mode = dir_mode
        self._create_parent_dir = create_parent_dir
        self._need_to_create = False

    def _full_path(self):
        """Return the full path to this file."""
        return self._transport.base + self._filename

    def check_header(self, fp):
        line = fp.readline()
        if line == '':
            # An empty file can actually be treated as though the file doesn't
            # exist yet.
            raise errors.NoSuchFile(self._full_path())
        if line != self.HEADER:
            raise KnitHeaderError(badline=line,
                filename=self._transport.abspath(self._filename))

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._filename)
class _KnitIndex(_KnitComponentFile):
    """Manages knit index file.

    The index is already kept in memory and read on startup, to enable
    fast lookups of revision information.  The cursor of the index
    file is always pointing to the end, making it easy to append
    entries.

    Entries are terminated with \n to ensure that records always start on new
    lines even if the last write was interrupted. As a result it's normal for
    the last line in the index to be missing a trailing newline. One can be
    added with no harmful effects.

    :ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
        where prefix is e.g. the (fileid,) for .texts instances or () for
        constant-mapped things like .revisions, and the old state is
        tuple(history_vector, cache_dict). This is used to prevent having an
        ABI change with the C extension that reads .kndx files.
    """

    HEADER = "# bzr knit index 8\n"
    def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
        """Create a _KndxIndex on transport using mapper."""
        self._transport = transport
        self._mapper = mapper
        self._get_scope = get_scope
        self._allow_writes = allow_writes
        self._is_locked = is_locked
        self._reset_cache()
        self.has_graph = True
    def add_records(self, records, random_id=False, missing_compression_parents=False):
        """Add multiple records to the index.

        :param records: a list of tuples:
                         (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        :param missing_compression_parents: If True the records being added are
            only compressed against texts already in the index (or inside
            records). If False the records all refer to unavailable texts (or
            texts inside records) as compression parents.
        """
        if missing_compression_parents:
            # It might be nice to get the edge of the records. But keys isn't
            # something we can easily get at here.
            keys = sorted(record[0] for record in records)
            raise errors.RevisionNotPresent(keys, self)
        paths = {}
        for record in records:
            key = record[0]
            prefix = key[:-1]
            path = self._mapper.map(key) + '.kndx'
            path_keys = paths.setdefault(path, (prefix, []))
            path_keys[1].append(record)
        for path in sorted(paths):
            prefix, path_keys = paths[path]
            self._load_prefixes([prefix])
            lines = []
            orig_history = self._kndx_cache[prefix][1][:]
            orig_cache = self._kndx_cache[prefix][0].copy()
            try:
                for key, options, (_, pos, size), parents in path_keys:
                    if parents is None:
                        # kndx indices cannot be parentless.
                        parents = ()
                    line = "\n%s %s %s %s %s :" % (
                        key[-1], ','.join(options), pos, size,
                        self._dictionary_compress(parents))
                    if type(line) is not str:
                        raise AssertionError(
                            'data must be utf8 was %s' % type(line))
                    lines.append(line)
                    self._cache_key(key, options, pos, size, parents)
                if len(orig_history):
                    self._transport.append_bytes(path, ''.join(lines))
                else:
                    self._init_index(path, lines)
            except:
                # If any problems happen, restore the original values and re-raise
                self._kndx_cache[prefix] = (orig_cache, orig_history)
                raise
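    # Illustrative index line (assumption, matching the formatting above):
    #
    #   rev-2 line-delta 907 578 0 :
    #
    # i.e. version 'rev-2', options 'line-delta', byte offset 907, length
    # 578, and a dictionary-compressed parent reference '0' (an index into
    # the history list). The leading '\n' in the format string guarantees
    # the record starts on a fresh line even after an interrupted write.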
    def scan_unvalidated_index(self, graph_index):
        """See _KnitGraphIndex.scan_unvalidated_index."""
        # Because kndx files do not support atomic insertion via separate index
        # files, they do not support this method.
        raise NotImplementedError(self.scan_unvalidated_index)

    def get_missing_compression_parents(self):
        """See _KnitGraphIndex.get_missing_compression_parents."""
        # Because kndx files do not support atomic insertion via separate index
        # files, they do not support this method.
        raise NotImplementedError(self.get_missing_compression_parents)
    # speed of knit parsing went from 280 ms to 280 ms with slots addition.
    # __slots__ = ['_cache', '_history', '_transport', '_filename']

    def _cache_key(self, key, options, pos, size, parent_keys):
        """Cache a version record in the history array and index cache.

        This is inlined into _load_data for performance. KEEP IN SYNC.
        (It saves 60ms, 25% of the __init__ overhead on local 4000 record
         indexes).
        """
        prefix = key[:-1]
        version_id = key[-1]
        # last-element only for compatibility with the C load_data.
        parents = tuple(parent[-1] for parent in parent_keys)
        for parent in parent_keys:
            if parent[:-1] != prefix:
                raise ValueError("mismatched prefixes for %r, %r" % (
                    key, parent))
        cache, history = self._kndx_cache[prefix]
        # only want the _history index to reference the 1st index entry
        # for version_id
        if version_id not in cache:
            index = len(history)
            history.append(version_id)
        else:
            index = cache[version_id][5]
        cache[version_id] = (version_id,
                             options,
                             pos,
                             size,
                             parents,
                             index)

    def _cache_version(self, version_id, options, pos, size, parents):
        """Cache a version record in the history array and index cache.

        This is inlined into _load_data for performance. KEEP IN SYNC.
        """
        # only want the _history index to reference the 1st index entry
        # for version_id
        if version_id not in self._cache:
            index = len(self._history)
            self._history.append(version_id)
        else:
            index = self._cache[version_id][5]
        self._cache[version_id] = (version_id,
                                   options,
                                   pos,
                                   size,
                                   parents,
                                   index)
    def check_header(self, fp):
        line = fp.readline()
        if line == '':
            # An empty file can actually be treated as though the file doesn't
            # exist yet.
            raise errors.NoSuchFile(self)
        if line != self.HEADER:
            raise KnitHeaderError(badline=line, filename=self)
    def _check_read(self):
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            raise errors.OutSideTransaction()

    def _check_write_ok(self):
        """Raise an error if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            raise errors.OutSideTransaction()
        if self._mode != 'w':
            raise errors.ReadOnlyObjectDirtiedError(self)
    def __init__(self, transport, filename, mode, create=False, file_mode=None,
                 create_parent_dir=False, delay_create=False, dir_mode=None,
                 get_scope=None):
        _KnitComponentFile.__init__(self, transport, filename, mode,
                                    file_mode=file_mode,
                                    create_parent_dir=create_parent_dir,
                                    dir_mode=dir_mode)
        self._cache = {}
        # position in _history is the 'official' index for a revision
        # but the values may have come from a newer entry.
        # so - wc -l of a knit index is != the number of unique names
        # in the knit.
        self._history = []
        try:
            fp = self._transport.get(self._filename)
            try:
                # _load_data may raise NoSuchFile if the target knit is
                # completely empty.
                _load_data(self, fp)
            finally:
                fp.close()
        except NoSuchFile:
            if mode != 'w' or not create:
                raise
            elif delay_create:
                self._need_to_create = True
            else:
                self._transport.put_bytes_non_atomic(
                    self._filename, self.HEADER, mode=self._file_mode)
        self._scope = get_scope()
        self._get_scope = get_scope
    def get_ancestry(self, versions, topo_sorted=True):
        """See VersionedFile.get_ancestry."""
        # get a graph of all the mentioned versions:
        graph = {}
        pending = set(versions)
        cache = self._cache
        while pending:
            version = pending.pop()
            # trim ghosts
            try:
                parents = [p for p in cache[version][4] if p in cache]
            except KeyError:
                raise RevisionNotPresent(version, self._filename)
            # if not completed and not a ghost
            pending.update([p for p in parents if p not in graph])
            graph[version] = parents
        if not topo_sorted:
            return graph.keys()
        return topo_sort(graph.items())
    def get_ancestry_with_ghosts(self, versions):
        """See VersionedFile.get_ancestry_with_ghosts."""
        # get a graph of all the mentioned versions:
        self.check_versions_present(versions)
        graph = {}
        cache = self._cache
        pending = set(versions)
        while pending:
            version = pending.pop()
            try:
                parents = cache[version][4]
            except KeyError:
                # ghost, fake it
                graph[version] = []
            else:
                # if not completed
                pending.update([p for p in parents if p not in graph])
                graph[version] = parents
        return topo_sort(graph.items())
    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:(index_memo, compression_parent, parents,
            record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        parent_map = self.get_parent_map(keys)
        result = {}
        for key in keys:
            if key not in parent_map:
                continue  # Ghost
            method = self.get_method(key)
            parents = parent_map[key]
            if method == 'fulltext':
                compression_parent = None
            else:
                compression_parent = parents[0]
            noeol = 'no-eol' in self.get_options(key)
            index_memo = self.get_position(key)
            result[key] = (index_memo, compression_parent,
                           parents, (method, noeol))
        return result
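    # Illustrative result entry (assumption): for a line-delta record,
    #
    #   result[('rev-2',)] = ((('rev-2',), 907, 578),   # index_memo
    #                         ('rev-1',),               # compression parent
    #                         (('rev-1',),),            # logical parents
    #                         ('line-delta', False))    # record_details
    #
    # get_position() supplies the (key, byte offset, length) index memo.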
    def get_method(self, key):
        """Return compression method of specified key."""
        options = self.get_options(key)
        if 'fulltext' in options:
            return 'fulltext'
        elif 'line-delta' in options:
            return 'line-delta'
        else:
            raise errors.KnitIndexUnknownMethod(self, options)
    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        prefix, suffix = self._split_key(key)
        self._load_prefixes([prefix])
        try:
            return self._kndx_cache[prefix][0][suffix][1]
        except KeyError:
            raise RevisionNotPresent(key, self)
    def find_ancestry(self, keys):
        """See CombinedGraphIndex.find_ancestry()"""
        prefixes = set(key[:-1] for key in keys)
        self._load_prefixes(prefixes)
        parent_map = {}
        missing_keys = set()
        pending_keys = list(keys)
        # This assumes that keys will not reference parents in a different
        # prefix, which is accurate so far.
        while pending_keys:
            key = pending_keys.pop()
            if key in parent_map:
                continue
            prefix = key[:-1]
            try:
                suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
            except KeyError:
                missing_keys.add(key)
            else:
                parent_keys = tuple([prefix + (suffix,)
                                     for suffix in suffix_parents])
                parent_map[key] = parent_keys
                pending_keys.extend([p for p in parent_keys
                                     if p not in parent_map])
        return parent_map, missing_keys
    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        # Parse what we need to up front, this potentially trades off I/O
        # locality (.kndx and .knit in the same block group for the same file
        # id) for less checking in inner loops.
        prefixes = set(key[:-1] for key in keys)
        self._load_prefixes(prefixes)
        result = {}
        for key in keys:
            prefix = key[:-1]
            try:
                suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
            except KeyError:
                pass
            else:
                result[key] = tuple(prefix + (suffix,) for
                                    suffix in suffix_parents)
        return result
    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (key, data position, size) to hand to the access
            logic to get the record.
        """
        prefix, suffix = self._split_key(key)
        self._load_prefixes([prefix])
        entry = self._kndx_cache[prefix][0][suffix]
        return key, entry[2], entry[3]

    has_key = _mod_index._has_key_from_parent_map
    def _init_index(self, path, extra_lines=[]):
        """Initialize an index."""
        sio = StringIO()
        sio.write(self.HEADER)
        sio.writelines(extra_lines)
        sio.seek(0)
        self._transport.put_file_non_atomic(path, sio,
                            create_parent_dir=True)
                           # self._create_parent_dir)
                           # mode=self._file_mode,
                           # dir_mode=self._dir_mode)
"""Get all the keys in the collection.
2666
The keys are not ordered.
2669
# Identify all key prefixes.
2670
# XXX: A bit hacky, needs polish.
2671
if type(self._mapper) is ConstantMapper:
2675
for quoted_relpath in self._transport.iter_files_recursive():
2676
path, ext = os.path.splitext(quoted_relpath)
2678
prefixes = [self._mapper.unmap(path) for path in relpaths]
2679
self._load_prefixes(prefixes)
2680
for prefix in prefixes:
2681
for suffix in self._kndx_cache[prefix][1]:
2682
result.add(prefix + (suffix,))
2685
    def _load_prefixes(self, prefixes):
        """Load the indices for prefixes."""
        self._check_read()
        for prefix in prefixes:
            if prefix not in self._kndx_cache:
                # the load_data interface writes to these variables.
                self._cache = {}
                self._history = []
                self._filename = prefix
                try:
                    path = self._mapper.map(prefix) + '.kndx'
                    fp = self._transport.get(path)
                    try:
                        # _load_data may raise NoSuchFile if the target knit is
                        # completely empty.
                        _load_data(self, fp)
                    finally:
                        fp.close()
                    self._kndx_cache[prefix] = (self._cache, self._history)
                except NoSuchFile:
                    self._kndx_cache[prefix] = ({}, [])
                    if type(self._mapper) is ConstantMapper:
                        # preserve behaviour for revisions.kndx etc.
                        self._init_index(path)
                del self._cache
                del self._filename
                del self._history
    missing_keys = _mod_index._missing_keys_from_parent_map

    def _partition_keys(self, keys):
        """Turn keys into a dict of prefix:suffix_list."""
        result = {}
        for key in keys:
            prefix_keys = result.setdefault(key[:-1], [])
            prefix_keys.append(key[-1])
        return result
    def _dictionary_compress(self, keys):
        """Dictionary compress keys.

        :param keys: The keys to generate references to.
        :return: A string representation of keys. keys which are present are
            dictionary compressed, and others are emitted as fulltext with a
            '.' prefix.
        """
        if not keys:
            return ''
        result_list = []
        prefix = keys[0][:-1]
        cache = self._kndx_cache[prefix][0]
        for key in keys:
            if key[:-1] != prefix:
                # kndx indices cannot refer across partitioned storage.
                raise ValueError("mismatched prefixes for %r" % keys)
            if key[-1] in cache:
                # -- inlined lookup() --
                result_list.append(str(cache[key[-1]][5]))
                # -- end lookup () --
            else:
                result_list.append('.' + key[-1])
        return ' '.join(result_list)

    def num_versions(self):
        return len(self._history)

    __len__ = num_versions

    def get_versions(self):
        """Get all the versions in the file. not topologically sorted."""
        return self._history

    def _version_list_to_index(self, versions):
        result_list = []
        cache = self._cache
        for version in versions:
            if version in cache:
                # -- inlined lookup() --
                result_list.append(str(cache[version][5]))
                # -- end lookup () --
            else:
                result_list.append('.' + version)
        return ' '.join(result_list)
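    # Illustrative encoding (assumption): with history ['rev-1', 'rev-2'],
    # compressing [('rev-1',), ('ghost',)] yields '0 .ghost' - present
    # parents compress to their position in the history list, while absent
    # ones are emitted fulltext behind a '.' so they can be told apart when
    # the index is parsed back.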
    def _reset_cache(self):
        # Possibly this should be a LRU cache. A dictionary from key_prefix to
        # (cache_dict, history_vector) for parsed kndx files.
        self._kndx_cache = {}
        self._scope = self._get_scope()
        allow_writes = self._allow_writes()
        if allow_writes:
            self._mode = 'w'
        else:
            self._mode = 'r'
    def _sort_keys_by_io(self, keys, positions):
        """Figure out an optimal order to read the records for the given keys.

        Sort keys, grouped by index and sorted by position.

        :param keys: A list of keys whose records we want to read. This will be
            sorted 'in-place'.
        :param positions: A dict, such as the one returned by
            _get_components_positions()
        :return: None
        """
        def get_sort_key(key):
            index_memo = positions[key][1]
            # Group by prefix and position. index_memo[0] is the key, so it is
            # (file_id, revision_id) and we don't want to sort on revision_id,
            # index_memo[1] is the position, and index_memo[2] is the size,
            # which doesn't matter for the sort
            return index_memo[0][:-1], index_memo[1]
        return keys.sort(key=get_sort_key)

    _get_total_build_size = _get_total_build_size
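    # Usage sketch (assumption): given positions from
    # _get_components_positions(), _sort_keys_by_io(keys, positions) reorders
    # keys in place so that reads of the backing .knit file happen
    # sequentially by prefix and byte offset; like list.sort(), it returns
    # None rather than a new list.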
    def _split_key(self, key):
        """Split key into a prefix and suffix."""
        return key[:-1], key[-1]
class _KeyRefs(object):

    def __init__(self, track_new_keys=False):
        # dict mapping 'key' to 'set of keys referring to that key'
        self.refs = {}
        if track_new_keys:
            # set remembering all new keys
            self.new_keys = set()
        else:
            self.new_keys = None

    def clear(self):
        self.refs.clear()
        if self.new_keys:
            self.new_keys.clear()

    def add_references(self, key, refs):
        # Record the new references
        for referenced in refs:
            try:
                needed_by = self.refs[referenced]
            except KeyError:
                needed_by = self.refs[referenced] = set()
            needed_by.add(key)
        # Discard references satisfied by the new key
        self.add_key(key)
    def add_version(self, version_id, options, index_memo, parents):
        """Add a version record to the index."""
        self.add_versions(((version_id, options, index_memo, parents),))

    def add_versions(self, versions, random_id=False):
        """Add multiple versions to the index.

        :param versions: a list of tuples:
                         (version_id, options, pos, size, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        lines = []
        orig_history = self._history[:]
        orig_cache = self._cache.copy()

        try:
            for version_id, options, (index, pos, size), parents in versions:
                line = "\n%s %s %s %s %s :" % (version_id,
                                               ','.join(options),
                                               pos,
                                               size,
                                               self._version_list_to_index(parents))
                lines.append(line)
                self._cache_version(version_id, options, pos, size, tuple(parents))
            if not self._need_to_create:
                self._transport.append_bytes(self._filename, ''.join(lines))
            else:
                sio = StringIO()
                sio.write(self.HEADER)
                sio.writelines(lines)
                sio.seek(0)
                self._transport.put_file_non_atomic(self._filename, sio,
                                    create_parent_dir=self._create_parent_dir,
                                    mode=self._file_mode,
                                    dir_mode=self._dir_mode)
                self._need_to_create = False
        except:
            # If any problems happen, restore the original values and re-raise
            self._history = orig_history
            self._cache = orig_cache
            raise
    def has_version(self, version_id):
        """True if the version is in the index."""
        return version_id in self._cache

    def get_position(self, version_id):
        """Return details needed to access the version.

        .kndx indices do not support split-out data, so return None for the
        index field.

        :return: a tuple (None, data position, size) to hand to the access
            logic to get the record.
        """
        entry = self._cache[version_id]
        return None, entry[2], entry[3]

    def get_method(self, version_id):
        """Return compression method of specified version."""
        try:
            options = self._cache[version_id][1]
        except KeyError:
            raise RevisionNotPresent(version_id, self._filename)
        if 'fulltext' in options:
            return 'fulltext'
        else:
            if 'line-delta' not in options:
                raise errors.KnitIndexUnknownMethod(self._full_path(), options)
            return 'line-delta'

    def get_options(self, version_id):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        return self._cache[version_id][1]
    def get_parent_map(self, version_ids):
        """Passed through to by KnitVersionedFile.get_parent_map."""
        result = {}
        for version_id in version_ids:
            try:
                result[version_id] = tuple(self._cache[version_id][4])
            except KeyError:
                pass
        return result
    def get_new_keys(self):
        return self.new_keys

    def get_unsatisfied_refs(self):
        return self.refs.iterkeys()

    def _satisfy_refs_for_key(self, key):
        try:
            del self.refs[key]
        except KeyError:
            # No keys depended on this key. That's ok.
            pass

    def add_key(self, key):
        # satisfy refs for key, and remember that we've seen this key.
        self._satisfy_refs_for_key(key)
        if self.new_keys is not None:
            self.new_keys.add(key)

    def satisfy_refs_for_keys(self, keys):
        for key in keys:
            self._satisfy_refs_for_key(key)

    def get_referrers(self):
        result = set()
        for referrers in self.refs.itervalues():
            result.update(referrers)
        return result
class _KnitGraphIndex(object):
    """A KnitVersionedFiles index layered on GraphIndex."""
    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        try:
            return self.get_parent_map([version_id])[version_id]
        except KeyError:
            raise RevisionNotPresent(version_id, self)

    def check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        cache = self._cache
        for version_id in version_ids:
            if version_id not in cache:
                raise RevisionNotPresent(version_id, self._filename)
    def __init__(self, graph_index, is_locked, deltas=False, parents=True,
                 add_callback=None, track_external_parent_refs=False):
        """Construct a _KnitGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback to check whether the object should answer
            queries; returns True if the index is locked and thus usable.
        :param deltas: Allow delta-compressed records.
        :param parents: If True, record knits parents, if not do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        :param track_external_parent_refs: If True, record all external parent
            references from added records. These can be retrieved
            later by calling get_missing_parents().
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._deltas = deltas
        self._parents = parents
        if deltas and not parents:
            # XXX: TODO: Delta tree and parent graph should be conceptually
            # separate.
            raise KnitCorrupt(self, "Cannot do delta compression without "
                "parent tracking.")
        self.has_graph = parents
        self._is_locked = is_locked
        self._missing_compression_parents = set()
        if track_external_parent_refs:
            self._key_dependencies = _KeyRefs()
        else:
            self._key_dependencies = None

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._graph_index)
    def _check_write_ok(self):
        pass

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        :param keys: An iterable of index keys, - 1-tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise RevisionNotPresent(missing_keys.pop(), self)
    def _present_keys(self, version_ids):
        return set([
            node[1] for node in self._get_entries(version_ids)])

    def _parentless_ancestry(self, versions):
        """Honour the get_ancestry API for parentless knit indices."""
        wanted_keys = self._version_ids_to_keys(versions)
        present_keys = self._present_keys(wanted_keys)
        missing = set(wanted_keys).difference(present_keys)
        if missing:
            raise RevisionNotPresent(missing.pop(), self)
        return list(self._keys_to_version_ids(present_keys))
    def get_ancestry(self, versions, topo_sorted=True):
        """See VersionedFile.get_ancestry."""
        if not self._parents:
            return self._parentless_ancestry(versions)
        # XXX: This will do len(history) index calls - perhaps
        # it should be altered to be an index core feature?
        # get a graph of all the mentioned versions:
        graph = {}
        ghosts = set()
        versions = self._version_ids_to_keys(versions)
        pending = set(versions)
        while pending:
            # get all pending nodes
            this_iteration = pending
            new_nodes = self._get_entries(this_iteration)
            found = set()
            pending = set()
            for (index, key, value, node_refs) in new_nodes:
                # don't ask for ghosts - otherwise
                # we can end up looping with pending
                # being entirely ghosted.
                graph[key] = [parent for parent in node_refs[0]
                              if parent not in ghosts]
                # queue parents
                for parent in graph[key]:
                    # don't examine known nodes again
                    if parent in graph:
                        continue
                    pending.add(parent)
                found.add(key)
            ghosts.update(this_iteration.difference(found))
        if versions.difference(graph):
            raise RevisionNotPresent(versions.difference(graph).pop(), self)
        if topo_sorted:
            result_keys = topo_sort(graph.items())
        else:
            result_keys = graph.iterkeys()
        return [key[0] for key in result_keys]
    def get_ancestry_with_ghosts(self, versions):
        """See VersionedFile.get_ancestry."""
        if not self._parents:
            return self._parentless_ancestry(versions)
        # XXX: This will do len(history) index calls - perhaps
        # it should be altered to be an index core feature?
        # get a graph of all the mentioned versions:
        graph = {}
        versions = self._version_ids_to_keys(versions)
        pending = set(versions)
        while pending:
            # get all pending nodes
            this_iteration = pending
            new_nodes = self._get_entries(this_iteration)
            pending = set()
            for (index, key, value, node_refs) in new_nodes:
                graph[key] = node_refs[0]
                # queue parents
                for parent in graph[key]:
                    # don't examine known nodes again
                    if parent in graph:
                        continue
                    pending.add(parent)
            missing_versions = this_iteration.difference(graph)
            missing_needed = versions.intersection(missing_versions)
            if missing_needed:
                raise RevisionNotPresent(missing_needed.pop(), self)
            for missing_version in missing_versions:
                # add a key, no parents
                graph[missing_version] = []
                pending.discard(missing_version) # don't look for it
        result_keys = topo_sort(graph.items())
        return [key[0] for key in result_keys]
    def get_build_details(self, version_ids):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param version_ids: An iterable of version_ids.
        :return: A dict of version_id:(index_memo, compression_parent,
            parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        result = {}
        entries = self._get_entries(self._version_ids_to_keys(version_ids), True)
        for entry in entries:
            version_id = self._keys_to_version_ids((entry[1],))[0]
            if not self._parents:
                parents = ()
            else:
                parents = self._keys_to_version_ids(entry[3][0])
            if not self._deltas:
                compression_parent = None
            else:
                compression_parent_key = self._compression_parent(entry)
                if compression_parent_key:
                    compression_parent = self._keys_to_version_ids(
                        (compression_parent_key,))[0]
                else:
                    compression_parent = None
            noeol = (entry[2][0] == 'N')
            if compression_parent:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[version_id] = (self._node_to_position(entry),
                                  compression_parent, parents,
                                  (method, noeol))
        return result
    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        return compression_parents[0]
    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'
    def num_versions(self):
        return len(list(self._graph_index.iter_all_entries()))

    __len__ = num_versions

    def get_versions(self):
        """Get all the versions in the file. not topologically sorted."""
        return [node[1][0] for node in self._graph_index.iter_all_entries()]

    def has_version(self, version_id):
        """True if the version is in the index."""
        return len(self._present_keys(self._version_ids_to_keys([version_id]))) == 1

    def _keys_to_version_ids(self, keys):
        return tuple(key[0] for key in keys)
    def get_position(self, version_id):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(version_id)
        return self._node_to_position(node)

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
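    # Illustrative decode (assumption): a GraphIndex node value such as
    # 'N907 578' carries the no-eol flag in its first byte ('N' or ' ')
    # followed by "position size", so _node_to_position returns
    # (graph_index, 907, 578) for that node.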
    def get_method(self, version_id):
        """Return compression method of specified version."""
        return self._get_method(self._get_node(version_id))

    def _get_node(self, version_id):
        try:
            return list(self._get_entries(self._version_ids_to_keys([version_id])))[0]
        except IndexError:
            raise RevisionNotPresent(version_id, self)
    def get_options(self, version_id):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        node = self._get_node(version_id)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options
    def get_parent_map(self, version_ids):
        """Passed through to by KnitVersionedFile.get_parent_map."""
        nodes = self._get_entries(self._version_ids_to_keys(version_ids))
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1][0]] = self._keys_to_version_ids(node[3][0])
        else:
            for node in nodes:
                result[node[1][0]] = ()
        return result
    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        try:
            return self.get_parent_map([version_id])[version_id]
        except KeyError:
            raise RevisionNotPresent(version_id, self)

    def check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        keys = self._version_ids_to_keys(version_ids)
        present = self._present_keys(keys)
        missing = keys.difference(present)
        if missing:
            raise RevisionNotPresent(missing.pop(), self)
    def add_version(self, version_id, options, access_memo, parents):
        """Add a version record to the index."""
        return self.add_records(((version_id, options, access_memo, parents),))

    def add_records(self, records, random_id=False,
        missing_compression_parents=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
                         (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        :param missing_compression_parents: If True the records being added are
            only compressed against texts already in the index (or inside
            records). If False the records all refer to unavailable texts (or
            texts inside records) as compression parents.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anywhere. Otherwise the index will be corrupted.
        keys = {}
        compression_parents = set()
        key_dependencies = self._key_dependencies
        for (key, options, access_memo, parents) in records:
            if self._parents:
                parents = tuple(parents)
                if key_dependencies is not None:
                    key_dependencies.add_references(key, parents)
            index, pos, size = access_memo
            if 'no-eol' in options:
                value = 'N'
            else:
                value = ' '
            value += "%d %d" % (pos, size)
            if self._parents:
                if self._deltas:
                    if 'line-delta' in options:
                        node_refs = (parents, (parents[0],))
                        if missing_compression_parents:
                            compression_parents.add(parents[0])
                    else:
                        node_refs = (parents, ())
                else:
                    node_refs = (parents, )
            else:
                if parents:
                    raise KnitCorrupt(self, "attempt to add node with parents "
                        "in parentless index.")
                node_refs = ()
            keys[key] = (value, node_refs)
        result = []
        if self._parents:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value, node_refs))
        else:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value))
        self._add_callback(result)
        if missing_compression_parents:
            # This may appear to be incorrect (it does not check for
            # compression parents that are in the existing graph index),
            # but such records won't have been buffered, so this is
            # actually correct: every entry when
            # missing_compression_parents==True either has a missing parent, or
            # a parent that is one of the keys in records.
            compression_parents.difference_update(keys)
            self._missing_compression_parents.update(compression_parents)
        # Adding records may have satisfied missing compression parents.
        self._missing_compression_parents.difference_update(keys)
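    # Illustrative prepared node (assumption): a fulltext record at byte
    # offset 907, size 578, with one parent, becomes
    #
    #   (('rev-2',), ' 907 578', ((('rev-1',),), ()))
    #
    # in a delta-capable index: the value's first byte is the no-eol flag
    # ('N' or ' ') followed by "pos size", and node_refs carry the parent
    # and compression-parent reference lists handed to self._add_callback.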
    def scan_unvalidated_index(self, graph_index):
        """Inform this _KnitGraphIndex that there is an unvalidated index.

        This allows this _KnitGraphIndex to keep track of any missing
        compression parents we may want to have filled in to make those
        indices valid.

        :param graph_index: A GraphIndex
        """
        if self._deltas:
            new_missing = graph_index.external_references(ref_list_num=1)
            new_missing.difference_update(self.get_parent_map(new_missing))
            self._missing_compression_parents.update(new_missing)
        if self._key_dependencies is not None:
            # Add parent refs from graph_index (and discard parent refs that
            # the graph_index has).
            for node in graph_index.iter_all_entries():
                self._key_dependencies.add_references(node[1], node[3][0])
    def get_missing_compression_parents(self):
        """Return the keys of missing compression parents.

        Missing compression parents occur when a record stream was missing
        basis texts, or an index was scanned that had missing basis texts.
        """
        return frozenset(self._missing_compression_parents)

    def get_missing_parents(self):
        """Return the keys of missing parents."""
        # If updating this, you should also update
        # groupcompress._GCGraphIndex.get_missing_parents
        # We may have false positives, so filter those out.
        self._key_dependencies.satisfy_refs_for_keys(
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
        return frozenset(self._key_dependencies.get_unsatisfied_refs())
    def _check_read(self):
        """Raise an error if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise an error if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        if len(compression_parents) != 1:
            raise AssertionError(
                "Too many compression parents: %r" % compression_parents)
        return compression_parents[0]
    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys, False)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = ()
            else:
                parents = entry[3][0]
            if not self._deltas:
                compression_parent_key = None
            else:
                compression_parent_key = self._compression_parent(entry)
            noeol = (entry[2][0] == 'N')
            if compression_parent_key:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[key] = (self._node_to_position(entry),
                           compression_parent_key, parents,
                           (method, noeol))
        return result
def _get_entries(self, keys, check_present=False):
3079
"""Get the entries for keys.
3081
:param keys: An iterable of index key tuples.
3086
for node in self._graph_index.iter_entries(keys):
3088
found_keys.add(node[1])
3090
# adapt parentless index to the rest of the code.
3091
for node in self._graph_index.iter_entries(keys):
3092
yield node[0], node[1], node[2], ()
3093
found_keys.add(node[1])
3095
missing_keys = keys.difference(found_keys)
3097
raise RevisionNotPresent(missing_keys.pop(), self)

    def get_method(self, key):
        """Return compression method of specified key."""
        return self._get_method(self._get_node(key))

    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'

    def _get_node(self, key):
        try:
            return list(self._get_entries([key]))[0]
        except IndexError:
            raise RevisionNotPresent(key, self)

    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        node = self._get_node(key)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options

    def find_ancestry(self, keys):
        """See CombinedGraphIndex.find_ancestry()"""
        return self._graph_index.find_ancestry(keys, 0)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(key)
        return self._node_to_position(node)

    has_key = _mod_index._has_key_from_parent_map

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    missing_keys = _mod_index._missing_keys_from_parent_map

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
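
    # Example (illustrative): a node value of 'N1234 567' decodes to
    # (graph_index, 1234, 567); the [1:] slice skips the leading no-eol
    # flag byte ('N' or ' ').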

    def _sort_keys_by_io(self, keys, positions):
        """Figure out an optimal order to read the records for the given keys.

        Sort keys, grouped by index and sorted by position.

        :param keys: A list of keys whose records we want to read. This will be
            sorted 'in-place'.
        :param positions: A dict, such as the one returned by
            _get_components_positions()
        :return: None
        """
        def get_index_memo(key):
            # index_memo is at offset [1]. It is made up of (GraphIndex,
            # position, size). GI is an object, which will be unique for each
            # pack file. This causes us to group by pack file, then sort by
            # position. Size doesn't matter, but it isn't worth breaking up the
            # string.
            return positions[key][1]
        return keys.sort(key=get_index_memo)

    _get_total_build_size = _get_total_build_size
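

def _example_sort_keys_by_io():
    """Illustrative sketch (not part of the original module) of the grouping
    that _sort_keys_by_io relies on: index_memo tuples compare first on the
    index object (unique per pack), then on position, so sorting groups the
    reads per pack file and orders them by offset within each pack.
    """
    index_a, index_b = object(), object()   # stand-ins for GraphIndex objects
    positions = {
        ('key1',): (None, (index_a, 300, 10), None),
        ('key2',): (None, (index_a, 100, 10), None),
        ('key3',): (None, (index_b, 200, 10), None),
    }
    keys = [('key3',), ('key1',), ('key2',)]
    keys.sort(key=lambda key: positions[key][1])
    # keys now holds key2 before key1 (same pack, ascending offset); which
    # pack's group comes first depends on the arbitrary but stable ordering
    # of the two index objects.
    return keys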


class _KnitKeyAccess(object):
    """Access to records in .knit files."""

    def __init__(self, transport, mapper):
        """Create a _KnitKeyAccess with transport and mapper.

        :param transport: The transport the access object is rooted at.
        :param mapper: The mapper used to map keys to .knit files.
        """
        self._transport = transport
        self._mapper = mapper

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to wherever the access method is storing data.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _KnitKeyAccess the memo is (key, pos,
            length), where the key is the record key.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        # TODO: This can be tuned for writing to sftp and other servers where
        # append() is relatively expensive by grouping the writes to each key
        # prefix.
        for key, size in key_sizes:
            path = self._mapper.map(key)
            try:
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            except errors.NoSuchFile:
                self._transport.mkdir(osutils.dirname(path))
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            offset += size
            result.append((key, base, size))
        return result

    def flush(self):
        """Flush pending writes on this access object.

        For .knit files this is a no-op.
        """
        pass

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for the records.

        :param memos_for_retrieval: An iterable containing the access memo for
            retrieving the bytes.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index request to minimise readv's issued.
        request_lists = []
        current_prefix = None
        for (key, offset, length) in memos_for_retrieval:
            if current_prefix == key[:-1]:
                current_list.append((offset, length))
            else:
                if current_prefix is not None:
                    request_lists.append((current_prefix, current_list))
                current_prefix = key[:-1]
                current_list = [(offset, length)]
        # handle the last entry
        if current_prefix is not None:
            request_lists.append((current_prefix, current_list))
        for prefix, read_vector in request_lists:
            path = self._mapper.map(prefix) + '.knit'
            for pos, data in self._transport.readv(path, read_vector):
                yield data


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
        :param flush_func: A function to call to flush any buffered writes
            (see flush()).
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result

    def flush(self):
        """Flush pending writes on this access object.

        This will flush any buffered writes to a NewPack.
        """
        if self._flush_func is not None:
            self._flush_func()

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for the records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an index
                # reload, and this index has gone missing, we need to start
                # over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that can
                    # be done. We raise the error immediately.
                    raise
                raise errors.RetryWithNewPacks(index,
                                               reload_occurred=True,
                                               exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                                               reload_occurred=False,
                                               exc_info=sys.exc_info())

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error.
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
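

def _example_retry_read(access, memos):
    """Illustrative sketch (not part of the original module): the retry loop
    callers are expected to wrap around _DirectPackAccess.get_raw_records()
    when a reload_func is configured.
    """
    while True:
        try:
            return list(access.get_raw_records(memos))
        except errors.RetryWithNewPacks, e:
            # reload_or_raise re-raises the original error when the failure
            # is permanent; if it returns, the pack map was refreshed and it
            # is safe to retry the read.
            access.reload_or_raise(e)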


class _StreamAccess(object):
    """A Knit Access object that provides data from a datastream.

    It also provides a fallback that presents annotated data from a *backing*
    access object as unannotated data.

    This is triggered by an index_memo which is pointing to a different index
    than this was constructed with, and is used to allow extracting full
    unannotated texts for insertion into annotated knits.
    """

    def __init__(self, reader_callable, stream_index, backing_knit,
        orig_factory):
        """Create a _StreamAccess object.

        :param reader_callable: The reader_callable from the datastream.
            This is called to buffer all the data immediately, for
            random access.
        :param stream_index: The index the data stream this provides access to
            which will be present in native index_memo's.
        :param backing_knit: The knit object that will provide access to
            annotated texts which are not available in the stream, so as to
            create unannotated texts.
        :param orig_factory: The original content factory used to generate the
            stream. This is used for checking whether the thunk code for
            supporting _copy_texts will generate the correct form of data.
        """
        self.data = reader_callable(None)
        self.stream_index = stream_index
        self.backing_knit = backing_knit
        self.orig_factory = orig_factory

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for the records.

        :param memos_for_retrieval: An iterable of memos from the
            _StreamIndex object identifying bytes to read; for these classes
            they are (from_backing_knit, index, start, end) and can point to
            either the backing knit or streamed data.
        :return: An iterator yielding a byte string for each record in
            memos_for_retrieval.
        """
        # use a generator for memory friendliness
        for from_backing_knit, version_id, start, end in memos_for_retrieval:
            if not from_backing_knit:
                if version_id is not self.stream_index:
                    raise AssertionError()
                yield self.data[start:end]
                continue
            # we have been asked to thunk. This thunking only occurs when
            # we are obtaining plain texts from an annotated backing knit
            # so that _copy_texts will work.
            # We could improve performance here by scanning for where we need
            # to do this and using get_line_list, then interleaving the output
            # as desired. However, for now, this is sufficient.
            if self.orig_factory.__class__ != KnitPlainFactory:
                raise errors.KnitCorrupt(
                    self, 'Bad thunk request %r cannot be backed by %r' %
                        (version_id, self.orig_factory))
            lines = self.backing_knit.get_lines(version_id)
            line_bytes = ''.join(lines)
            digest = sha_string(line_bytes)
            # the packed form of the fulltext always has a trailing newline,
            # even if the actual text does not, unless the file is empty. the
            # record options including the noeol flag are passed through by
            # _StreamIndex, so this is safe.
            if lines:
                if lines[-1][-1] != '\n':
                    lines[-1] = lines[-1] + '\n'
                    line_bytes += '\n'
            # We want plain data, because we expect to thunk only to allow text
            # extraction.
            size, bytes = self.backing_knit._data._record_to_data(version_id,
                digest, lines, line_bytes)
            yield bytes


class _StreamIndex(object):
    """A Knit Index object that uses the data map from a datastream."""

    def __init__(self, data_list, backing_index):
        """Create a _StreamIndex object.

        :param data_list: The data_list from the datastream.
        :param backing_index: The index which will supply values for nodes
            referenced outside of this stream.
        """
        self.data_list = data_list
        self.backing_index = backing_index
        self._by_version = {}
        pos = 0
        for key, options, length, parents in data_list:
            self._by_version[key] = options, (pos, pos + length), parents
            pos += length

    def get_ancestry(self, versions, topo_sorted):
        """Get an ancestry list for versions."""
        if topo_sorted:
            # Not needed for basic joins
            raise NotImplementedError(self.get_ancestry)
        # get a graph of all the mentioned versions:
        # Little ugly - basically copied from KnitIndex, but don't want to
        # accidentally incorporate too much of that index's code.
        ancestry = set()
        pending = set(versions)
        cache = self._by_version
        while pending:
            version = pending.pop()
            # trim ghosts
            try:
                parents = [p for p in cache[version][2] if p in cache]
            except KeyError:
                raise RevisionNotPresent(version, self)
            # if not completed and not a ghost
            pending.update([p for p in parents if p not in ancestry])
            ancestry.add(version)
        return list(ancestry)

    def get_build_details(self, version_ids):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param version_ids: An iterable of version_ids.
        :return: A dict of version_id:(index_memo, compression_parent,
            parents, record_details).
            index_memo
                opaque memo that can be passed to _StreamAccess.read_records
                to extract the raw data; for these classes it is
                (from_backing_knit, index, start, end)
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        result = {}
        for version_id in version_ids:
            try:
                method = self.get_method(version_id)
            except errors.RevisionNotPresent:
                # ghosts are omitted
                continue
            parent_ids = self.get_parents_with_ghosts(version_id)
            noeol = ('no-eol' in self.get_options(version_id))
            index_memo = self.get_position(version_id)
            from_backing_knit = index_memo[0]
            if from_backing_knit:
                # texts retrieved from the backing knit are always full texts
                method = 'fulltext'
            if method == 'fulltext':
                compression_parent = None
            else:
                compression_parent = parent_ids[0]
            result[version_id] = (index_memo, compression_parent,
                parent_ids, (method, noeol))
        return result

    def get_method(self, version_id):
        """Return compression method of specified version."""
        options = self.get_options(version_id)
        if 'fulltext' in options:
            return 'fulltext'
        elif 'line-delta' in options:
            return 'line-delta'
        else:
            raise errors.KnitIndexUnknownMethod(self, options)

    def get_options(self, version_id):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        try:
            return self._by_version[version_id][0]
        except KeyError:
            options = list(self.backing_index.get_options(version_id))
            if 'fulltext' in options:
                pass
            elif 'line-delta' in options:
                # Texts from the backing knit are always returned from the
                # stream as full texts
                options.remove('line-delta')
                options.append('fulltext')
            else:
                raise errors.KnitIndexUnknownMethod(self, options)
            return tuple(options)

    def get_parent_map(self, version_ids):
        """Passed through by KnitVersionedFile.get_parent_map."""
        result = {}
        pending_ids = set()
        for version_id in version_ids:
            try:
                result[version_id] = self._by_version[version_id][2]
            except KeyError:
                pending_ids.add(version_id)
        result.update(self.backing_index.get_parent_map(pending_ids))
        return result

    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        try:
            return self.get_parent_map([version_id])[version_id]
        except KeyError:
            raise RevisionNotPresent(version_id, self)

    def get_position(self, version_id):
        """Return details needed to access the version.

        _StreamAccess has the data as a big array, so we return slice
        coordinates into that (as index_memo's are opaque outside the
        index and matching access class).

        :return: a tuple (from_backing_knit, index, start, end) that can
            be passed e.g. to get_raw_records.
            If from_backing_knit is False, index will be self, otherwise it
            will be a version id.
        """
        try:
            start, end = self._by_version[version_id][1]
            return False, self, start, end
        except KeyError:
            # Signal to the access object to handle this from the backing knit.
            return (True, version_id, None, None)
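
    # Example (illustrative): a version held in the stream yields a memo
    # like (False, <this _StreamIndex>, 0, 1234), while a version that must
    # be fetched from the backing knit yields (True, version_id, None, None).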

    def get_versions(self):
        """Get all the versions in the stream."""
        return self._by_version.keys()


class _KnitData(object):
    """Manage extraction of data from a KnitAccess, caching and decompressing.

    The KnitData class provides the logic for parsing and using knit records,
    making use of an access method for the low level read and write operations.
    """

    def __init__(self, access):
        """Create a KnitData object.

        :param access: The access method to use. Access methods such as
            _KnitAccess manage the insertion of raw records and the subsequent
            retrieval of the same.
        """
        self._access = access
        self._checked = False

    def _open_file(self):
        return self._access.open_file()

    def _record_to_data(self, version_id, digest, lines, dense_lines=None):
        """Convert version_id, digest, lines into a raw data block.

        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in \n,
            dense_lines may be a list with one line in it, containing all the
            1000 lines and their \n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len, a bytestring containing the compressed record).
        """
        # Note: using a string copy here increases memory pressure with e.g.
        # ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
        # when doing the initial commit of a mozilla tree. RBC 20070921
        bytes = ''.join(chain(
            ["version %s %d %s\n" % (version_id,
                                     len(lines),
                                     digest)],
            dense_lines or lines,
            ["end %s\n" % version_id]))
        compressed_bytes = bytes_to_gzip(bytes)
        return len(compressed_bytes), compressed_bytes
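
    # Illustrative example (uncompressed form of the record built above,
    # for a two-line text):
    #
    #   version <version-id> 2 <sha1-hex-digest>
    #   first line
    #   second line
    #   end <version-id>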

    def add_raw_records(self, sizes, raw_data):
        """Append a prepared record to the data file.

        :param sizes: An iterable containing the size of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: a list of index data for the way the data was stored.
            See the access method add_raw_records documentation for more
            details.
        """
        return self._access.add_raw_records(sizes, raw_data)

    def _parse_record_header(self, version_id, raw_data):
        """Parse a record header for consistency.

        :return: the decompressor stream and the header record, as
            (stream, header_record)
        """
        df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            rec = self._check_header(version_id, df.readline())
        except Exception, e:
            raise KnitCorrupt(self._access,
                              "While reading {%s} got %s(%s)"
                              % (version_id, e.__class__.__name__, str(e)))
        return df, rec

    def _split_header(self, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self._access,
                              'unexpected number of elements in record header')
        return rec

    def _check_header_version(self, rec, version_id):
        if rec[1] != version_id:
            raise KnitCorrupt(self._access,
                              'unexpected version, wanted %r, got %r'
                              % (version_id, rec[1]))

    def _check_header(self, version_id, line):
        rec = self._split_header(line)
        self._check_header_version(rec, version_id)
        return rec

    def _parse_record_unchecked(self, data):
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self._access, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self._access,
                              'incorrect number of lines %s != %s'
                              ' for version {%s}'
                              % (len(record_contents), int(rec[2]),
                                 rec[1]))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self._access,
                              'unexpected version end line %r, wanted %r'
                              % (last_line, rec[1]))
        df.close()
        return rec, record_contents

    def _parse_record(self, version_id, data):
        rec, record_contents = self._parse_record_unchecked(data)
        self._check_header_version(rec, version_id)
        return record_contents, rec[3]

    def read_records_iter_raw(self, records):
        """Read text records from data file and yield raw data.

        This unpacks enough of the text record to validate the id is
        as expected but that's all.

        Each item the iterator yields is (version_id, bytes,
            sha1_of_full_text).
        """
        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(records):
            # grab the disk data needed.
            needed_offsets = [index_memo for version_id, index_memo
                              in records]
            raw_records = self._access.get_raw_records(needed_offsets)

        for version_id, index_memo in records:
            data = raw_records.next()
            # validate the header
            df, rec = self._parse_record_header(version_id, data)
            df.close()
            yield version_id, data, rec[3]

    def read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read.
        Not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.

        :param records: A list of (version_id, pos, len) entries
        :return: Yields (version_id, contents, digest) in the order
            read, not the order requested
        """
        needed_records = sorted(set(records), key=operator.itemgetter(1))
        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (ie, reads continuous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for version_id, index_memo in needed_records])

        for (version_id, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(version_id, data)
            yield version_id, content, digest

    def read_records(self, records):
        """Read records into a dictionary."""
        components = {}
        for record_id, content, digest in \
                self.read_records_iter(records):
            components[record_id] = (content, digest)
        return components
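

def _example_knit_record_roundtrip():
    """Illustrative sketch (not part of the original module): serialise a
    text with _KnitData._record_to_data and parse it back with _parse_record.
    A None access object is enough because the happy path never touches it;
    sha_string is assumed in scope, as it is used elsewhere in this module.
    """
    data = _KnitData(access=None)
    lines = ['first line\n', 'second line\n']
    digest = sha_string(''.join(lines))
    size, raw_bytes = data._record_to_data('v1', digest, lines)
    record_contents, rec_digest = data._parse_record('v1', raw_bytes)
    if record_contents != lines or rec_digest != digest:
        raise AssertionError('round trip failed')
    return size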


class InterKnit(InterVersionedFile):
    """Optimised code paths for knit to knit operations."""

    _matching_file_from_factory = staticmethod(make_file_knit)
    _matching_file_to_factory = staticmethod(make_file_knit)

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with knits."""
        try:
            return (isinstance(source, KnitVersionedFile) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False

    def _copy_texts(self, pb, msg, version_ids, ignore_missing=False):
        """Copy texts to the target by extracting and adding them one by one.

        see join() for the parameter definitions.
        """
        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        # --- the below is factorable out with VersionedFile.join, but wait for
        # VersionedFiles, it may all be simpler then.
        graph = Graph(self.source)
        search = graph._make_breadth_first_searcher(version_ids)
        transitive_ids = set()
        map(transitive_ids.update, list(search))
        parent_map = self.source.get_parent_map(transitive_ids)
        order = topo_sort(parent_map.items())

        def size_of_content(content):
            return sum(len(line) for line in content.text())
        # Cache at most 10MB of parent texts
        parent_cache = lru_cache.LRUSizeCache(max_size=10*1024*1024,
                                              compute_size=size_of_content)
        # TODO: jam 20071116 It would be nice to have a streaming interface to
        #       get multiple texts from a source. The source could be smarter
        #       about how it handled intermediate stages.
        #       get_line_list() or make_mpdiffs() seem like a possibility, but
        #       at the moment they extract all full texts into memory, which
        #       causes us to store more than our 3x fulltext goal.
        #       Repository.iter_files_bytes() may be another possibility
        to_process = [version for version in order
                      if version not in self.target]
        total = len(to_process)
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for index, version in enumerate(to_process):
                pb.update('Converting versioned data', index, total)
                sha1, num_bytes, parent_text = self.target.add_lines(version,
                    self.source.get_parents_with_ghosts(version),
                    self.source.get_lines(version),
                    parent_texts=parent_cache)
                parent_cache[version] = parent_text
        finally:
            pb.finished()
        return total

    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        # If the source and target are mismatched w.r.t. annotations vs
        # plain, the data needs to be converted accordingly
        if self.source.factory.annotated == self.target.factory.annotated:
            converter = None
        elif self.source.factory.annotated:
            converter = self._anno_to_plain_converter
        else:
            # We're converting from a plain to an annotated knit. Copy them
            # across by full texts.
            return self._copy_texts(pb, msg, version_ids, ignore_missing)

        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)
            if None in version_ids:
                version_ids.remove(None)

            self.source_ancestry = set(self.source.get_ancestry(version_ids,
                topo_sorted=False))
            this_versions = set(self.target._index.get_versions())
            # XXX: For efficiency we should not look at the whole index,
            #      we only need to consider the referenced revisions - they
            #      must all be present, or the method must be full-text.
            #      TODO, RBC 20070919
            needed_versions = self.source_ancestry - this_versions

            if not needed_versions:
                return 0
            full_list = topo_sort(
                self.source.get_parent_map(self.source.versions()))

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # plan the join:
            copy_queue_records = []
            copy_queue = []
            copy_set = set()
            for version_id in version_list:
                options = self.source._index.get_options(version_id)
                parents = self.source._index.get_parents_with_ghosts(version_id)
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must:
                    # * already have it or
                    # * have it scheduled already
                    # otherwise we don't care
                    if not (self.target.has_version(parent) or
                            parent in copy_set or
                            not self.source.has_version(parent)):
                        raise AssertionError("problem joining parent %r "
                            "from %r to %r"
                            % (parent, self.source, self.target))
                index_memo = self.source._index.get_position(version_id)
                copy_queue_records.append((version_id, index_memo))
                copy_queue.append((version_id, options, parents))
                copy_set.add(version_id)

            # data suck the join:
            count = 0
            total = len(version_list)
            raw_datum = []
            raw_records = []
            for (version_id, raw_data, _), \
                (version_id2, options, parents) in \
                izip(self.source._data.read_records_iter_raw(copy_queue_records),
                     copy_queue):
                if not (version_id == version_id2):
                    raise AssertionError('logic error, inconsistent results')
                count = count + 1
                pb.update("Joining knit", count, total)
                if converter:
                    size, raw_data = converter(raw_data, version_id, options,
                        parents)
                else:
                    size = len(raw_data)
                raw_records.append((version_id, options, parents, size))
                raw_datum.append(raw_data)
            self.target._add_raw_records(raw_records, ''.join(raw_datum))
            return count
        finally:
            pb.finished()

    def _anno_to_plain_converter(self, raw_data, version_id, options,
                                 parents):
        """Convert annotated content to plain content."""
        data, digest = self.source._data._parse_record(version_id, raw_data)
        if 'fulltext' in options:
            content = self.source.factory.parse_fulltext(data, version_id)
            lines = self.target.factory.lower_fulltext(content)
        else:
            delta = self.source.factory.parse_line_delta(data, version_id,
                plain=True)
            lines = self.target.factory.lower_line_delta(delta)
        return self.target._data._record_to_data(version_id, digest, lines)


InterVersionedFile.register_optimiser(InterKnit)


class WeaveToKnit(InterVersionedFile):
    """Optimised code paths for weave to knit operations."""

    _matching_file_from_factory = bzrlib.weave.WeaveFile
    _matching_file_to_factory = staticmethod(make_file_knit)

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with weaves to knits."""
        try:
            return (isinstance(source, bzrlib.weave.Weave) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False

    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)

            self.source_ancestry = set(self.source.get_ancestry(version_ids))
            this_versions = set(self.target._index.get_versions())
            needed_versions = self.source_ancestry - this_versions

            if not needed_versions:
                return 0
            full_list = topo_sort(
                self.source.get_parent_map(self.source.versions()))

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # do the join:
            count = 0
            total = len(version_list)
            parent_map = self.source.get_parent_map(version_list)
            for version_id in version_list:
                pb.update("Converting to knit", count, total)
                parents = parent_map[version_id]
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must already have it
                    if not self.target.has_version(parent):
                        raise AssertionError("%r does not have parent %r"
                            % (self.target, parent))
                self.target.add_lines(
                    version_id, parents, self.source.get_lines(version_id))
                count = count + 1
            return count
        finally:
            pb.finished()


InterVersionedFile.register_optimiser(WeaveToKnit)


# Deprecated, use PatienceSequenceMatcher instead
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher


class _KnitAnnotator(annotate.Annotator):
    """Build up the annotations for a text."""

    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different than the
        data you need to annotate it. (At a minimum, you need both parents to
        create an annotation, but only need 1 parent to generate the full
        text.)

        :return: A list of (key, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        pending = set([key])
        records = []
        ann_keys = set()
        self._num_needed_children[key] = 1
        while pending:
            # get all pending nodes
            this_iteration = pending
            build_details = self._vf._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._vf._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parent_keys,
                 record_details) = details
                self._parent_map[key] = parent_keys
                self._heads_provider = None
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update([p for p in parent_keys
                                if p not in self._all_build_details])
                for parent_key in parent_keys:
                    if parent_key in self._num_needed_children:
                        self._num_needed_children[parent_key] += 1
                    else:
                        self._num_needed_children[parent_key] = 1
                if compression_parent:
                    if compression_parent in self._num_compression_children:
                        self._num_compression_children[compression_parent] += 1
                    else:
                        self._num_compression_children[compression_parent] = 1

            missing_versions = this_iteration.difference(build_details.keys())
            if missing_versions:
                for key in missing_versions:
                    if key in self._parent_map and key in self._text_cache:
                        # We already have this text ready, we just need to
                        # yield it later so we get it annotated
                        ann_keys.add(key)
                        parent_keys = self._parent_map[key]
                        for parent_key in parent_keys:
                            if parent_key in self._num_needed_children:
                                self._num_needed_children[parent_key] += 1
                            else:
                                self._num_needed_children[parent_key] = 1
                        pending.update([p for p in parent_keys
                                        if p not in self._all_build_details])
                    else:
                        raise errors.RevisionNotPresent(key, self._vf)
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records, ann_keys

    def _get_needed_texts(self, key, pb=None):
        # if True or len(self._vf._fallback_vfs) > 0:
        if len(self._vf._fallback_vfs) > 0:
            # If we have fallbacks, go to the generic path
            for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
                yield v
            return
        while True:
            try:
                records, ann_keys = self._get_build_graph(key)
                for idx, (sub_key, text, num_lines) in enumerate(
                        self._extract_texts(records)):
                    if pb is not None:
                        pb.update('annotating', idx, len(records))
                    yield sub_key, text, num_lines
                for sub_key in ann_keys:
                    text = self._text_cache[sub_key]
                    num_lines = len(text) # bad assumption
                    yield sub_key, text, num_lines
                return
            except errors.RetryWithNewPacks, e:
                self._vf._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()

    def _cache_delta_blocks(self, key, compression_parent, delta, lines):
        parent_lines = self._text_cache[compression_parent]
        blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
        self._matching_blocks[(key, compression_parent)] = blocks

    def _expand_record(self, key, parent_keys, compression_parent, record,
                       record_details):
        delta = None
        if compression_parent:
            if compression_parent not in self._content_objects:
                # Waiting for the parent
                self._pending_deltas.setdefault(compression_parent, []).append(
                    (key, parent_keys, record, record_details))
                return None
            # We have the basis parent, so expand the delta
            num = self._num_compression_children[compression_parent]
            num -= 1
            if num == 0:
                base_content = self._content_objects.pop(compression_parent)
                self._num_compression_children.pop(compression_parent)
            else:
                self._num_compression_children[compression_parent] = num
                base_content = self._content_objects[compression_parent]
            # It is tempting to want to copy_base_content=False for the last
            # child object. However, whenever noeol=False,
            # self._text_cache[parent_key] is content._lines. So mutating it
            # gives very bad results.
            # The alternative is to copy the lines into text cache, but then we
            # are copying anyway, so just do it here.
            content, delta = self._vf._factory.parse_record(
                key, record, record_details, base_content,
                copy_base_content=True)
        else:
            # Fulltext record
            content, _ = self._vf._factory.parse_record(
                key, record, record_details, None)
        if self._num_compression_children.get(key, 0) > 0:
            self._content_objects[key] = content
        lines = content.text()
        self._text_cache[key] = lines
        if delta is not None:
            self._cache_delta_blocks(key, compression_parent, delta, lines)
        return lines

    def _get_parent_annotations_and_matches(self, key, text, parent_key):
        """Get the list of annotations for the parent, and the matching lines.

        :param text: The opaque value given by _get_needed_texts
        :param parent_key: The key for the parent text
        :return: (parent_annotations, matching_blocks)
            parent_annotations is a list as long as the number of lines in
                parent
            matching_blocks is a list of (parent_idx, text_idx, len) tuples
                indicating which lines match between the two texts
        """
        block_key = (key, parent_key)
        if block_key in self._matching_blocks:
            blocks = self._matching_blocks.pop(block_key)
            parent_annotations = self._annotations_cache[parent_key]
            return parent_annotations, blocks
        return annotate.Annotator._get_parent_annotations_and_matches(self,
            key, text, parent_key)

    def _process_pending(self, key):
        """The content for 'key' was just processed.

        Determine if there is any more pending work to be processed.
        """
        to_return = []
        if key in self._pending_deltas:
            compression_parent = key
            children = self._pending_deltas.pop(key)
            for child_key, parent_keys, record, record_details in children:
                lines = self._expand_record(child_key, parent_keys,
                                            compression_parent,
                                            record, record_details)
                if self._check_ready_for_annotations(child_key, parent_keys):
                    to_return.append(child_key)
        # Also check any children that are waiting for this parent to be
        # annotated
        if key in self._pending_annotation:
            children = self._pending_annotation.pop(key)
            to_return.extend([c for c, p_keys in children
                              if self._check_ready_for_annotations(c, p_keys)])
        return to_return

    def _check_ready_for_annotations(self, key, parent_keys):
        """Return True if this text is ready to be yielded.

        Otherwise, this will return False, and queue the text into
        self._pending_annotation
        """
        for parent_key in parent_keys:
            if parent_key not in self._annotations_cache:
                # still waiting on at least one parent text, so queue it up
                # Note that if there are multiple parents, we need to wait
                # for all of them.
                self._pending_annotation.setdefault(parent_key,
                    []).append((key, parent_keys))
                return False
        return True

    def _extract_texts(self, records):
        """Extract the various texts needed based on records"""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        # Basic data flow:
        #   1) As 'records' are read, see if we can expand these records into
        #      Content objects (and thus lines)
        #   2) If a given line-delta is waiting on its compression parent, it
        #      gets queued up into self._pending_deltas, otherwise we expand
        #      it, and put it into self._text_cache and self._content_objects
        #   3) If we expanded the text, we will then check to see if all
        #      parents have also been processed. If so, this text gets yielded,
        #      else this record gets set aside into pending_annotation
        #   4) Further, if we expanded the text in (2), we will then check to
        #      see if there are any children in self._pending_deltas waiting to
        #      also be processed. If so, we go back to (2) for those
        #   5) Further again, if we yielded the text, we can then check if that
        #      'unlocks' any of the texts in pending_annotations, which should
        #      then get yielded as well
        # Note that both steps 4 and 5 are 'recursive' in that unlocking one
        # compression child could unlock yet another, and yielding a fulltext
        # will also 'unlock' the children that are waiting on that annotation.
        # (Though also, unlocking 1 parent's fulltext, does not unlock a child
        # if other parents are also waiting.)
        # We want to yield content before expanding child content objects, so
        # that we know when we can re-use the content lines, and the annotation
        # code can know when it can stop caching fulltexts, as well.

        # Children that are missing their compression parent
        pending_deltas = {}
        for (key, record, digest) in self._vf._read_records_iter(records):
            details = self._all_build_details[key]
            (_, compression_parent, parent_keys, record_details) = details
            lines = self._expand_record(key, parent_keys, compression_parent,
                                        record, record_details)
            if lines is None:
                # Pending delta should be queued up
                continue
            # At this point, we may be able to yield this content, if all
            # parents are also finished
            yield_this_text = self._check_ready_for_annotations(key,
                                                                parent_keys)
            if yield_this_text:
                # All parents present
                yield key, lines, len(lines)
            to_process = self._process_pending(key)
            while to_process:
                this_process = to_process
                to_process = []
                for key in this_process:
                    lines = self._text_cache[key]
                    yield key, lines, len(lines)
                    to_process.extend(self._process_pending(key))
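

def _example_pending_release():
    """Illustrative sketch (not part of the original module) of the
    'unlocking' pattern used by _extract_texts and _process_pending: a text
    is emitted only once every parent it waits on has been emitted, and each
    emission may release further waiting children.
    """
    parents = {'A': (), 'B': ('A',), 'C': ('A', 'B')}
    waiting = dict((key, set(ps)) for key, ps in parents.items() if ps)
    ready = [key for key, ps in parents.items() if not ps]
    emitted = []
    while ready:
        key = ready.pop()
        emitted.append(key)
        released = []
        for child, blockers in waiting.items():
            blockers.discard(key)
            if not blockers:
                released.append(child)
        for child in released:
            del waiting[child]
        ready.extend(released)
    return emitted   # ['A', 'B', 'C'] for this graph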


try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._knit_load_data_py import _load_data_py as _load_data