DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
_STREAM_MIN_BUFFER_SIZE = 5*1024*1024


class KnitAdapter(object):
    """Base class for knit record adaption."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas from.
            May be None for adapters that do not need to access basis texts.
        """
        self._data = KnitVersionedFiles(None, None)
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._basis_vf = basis_vf


class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content = self._annotate_factory.parse_fulltext(contents, rec[1])
        size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
        return bytes


class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        contents = self._plain_factory.lower_line_delta(delta)
        size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
        return bytes


class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to fulltexts."""

    def get_bytes(self, factory):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content, delta = self._annotate_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())


class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter for deltas from annotated knits to fulltexts."""

    def get_bytes(self, factory):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0]
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_chunks = basis_entry.get_bytes_as('chunked')
        basis_lines = osutils.chunks_to_lines(basis_chunks)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        basis_content._should_strip_eol = factory._build_details[1]
        return ''.join(basis_content.text())


class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to fulltexts."""

    def get_bytes(self, factory):
        compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        content, delta = self._plain_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())


class DeltaPlainToFullText(KnitAdapter):
    """An adapter for deltas from plain knits to fulltexts."""

    def get_bytes(self, factory):
        compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0]
        # XXX: string splitting overhead.
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_chunks = basis_entry.get_bytes_as('chunked')
        basis_lines = osutils.chunks_to_lines(basis_chunks)
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        return ''.join(content.text())
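
# Adapters are normally obtained through the adapter registry, keyed by a
# (source storage kind, target storage kind) pair. A minimal sketch, assuming
# a populated adapter_registry as used elsewhere in this module:
#
#   adapter_factory = adapter_registry.get(
#       ('knit-annotated-delta-gz', 'fulltext'))
#   adapter = adapter_factory(basis_vf)   # basis_vf supplies delta bases
#   text = adapter.get_bytes(content_factory)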


class KnitContentFactory(ContentFactory):
    """Content factory for streaming from knits.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, build_details, sha1, raw_record,
        annotated, knit=None, network_bytes=None):
        """Create a KnitContentFactory for key.

        :param key: The key.
        :param parents: The parents.
        :param build_details: The build details as returned from
            get_build_details.
        :param sha1: The sha1 expected from the full text of this object.
        :param raw_record: The bytes of the knit data from disk.
        :param annotated: True if the raw data is annotated.
        :param network_bytes: None to calculate the network bytes on demand,
            not-none if they are already known.
        """
        ContentFactory.__init__(self)
        self.sha1 = sha1
        self.key = key
        self.parents = parents
        if build_details[0] == 'line-delta':
            kind = 'delta'
        else:
            kind = 'ft'
        if annotated:
            annotated_kind = 'annotated-'
        else:
            annotated_kind = ''
        self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
        self._raw_record = raw_record
        self._network_bytes = network_bytes
        self._build_details = build_details
        self._knit = knit

    def _create_network_bytes(self):
        """Create a fully serialised network version for transmission."""
        # storage_kind, key, parents, Noeol, raw_record
        key_bytes = '\x00'.join(self.key)
        if self.parents is None:
            parent_bytes = 'None:'
        else:
            parent_bytes = '\t'.join('\x00'.join(key) for key in self.parents)
        if self._build_details[1]:
            noeol = 'N'
        else:
            noeol = ' '
        network_bytes = "%s\n%s\n%s\n%s%s" % (self.storage_kind, key_bytes,
            parent_bytes, noeol, self._raw_record)
        self._network_bytes = network_bytes

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._network_bytes is None:
                self._create_network_bytes()
            return self._network_bytes
        if ('-ft-' in self.storage_kind and
            storage_kind in ('chunked', 'fulltext')):
            adapter_key = (self.storage_kind, 'fulltext')
            adapter_factory = adapter_registry.get(adapter_key)
            adapter = adapter_factory(None)
            bytes = adapter.get_bytes(self)
            if storage_kind == 'chunked':
                return [bytes]
            else:
                return bytes
        if self._knit is not None:
            # Not redundant with direct conversion above - that only handles
            # fulltext cases.
            if storage_kind == 'chunked':
                return self._knit.get_lines(self.key[0])
            elif storage_kind == 'fulltext':
                return self._knit.get_text(self.key[0])
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)


class LazyKnitContentFactory(ContentFactory):
    """A ContentFactory which can either generate full text or a wire form.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, generator, first):
        """Create a LazyKnitContentFactory.

        :param key: The key of the record.
        :param parents: The parents of the record.
        :param generator: A _ContentMapGenerator containing the record for this
            key.
        :param first: Is this the first content object returned from generator?
            if it is, its storage kind is knit-delta-closure, otherwise it is
            knit-delta-closure-ref
        """
        self.key = key
        self.parents = parents
        self.sha1 = None
        self._generator = generator
        self.storage_kind = "knit-delta-closure"
        if not first:
            self.storage_kind = self.storage_kind + "-ref"
        self._first = first

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                return self._generator._wire_bytes()
            else:
                # all the keys etc are contained in the bytes returned in the
                # first record.
                return ''
        if storage_kind in ('chunked', 'fulltext'):
            chunks = self._generator._get_one_work(self.key).text()
            if storage_kind == 'chunked':
                return chunks
            else:
                return ''.join(chunks)
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)


def knit_delta_closure_to_records(storage_kind, bytes, line_end):
    """Convert a network record to an iterator over stream records.

    :param storage_kind: The storage kind of the record.
        Must be 'knit-delta-closure'.
    :param bytes: The bytes of the record on the network.
    """
    generator = _NetworkContentMapGenerator(bytes, line_end)
    return generator.get_record_stream()


def knit_network_to_record(storage_kind, bytes, line_end):
    """Convert a network record to a record object.

    :param storage_kind: The storage kind of the record.
    :param bytes: The bytes of the record on the network.
    """
    start = line_end
    line_end = bytes.find('\n', start)
    key = tuple(bytes[start:line_end].split('\x00'))
    start = line_end + 1
    line_end = bytes.find('\n', start)
    parent_line = bytes[start:line_end]
    if parent_line == 'None:':
        parents = None
    else:
        parents = tuple(
            [tuple(segment.split('\x00')) for segment in parent_line.split('\t')
             if segment])
    start = line_end + 1
    noeol = bytes[start] == 'N'
    if 'ft' in storage_kind:
        method = 'fulltext'
    else:
        method = 'line-delta'
    build_details = (method, noeol)
    start = start + 1
    raw_record = bytes[start:]
    annotated = 'annotated' in storage_kind
    return [KnitContentFactory(key, parents, build_details, None, raw_record,
        annotated, network_bytes=bytes)]
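
# The wire form parsed above is, informally (a sketch of the layout built by
# KnitContentFactory._create_network_bytes, not a normative spec):
#
#   <storage_kind>\n
#   <key elements joined by \x00>\n
#   <parent keys joined by \t, or the literal 'None:'>\n
#   <'N' if the text has no trailing EOL, else ' '><raw gzipped knit record>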


class KnitContent(object):
    """Content of a knit version to which deltas can be applied.

    This is always stored in memory as a list of lines with \\n at the end,
    plus a flag saying if the final ending is really there or not, because that
    corresponds to the on-disk knit representation.
    """

    def __init__(self):
        self._should_strip_eol = False

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        raise NotImplementedError(self.apply_delta)

    def line_delta_iter(self, new_lines):
        """Generate line-based delta from this content to new_lines."""
        new_texts = new_lines.text()
        old_texts = self.text()
        s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
        for tag, i1, i2, j1, j2 in s.get_opcodes():
            if tag == 'equal':
                continue
            # ofrom, oto, length, data
            yield i1, i2, j2 - j1, new_lines._lines[j1:j2]

    def lower_line_delta(self, delta):
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend(lines)
        return out
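    # For example (illustrative values): the delta hunk (2, 3, 1, ['new\n'])
    # is serialised as the header line '2,3,1\n' followed by the replacement
    # line 'new\n'.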

    def annotate(self, knit, key):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate_flat(key)


def make_file_factory(annotated, mapper):
    """Create a factory for creating a file based KnitVersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param annotated: knit annotations are wanted.
    :param mapper: The mapper from keys to paths.
    """
    def factory(transport):
        index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
        access = _KnitKeyAccess(transport, mapper)
        return KnitVersionedFiles(index, access, annotated=annotated)
    return factory
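
# Hedged usage sketch: ConstantMapper (from bzrlib.versionedfile) maps every
# key to a single path, and 'transport' is any writable bzrlib transport.
#
#   factory = make_file_factory(annotated=True, mapper=ConstantMapper('name'))
#   vf = factory(transport)   # backed by name.kndx / name.knit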


def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based VersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph or delta
        ref_length = 0
        if graph:
            ref_length += 1
        if delta:
            ref_length += 1
            max_delta_chain = 200
        else:
            max_delta_chain = 0
        graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
            deltas=delta, add_callback=graph_index.add_nodes)
        access = pack_repo._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = KnitVersionedFiles(index, access,
            max_delta_chain=max_delta_chain)
        result.stream = stream
        result.writer = writer
        return result
    return factory


def cleanup_pack_knit(versioned_files):
    versioned_files.stream.close()
    versioned_files.writer.end()
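
# Illustrative pairing of the two helpers above (a sketch; 'transport' is
# assumed to be a writable bzrlib transport):
#
#   vf = make_pack_factory(graph=True, delta=True, keylength=1)(transport)
#   try:
#       pass  # add and read texts through vf
#   finally:
#       cleanup_pack_knit(vf)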


def _get_total_build_size(self, keys, positions):
    """Determine the total bytes to build these keys.

    (helper function because _KnitGraphIndex and _KndxIndex work the same, but
    don't inherit from a common base.)

    :param keys: Keys that we want to build
    :param positions: dict of {key: (info, index_memo, comp_parent)} (such
        as returned by _get_components_positions)
    :return: Number of bytes to build those keys
    """
    all_build_index_memos = {}
    build_keys = keys
    while build_keys:
        next_keys = set()
        for key in build_keys:
            # This is mostly for the 'stacked' case
            # Where we will be getting the data from a fallback
            if key not in positions:
                continue
            _, index_memo, compression_parent = positions[key]
            all_build_index_memos[key] = index_memo
            if compression_parent not in all_build_index_memos:
                next_keys.add(compression_parent)
        build_keys = next_keys
    return sum([index_memo[2] for index_memo
                in all_build_index_memos.itervalues()])
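
# Worked example (a sketch; keys and sizes are made up): with
#   positions = {('k2',): (None, ('idx', 0, 10), ('k1',)),
#                ('k1',): (None, ('idx', 10, 30), None)}
# building ('k2',) also pulls in its compression parent ('k1',), so the
# total is the sum of the index_memo sizes: 10 + 30 = 40 bytes.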


class KnitVersionedFiles(VersionedFilesWithFallbacks):
    """Storage for many versioned files using knit compression.

    Backend storage is managed by indices and data objects.

    :ivar _index: A _KnitGraphIndex or similar that can describe the
        parents, graph, compression and data location of entries in this
        KnitVersionedFiles.  Note that this is only the index for
        *this* vfs; if there are fallbacks they must be queried separately.
    """

    def __init__(self, index, data_access, max_delta_chain=200,
                 annotated=False, reload_func=None):
        """Create a KnitVersionedFiles with index and data_access.

        :param index: The index for the knit data.
        :param data_access: The access object to store and retrieve knit
            records.
        :param max_delta_chain: The maximum number of deltas to permit during
            insertion. Set to 0 to prohibit the use of deltas.
        :param annotated: Set to True to cause annotations to be calculated and
            stored during insertion.
        :param reload_func: A function that can be called if we think we need
            to reload the pack listing and try again. See
            'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
        """
        self._index = index
        self._access = data_access
        self._max_delta_chain = max_delta_chain
        if annotated:
            self._factory = KnitAnnotateFactory()
        else:
            self._factory = KnitPlainFactory()
        self._immediate_fallback_vfs = []
        self._reload_func = reload_func

    def __repr__(self):
        return "%s(%r, %r)" % (
            self.__class__.__name__,
            self._index,
            self._access)

    def without_fallbacks(self):
        """Return a clone of this object without any fallbacks configured."""
        return KnitVersionedFiles(self._index, self._access,
            self._max_delta_chain, self._factory.annotated,
            self._reload_func)

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._immediate_fallback_vfs.append(a_versioned_files)

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """See VersionedFiles.add_lines()."""
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        line_bytes = ''.join(lines)
        return self._add(key, lines, parents,
            parent_texts, left_matching_blocks, nostore_sha, random_id,
            line_bytes=line_bytes)
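    # Illustrative call (a sketch; the key tuple and content are arbitrary):
    #   kvf.add_lines(('file-id', 'rev-1'), (), ['line one\n', 'line two\n'])
    # returns (sha1, text_length, content) for the stored text.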

    def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
        """See VersionedFiles._add_text()."""
        self._index._check_write_ok()
        self._check_add(key, None, random_id, check_content=False)
        if text.__class__ is not str:
            raise errors.BzrBadParameterUnicode("text")
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        return self._add(key, None, parents,
            None, None, nostore_sha, random_id,
            line_bytes=text)

    def _add(self, key, lines, parents, parent_texts,
        left_matching_blocks, nostore_sha, random_id,
        line_bytes):
        """Add a set of lines on top of version specified by parents.

        Any versions not present will be converted into ghosts.

        :param lines: A list of strings where each one is a single line (has a
            single newline at the end of the string). This is now optional
            (callers can pass None). It is left in its location for backwards
            compatibility. If provided, ''.join(lines) must equal line_bytes.
        :param line_bytes: A single string containing the content

        We pass both lines and line_bytes because different routes bring the
        values to this function. And for memory efficiency, we don't want to
        have to split/join on-demand.
        """
        # first thing, if the content is something we don't need to store, find
        # that out.
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent

        present_parents = []
        if parent_texts is None:
            parent_texts = {}
        # Do a single query to ascertain parent presence; we only compress
        # against parents in the same kvf.
        present_parent_map = self._index.get_parent_map(parents)
        for parent in parents:
            if parent in present_parent_map:
                present_parents.append(parent)

        # Currently we can only compress against the left most present parent.
        if (len(present_parents) == 0 or
            present_parents[0] != parents[0]):
            delta = False
        else:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas. This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents[0])

        text_length = len(line_bytes)
        options = []
        no_eol = False
        # Note: line_bytes is not modified to add a newline, that is tracked
        # via the no_eol flag. 'lines' *is* modified, because that is the
        # general values needed by the Content code.
        if line_bytes and line_bytes[-1] != '\n':
            options.append('no-eol')
            no_eol = True
            # Copy the existing list, or create a new one
            if lines is None:
                lines = osutils.split_lines(line_bytes)
            else:
                lines = lines[:]
            # Replace the last line with one that ends in a final newline
            lines[-1] = lines[-1] + '\n'
        if lines is None:
            lines = osutils.split_lines(line_bytes)

        for element in key[:-1]:
            if type(element) is not str:
                raise TypeError("key contains non-strings: %r" % (key,))
        if key[-1] is None:
            key = key[:-1] + ('sha1:' + digest,)
        elif type(key[-1]) is not str:
            raise TypeError("key contains non-strings: %r" % (key,))
        # Knit hunks are still last-element only
        version_id = key[-1]
        content = self._factory.make(lines, version_id)
        if no_eol:
            # Hint to the content object that its text() call should strip the
            # EOL.
            content._should_strip_eol = True
        if delta or (self._factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(content, present_parents,
                parent_texts, delta, self._factory.annotated,
                left_matching_blocks)

        if delta:
            options.append('line-delta')
            store_lines = self._factory.lower_line_delta(delta_hunks)
            size, bytes = self._record_to_data(key, digest,
                store_lines)
        else:
            options.append('fulltext')
            # isinstance is slower and we have no hierarchy.
            if self._factory.__class__ is KnitPlainFactory:
                # Use the already joined bytes saving iteration time in
                # _record_to_data.
                dense_lines = [line_bytes]
                if no_eol:
                    dense_lines.append('\n')
                size, bytes = self._record_to_data(key, digest,
                    lines, dense_lines)
            else:
                # get mixed annotation + content and feed it into the
                # serialiser.
                store_lines = self._factory.lower_fulltext(content)
                size, bytes = self._record_to_data(key, digest,
                    store_lines)

        access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
        self._index.add_records(
            ((key, options, access_memo, parents),),
            random_id=random_id)
        return digest, text_length, content

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        return self._factory.annotate(self, key)

    def get_annotator(self):
        return _KnitAnnotator(self)

    def check(self, progress_bar=None, keys=None):
        """See VersionedFiles.check()."""
        if keys is None:
            return self._logical_check()
        else:
            # At the moment, check does no extra work over get_record_stream
            return self.get_record_stream(keys, 'unordered', True)

    def _logical_check(self):
        # This doesn't actually test extraction of everything, but that will
        # impact 'bzr check' substantially, and needs to be integrated with
        # care. However, it does check for the obvious problem of a delta with
        # no basis.
        keys = self._index.keys()
        parent_map = self.get_parent_map(keys)
        for key in keys:
            if self._index.get_method(key) != 'fulltext':
                compression_parent = parent_map[key][0]
                if compression_parent not in parent_map:
                    raise errors.KnitCorrupt(self,
                        "Missing basis parent %s for %s" % (
                        compression_parent, key))
        for fallback_vfs in self._immediate_fallback_vfs:
            fallback_vfs.check()

    def _check_add(self, key, lines, random_id, check_content):
        """Check that version_id and lines are safe to add."""
        version_id = key[-1]
        if version_id is not None:
            if contains_whitespace(version_id):
                raise InvalidRevisionId(version_id, self)
            self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception. This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)

    def _check_header(self, key, line):
        rec = self._split_header(line)
        self._check_header_version(rec, key[-1])
        return rec

    def _check_header_version(self, rec, version_id):
        """Checks the header version on original format knit records.

        These have the last component of the key embedded in the record.
        """
        if rec[1] != version_id:
            raise KnitCorrupt(self,
                'unexpected version, wanted %r, got %r' % (version_id, rec[1]))

    def _check_should_delta(self, parent):
        """Iterate back through the parent listing, looking for a fulltext.

        This is used when we want to decide whether to add a delta or a new
        fulltext.
        """
        delta_size = 0
        fulltext_size = None
        for count in xrange(self._max_delta_chain):
            try:
                # Note that this only looks in the index of this particular
                # KnitVersionedFiles, not in the fallbacks.  This ensures that
                # we won't store a delta spanning physical repository
                # boundaries.
                build_details = self._index.get_build_details([parent])
                parent_details = build_details[parent]
            except (RevisionNotPresent, KeyError), e:
                # Some basis is not locally present: always fulltext
                return False
            index_memo, compression_parent, _, _ = parent_details
            _, _, size = index_memo
            if compression_parent is None:
                fulltext_size = size
                break
            delta_size += size
            # We don't explicitly check for presence because this is in an
            # inner loop, and if it's missing it'll fail anyhow.
            parent = compression_parent
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False
        # Simple heuristic - if the total I/O would be greater as a delta than
        # the originally installed fulltext, we create a new fulltext.
        return fulltext_size > delta_size

    def _build_details_to_components(self, build_details):
        """Convert a build_details tuple to a position tuple."""
        # record_details, access_memo, compression_parent
        return build_details[3], build_details[0], build_details[1]

    def _get_components_positions(self, keys, allow_missing=False):
        """Produce a map of position data for the components of keys.

        This data is intended to be used for retrieving the knit records.

        A dict of key to (record_details, index_memo, next, parents) is
        returned.

        * method is the way referenced data should be applied.
        * index_memo is the handle to pass to the data access to actually get
          the data
        * next is the build-parent of the version, or None for fulltexts.
        * parents is the version_ids of the parents of this version

        :param allow_missing: If True do not raise an error on a missing
            component, just ignore it.
        """
        component_data = {}
        pending_components = keys
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[key] = self._build_details_to_components(details)
            missing = current_components.difference(build_details)
            if missing and not allow_missing:
                raise errors.RevisionNotPresent(missing.pop(), self)
        return component_data

    def _get_content(self, key, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        cached_version = parent_texts.get(key, None)
        if cached_version is not None:
            # Ensure the cache dict is valid.
            if not self.get_parent_map([key]):
                raise RevisionNotPresent(key, self)
            return cached_version
        generator = _VFContentMapGenerator(self, [key])
        return generator._get_content(key)

    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._immediate_fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results

    def _get_record_map(self, keys, allow_missing=False):
        """Produce a dictionary of knit records.

        :return: {key:(record, record_details, digest, next)}

            * record: data returned from read_records (a KnitContent object)
            * record_details: opaque information to pass to parse_record
            * digest: SHA1 digest of the full text after all steps are done
            * next: build-parent of the version, i.e. the leftmost ancestor.
              Will be None if the record is not a delta.

        :param keys: The keys to build a map for
        :param allow_missing: If some records are missing, rather than
            error, just return the data that could be generated.
        """
        raw_map = self._get_record_map_unparsed(keys,
            allow_missing=allow_missing)
        return self._raw_map_to_record_map(raw_map)

    def _raw_map_to_record_map(self, raw_map):
        """Parse the contents of _get_record_map_unparsed.

        :return: see _get_record_map.
        """
        result = {}
        for key in raw_map:
            data, record_details, next = raw_map[key]
            content, digest = self._parse_record(key[-1], data)
            result[key] = content, record_details, digest, next
        return result

    def _get_record_map_unparsed(self, keys, allow_missing=False):
        """Get the raw data for reconstructing keys without parsing it.

        :return: A dict suitable for parsing via _raw_map_to_record_map.
            key-> raw_bytes, (method, noeol), compression_parent
        """
        # This retries the whole request if anything fails. Potentially we
        # could be a bit more selective. We could track the keys whose records
        # we have successfully found, and then only request the new records
        # from there. However, _get_components_positions grabs the whole build
        # chain, which means we'll likely try to grab the same records again
        # anyway. Also, can the build chains change as part of a pack
        # operation? We wouldn't want to end up with a broken chain.
        while True:
            try:
                position_map = self._get_components_positions(keys,
                    allow_missing=allow_missing)
                # key = component_id, r = record_details, i_m = index_memo,
                # n = next
                records = [(key, i_m) for key, (r, i_m, n)
                                       in position_map.iteritems()]
                # Sort by the index memo, so that we request records from the
                # same pack file together, and in forward-sorted order
                records.sort(key=operator.itemgetter(1))
                raw_record_map = {}
                for key, data in self._read_records_iter_unchecked(records):
                    (record_details, index_memo, next) = position_map[key]
                    raw_record_map[key] = data, record_details, next
                return raw_record_map
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)

    @classmethod
    def _split_by_prefix(cls, keys):
        """For the given keys, split them up based on their prefix.

        To keep memory pressure somewhat under control, split the
        requests back into per-file-id requests, otherwise "bzr co"
        extracts the full tree into memory before writing it to disk.
        This should be revisited if _get_content_maps() can ever cross
        file-id boundaries.

        The keys for a given file_id are kept in the same relative order.
        Ordering between file_ids is not, though prefix_order will return the
        order that the key was first seen.

        :param keys: An iterable of key tuples
        :return: (split_map, prefix_order)
            split_map       A dictionary mapping prefix => keys
            prefix_order    The order that we saw the various prefixes
        """
        split_by_prefix = {}
        prefix_order = []
        for key in keys:
            if len(key) == 1:
                prefix = ''
            else:
                prefix = key[0]

            if prefix in split_by_prefix:
                split_by_prefix[prefix].append(key)
            else:
                split_by_prefix[prefix] = [key]
                prefix_order.append(prefix)
        return split_by_prefix, prefix_order
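    # For example (sketch): keys [('f1', 'a'), ('f2', 'a'), ('f1', 'b')]
    # split into {'f1': [('f1', 'a'), ('f1', 'b')], 'f2': [('f2', 'a')]}
    # with prefix_order ['f1', 'f2'].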

    def _group_keys_for_io(self, keys, non_local_keys, positions,
                           _min_buffer_size=_STREAM_MIN_BUFFER_SIZE):
        """For the given keys, group them into 'best-sized' requests.

        The idea is to avoid making 1 request per file, but to never try to
        unpack an entire 1.5GB source tree in a single pass. Also when
        possible, we should try to group requests to the same pack file
        together.

        :return: list of (keys, non_local) tuples that indicate what keys
            should be fetched next.
        """
        # TODO: Ideally we would group on 2 factors. We want to extract texts
        #       from the same pack file together, and we want to extract all
        #       the texts for a given build-chain together. Ultimately it
        #       probably needs a better global view.
        total_keys = len(keys)
        prefix_split_keys, prefix_order = self._split_by_prefix(keys)
        prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys)
        cur_keys = []
        cur_non_local = set()
        cur_size = 0
        result = []
        sizes = []
        for prefix in prefix_order:
            keys = prefix_split_keys[prefix]
            non_local = prefix_split_non_local_keys.get(prefix, [])

            this_size = self._index._get_total_build_size(keys, positions)
            cur_size += this_size
            cur_keys.extend(keys)
            cur_non_local.update(non_local)
            if cur_size > _min_buffer_size:
                result.append((cur_keys, cur_non_local))
                sizes.append(cur_size)
                cur_keys = []
                cur_non_local = set()
                cur_size = 0
        if cur_keys:
            result.append((cur_keys, cur_non_local))
            sizes.append(cur_size)
        return result

    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # keys might be a generator
        keys = set(keys)
        if not keys:
            return
        if not self._index.has_graph:
            # Cannot sort when no graph has been stored.
            ordering = 'unordered'

        remaining_keys = keys
        while True:
            try:
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                        ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
                return
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)

    def _get_remaining_record_stream(self, keys, ordering,
                                     include_delta_closure):
        """This function is the 'retry' portion for get_record_stream."""
        if include_delta_closure:
            positions = self._get_components_positions(keys, allow_missing=True)
        else:
            build_details = self._index.get_build_details(keys)
            # map from key to
            # (record_details, access_memo, compression_parent_key)
            positions = dict((key, self._build_details_to_components(details))
                for key, details in build_details.iteritems())
        absent_keys = keys.difference(set(positions))
        # There may be more absent keys : if we're missing the basis component
        # and are trying to include the delta closure.
        # XXX: We should not ever need to examine remote sources because we do
        # not permit deltas across versioned files boundaries.
        if include_delta_closure:
            needed_from_fallback = set()
            # Build up reconstructable_keys dict.  key:True in this dict means
            # the key can be reconstructed.
            reconstructable_keys = {}
            for key in keys:
                # the delta chain
                try:
                    chain = [key, positions[key][2]]
                except KeyError:
                    needed_from_fallback.add(key)
                    continue
                result = True
                while chain[-1] is not None:
                    if chain[-1] in reconstructable_keys:
                        result = reconstructable_keys[chain[-1]]
                        break
                    else:
                        try:
                            chain.append(positions[chain[-1]][2])
                        except KeyError:
                            # missing basis component
                            needed_from_fallback.add(chain[-1])
                            result = True
                            break
                for chain_key in chain[:-1]:
                    reconstructable_keys[chain_key] = result
                if not result:
                    needed_from_fallback.add(key)
        # Double index lookups here : need a unified api ?
        global_map, parent_maps = self._get_parent_map_with_sources(keys)
        if ordering in ('topological', 'groupcompress'):
            if ordering == 'topological':
                # Global topological sort
                present_keys = tsort.topo_sort(global_map)
            else:
                present_keys = sort_groupcompress(global_map)
            # Now group by source:
            source_keys = []
            current_source = None
            for key in present_keys:
                for parent_map in parent_maps:
                    if key in parent_map:
                        key_source = parent_map
                        break
                if current_source is not key_source:
                    source_keys.append((key_source, []))
                    current_source = key_source
                source_keys[-1][1].append(key)
        else:
            if ordering != 'unordered':
                raise AssertionError('valid values for ordering are:'
                    ' "unordered", "groupcompress" or "topological" not: %r'
                    % (ordering,))
            # Just group by source; remote sources first.
            present_keys = []
            source_keys = []
            for parent_map in reversed(parent_maps):
                source_keys.append((parent_map, []))
                for key in parent_map:
                    present_keys.append(key)
                    source_keys[-1][1].append(key)
        # We have been requested to return these records in an order that
        # suits us. So we ask the index to give us an optimally sorted
        # order.
        for source, sub_keys in source_keys:
            if source is parent_maps[0]:
                # Only sort the keys for this VF
                self._index._sort_keys_by_io(sub_keys, positions)
        absent_keys = keys - set(global_map)
        for key in absent_keys:
            yield AbsentContentFactory(key)
        # restrict our view to the keys we can answer.
        # XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
        # XXX: At that point we need to consider the impact of double reads by
        # utilising components multiple times.
        if include_delta_closure:
            # XXX: get_content_maps performs its own index queries; allow state
            # to be passed in.
            non_local_keys = needed_from_fallback - absent_keys
            for keys, non_local_keys in self._group_keys_for_io(present_keys,
                    non_local_keys, positions):
                generator = _VFContentMapGenerator(self, keys, non_local_keys,
                    global_map, ordering=ordering)
                for record in generator.get_record_stream():
                    yield record
        else:
            for source, keys in source_keys:
                if source is parent_maps[0]:
                    # this KnitVersionedFiles
                    records = [(key, positions[key][1]) for key in keys]
                    for key, raw_data in self._read_records_iter_unchecked(records):
                        (record_details, index_memo, _) = positions[key]
                        yield KnitContentFactory(key, global_map[key],
                            record_details, None, raw_data, self._factory.annotated, None)
                else:
                    vf = self._immediate_fallback_vfs[parent_maps.index(source) - 1]
                    for record in vf.get_record_stream(keys, ordering,
                            include_delta_closure):
                        yield record

    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        missing = set(keys)
        record_map = self._get_record_map(missing, allow_missing=True)
        result = {}
        for key, details in record_map.iteritems():
            if key not in missing:
                continue
            # record entry 2 is the 'digest'.
            result[key] = details[2]
        missing.difference_update(set(result))
        for source in self._immediate_fallback_vfs:
            if not missing:
                break
            new_result = source.get_sha1s(missing)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :seealso VersionedFiles.get_record_stream:
        """
        adapters = {}

        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        delta_types = set()
        if self._factory.annotated:
            # self is annotated, we need annotated knits to use directly.
            annotated = "annotated-"
            convertibles = []
        else:
            # self is not annotated, but we can strip annotations cheaply.
            annotated = ""
            convertibles = set(["knit-annotated-ft-gz"])
            if self._max_delta_chain:
                delta_types.add("knit-annotated-delta-gz")
                convertibles.add("knit-annotated-delta-gz")
        # The set of types we can cheaply adapt without needing basis texts.
        native_types = set()
        if self._max_delta_chain:
            native_types.add("knit-%sdelta-gz" % annotated)
            delta_types.add("knit-%sdelta-gz" % annotated)
        native_types.add("knit-%sft-gz" % annotated)
        knit_types = native_types.union(convertibles)
        # Buffer all index entries that we can't add immediately because their
        # basis parent is missing. We don't buffer all because generating
        # annotations may require access to some of the new records. However we
        # can't generate annotations from new deltas until their basis parent
        # is present anyway, so we get away with not needing an index that
        # includes the new keys.
        #
        # See <http://launchpad.net/bugs/300177> about ordering of compression
        # parents in the records - to be conservative, we insist that all
        # parents must be present to avoid expanding to a fulltext.
        #
        # key = basis_parent, value = index entry to add
        buffered_index_entries = {}
        for record in stream:
            kind = record.storage_kind
            if kind.startswith('knit-') and kind.endswith('-gz'):
                # Check that the ID in the header of the raw knit bytes matches
                # the record metadata.
                raw_data = record._raw_record
                df, rec = self._parse_record_header(record.key, raw_data)
                df.close()
            buffered = False
            parents = record.parents
            if record.storage_kind in delta_types:
                # TODO: eventually the record itself should track
                #       compression_parent
                compression_parent = parents[0]
            else:
                compression_parent = None
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise RevisionNotPresent([record.key], self)
            elif ((record.storage_kind in knit_types)
                  and (compression_parent is None
                       or not self._immediate_fallback_vfs
                       or self._index.has_key(compression_parent)
                       or not self.has_key(compression_parent))):
                # we can insert the knit record literally if either it has no
                # compression parent OR we already have its basis in this kvf
                # OR the basis is not present even in the fallbacks. In the
                # last case it will either turn up later in the stream and all
                # will be well, or it won't turn up at all and we'll raise an
                # error at the end.
                #
                # TODO: self.has_key is somewhat redundant with
                # self._index.has_key; we really want something that directly
                # asks if it's only present in the fallbacks. -- mbp 20081119
                if record.storage_kind not in native_types:
                    try:
                        adapter_key = (record.storage_kind, "knit-delta-gz")
                        adapter = get_adapter(adapter_key)
                    except KeyError:
                        adapter_key = (record.storage_kind, "knit-ft-gz")
                        adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(record)
                else:
                    # It's a knit record, it has a _raw_record field (even if
                    # it was reconstituted from a network stream).
                    bytes = record._raw_record
                options = [record._build_details[0]]
                if record._build_details[1]:
                    options.append('no-eol')
                # Just blat it across.
                # Note: This does end up adding data on duplicate keys. As
                # modern repositories use atomic insertions this should not
                # lead to excessive growth in the event of interrupted fetches.
                # 'knit' repositories may suffer excessive growth, but as a
                # deprecated format this is tolerable. It can be fixed if
                # needed by making the kndx index support raising on a
                # duplicate add with identical parents and options.
                access_memo = self._access.add_raw_records(
                    [(record.key, len(bytes))], bytes)[0]
                index_entry = (record.key, options, access_memo, parents)
                if 'fulltext' not in options:
                    # Not a fulltext, so we need to make sure the compression
                    # parent will also be present.
                    # Note that pack backed knits don't need to buffer here
                    # because they buffer all writes to the transaction level,
                    # but we don't expose that difference at the index level. If
                    # the query here has sufficient cost to show up in
                    # profiling we should do that.
                    #
                    # They're required to be physically in this
                    # KnitVersionedFiles, not in a fallback.
                    if not self._index.has_key(compression_parent):
                        pending = buffered_index_entries.setdefault(
                            compression_parent, [])
                        pending.append(index_entry)
                        buffered = True
                if not buffered:
                    self._index.add_records([index_entry])
            elif record.storage_kind == 'chunked':
                self.add_lines(record.key, parents,
                    osutils.chunks_to_lines(record.get_bytes_as('chunked')))
            else:
                # Not suitable for direct insertion as a
                # delta, either because it's not the right format, or this
                # KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
                # 0) or because it depends on a base only present in the
                # fallback kvfs.
                self._access.flush()
                try:
                    # Try getting a fulltext directly from the record.
                    bytes = record.get_bytes_as('fulltext')
                except errors.UnavailableRepresentation:
                    adapter_key = record.storage_kind, 'fulltext'
                    adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(record)
                lines = split_lines(bytes)
                try:
                    self.add_lines(record.key, parents, lines)
                except errors.RevisionAlreadyPresent:
                    pass
            # Add any records whose basis parent is now available.
            if not buffered:
                added_keys = [record.key]
                while added_keys:
                    key = added_keys.pop(0)
                    if key in buffered_index_entries:
                        index_entries = buffered_index_entries[key]
                        self._index.add_records(index_entries)
                        added_keys.extend(
                            [index_entry[0] for index_entry in index_entries])
                        del buffered_index_entries[key]
        if buffered_index_entries:
            # There were index entries buffered at the end of the stream,
            # So these need to be added (if the index supports holding such
            # entries for later insertion)
            all_entries = []
            for key in buffered_index_entries:
                index_entries = buffered_index_entries[key]
                all_entries.extend(index_entries)
            self._index.add_records(
                all_entries, missing_compression_parents=True)
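    # Typical use (a sketch): streaming every text from one store to another,
    # assuming 'source' and 'target' are KnitVersionedFiles-like objects:
    #   target.insert_record_stream(
    #       source.get_record_stream(source.keys(), 'unordered', False))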

    def get_missing_compression_parent_keys(self):
        """Return an iterable of keys of missing compression parents.

        Check this after calling insert_record_stream to find out if there are
        any missing compression parents.  If there are, the records that
        depend on them are not able to be inserted safely. For atomic
        KnitVersionedFiles built on packs, the transaction should be aborted or
        suspended - commit will fail at this point. Nonatomic knits will error
        earlier because they have no staging area to put pending entries into.
        """
        return self._index.get_missing_compression_parents()

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and the text version that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \\n
           terminators.
         * Lines are returned in arbitrary order.
         * If a requested key did not change any lines (or didn't have any
           lines), it may not be mentioned at all in the result.

        :param pb: Progress bar supplied by caller.
        :return: An iterator over (line, key).
        """
        if pb is None:
            pb = ui.ui_factory.nested_progress_bar()
        keys = set(keys)
        total = len(keys)
        done = False
        while not done:
            try:
                # we don't care about inclusions, the caller cares.
                # but we need to setup a list of records to visit.
                # we need key, position, length
                key_records = []
                build_details = self._index.get_build_details(keys)
                for key, details in build_details.iteritems():
                    if key in keys:
                        key_records.append((key, details[0]))
                records_iter = enumerate(self._read_records_iter(key_records))
                for (key_idx, (key, data, sha_value)) in records_iter:
                    pb.update(gettext('Walking content'), key_idx, total)
                    compression_parent = build_details[key][1]
                    if compression_parent is None:
                        # fulltext
                        line_iterator = self._factory.get_fulltext_content(data)
                    else:
                        # Delta
                        line_iterator = self._factory.get_linedelta_content(data)
                    # Now that we are yielding the data for this key, remove it
                    # from the list
                    keys.remove(key)
                    # XXX: It might be more efficient to yield (key,
                    # line_iterator) in the future. However for now, this is a
                    # simpler change to integrate into the rest of the
                    # codebase. RBC 20071110
                    for line in line_iterator:
                        yield line, key
                done = True
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)
        # If there are still keys we've not yet found, we look in the fallback
        # vfs, and hope to find them there.  Note that if the keys are found
        # but had no changes or no content, the fallback may not return
        # anything.
        if keys and not self._immediate_fallback_vfs:
            # XXX: strictly the second parameter is meant to be the file id
            # but it's not easily accessible here.
            raise RevisionNotPresent(keys, repr(self))
        for source in self._immediate_fallback_vfs:
            if not keys:
                break
            source_keys = set()
            for line, key in source.iter_lines_added_or_present_in_keys(keys):
                source_keys.add(key)
                yield line, key
            keys.difference_update(source_keys)
        pb.update(gettext('Walking content'), total, total)

    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks

    def _merge_annotations(self, content, parents, parent_texts={},
                           delta=None, annotated=None,
                           left_matching_blocks=None):
        """Merge annotations for content and generate deltas.

        This is done by comparing the annotations based on changes to the text
        and generating a delta on the resulting full texts. If annotations are
        not being created then a simple delta is created.
        """
        if left_matching_blocks is not None:
            delta_seq = diff._PrematchedMatcher(left_matching_blocks)
        else:
            delta_seq = None
        if annotated:
            for parent_key in parents:
                merge_content = self._get_content(parent_key, parent_texts)
                if (parent_key == parents[0] and delta_seq is not None):
                    seq = delta_seq
                else:
                    seq = patiencediff.PatienceSequenceMatcher(
                        None, merge_content.text(), content.text())
                for i, j, n in seq.get_matching_blocks():
                    if n == 0:
                        continue
                    # this copies (origin, text) pairs across to the new
                    # content for any line that matches the last-checked
                    # parent.
                    content._lines[j:j+n] = merge_content._lines[i:i+n]
        if delta:
            if delta_seq is None:
                reference_content = self._get_content(parents[0], parent_texts)
                new_texts = content.text()
                old_texts = reference_content.text()
                delta_seq = patiencediff.PatienceSequenceMatcher(
                    None, old_texts, new_texts)
            return self._make_line_delta(delta_seq, content)
def _parse_record(self, version_id, data):
"""Parse an original format knit record.

These have the last element of the key only present in the stored data.
rec, record_contents = self._parse_record_unchecked(data)
self._check_header_version(rec, version_id)
return record_contents, rec[3]

def _parse_record_header(self, key, raw_data):
"""Parse a record header for consistency.

:return: the header and the decompressor stream,
as (stream, header_record).
df = gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
rec = self._check_header(key, df.readline())
except Exception, e:
raise KnitCorrupt(self,
"While reading {%s} got %s(%s)"
% (key, e.__class__.__name__, str(e)))

def _parse_record_unchecked(self, data):
# 4168 calls in 2880 217 internal
# 4168 calls to _parse_record_header in 2121
# 4168 calls to readlines in 330
df = gzip.GzipFile(mode='rb', fileobj=StringIO(data))
record_contents = df.readlines()
except Exception, e:
raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
(data, e.__class__.__name__, str(e)))
header = record_contents.pop(0)
rec = self._split_header(header)
last_line = record_contents.pop()
if len(record_contents) != int(rec[2]):
raise KnitCorrupt(self,
'incorrect number of lines %s != %s'
' for version {%s} %s'
% (len(record_contents), int(rec[2]),
rec[1], record_contents))
if last_line != 'end %s\n' % rec[1]:
raise KnitCorrupt(self,
'unexpected version end line %r, wanted %r'
% (last_line, 'end %s\n' % rec[1]))
return rec, record_contents
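
# --- Illustrative sketch (not part of the original source): the shape of
# the `rec` header tuple produced above. A knit record header is a single
# space-separated line, so rec[1] is the version id, rec[2] the line count
# and rec[3] the sha1 digest. Example values are assumed.
_example_header = 'version rev-1 2 ' + '0' * 40 + '\n'
_example_rec = _example_header.split()
assert _example_rec[1] == 'rev-1'
assert int(_example_rec[2]) == 2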
def _read_records_iter(self, records):
"""Read text records from data file and yield result.

The result will be returned in whatever order is fastest to read,
not the order requested. Also, multiple requests for the same
record will only yield 1 response.

:param records: A list of (key, access_memo) entries
:return: Yields (key, contents, digest) in the order
read, not the order requested
# XXX: This smells wrong, IO may not be getting ordered right.
needed_records = sorted(set(records), key=operator.itemgetter(1))
if not needed_records:
# The transport optimizes the fetching as well
# (ie, reads continuous ranges.)
raw_data = self._access.get_raw_records(
[index_memo for key, index_memo in needed_records])
for (key, index_memo), data in \
izip(iter(needed_records), raw_data):
content, digest = self._parse_record(key[-1], data)
yield key, content, digest

def _read_records_iter_raw(self, records):
"""Read text records from data file and yield raw data.

This unpacks enough of the text record to validate the id is
as expected, but that's all.

Each item the iterator yields is (key, bytes,
expected_sha1_of_full_text).
for key, data in self._read_records_iter_unchecked(records):
# validate the header (note that we can only use the suffix in
# current knit records).
df, rec = self._parse_record_header(key, data)
yield key, data, rec[3]
def _read_records_iter_unchecked(self, records):
"""Read text records from data file and yield raw data.

No validation is done.

Yields tuples of (key, data).
# setup an iterator of the external records:
# uses readv so nice and fast we hope.
# grab the disk data needed.
needed_offsets = [index_memo for key, index_memo
raw_records = self._access.get_raw_records(needed_offsets)
for key, index_memo in records:
data = raw_records.next()
def _record_to_data(self, key, digest, lines, dense_lines=None):
"""Convert key, digest, lines into a raw data block.

:param key: The key of the record. Currently keys are always serialised
using just the trailing component.
:param dense_lines: The bytes of lines but in a denser form. For
instance, if lines is a list of 1000 bytestrings each ending in
\\n, dense_lines may be a list with one line in it, containing all
the 1000 lines and their \\n's. Using dense_lines if it is
already known is a win because the string join to create bytes in
this function spends less time resizing the final string.
:return: (len, a bytes object with the raw data ready to read.)
chunks = ["version %s %d %s\n" % (key[-1], len(lines), digest)]
chunks.extend(dense_lines or lines)
chunks.append("end %s\n" % key[-1])
for chunk in chunks:
if type(chunk) is not str:
raise AssertionError(
'data must be plain bytes was %s' % type(chunk))
if lines and lines[-1][-1] != '\n':
raise ValueError('corrupt lines value %r' % lines)
compressed_bytes = tuned_gzip.chunks_to_gzip(chunks)
return len(compressed_bytes), compressed_bytes
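
# --- Illustrative sketch (assumed example data, not from the original
# source): the uncompressed layout that _record_to_data builds before
# gzipping, mirroring the parse logic in _parse_record_unchecked above:
#   version <id> <line count> <digest>\n ... text lines ... end <id>\n
_example_chunks = ["version rev-1 2 %s\n" % ('0' * 40),
                   "first line\n",
                   "second line\n",
                   "end rev-1\n"]
_example_block = ''.join(_example_chunks)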
def _split_header(self, line):
raise KnitCorrupt(self,
'unexpected number of elements in record header')

"""See VersionedFiles.keys."""
if 'evil' in debug.debug_flags:
trace.mutter_callsite(2, "keys scales with size of history")
sources = [self._index] + self._immediate_fallback_vfs
for source in sources:
result.update(source.keys())
class _ContentMapGenerator(object):
"""Generate texts or expose raw deltas for a set of texts."""

def __init__(self, ordering='unordered'):
self._ordering = ordering

def _get_content(self, key):
"""Get the content object for key."""
# Note that _get_content is only called when the _ContentMapGenerator
# has been constructed with just one key requested for reconstruction.
if key in self.nonlocal_keys:
record = self.get_record_stream().next()
# Create a content object on the fly
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
return PlainKnitContent(lines, record.key)
def _make_line_delta(self, delta_seq, new_content):
"""Generate a line delta from delta_seq and new_content."""
for op in delta_seq.get_opcodes():
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
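
# --- Minimal sketch of the hunk layout appended above (example values
# assumed): each hunk is (start_in_old, end_in_old, count_of_new_lines,
# new_lines), derived from a SequenceMatcher opcode (tag, i1, i2, j1, j2).
_example_hunk = (3, 5, 2, ['replacement line 1\n', 'replacement line 2\n'])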
def _get_components_positions(self, version_ids):
"""Produce a map of position data for the components of versions.

This data is intended to be used for retrieving the knit records.

A dict of version_id to (method, data_pos, data_size, next) is
returned, where:
method is the way referenced data should be applied.
data_pos is the position of the data in the knit.
data_size is the size of the data in the knit.
next is the build-parent of the version, or None for fulltexts.
for version_id in version_ids:
while cursor is not None and cursor not in component_data:
method = self._index.get_method(cursor)
if method == 'fulltext':
next = self.get_parents(cursor)[0]
index_memo = self._index.get_position(cursor)
component_data[cursor] = (method, index_memo, next)
return component_data
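
# --- Illustrative shape of the map returned above (version ids and
# offsets assumed): each entry carries the method, the index memo used to
# fetch the raw record, and the next component in the build chain; a
# fulltext terminates the chain with next=None.
_example_component_data = {
    'rev-2': ('line-delta', (None, 1423, 104), 'rev-1'),
    'rev-1': ('fulltext', (None, 0, 1423), None),
}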
def _get_content(self, version_id, parent_texts={}):
"""Returns a content object that makes up the specified version."""
if not self.has_version(version_id):
raise RevisionNotPresent(version_id, self.filename)
cached_version = parent_texts.get(version_id, None)
if cached_version is not None:
return cached_version
text_map, contents_map = self._get_content_maps([version_id])
return contents_map[version_id]

def _check_versions_present(self, version_ids):
"""Check that all specified versions are present."""
self._index.check_versions_present(version_ids)

def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts):
"""See VersionedFile.add_lines_with_ghosts()."""
self._check_add(version_id, lines)
return self._add(version_id, lines[:], parents, self.delta, parent_texts)

def _add_lines(self, version_id, parents, lines, parent_texts,
left_matching_blocks=None):
"""See VersionedFile.add_lines."""
self._check_add(version_id, lines)
self._check_versions_present(parents)
return self._add(version_id, lines[:], parents, self.delta,
parent_texts, left_matching_blocks)

def _check_add(self, version_id, lines):
"""Check that version_id and lines are safe to add."""
assert self.writable, "knit is not opened for write"
### FIXME escape. RBC 20060228
if contains_whitespace(version_id):
raise InvalidRevisionId(version_id, self.filename)
self.check_not_reserved_id(version_id)
if self.has_version(version_id):
raise RevisionAlreadyPresent(version_id, self.filename)
self._check_lines_not_unicode(lines)
self._check_lines_are_lines(lines)
def _add(self, version_id, lines, parents, delta, parent_texts,
left_matching_blocks=None):
"""Add a set of lines on top of version specified by parents.

If delta is true, compress the text as a line-delta against
the first parent.

Any versions not present will be converted into ghosts.
# 461 0 6546.0390 43.9100 bzrlib.knit:489(_add)
# +400 0 889.4890 418.9790 +bzrlib.knit:192(lower_fulltext)
# +461 0 1364.8070 108.8030 +bzrlib.knit:996(add_record)
# +461 0 193.3940 41.5720 +bzrlib.knit:898(add_version)
# +461 0 134.0590 18.3810 +bzrlib.osutils:361(sha_strings)
# +461 0 36.3420 15.4540 +bzrlib.knit:146(make)
# +1383 0 8.0370 8.0370 +<len>
# +61 0 13.5770 7.9190 +bzrlib.knit:199(lower_line_delta)
# +61 0 963.3470 7.8740 +bzrlib.knit:427(_get_content)
# +61 0 973.9950 5.2950 +bzrlib.knit:136(line_delta)
# +61 0 1918.1800 5.2640 +bzrlib.knit:359(_merge_annotations)
if parent_texts is None:
for parent in parents:
if not self.has_version(parent):
ghosts.append(parent)
present_parents.append(parent)
if delta and not len(present_parents):
digest = sha_strings(lines)
if lines[-1][-1] != '\n':
options.append('no-eol')
lines[-1] = lines[-1] + '\n'
if len(present_parents) and delta:
# To speed the extract of texts the delta chain is limited
# to a fixed number of deltas. This should minimize both
# I/O and the time spent applying deltas.
delta = self._check_should_delta(present_parents)
assert isinstance(version_id, str)
lines = self.factory.make(lines, version_id)
if delta or (self.factory.annotated and len(present_parents) > 0):
# Merge annotations from parent texts if needed.
delta_hunks = self._merge_annotations(lines, present_parents,
parent_texts, delta, self.factory.annotated,
left_matching_blocks)
options.append('line-delta')
store_lines = self.factory.lower_line_delta(delta_hunks)
# local keys we can ask for directly
return self._get_one_work(key)

def get_record_stream(self):
"""Get a record stream for the keys requested during __init__."""
for record in self._work():

"""Produce maps of text and KnitContents as dicts.
options.append('fulltext')
store_lines = self.factory.lower_fulltext(lines)
access_memo = self._data.add_record(version_id, digest, store_lines)
self._index.add_version(version_id, options, access_memo, parents)
def check(self, progress_bar=None):
"""See VersionedFile.check()."""

def _clone_text(self, new_version_id, old_version_id, parents):
"""See VersionedFile.clone_text()."""
# FIXME RBC 20060228 make fast by only inserting an index with null
self.add_lines(new_version_id, parents, self.get_lines(old_version_id))

def get_lines(self, version_id):
"""See VersionedFile.get_lines()."""
return self.get_line_list([version_id])[0]

def _get_record_map(self, version_ids):
"""Produce a dictionary of knit records.

The keys are version_ids, the values are tuples of (method, content,
digest, next), where:
method is the way the content should be applied.
content is a KnitContent object.
digest is the SHA1 digest of this version id after all steps are done
next is the build-parent of the version, i.e. the leftmost ancestor.
If the method is fulltext, next will be None.
position_map = self._get_components_positions(version_ids)
# c = component_id, m = method, i_m = index_memo, n = next
records = [(c, i_m) for c, (m, i_m, n) in position_map.iteritems()]
for component_id, content, digest in \
self._data.read_records_iter(records):
method, index_memo, next = position_map[component_id]
record_map[component_id] = method, content, digest, next
def get_text(self, version_id):
"""See VersionedFile.get_text"""
return self.get_texts([version_id])[0]

def get_texts(self, version_ids):
return [''.join(l) for l in self.get_line_list(version_ids)]

def get_line_list(self, version_ids):
"""Return the texts of listed versions as a list of strings."""
version_ids = [osutils.safe_revision_id(v) for v in version_ids]
for version_id in version_ids:
self.check_not_reserved_id(version_id)
text_map, content_map = self._get_content_maps(version_ids)
return [text_map[v] for v in version_ids]

_get_lf_split_line_list = get_line_list

def _get_content_maps(self, version_ids):
"""Produce maps of text and KnitContents
:return: (text_map, content_map) where text_map contains the texts for
the requested versions and content_map contains the KnitContents.
Both dicts take version_ids as their keys.
# NB: By definition we never need to read remote sources unless texts
# are requested from them: we don't delta across stores - and we
# explicitly do not want to, to prevent data loss situations.
if self.global_map is None:
self.global_map = self.vf.get_parent_map(self.keys)
nonlocal_keys = self.nonlocal_keys
missing_keys = set(nonlocal_keys)
# Read from remote versioned file instances and provide to our caller.
for source in self.vf._immediate_fallback_vfs:
if not missing_keys:
# Loop over fallback repositories asking them for texts - ignore
# any missing from a particular fallback.
for record in source.get_record_stream(missing_keys,
self._ordering, True):
if record.storage_kind == 'absent':
# Not in this particular stream, may be in one of the
# other fallback vfs objects.
missing_keys.remove(record.key)
if self._raw_record_map is None:
raise AssertionError('_raw_record_map should have been filled')
for key in self.keys:
if key in self.nonlocal_keys:
yield LazyKnitContentFactory(key, self.global_map[key], self, first)

def _get_one_work(self, requested_key):
# Now, if we have calculated everything already, just return the
if requested_key in self._contents_map:
return self._contents_map[requested_key]
# To simplify things, parse everything at once - code that wants one text
# probably wants them all.
# FUTURE: This function could be improved for the 'extract many' case
# by tracking each component and only doing the copy when the number of
# children that need to apply deltas to it is > 1 or it is part of the
multiple_versions = len(self.keys) != 1
if self._record_map is None:
self._record_map = self.vf._raw_map_to_record_map(
self._raw_record_map)
record_map = self._record_map
# raw_record_map is key:
# Have read and parsed records at this point.
for key in self.keys:
if key in self.nonlocal_keys:
for version_id in version_ids:
if not self.has_version(version_id):
raise RevisionNotPresent(version_id, self.filename)
record_map = self._get_record_map(version_ids)
for version_id in version_ids:
while cursor is not None:
record, record_details, digest, next = record_map[cursor]
raise RevisionNotPresent(cursor, self)
components.append((cursor, record, record_details, digest))
if cursor in self._contents_map:
# no need to plan further back
components.append((cursor, None, None, None))
method, data, digest, next = record_map[cursor]
components.append((cursor, method, data, digest))
if cursor in content_map:
for (component_id, record, record_details,
digest) in reversed(components):
if component_id in self._contents_map:
content = self._contents_map[component_id]
for component_id, method, data, digest in reversed(components):
if component_id in content_map:
content = content_map[component_id]
content, delta = self._factory.parse_record(key[-1],
record, record_details, content,
copy_base_content=multiple_versions)
if multiple_versions:
self._contents_map[component_id] = content
if method == 'fulltext':
assert content is None
content = self.factory.parse_fulltext(data, version_id)
elif method == 'line-delta':
delta = self.factory.parse_line_delta(data, version_id)
content = content.copy()
content._lines = self._apply_delta(content._lines,
content_map[component_id] = content
if 'no-eol' in self._index.get_options(version_id):
content = content.copy()
line = content._lines[-1][1].rstrip('\n')
content._lines[-1] = (content._lines[-1][0], line)
final_content[version_id] = content
# digest here is the digest from the last applied component.
text = content.text()
actual_sha = sha_strings(text)
if actual_sha != digest:
raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
if multiple_versions:
return self._contents_map[requested_key]
def _wire_bytes(self):
"""Get the bytes to put on the wire for 'key'.

The first collection of bytes asked for returns the serialised
raw_record_map and the additional details (key, parent) for key.
Subsequent calls return just the additional details (key, parent).
The wire storage_kind given for the first key is 'knit-delta-closure';
for subsequent keys it is 'knit-delta-closure-ref'.

:param key: A key from the content generator.
:return: Bytes to put on the wire.
# kind marker for dispatch on the far side,
lines.append('knit-delta-closure')
if self.vf._factory.annotated:
lines.append('annotated')
# then the list of keys
lines.append('\t'.join(['\x00'.join(key) for key in self.keys
if key not in self.nonlocal_keys]))
# then the _raw_record_map in serialised form:
# for each item in the map:
# 1 line with parents if the key is to be yielded (None: for None, '' for ())
# one line with method
# one line with noeol
# one line with next ('' for None)
# one line with byte count of the record bytes
for key, (record_bytes, (method, noeol), next) in \
self._raw_record_map.iteritems():
key_bytes = '\x00'.join(key)
parents = self.global_map.get(key, None)
parent_bytes = 'None:'
parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
method_bytes = method
next_bytes = '\x00'.join(next)
map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
len(record_bytes), record_bytes))
map_bytes = ''.join(map_byte_list)
lines.append(map_bytes)
bytes = '\n'.join(lines)
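
# --- Minimal sketch of one serialised map entry assembled above (all
# values assumed; the noeol flag encoding is elided in this fragment and
# shown here as a placeholder):
_example_map_entry = '\n'.join([
    'file-id\x00rev-1',   # key_bytes
    'None:',              # parent_bytes for parents=None
    'fulltext',           # method_bytes
    '<noeol>',            # noeol_bytes (encoding not shown above)
    '',                   # next_bytes ('' for None)
    '11',                 # byte count of the record bytes
    'hello world',        # record_bytes
])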
class _VFContentMapGenerator(_ContentMapGenerator):
"""Content map generator reading from a VersionedFiles object."""

def __init__(self, versioned_files, keys, nonlocal_keys=None,
global_map=None, raw_record_map=None, ordering='unordered'):
"""Create a _ContentMapGenerator.

:param versioned_files: The versioned files that the texts are being
:param keys: The keys to produce content maps for.
:param nonlocal_keys: An iterable of keys (possibly intersecting keys)
which are known to not be in this knit, but rather in one of the
:param global_map: The result of get_parent_map(keys) (or a supermap).
This is required if get_record_stream() is to be used.
:param raw_record_map: An unparsed raw record map to use for answering
_ContentMapGenerator.__init__(self, ordering=ordering)
# The vf to source data from
self.vf = versioned_files
self.keys = list(keys)
# Keys known to be in fallback vfs objects
if nonlocal_keys is None:
self.nonlocal_keys = set()
self.nonlocal_keys = frozenset(nonlocal_keys)
# Parents data for keys to be returned in get_record_stream
self.global_map = global_map
# The chunked lists for self.keys in text form
# A cache of KnitContent objects used in extracting texts.
self._contents_map = {}
# All the knit records needed to assemble the requested keys as full
self._record_map = None
if raw_record_map is None:
self._raw_record_map = self.vf._get_record_map_unparsed(keys,
self._raw_record_map = raw_record_map
# the factory for parsing records
self._factory = self.vf._factory
class _NetworkContentMapGenerator(_ContentMapGenerator):
"""Content map generator sourced from a network stream."""

def __init__(self, bytes, line_end):
"""Construct a _NetworkContentMapGenerator from a bytes block."""
self.global_map = {}
self._raw_record_map = {}
self._contents_map = {}
self._record_map = None
self.nonlocal_keys = []
# Get access to record parsing facilities
self.vf = KnitVersionedFiles(None, None)
line_end = bytes.find('\n', start)
line = bytes[start:line_end]
start = line_end + 1
if line == 'annotated':
self._factory = KnitAnnotateFactory()
self._factory = KnitPlainFactory()
# list of keys to emit in get_record_stream
line_end = bytes.find('\n', start)
line = bytes[start:line_end]
start = line_end + 1
tuple(segment.split('\x00')) for segment in line.split('\t')
# now a loop until the end. XXX: It would be nice if this was just a
# bunch of the same records as get_record_stream(..., False) gives, but
# there is a decent sized gap stopping that at the moment.
line_end = bytes.find('\n', start)
key = tuple(bytes[start:line_end].split('\x00'))
start = line_end + 1
# 1 line with parents (None: for None, '' for ())
line_end = bytes.find('\n', start)
line = bytes[start:line_end]
[tuple(segment.split('\x00')) for segment in line.split('\t')
self.global_map[key] = parents
start = line_end + 1
# one line with method
line_end = bytes.find('\n', start)
line = bytes[start:line_end]
start = line_end + 1
# one line with noeol
line_end = bytes.find('\n', start)
line = bytes[start:line_end]
start = line_end + 1
# one line with next ('' for None)
line_end = bytes.find('\n', start)
line = bytes[start:line_end]
next = tuple(bytes[start:line_end].split('\x00'))
start = line_end + 1
# one line with byte count of the record bytes
line_end = bytes.find('\n', start)
line = bytes[start:line_end]
start = line_end + 1
record_bytes = bytes[start:start+count]
start = start + count
self._raw_record_map[key] = (record_bytes, (method, noeol), next)

def get_record_stream(self):
"""Get a record stream for keys requested by the bytestream."""
for key in self.keys:
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
def _wire_bytes(self):

class _KndxIndex(object):
"""Manages knit index files.

The index is kept in memory and read on startup, to enable
if sha_strings(text) != digest:
raise KnitCorrupt(self.filename,
'sha-1 does not match %s' % version_id)
text_map[version_id] = text
return text_map, final_content
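
# --- Standalone sketch of the digest check performed above: sha_strings
# is equivalent to sha1 over the concatenated lines (example text assumed).
import hashlib
_example_lines = ['first line\n', 'second line\n']
_example_digest = hashlib.sha1(''.join(_example_lines)).hexdigest()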
def iter_lines_added_or_present_in_versions(self, version_ids=None,
"""See VersionedFile.iter_lines_added_or_present_in_versions()."""
if version_ids is None:
version_ids = self.versions()
version_ids = [osutils.safe_revision_id(v) for v in version_ids]
pb = progress.DummyProgress()
# we don't care about inclusions, the caller cares.
# but we need to setup a list of records to visit.
# we need version_id, position, length
version_id_records = []
requested_versions = set(version_ids)
# filter for available versions
for version_id in requested_versions:
if not self.has_version(version_id):
raise RevisionNotPresent(version_id, self.filename)
# get an in-component-order queue:
for version_id in self.versions():
if version_id in requested_versions:
index_memo = self._index.get_position(version_id)
version_id_records.append((version_id, index_memo))
total = len(version_id_records)
for version_idx, (version_id, data, sha_value) in \
enumerate(self._data.read_records_iter(version_id_records)):
pb.update('Walking content.', version_idx, total)
method = self._index.get_method(version_id)
assert method in ('fulltext', 'line-delta')
if method == 'fulltext':
line_iterator = self.factory.get_fulltext_content(data)
line_iterator = self.factory.get_linedelta_content(data)
for line in line_iterator:
pb.update('Walking content.', total, total)
def iter_parents(self, version_ids):
"""Iterate through the parents for many version ids.

:param version_ids: An iterable yielding version_ids.
:return: An iterator that yields (version_id, parents). Requested
version_ids not present in the versioned file are simply skipped.
The order is undefined, allowing for different optimisations in
the underlying implementation.
version_ids = [osutils.safe_revision_id(version_id) for
version_id in version_ids]
return self._index.iter_parents(version_ids)

def num_versions(self):
"""See VersionedFile.num_versions()."""
return self._index.num_versions()

__len__ = num_versions

def annotate_iter(self, version_id):
"""See VersionedFile.annotate_iter."""
version_id = osutils.safe_revision_id(version_id)
content = self._get_content(version_id)
for origin, text in content.annotate_iter():
def get_parents(self, version_id):
"""See VersionedFile.get_parents."""
# 52554 calls in 1264 872 internal down from 3674
version_id = osutils.safe_revision_id(version_id)
return self._index.get_parents(version_id)
raise RevisionNotPresent(version_id, self.filename)

def get_parents_with_ghosts(self, version_id):
"""See VersionedFile.get_parents."""
version_id = osutils.safe_revision_id(version_id)
return self._index.get_parents_with_ghosts(version_id)
raise RevisionNotPresent(version_id, self.filename)

def get_ancestry(self, versions, topo_sorted=True):
"""See VersionedFile.get_ancestry."""
if isinstance(versions, basestring):
versions = [versions]
versions = [osutils.safe_revision_id(v) for v in versions]
return self._index.get_ancestry(versions, topo_sorted)

def get_ancestry_with_ghosts(self, versions):
"""See VersionedFile.get_ancestry_with_ghosts."""
if isinstance(versions, basestring):
versions = [versions]
versions = [osutils.safe_revision_id(v) for v in versions]
return self._index.get_ancestry_with_ghosts(versions)

def plan_merge(self, ver_a, ver_b):
"""See VersionedFile.plan_merge."""
ver_a = osutils.safe_revision_id(ver_a)
ver_b = osutils.safe_revision_id(ver_b)
ancestors_b = set(self.get_ancestry(ver_b, topo_sorted=False))
ancestors_a = set(self.get_ancestry(ver_a, topo_sorted=False))
annotated_a = self.annotate(ver_a)
annotated_b = self.annotate(ver_b)
return merge._plan_annotate_merge(annotated_a, annotated_b,
ancestors_a, ancestors_b)
class _KnitComponentFile(object):
"""One of the files used to implement a knit database."""

def __init__(self, transport, filename, mode, file_mode=None,
create_parent_dir=False, dir_mode=None):
self._transport = transport
self._filename = filename
self._file_mode = file_mode
self._dir_mode = dir_mode
self._create_parent_dir = create_parent_dir
self._need_to_create = False

def _full_path(self):
"""Return the full path to this file."""
return self._transport.base + self._filename

def check_header(self, fp):
line = fp.readline()
# An empty file can actually be treated as though the file doesn't
raise errors.NoSuchFile(self._full_path())
if line != self.HEADER:
raise KnitHeaderError(badline=line,
filename=self._transport.abspath(self._filename))

return '%s(%s)' % (self.__class__.__name__, self._filename)
class _KnitIndex(_KnitComponentFile):
"""Manages knit index file.

The index is kept in memory and read on startup, to enable
fast lookups of revision information. The cursor of the index
file is always pointing to the end, making it easy to append
to ensure that records always start on new lines even if the last write was
interrupted. As a result it's normal for the last line in the index to be
missing a trailing newline. One can be added with no harmful effects.

:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
where prefix is e.g. the (fileid,) for .texts instances or () for
constant-mapped things like .revisions, and the old state is
tuple(history_vector, cache_dict). This is used to prevent having an
ABI change with the C extension that reads .kndx files.

HEADER = "# bzr knit index 8\n"
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
"""Create a _KndxIndex on transport using mapper."""
self._transport = transport
self._mapper = mapper
self._get_scope = get_scope
self._allow_writes = allow_writes
self._is_locked = is_locked
self.has_graph = True
def add_records(self, records, random_id=False, missing_compression_parents=False):
"""Add multiple records to the index.

:param records: a list of tuples:
(key, options, access_memo, parents).
:param random_id: If True the ids being added were randomly generated
and no check for existence will be performed.
:param missing_compression_parents: If True the records being added are
only compressed against texts already in the index (or inside
records). If False the records all refer to unavailable texts (or
texts inside records) as compression parents.
if missing_compression_parents:
# It might be nice to get the edge of the records. But keys isn't
keys = sorted(record[0] for record in records)
raise errors.RevisionNotPresent(keys, self)
for record in records:
path = self._mapper.map(key) + '.kndx'
path_keys = paths.setdefault(path, (prefix, []))
path_keys[1].append(record)
for path in sorted(paths):
prefix, path_keys = paths[path]
self._load_prefixes([prefix])
orig_history = self._kndx_cache[prefix][1][:]
orig_cache = self._kndx_cache[prefix][0].copy()
for key, options, (_, pos, size), parents in path_keys:
# kndx indices cannot be parentless.
line = "\n%s %s %s %s %s :" % (
key[-1], ','.join(options), pos, size,
self._dictionary_compress(parents))
if type(line) is not str:
raise AssertionError(
'data must be utf8 was %s' % type(line))
self._cache_key(key, options, pos, size, parents)
if len(orig_history):
self._transport.append_bytes(path, ''.join(lines))
self._init_index(path, lines)
# If any problems happen, restore the original values and re-raise
self._kndx_cache[prefix] = (orig_cache, orig_history)
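
# --- Illustrative .kndx entry built by the loop above (example values
# assumed): "<suffix> <options> <pos> <size> <parent refs> :". The leading
# newline keeps records on fresh lines even after an interrupted write.
_example_kndx_line = "\nrev-2 line-delta,no-eol 1423 104 0 :"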
def scan_unvalidated_index(self, graph_index):
"""See _KnitGraphIndex.scan_unvalidated_index."""
# Because kndx files do not support atomic insertion via separate index
# files, they do not support this method.
raise NotImplementedError(self.scan_unvalidated_index)

def get_missing_compression_parents(self):
"""See _KnitGraphIndex.get_missing_compression_parents."""
# Because kndx files do not support atomic insertion via separate index
# files, they do not support this method.
raise NotImplementedError(self.get_missing_compression_parents)
def _cache_key(self, key, options, pos, size, parent_keys):
# speed of knit parsing went from 280 ms to 280 ms with slots addition.
# __slots__ = ['_cache', '_history', '_transport', '_filename']

def _cache_version(self, version_id, options, pos, size, parents):
"""Cache a version record in the history array and index cache.

This is inlined into _load_data for performance. KEEP IN SYNC.
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
version_id = key[-1]
# last-element only for compatibility with the C load_data.
parents = tuple(parent[-1] for parent in parent_keys)
for parent in parent_keys:
if parent[:-1] != prefix:
raise ValueError("mismatched prefixes for %r, %r" % (
cache, history = self._kndx_cache[prefix]
# only want the _history index to reference the 1st index entry
# for version_id
if version_id not in cache:
index = len(history)
history.append(version_id)
if version_id not in self._cache:
index = len(self._history)
self._history.append(version_id)
index = cache[version_id][5]
cache[version_id] = (version_id,
index = self._cache[version_id][5]
self._cache[version_id] = (version_id,
def check_header(self, fp):
line = fp.readline()
# An empty file can actually be treated as though the file doesn't
raise errors.NoSuchFile(self)
if line != self.HEADER:
raise KnitHeaderError(badline=line, filename=self)

def _check_read(self):
if not self._is_locked():
raise errors.ObjectNotLocked(self)
if self._get_scope() != self._scope:

def _check_write_ok(self):
"""Assert if writes are not permitted."""
if not self._is_locked():
raise errors.ObjectNotLocked(self)
if self._get_scope() != self._scope:
if self._mode != 'w':
raise errors.ReadOnlyObjectDirtiedError(self)
def get_build_details(self, keys):
"""Get the method, index_memo and compression parent for keys.

Ghosts are omitted from the result.

:param keys: An iterable of keys.
:return: A dict of key:(index_memo, compression_parent, parents,
opaque structure to pass to read_records to extract the raw
Content that this record is built upon, may be None
Logical parents of this node
extra information about the content which needs to be passed to
Factory.parse_record
parent_map = self.get_parent_map(keys)
if key not in parent_map:
method = self.get_method(key)
parents = parent_map[key]
if method == 'fulltext':
compression_parent = None
compression_parent = parents[0]
noeol = 'no-eol' in self.get_options(key)
index_memo = self.get_position(key)
result[key] = (index_memo, compression_parent,
parents, (method, noeol))
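
# --- Shape sketch of the result built above (keys and offsets assumed):
# key -> (index_memo, compression_parent, parents, (method, noeol)).
_example_build_details = {
    ('file-id', 'rev-2'): ((('file-id', 'rev-2'), 1423, 104),
                           ('file-id', 'rev-1'),
                           (('file-id', 'rev-1'),),
                           ('line-delta', False)),
}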
def get_method(self, key):
"""Return compression method of specified key."""
options = self.get_options(key)
if 'fulltext' in options:
elif 'line-delta' in options:
raise errors.KnitIndexUnknownMethod(self, options)

def get_options(self, key):
"""Return a list representing options.
prefix, suffix = self._split_key(key)
self._load_prefixes([prefix])

def __init__(self, transport, filename, mode, create=False, file_mode=None,
create_parent_dir=False, delay_create=False, dir_mode=None):
_KnitComponentFile.__init__(self, transport, filename, mode,
file_mode=file_mode,
create_parent_dir=create_parent_dir,
# position in _history is the 'official' index for a revision
# but the values may have come from a newer entry.
# so - wc -l of a knit index is != the number of unique names
return self._kndx_cache[prefix][0][suffix][1]
raise RevisionNotPresent(key, self)
def find_ancestry(self, keys):
"""See CombinedGraphIndex.find_ancestry()"""
prefixes = set(key[:-1] for key in keys)
self._load_prefixes(prefixes)
missing_keys = set()
pending_keys = list(keys)
# This assumes that keys will not reference parents in a different
# prefix, which is accurate so far.
key = pending_keys.pop()
if key in parent_map:
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
missing_keys.add(key)
parent_keys = tuple([prefix + (suffix,)
for suffix in suffix_parents])
parent_map[key] = parent_keys
pending_keys.extend([p for p in parent_keys
if p not in parent_map])
return parent_map, missing_keys
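
# --- Result shape sketch for the ancestry walk above (keys assumed): a
# parent map for every reachable key, plus the keys that were referenced
# but not found in the index.
_example_parent_map = {('file-id', 'rev-2'): (('file-id', 'rev-1'),),
                       ('file-id', 'rev-1'): ()}
_example_missing_keys = set([('file-id', 'ghost-rev')])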
def get_parent_map(self, keys):
"""Get a map of the parents of keys.

:param keys: The keys to look up parents for.
:return: A mapping from keys to parents. Absent keys are absent from
fp = self._transport.get(self._filename)
# _load_data may raise NoSuchFile if the target knit is
_load_data(self, fp)
if mode != 'w' or not create:
self._need_to_create = True
self._transport.put_bytes_non_atomic(
self._filename, self.HEADER, mode=self._file_mode)

def get_graph(self):
"""Return a list of the node:parents lists from this knit index."""
return [(vid, idx[4]) for vid, idx in self._cache.iteritems()]

def get_ancestry(self, versions, topo_sorted=True):
"""See VersionedFile.get_ancestry."""
# get a graph of all the mentioned versions:
pending = set(versions)
version = pending.pop()
parents = [p for p in cache[version][4] if p in cache]
raise RevisionNotPresent(version, self._filename)
# if not completed and not a ghost
pending.update([p for p in parents if p not in graph])
graph[version] = parents
return topo_sort(graph.items())

def get_ancestry_with_ghosts(self, versions):
"""See VersionedFile.get_ancestry_with_ghosts."""
# get a graph of all the mentioned versions:
self.check_versions_present(versions)
pending = set(versions)
version = pending.pop()
parents = cache[version][4]
pending.update([p for p in parents if p not in graph])
graph[version] = parents
return topo_sort(graph.items())

def iter_parents(self, version_ids):
"""Iterate through the parents for many version ids.

:param version_ids: An iterable yielding version_ids.
:return: An iterator that yields (version_id, parents). Requested
version_ids not present in the versioned file are simply skipped.
The order is undefined, allowing for different optimisations in
the underlying implementation.
# Parse what we need to up front, this potentially trades off I/O
# locality (.kndx and .knit in the same block group for the same file
# id) for less checking in inner loops.
prefixes = set(key[:-1] for key in keys)
self._load_prefixes(prefixes)
for version_id in version_ids:
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
yield version_id, tuple(self.get_parents(version_id))
except KeyError:
result[key] = tuple(prefix + (suffix,) for
suffix in suffix_parents)
def get_position(self, key):
"""Return details needed to access the version.

:return: a tuple (key, data position, size) to hand to the access
logic to get the record.
prefix, suffix = self._split_key(key)
self._load_prefixes([prefix])
entry = self._kndx_cache[prefix][0][suffix]
return key, entry[2], entry[3]

has_key = _mod_index._has_key_from_parent_map

def _init_index(self, path, extra_lines=[]):
"""Initialize an index."""
sio.write(self.HEADER)
sio.writelines(extra_lines)
self._transport.put_file_non_atomic(path, sio,
create_parent_dir=True)
# self._create_parent_dir)
# mode=self._file_mode,
# dir_mode=self._dir_mode)

"""Get all the keys in the collection.

The keys are not ordered.
# Identify all key prefixes.
# XXX: A bit hacky, needs polish.
if type(self._mapper) is ConstantMapper:
for quoted_relpath in self._transport.iter_files_recursive():
path, ext = os.path.splitext(quoted_relpath)
prefixes = [self._mapper.unmap(path) for path in relpaths]
self._load_prefixes(prefixes)
for prefix in prefixes:
for suffix in self._kndx_cache[prefix][1]:
result.add(prefix + (suffix,))

def _load_prefixes(self, prefixes):
"""Load the indices for prefixes."""
for prefix in prefixes:
if prefix not in self._kndx_cache:
# the load_data interface writes to these variables.
self._filename = prefix
path = self._mapper.map(prefix) + '.kndx'
fp = self._transport.get(path)
# _load_data may raise NoSuchFile if the target knit is
_load_data(self, fp)
self._kndx_cache[prefix] = (self._cache, self._history)
self._kndx_cache[prefix] = ({}, [])
if type(self._mapper) is ConstantMapper:
# preserve behaviour for revisions.kndx etc.
self._init_index(path)

missing_keys = _mod_index._missing_keys_from_parent_map

def _partition_keys(self, keys):
"""Turn keys into a dict of prefix:suffix_list."""
prefix_keys = result.setdefault(key[:-1], [])
prefix_keys.append(key[-1])

def _dictionary_compress(self, keys):
"""Dictionary compress keys.

:param keys: The keys to generate references to.
:return: A string representation of keys. keys which are present are
dictionary compressed, and others are emitted as fulltext with a
def num_versions(self):
return len(self._history)

__len__ = num_versions

def get_versions(self):
"""Get all the versions in the file, not topologically sorted."""
return self._history

def _version_list_to_index(self, versions):
result_list = []
prefix = keys[0][:-1]
cache = self._kndx_cache[prefix][0]
if key[:-1] != prefix:
# kndx indices cannot refer across partitioned storage.
raise ValueError("mismatched prefixes for %r" % keys)
if key[-1] in cache:
for version in versions:
if version in cache:
# -- inlined lookup() --
result_list.append(str(cache[key[-1]][5]))
result_list.append(str(cache[version][5]))
# -- end lookup () --
result_list.append('.' + key[-1])
result_list.append('.' + version)
return ' '.join(result_list)
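
# --- Compression sketch matching the lookups above (cache entry assumed):
# a parent present in the cache is emitted as its integer entry index
# (entry[5]); an absent (ghost) parent is emitted as '.' + its id.
_example_cache = {'rev-1': ('rev-1', ['fulltext'], 0, 1423, [], 0)}
# present 'rev-1' -> '0'; ghost 'rev-x' -> '.rev-x'; joined -> '0 .rev-x'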
def _reset_cache(self):
# Possibly this should be a LRU cache. A dictionary from key_prefix to
# (cache_dict, history_vector) for parsed kndx files.
self._kndx_cache = {}
self._scope = self._get_scope()
allow_writes = self._allow_writes()
def add_version(self, version_id, options, index_memo, parents):
"""Add a version record to the index."""
self.add_versions(((version_id, options, index_memo, parents),))

def add_versions(self, versions):
"""Add multiple versions to the index.

:param versions: a list of tuples:
(version_id, options, pos, size, parents).
orig_history = self._history[:]
orig_cache = self._cache.copy()
for version_id, options, (index, pos, size), parents in versions:
line = "\n%s %s %s %s %s :" % (version_id,
self._version_list_to_index(parents))
assert isinstance(line, str), \
'content must be utf-8 encoded: %r' % (line,)
self._cache_version(version_id, options, pos, size, parents)
if not self._need_to_create:
self._transport.append_bytes(self._filename, ''.join(lines))
sio.write(self.HEADER)
sio.writelines(lines)
self._transport.put_file_non_atomic(self._filename, sio,
create_parent_dir=self._create_parent_dir,
mode=self._file_mode,
dir_mode=self._dir_mode)
self._need_to_create = False
# If any problems happen, restore the original values and re-raise
self._history = orig_history
self._cache = orig_cache

def has_version(self, version_id):
"""True if the version is in the index."""
return version_id in self._cache

def get_position(self, version_id):
"""Return details needed to access the version.

.kndx indices do not support split-out data, so return None for the
:return: a tuple (None, data position, size) to hand to the access
logic to get the record.
entry = self._cache[version_id]
return None, entry[2], entry[3]

def get_method(self, version_id):
"""Return compression method of specified version."""
options = self._cache[version_id][1]
if 'fulltext' in options:
def _sort_keys_by_io(self, keys, positions):
"""Figure out an optimal order to read the records for the given keys.

Sort keys, grouped by index and sorted by position.

:param keys: A list of keys whose records we want to read. This will be
:param positions: A dict, such as the one returned by
_get_components_positions()
if 'line-delta' not in options:
raise errors.KnitIndexUnknownMethod(self._full_path(), options)

def get_options(self, version_id):
"""Return a string representation of options.
def get_sort_key(key):
index_memo = positions[key][1]
# Group by prefix and position. index_memo[0] is the key, so it is
# (file_id, revision_id) and we don't want to sort on revision_id,
# index_memo[1] is the position, and index_memo[2] is the size,
# which doesn't matter for the sort
return index_memo[0][:-1], index_memo[1]
return keys.sort(key=get_sort_key)
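
# --- Usage sketch (positions dict assumed, matching the layout read by
# get_sort_key above): sorting groups keys by prefix, then by byte
# position, so reads become sequential; the list is sorted in place.
_example_positions = {
    ('f', 'rev-b'): (None, (('f', 'rev-b'), 900, 50), None),
    ('f', 'rev-a'): (None, (('f', 'rev-a'), 100, 80), None),
}
_example_keys = [('f', 'rev-b'), ('f', 'rev-a')]
# _sort_keys_by_io(_example_keys, _example_positions) would reorder the
# list to [('f', 'rev-a'), ('f', 'rev-b')].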
_get_total_build_size = _get_total_build_size

def _split_key(self, key):
"""Split key into a prefix and suffix."""
return key[:-1], key[-1]

class _KnitGraphIndex(object):
"""A KnitVersionedFiles index layered on GraphIndex."""

def __init__(self, graph_index, is_locked, deltas=False, parents=True,
add_callback=None, track_external_parent_refs=False):
return self._cache[version_id][1]

def get_parents(self, version_id):
"""Return parents of specified version ignoring ghosts."""
return [parent for parent in self._cache[version_id][4]
if parent in self._cache]

def get_parents_with_ghosts(self, version_id):
"""Return parents of specified version with ghosts."""
return self._cache[version_id][4]

def check_versions_present(self, version_ids):
"""Check that all specified versions are present."""
for version_id in version_ids:
if version_id not in cache:
raise RevisionNotPresent(version_id, self._filename)

class KnitGraphIndex(object):
"""A knit index that builds on GraphIndex."""

def __init__(self, graph_index, deltas=False, parents=True, add_callback=None):
"""Construct a KnitGraphIndex on a graph_index.

:param graph_index: An implementation of bzrlib.index.GraphIndex.
:param is_locked: A callback to check whether the object should answer
:param deltas: Allow delta-compressed records.
:param parents: If True, record knits parents, if not do not record
:param add_callback: If not None, allow additions to the index and call
this callback with a list of added GraphIndex nodes:
[(node, value, node_refs), ...]
:param track_external_parent_refs: If True, record all external parent
references parents from added records. These can be retrieved
later by calling get_missing_parents().
self._add_callback = add_callback
self._graph_index = graph_index
self._deltas = deltas
self._parents = parents
if deltas and not parents:
# XXX: TODO: Delta tree and parent graph should be conceptually
raise KnitCorrupt(self, "Cannot do delta compression without "
"parent tracking.")
self.has_graph = parents
self._is_locked = is_locked
self._missing_compression_parents = set()
if track_external_parent_refs:
self._key_dependencies = _KeyRefs()
self._key_dependencies = None

return "%s(%r)" % (self.__class__.__name__, self._graph_index)

def add_records(self, records, random_id=False,
missing_compression_parents=False):
"""Add multiple records to the index.
def _get_entries(self, keys, check_present=False):
"""Get the entries for keys.

:param keys: An iterable of index keys, - 1-tuples.
for node in self._graph_index.iter_entries(keys):
found_keys.add(node[1])
# adapt parentless index to the rest of the code.
for node in self._graph_index.iter_entries(keys):
yield node[0], node[1], node[2], ()
found_keys.add(node[1])
missing_keys = keys.difference(found_keys)
raise RevisionNotPresent(missing_keys.pop(), self)

def _present_keys(self, version_ids):
node[1] for node in self._get_entries(version_ids)])

def _parentless_ancestry(self, versions):
"""Honour the get_ancestry API for parentless knit indices."""
wanted_keys = self._version_ids_to_keys(versions)
present_keys = self._present_keys(wanted_keys)
missing = set(wanted_keys).difference(present_keys)
raise RevisionNotPresent(missing.pop(), self)
return list(self._keys_to_version_ids(present_keys))

def get_ancestry(self, versions, topo_sorted=True):
"""See VersionedFile.get_ancestry."""
if not self._parents:
return self._parentless_ancestry(versions)
# XXX: This will do len(history) index calls - perhaps
# it should be altered to be an index core feature?
# get a graph of all the mentioned versions:
versions = self._version_ids_to_keys(versions)
pending = set(versions)
# get all pending nodes
this_iteration = pending
new_nodes = self._get_entries(this_iteration)
for (index, key, value, node_refs) in new_nodes:
# don't ask for ghosties - otherwise
# we can end up looping with pending
# being entirely ghosted.
graph[key] = [parent for parent in node_refs[0]
if parent not in ghosts]
for parent in graph[key]:
# don't examine known nodes again
ghosts.update(this_iteration.difference(found))
if versions.difference(graph):
raise RevisionNotPresent(versions.difference(graph).pop(), self)
result_keys = topo_sort(graph.items())
result_keys = graph.iterkeys()
return [key[0] for key in result_keys]

def get_ancestry_with_ghosts(self, versions):
"""See VersionedFile.get_ancestry."""
if not self._parents:
return self._parentless_ancestry(versions)
# XXX: This will do len(history) index calls - perhaps
# it should be altered to be an index core feature?
# get a graph of all the mentioned versions:
versions = self._version_ids_to_keys(versions)
pending = set(versions)
# get all pending nodes
this_iteration = pending
new_nodes = self._get_entries(this_iteration)
for (index, key, value, node_refs) in new_nodes:
graph[key] = node_refs[0]
for parent in graph[key]:
# don't examine known nodes again
missing_versions = this_iteration.difference(graph)
missing_needed = versions.intersection(missing_versions)
raise RevisionNotPresent(missing_needed.pop(), self)
for missing_version in missing_versions:
# add a key, no parents
graph[missing_version] = []
pending.discard(missing_version) # don't look for it
result_keys = topo_sort(graph.items())
return [key[0] for key in result_keys]
def get_graph(self):
"""Return a list of the node:parents lists from this knit index."""
if not self._parents:
return [(key, ()) for key in self.get_versions()]
for index, key, value, refs in self._graph_index.iter_all_entries():
result.append((key[0], tuple([ref[0] for ref in refs[0]])))

def iter_parents(self, version_ids):
"""Iterate through the parents for many version ids.

:param version_ids: An iterable yielding version_ids.
:return: An iterator that yields (version_id, parents). Requested
version_ids not present in the versioned file are simply skipped.
The order is undefined, allowing for different optimisations in
the underlying implementation.
all_nodes = set(self._get_entries(self._version_ids_to_keys(version_ids)))
present_parents = set()
for node in all_nodes:
all_parents.update(node[3][0])
# any node we are querying must be present
present_parents.add(node[1])
unknown_parents = all_parents.difference(present_parents)
present_parents.update(self._present_keys(unknown_parents))
for node in all_nodes:
for parent in node[3][0]:
if parent in present_parents:
parents.append(parent[0])
yield node[1][0], tuple(parents)
for node in self._get_entries(self._version_ids_to_keys(version_ids)):
yield node[1][0], ()

def num_versions(self):
return len(list(self._graph_index.iter_all_entries()))

__len__ = num_versions

def get_versions(self):
"""Get all the versions in the file, not topologically sorted."""
return [node[1][0] for node in self._graph_index.iter_all_entries()]
def has_version(self, version_id):
"""True if the version is in the index."""
return len(self._present_keys(self._version_ids_to_keys([version_id]))) == 1

def _keys_to_version_ids(self, keys):
return tuple(key[0] for key in keys)

def get_position(self, version_id):
"""Return details needed to access the version.

:return: a tuple (index, data position, size) to hand to the access
logic to get the record.
node = self._get_node(version_id)
bits = node[2][1:].split(' ')
return node[0], int(bits[0]), int(bits[1])

def get_method(self, version_id):
"""Return compression method of specified version."""
if not self._deltas:
return self._parent_compression(self._get_node(version_id)[3][1])

def _parent_compression(self, reference_list):
# use the second reference list to decide if this is delta'd or not.
if len(reference_list):

def _get_node(self, version_id):
return list(self._get_entries(self._version_ids_to_keys([version_id])))[0]

def get_options(self, version_id):
"""Return a string representation of options.
node = self._get_node(version_id)
if not self._deltas:
options = ['fulltext']
options = [self._parent_compression(node[3][1])]
if node[2][0] == 'N':
options.append('no-eol')
def get_parents(self, version_id):
"""Return parents of specified version ignoring ghosts."""
parents = list(self.iter_parents([version_id]))
raise errors.RevisionNotPresent(version_id, self)
return parents[0][1]

def get_parents_with_ghosts(self, version_id):
"""Return parents of specified version with ghosts."""
nodes = list(self._get_entries(self._version_ids_to_keys([version_id]),
check_present=True))
if not self._parents:
return self._keys_to_version_ids(nodes[0][3][0])

def check_versions_present(self, version_ids):
"""Check that all specified versions are present."""
keys = self._version_ids_to_keys(version_ids)
present = self._present_keys(keys)
missing = keys.difference(present)
raise RevisionNotPresent(missing.pop(), self)

def add_version(self, version_id, options, access_memo, parents):
"""Add a version record to the index."""
return self.add_versions(((version_id, options, access_memo, parents),))

def add_versions(self, versions):
"""Add multiple versions to the index.
This function does not insert data into the Immutable GraphIndex
backing the KnitGraphIndex, instead it prepares data for insertion by
the caller and checks that it is safe to insert then calls
self._add_callback with the prepared GraphIndex nodes.

:param records: a list of tuples:
(key, options, access_memo, parents).
:param random_id: If True the ids being added were randomly generated
and no check for existence will be performed.
:param missing_compression_parents: If True the records being added are
only compressed against texts already in the index (or inside
records). If False the records all refer to unavailable texts (or
texts inside records) as compression parents.
:param versions: a list of tuples:
(version_id, options, pos, size, parents).
if not self._add_callback:
raise errors.ReadOnlyError(self)
# we hope there are no repositories with inconsistent parentage
compression_parents = set()
key_dependencies = self._key_dependencies
for (key, options, access_memo, parents) in records:
parents = tuple(parents)
if key_dependencies is not None:
key_dependencies.add_references(key, parents)
for (version_id, options, access_memo, parents) in versions:
index, pos, size = access_memo
key = (version_id, )
parents = tuple((parent, ) for parent in parents)
if 'no-eol' in options:
for key, (value, node_refs) in keys.iteritems():
result.append((key, value))
self._add_callback(result)
if missing_compression_parents:
# This may appear to be incorrect (it does not check for
# compression parents that are in the existing graph index),
# but such records won't have been buffered, so this is
# actually correct: every entry when
# missing_compression_parents==True either has a missing parent, or
# a parent that is one of the keys in records.
compression_parents.difference_update(keys)
self._missing_compression_parents.update(compression_parents)
# Adding records may have satisfied missing compression parents.
self._missing_compression_parents.difference_update(keys)
2911
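
    # Usage sketch (hypothetical values, not from the original source):
    #   index.add_versions([
    #       (('rev-2',), ['line-delta'], (None, 100, 50), [('rev-1',)]),
    #   ])
    # prepares one GraphIndex node keyed ('rev-2',) with value ' 100 50'
    # and reference lists pointing at ('rev-1',), then hands the prepared
    # node to self._add_callback.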
    def scan_unvalidated_index(self, graph_index):
        """Inform this _KnitGraphIndex that there is an unvalidated index.

        This allows this _KnitGraphIndex to keep track of any missing
        compression parents we may want to have filled in to make those
        indices valid.

        :param graph_index: A GraphIndex
        """
        if self._deltas:
            new_missing = graph_index.external_references(ref_list_num=1)
            new_missing.difference_update(self.get_parent_map(new_missing))
            self._missing_compression_parents.update(new_missing)
        if self._key_dependencies is not None:
            # Add parent refs from graph_index (and discard parent refs that
            # the graph_index has).
            for node in graph_index.iter_all_entries():
                self._key_dependencies.add_references(node[1], node[3][0])
    def get_missing_compression_parents(self):
        """Return the keys of missing compression parents.

        Missing compression parents occur when a record stream was missing
        basis texts, or an index was scanned that had missing basis texts.
        """
        return frozenset(self._missing_compression_parents)
    def get_missing_parents(self):
        """Return the keys of missing parents."""
        # If updating this, you should also update
        # groupcompress._GCGraphIndex.get_missing_parents
        # We may have false positives, so filter those out.
        self._key_dependencies.satisfy_refs_for_keys(
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
        return frozenset(self._key_dependencies.get_unsatisfied_refs())
    def _check_read(self):
        """Raise if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        if len(compression_parents) != 1:
            raise AssertionError(
                "Too many compression parents: %r" % compression_parents)
        return compression_parents[0]
    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys, False)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = ()
            else:
                parents = entry[3][0]
            if not self._deltas:
                compression_parent_key = None
            else:
                compression_parent_key = self._compression_parent(entry)
            noeol = (entry[2][0] == 'N')
            if compression_parent_key:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[key] = (self._node_to_position(entry),
                           compression_parent_key, parents,
                           (method, noeol))
        return result
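
    # A minimal sketch (hypothetical keys and offsets, not from the original
    # source) of the mapping returned by get_build_details:
    #   {('rev-2',): ((idx, 100, 50),         # index_memo
    #                 ('rev-1',),             # compression parent
    #                 (('rev-1',),),          # logical parents
    #                 ('line-delta', False))} # record_details: (method, noeol)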
    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise RevisionNotPresent(missing_keys.pop(), self)
    def get_method(self, key):
        """Return compression method of specified key."""
        return self._get_method(self._get_node(key))

    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'

    def _get_node(self, key):
        try:
            return list(self._get_entries([key]))[0]
        except IndexError:
            raise RevisionNotPresent(key, self)
    def get_options(self, key):
        """Return a list representing options.

        e.g. ['fulltext', 'no-eol']
        """
        node = self._get_node(key)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options
    def find_ancestry(self, keys):
        """See CombinedGraphIndex.find_ancestry()"""
        return self._graph_index.find_ancestry(keys, 0)
    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result
    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(key)
        return self._node_to_position(node)
    has_key = _mod_index._has_key_from_parent_map

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    missing_keys = _mod_index._missing_keys_from_parent_map
    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
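
    # Illustrative sketch (hypothetical value, not from the original source)
    # of the stored value format: one flag byte ('N' for no-eol, ' '
    # otherwise) followed by "<byte offset> <byte length>":
    #   >>> value = 'N1234 567'
    #   >>> value[1:].split(' ')
    #   ['1234', '567']
    # so _node_to_position yields (node[0], 1234, 567).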
    def _sort_keys_by_io(self, keys, positions):
        """Figure out an optimal order to read the records for the given keys.

        Sort keys, grouped by index and sorted by position.

        :param keys: A list of keys whose records we want to read. This will be
            sorted in-place.
        :param positions: A dict, such as the one returned by
            _get_components_positions()
        :return: None
        """
        def get_index_memo(key):
            # index_memo is at offset [1]. It is made up of (GraphIndex,
            # position, size). GI is an object, which will be unique for each
            # pack file. This causes us to group by pack file, then sort by
            # position. Size doesn't matter, but it isn't worth breaking up
            # the read.
            return positions[key][1]
        return keys.sort(key=get_index_memo)
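
    # Usage sketch (hypothetical positions dict, not from the original
    # source): sorting on (GraphIndex, position, size) groups keys by pack
    # file first, then by byte offset within it:
    #   positions = {('a',): (None, (gi1, 200, 10)),
    #                ('b',): (None, (gi2, 0, 10)),
    #                ('c',): (None, (gi1, 0, 10))}
    #   keys = [('a',), ('b',), ('c',)]
    #   index._sort_keys_by_io(keys, positions)
    #   # keys is now grouped per index, e.g. [('c',), ('a',), ('b',)]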
    _get_total_build_size = _get_total_build_size

class _KnitKeyAccess(object):
    """Access to records in .knit files."""

    def __init__(self, transport, mapper):
        """Create a _KnitKeyAccess with transport and mapper.

        :param transport: The transport the access object is rooted at.
        :param mapper: The mapper used to map keys to .knit files.
        """
        self._transport = transport
        self._mapper = mapper
    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size
            of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _KnitKeyAccess the memo is (key, pos,
            length), where the key is the record key.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        # TODO: This can be tuned for writing to sftp and other servers where
        # append() is relatively expensive by grouping the writes to each key
        # prefix.
        for key, size in key_sizes:
            path = self._mapper.map(key)
            try:
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            except errors.NoSuchFile:
                self._transport.mkdir(osutils.dirname(path))
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            offset += size
            result.append((key, base, size))
        return result
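
    # Illustrative note (hypothetical values, not from the original source):
    # for key_sizes = [(('file-id', 'rev-1'), 120)] the returned memos look
    # like [(('file-id', 'rev-1'), 0, 120)]: the key, the byte offset the
    # record was appended at, and its length.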
"""Flush pending writes on this access object.
3182
For .knit files this is a no-op.
1717
"""IFF this data access has its own storage area, initialise it.
1721
self._transport.put_bytes_non_atomic(self._filename, '',
1722
mode=self._file_mode)
1724
def open_file(self):
1725
"""IFF this data access can be represented as a single file, open it.
1727
For knits that are not mapped to a single file on disk this will
1730
:return: None or a file handle.
1733
return self._transport.get(self._filename)
3186
1738
    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for a records.

        :param memos_for_retrieval: An iterable containing the access memo for
            retrieving the bytes.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index request to minimise readv's issued.
        request_lists = []
        current_prefix = None
        for (key, offset, length) in memos_for_retrieval:
            if current_prefix == key[:-1]:
                current_list.append((offset, length))
            else:
                if current_prefix is not None:
                    request_lists.append((current_prefix, current_list))
                current_prefix = key[:-1]
                current_list = [(offset, length)]
        # handle the last entry
        if current_prefix is not None:
            request_lists.append((current_prefix, current_list))
        for prefix, read_vector in request_lists:
            path = self._mapper.map(prefix) + '.knit'
            for pos, data in self._transport.readv(path, read_vector):
                yield data
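
    # Illustrative note (hypothetical memos, not from the original source):
    # grouping consecutive memos that share a key prefix turns
    #   [(('f', 'r1'), 0, 10), (('f', 'r2'), 10, 10), (('g', 'r1'), 0, 5)]
    # into two readv() calls: one against the file mapped from ('f',) with
    # read vector [(0, 10), (10, 10)], and one against the file mapped from
    # ('g',) with [(0, 5)].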
class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func
    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size
            of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result
"""Flush pending writes on this access object.
1795
"""Pack based knits do not get individually created."""
3260
This will flush any buffered writes to a NewPack.
3262
if self._flush_func is not None:
3265
1797
    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for a records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an index
                # reload, and this index has gone missing, we need to start
                # over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that can
                    # be done. We raise an error, and the caller needs to
                    # handle it.
                    raise
                raise errors.RetryWithNewPacks(index,
                    reload_occurred=True,
                    exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                    reload_occurred=False,
                    exc_info=sys.exc_info())
    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index
    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
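
# Usage sketch (hypothetical caller, not from the original source): callers
# loop until the read succeeds, asking the access object to reload pack
# state whenever RetryWithNewPacks escapes:
#   while True:
#       try:
#           for data in access.get_raw_records(memos):
#               process(data)
#           break
#       except errors.RetryWithNewPacks, e:
#           access.reload_or_raise(e)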
def annotate_knit(knit, revision_id):
    """Annotate a knit with no cached annotations.

    This implementation is for knits with no cached annotations.
    It will work for knits with cached annotations, but this is not
    recommended.
    """
    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate_flat(revision_id))
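
# Usage sketch (hypothetical names, not from the original source):
# annotate_knit yields one (origin, line) pair per line of the text:
#   for origin, line in annotate_knit(knit_vf, ('rev-2',)):
#       print origin, line,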
class _KnitAnnotator(annotate.Annotator):
    """Build up the annotations for a text."""

    def __init__(self, vf):
        annotate.Annotator.__init__(self, vf)

        # TODO: handle Nodes which cannot be extracted
        # self._ghosts = set()

        # Map from (key, parent_key) => matching_blocks, should be 'use once'
        self._matching_blocks = {}

        # KnitContent objects
        self._content_objects = {}
        # The number of children that depend on this fulltext content object
        self._num_compression_children = {}
        # Delta records that need their compression parent before they can be
        # expanded
        self._pending_deltas = {}
        # Fulltext records that are waiting for their parents fulltexts before
        # they can be yielded for annotation
        self._pending_annotation = {}

        self._all_build_details = {}
    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different than the
        data you need to annotate that text. (At a minimum, you need both
        parents to create an annotation, but only need 1 parent to generate
        the fulltext.)

        :return: A list of (key, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        pending = set([key])
        records = []
        ann_keys = set()
        self._num_needed_children[key] = 1
        while pending:
            # get all pending nodes
            this_iteration = pending
            build_details = self._vf._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._vf._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parent_keys,
                 record_details) = details
                self._parent_map[key] = parent_keys
                self._heads_provider = None
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update([p for p in parent_keys
                                if p not in self._all_build_details])
                if parent_keys:
                    for parent_key in parent_keys:
                        if parent_key in self._num_needed_children:
                            self._num_needed_children[parent_key] += 1
                        else:
                            self._num_needed_children[parent_key] = 1
                if compression_parent:
                    if compression_parent in self._num_compression_children:
                        self._num_compression_children[compression_parent] += 1
                    else:
                        self._num_compression_children[compression_parent] = 1

            missing_versions = this_iteration.difference(build_details.keys())
            if missing_versions:
                for key in missing_versions:
                    if key in self._parent_map and key in self._text_cache:
                        # We already have this text ready, we just need to
                        # yield it later so we get it annotated
                        ann_keys.add(key)
                        parent_keys = self._parent_map[key]
                        for parent_key in parent_keys:
                            if parent_key in self._num_needed_children:
                                self._num_needed_children[parent_key] += 1
                            else:
                                self._num_needed_children[parent_key] = 1
                        pending.update([p for p in parent_keys
                                        if p not in self._all_build_details])
                    else:
                        raise errors.RevisionNotPresent(key, self._vf)
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records, ann_keys
    def _get_needed_texts(self, key, pb=None):
        # if True or len(self._vf._immediate_fallback_vfs) > 0:
        if len(self._vf._immediate_fallback_vfs) > 0:
            # If we have fallbacks, go to the generic path
            for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
                yield v
            return
class _KnitData(object):
    """Manage extraction of data from a KnitAccess, caching and decompressing.

    The KnitData class provides the logic for parsing and using knit records,
    making use of an access method for the low level read and write operations.
    """

    def __init__(self, access):
        """Create a KnitData object.

        :param access: The access method to use. Access methods such as
            _KnitAccess manage the insertion of raw records and the subsequent
            retrieval of the same.
        """
        self._access = access
        self._checked = False
        # TODO: jam 20060713 conceptually, this could spill to disk
        #       if the cached size gets larger than a certain amount
        #       but it complicates the model a bit, so for now just use
        #       a simple dictionary
        self._cache = {}
        self._do_cache = False

    def enable_cache(self):
        """Enable caching of reads."""
        self._do_cache = True

    def clear_cache(self):
        """Clear the record cache."""
        self._do_cache = False
        self._cache = {}

    def _open_file(self):
        return self._access.open_file()

    def _record_to_data(self, version_id, digest, lines):
        """Convert version_id, digest, lines into a raw data block.

        :return: (len, a StringIO instance with the raw data ready to read.)
        """
        sio = StringIO()
        data_file = GzipFile(None, mode='wb', fileobj=sio)
        assert isinstance(version_id, str)
        data_file.writelines(chain(
            ["version %s %d %s\n" % (version_id,
                                     len(lines),
                                     digest)],
            lines,
            ["end %s\n" % version_id]))
        data_file.close()
        length = sio.tell()

        sio.seek(0)
        return length, sio

    def add_raw_records(self, sizes, raw_data):
        """Append a prepared record to the data file.

        :param sizes: An iterable containing the size of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: a list of index data for the way the data was stored.
            See the access method add_raw_records documentation for more
            details.
        """
        return self._access.add_raw_records(sizes, raw_data)
    def add_record(self, version_id, digest, lines):
        """Write new text record to disk.

        Returns index data for retrieving it later, as per add_raw_records.
        """
        size, sio = self._record_to_data(version_id, digest, lines)
        result = self.add_raw_records([size], sio.getvalue())
        if self._do_cache:
            self._cache[version_id] = sio.getvalue()
        return result[0]
    def _parse_record_header(self, version_id, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream.
                 as (stream, header_record)
        """
        df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            rec = self._check_header(version_id, df.readline())
        except Exception, e:
            raise KnitCorrupt(self._access,
                              "While reading {%s} got %s(%s)"
                              % (version_id, e.__class__.__name__, str(e)))
        return df, rec
    def _check_header(self, version_id, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self._access,
                              'unexpected number of elements in record header')
        if rec[1] != version_id:
            raise KnitCorrupt(self._access,
                              'unexpected version, wanted %r, got %r'
                              % (version_id, rec[1]))
        return rec
    def _parse_record(self, version_id, data):
        # profiling notes:
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self._access,
                              "While reading {%s} got %s(%s)"
                              % (version_id, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._check_header(version_id, header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self._access,
                              'incorrect number of lines %s != %s'
                              ' for version {%s}'
                              % (len(record_contents), int(rec[2]),
                                 version_id))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self._access,
                              'unexpected version end line %r, wanted %r'
                              % (last_line, version_id))
        df.close()
        return record_contents, rec[3]
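
    # Illustrative note (hypothetical record, not from the original source):
    # each record is a gzipped block of the form
    #   version <version_id> <line count> <sha1 digest>\n
    #   <line 1> ... <line N>
    #   end <version_id>\n
    # so _parse_record returns (the N content lines, the sha1 digest).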
    def read_records_iter_raw(self, records):
        """Read text records from data file and yield raw data.

        This unpacks enough of the text record to validate the id is
        as expected but that's all.
        """
        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(records):
            # grab the disk data needed.
            if self._cache:
                # Don't check _cache if it is empty
                needed_offsets = [index_memo for version_id, index_memo
                                  in records
                                  if version_id not in self._cache]
            else:
                needed_offsets = [index_memo for version_id, index_memo
                                  in records]

            raw_records = self._access.get_raw_records(needed_offsets)

        for version_id, index_memo in records:
            if version_id in self._cache:
                # This data has already been validated
                data = self._cache[version_id]
            else:
                data = raw_records.next()
                if self._do_cache:
                    self._cache[version_id] = data
                # validate the header
                df, rec = self._parse_record_header(version_id, data)
                df.close()
            yield version_id, data
    def read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read.
        Not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.

        :param records: A list of (version_id, pos, len) entries
        :return: Yields (version_id, contents, digest) in the order
                 read, not the order requested
        """
        if not records:
            return

        if self._cache:
            # Skip records we have already seen
            yielded_records = set()
            needed_records = set()
            for record in records:
                if record[0] in self._cache:
                    if record[0] in yielded_records:
                        continue
                    yielded_records.add(record[0])
                    data = self._cache[record[0]]
                    content, digest = self._parse_record(record[0], data)
                    yield (record[0], content, digest)
                else:
                    needed_records.add(record)
            needed_records = sorted(needed_records, key=operator.itemgetter(1))
        else:
            needed_records = sorted(set(records), key=operator.itemgetter(1))

        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (ie, reads continuous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for version_id, index_memo in needed_records])

        for (version_id, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(version_id, data)
            if self._do_cache:
                self._cache[version_id] = data
            yield version_id, content, digest
    def read_records(self, records):
        """Read records into a dictionary."""
        components = {}
        for record_id, content, digest in \
                self.read_records_iter(records):
            components[record_id] = (content, digest)
        return components
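
    # Usage sketch (hypothetical memos, not from the original source):
    #   records = [('rev-1', (None, 0, 120)), ('rev-2', (None, 120, 80))]
    #   for version_id, content, digest in data.read_records_iter(records):
    #       ...  # arrives in read order, one response per distinct version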
class InterKnit(InterVersionedFile):
    """Optimised code paths for knit to knit operations."""

    _matching_file_from_factory = KnitVersionedFile
    _matching_file_to_factory = KnitVersionedFile

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with knits."""
        try:
            return (isinstance(source, KnitVersionedFile) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False
    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        assert isinstance(self.source, KnitVersionedFile)
        assert isinstance(self.target, KnitVersionedFile)

        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)
            if None in version_ids:
                version_ids.remove(None)

            self.source_ancestry = set(self.source.get_ancestry(version_ids))
            this_versions = set(self.target._index.get_versions())
            needed_versions = self.source_ancestry - this_versions
            cross_check_versions = self.source_ancestry.intersection(this_versions)
            mismatched_versions = set()
            for version in cross_check_versions:
                # scan to include needed parents.
                n1 = set(self.target.get_parents_with_ghosts(version))
                n2 = set(self.source.get_parents_with_ghosts(version))
                if n1 != n2:
                    # FIXME TEST this check for cycles being introduced works
                    # the logic is we have a cycle if in our graph we are an
                    # ancestor of any of the n2 revisions.
                    for parent in n2:
                        if parent in n1:
                            # safe
                            continue
                        parent_ancestors = self.source.get_ancestry(parent)
                        if version in parent_ancestors:
                            raise errors.GraphCycleError([parent, version])
                    # ensure this parent will be available later.
                    new_parents = n2.difference(n1)
                    needed_versions.update(new_parents.difference(this_versions))
                    mismatched_versions.add(version)

            if not needed_versions and not mismatched_versions:
                return 0
            full_list = topo_sort(self.source.get_graph())

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # plan the join:
            copy_queue = []
            copy_queue_records = []
            copy_set = set()
            for version_id in version_list:
                options = self.source._index.get_options(version_id)
                parents = self.source._index.get_parents_with_ghosts(version_id)
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must:
                    # * already have it or
                    # * have it scheduled already
                    # otherwise we don't care
                    assert (self.target.has_version(parent) or
                            parent in copy_set or
                            not self.source.has_version(parent))
                index_memo = self.source._index.get_position(version_id)
                copy_queue_records.append((version_id, index_memo))
                copy_queue.append((version_id, options, parents))
                copy_set.add(version_id)

            # data suck the join:
            count = 0
            total = len(version_list)
            raw_datum = []
            raw_records = []
            for (version_id, raw_data), \
                (version_id2, options, parents) in \
                izip(self.source._data.read_records_iter_raw(copy_queue_records),
                     copy_queue):
                assert version_id == version_id2, 'logic error, inconsistent results'
                count = count + 1
                pb.update("Joining knit", count, total)
                raw_records.append((version_id, options, parents, len(raw_data)))
                raw_datum.append(raw_data)
            self.target._add_raw_records(raw_records, ''.join(raw_datum))

            for version in mismatched_versions:
                # FIXME RBC 20060309 is this needed?
                n1 = set(self.target.get_parents_with_ghosts(version))
                n2 = set(self.source.get_parents_with_ghosts(version))
                # write a combined record to our history preserving the current
                # parents as first in the list
                new_parents = self.target.get_parents_with_ghosts(version) + list(n2.difference(n1))
                self.target.fix_parents(version, new_parents)
            return count
        finally:
            pb.finished()
InterVersionedFile.register_optimiser(InterKnit)

class WeaveToKnit(InterVersionedFile):
    """Optimised code paths for weave to knit operations."""

    _matching_file_from_factory = bzrlib.weave.WeaveFile
    _matching_file_to_factory = KnitVersionedFile

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with weaves to knits."""
        try:
            return (isinstance(source, bzrlib.weave.Weave) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False
    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        assert isinstance(self.source, bzrlib.weave.Weave)
        assert isinstance(self.target, KnitVersionedFile)

        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)

            self.source_ancestry = set(self.source.get_ancestry(version_ids))
            this_versions = set(self.target._index.get_versions())
            needed_versions = self.source_ancestry - this_versions
            cross_check_versions = self.source_ancestry.intersection(this_versions)
            mismatched_versions = set()
            for version in cross_check_versions:
                # scan to include needed parents.
                n1 = set(self.target.get_parents_with_ghosts(version))
                n2 = set(self.source.get_parents(version))
                # if all of n2's parents are in n1, then its fine.
                if n2.difference(n1):
                    # FIXME TEST this check for cycles being introduced works
                    # the logic is we have a cycle if in our graph we are an
                    # ancestor of any of the n2 revisions.
                    for parent in n2:
                        if parent in n1:
                            # safe
                            continue
                        parent_ancestors = self.source.get_ancestry(parent)
                        if version in parent_ancestors:
                            raise errors.GraphCycleError([parent, version])
                    # ensure this parent will be available later.
                    new_parents = n2.difference(n1)
                    needed_versions.update(new_parents.difference(this_versions))
                    mismatched_versions.add(version)

            if not needed_versions and not mismatched_versions:
                return 0
            full_list = topo_sort(self.source.get_graph())

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # do the join:
            count = 0
            total = len(version_list)
            for version_id in version_list:
                pb.update("Converting to knit", count, total)
                parents = self.source.get_parents(version_id)
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must already have it
                    assert (self.target.has_version(parent))
                self.target.add_lines(
                    version_id, parents, self.source.get_lines(version_id))
                count = count + 1

            for version in mismatched_versions:
                # FIXME RBC 20060309 is this needed?
                n1 = set(self.target.get_parents_with_ghosts(version))
                n2 = set(self.source.get_parents(version))
                # write a combined record to our history preserving the current
                # parents as first in the list
                new_parents = self.target.get_parents_with_ghosts(version) + list(n2.difference(n1))
                self.target.fix_parents(version, new_parents)
            return count
        finally:
            pb.finished()
InterVersionedFile.register_optimiser(WeaveToKnit)

class KnitSequenceMatcher(difflib.SequenceMatcher):
    """Knit tuned sequence matcher.

    This is based on profiling of difflib which indicated some improvements
    for our usage pattern.
    """

    def find_longest_match(self, alo, ahi, blo, bhi):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi].

        If isjunk is not defined:

        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
            alo <= i <= i+k <= ahi
            blo <= j <= j+k <= bhi
        and for all (i',j',k') meeting those conditions,
            k >= k'
            i <= i'
            and if i == i', j <= j'

        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.

        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        (0, 4, 5)

        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block. Then that block is extended as
        far as possible by matching (only) junk elements on both sides. So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.

        Here's the same example as before, but considering blanks to be
        junk. That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly. Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:

        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        (1, 0, 4)

        If no blocks match, return (alo, blo, 0).

        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        (0, 0, 0)
        """

        # CAUTION: stripping common prefix or suffix would be incorrect.
        # E.g.,
        #    ab
        #    acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle. That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.

        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
        besti, bestj, bestsize = alo, blo, 0
        # find longest junk-free match
        # during an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j]
        j2len = {}
        for i in xrange(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get
            newj2len = {}

            # changing b2j.get(a[i], nothing) to a try:KeyError pair produced
            # the following improvement
            #     704   0   4650.5320   2620.7410   bzrlib.knit:1336(find_longest_match)
            # +326674   0   1655.1210   1655.1210   +<method 'get' of 'dict' objects>
            #  +76519   0    374.6700    374.6700   +<method 'has_key' of 'dict' objects>
            # to
            #     704   0   3733.2820   2209.6520   bzrlib.knit:1336(find_longest_match)
            # +211400   0   1147.3520   1147.3520   +<method 'get' of 'dict' objects>
            #  +76519   0    376.2780    376.2780   +<method 'has_key' of 'dict' objects>
            try:
                js = b2j[a[i]]
            except KeyError:
                pass
            else:
                for j in js:
                    # a[i] matches b[j]
                    if j >= blo:
                        if j >= bhi:
                            break
        while True:
            try:
                records, ann_keys = self._get_build_graph(key)
                for idx, (sub_key, text, num_lines) in enumerate(
                        self._extract_texts(records)):
                    if pb is not None:
                        pb.update(gettext('annotating'), idx, len(records))
                    yield sub_key, text, num_lines
                for sub_key in ann_keys:
                    text = self._text_cache[sub_key]
                    num_lines = len(text) # bad assumption
                    yield sub_key, text, num_lines
                return
            except errors.RetryWithNewPacks, e:
                self._vf._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()
    def _cache_delta_blocks(self, key, compression_parent, delta, lines):
        parent_lines = self._text_cache[compression_parent]
        blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
        self._matching_blocks[(key, compression_parent)] = blocks
    def _expand_record(self, key, parent_keys, compression_parent, record,
                       record_details):
        delta = None
        if compression_parent:
            if compression_parent not in self._content_objects:
                # Waiting for the parent
                self._pending_deltas.setdefault(compression_parent, []).append(
                    (key, parent_keys, record, record_details))
                return None
            # We have the basis parent, so expand the delta
            num = self._num_compression_children[compression_parent]
            num -= 1
            if num == 0:
                base_content = self._content_objects.pop(compression_parent)
                self._num_compression_children.pop(compression_parent)
            else:
                self._num_compression_children[compression_parent] = num
                base_content = self._content_objects[compression_parent]
            # It is tempting to want to copy_base_content=False for the last
            # child object. However, whenever noeol=False,
            # self._text_cache[parent_key] is content._lines. So mutating it
            # gives very bad results.
            # The alternative is to copy the lines into text cache, but then we
            # are copying anyway, so just do it here.
            content, delta = self._vf._factory.parse_record(
                key, record, record_details, base_content,
                copy_base_content=True)
        else:
            # Fulltext record
            content, _ = self._vf._factory.parse_record(
                key, record, record_details, None)
        if self._num_compression_children.get(key, 0) > 0:
            self._content_objects[key] = content
        lines = content.text()
        self._text_cache[key] = lines
        if delta is not None:
            self._cache_delta_blocks(key, compression_parent, delta, lines)
        return lines
    def _get_parent_annotations_and_matches(self, key, text, parent_key):
        """Get the list of annotations for the parent, and the matching lines.

        :param text: The opaque value given by _get_needed_texts
        :param parent_key: The key for the parent text
        :return: (parent_annotations, matching_blocks)
            parent_annotations is a list as long as the number of lines in
                parent
            matching_blocks is a list of (parent_idx, text_idx, len) tuples
                indicating which lines match between the two texts
        """
        block_key = (key, parent_key)
        if block_key in self._matching_blocks:
            blocks = self._matching_blocks.pop(block_key)
            parent_annotations = self._annotations_cache[parent_key]
            return parent_annotations, blocks
        return annotate.Annotator._get_parent_annotations_and_matches(self,
            key, text, parent_key)
    def _process_pending(self, key):
        """The content for 'key' was just processed.

        Determine if there is any more pending work to be processed.
        """
        to_return = []
        if key in self._pending_deltas:
            compression_parent = key
            children = self._pending_deltas.pop(key)
            for child_key, parent_keys, record, record_details in children:
                lines = self._expand_record(child_key, parent_keys,
                                            compression_parent,
                                            record, record_details)
                if self._check_ready_for_annotations(child_key, parent_keys):
                    to_return.append(child_key)
        # Also check any children that are waiting for this parent to be
        # annotation ready
        if key in self._pending_annotation:
            children = self._pending_annotation.pop(key)
            to_return.extend([c for c, p_keys in children
                              if self._check_ready_for_annotations(c, p_keys)])
        return to_return
    def _check_ready_for_annotations(self, key, parent_keys):
        """Return True if this text is ready to be yielded.

        Otherwise, this will return False, and queue the text into
        self._pending_annotation
        """
        for parent_key in parent_keys:
            if parent_key not in self._annotations_cache:
                # still waiting on at least one parent text, so queue it up
                # Note that if there are multiple parents, we need to wait
                # for all of them.
                self._pending_annotation.setdefault(parent_key,
                    []).append((key, parent_keys))
                return False
        return True
    def _extract_texts(self, records):
        """Extract the various texts needed based on records"""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        # Basic algorithm:
        # 1) As 'records' are read, see if we can expand these records into
        #    Content objects (and thus lines)
        # 2) If a given line-delta is waiting on its compression parent, it
        #    gets queued up into self._pending_deltas, otherwise we expand
        #    it, and put it into self._text_cache and self._content_objects
        # 3) If we expanded the text, we will then check to see if all
        #    parents have also been processed. If so, this text gets yielded,
        #    else this record gets set aside into pending_annotation
        # 4) Further, if we expanded the text in (2), we will then check to
        #    see if there are any children in self._pending_deltas waiting to
        #    also be processed. If so, we go back to (2) for those
        # 5) Further again, if we yielded the text, we can then check if that
        #    'unlocks' any of the texts in pending_annotations, which should
        #    then get yielded as well
        # Note that both steps 4 and 5 are 'recursive' in that unlocking one
        # compression child could unlock yet another, and yielding a fulltext
        # will also 'unlock' the children that are waiting on that annotation.
        # (Though also, unlocking 1 parent's fulltext, does not unlock a child
        # if other parents are also waiting.)
        # We want to yield content before expanding child content objects, so
        # that we know when we can re-use the content lines, and the annotation
        # code can know when it can stop caching fulltexts, as well.

        # Children that are missing their compression parent
        for (key, record, digest) in self._vf._read_records_iter(records):
            # ghosts?
            details = self._all_build_details[key]
            (_, compression_parent, parent_keys, record_details) = details
            lines = self._expand_record(key, parent_keys, compression_parent,
                                        record, record_details)
            if lines is None:
                # Pending delta should be queued up
                continue
            # At this point, we may be able to yield this content, if all
            # parents are also finished
            yield_this_text = self._check_ready_for_annotations(key,
                                                                parent_keys)
            if yield_this_text:
                # All parents present
                yield key, lines, len(lines)
            to_process = self._process_pending(key)
            while to_process:
                this_process = to_process
                to_process = []
                for key in this_process:
                    lines = self._text_cache[key]
                    yield key, lines, len(lines)
                    to_process.extend(self._process_pending(key))
                        k = newj2len[j] = 1 + j2lenget(-1 + j, 0)
                        if k > bestsize:
                            besti, bestj, bestsize = 1 + i-k, 1 + j-k, k
            j2len = newj2len

        # Extend the best by non-junk elements on each end. In particular,
        # "popular" non-junk elements aren't in b2j, which greatly speeds
        # the inner loop above, but also means "the best" match so far
        # doesn't contain any junk *or* popular non-junk elements.
        while besti > alo and bestj > blo and \
              not isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              not isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize += 1

        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too. Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it. In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1

        return besti, bestj, bestsize
try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._knit_load_data_py import _load_data_py as _load_data