DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
_STREAM_MIN_BUFFER_SIZE = 5*1024*1024

class KnitAdapter(object):
    """Base class for knit record adaption."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas from.
            May be None for adapters that do not need to access basis texts.
        """
        self._data = KnitVersionedFiles(None, None)
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._basis_vf = basis_vf

class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content = self._annotate_factory.parse_fulltext(contents, rec[1])
        size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
        return bytes

class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        contents = self._plain_factory.lower_line_delta(delta)
        size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
        return bytes

class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to plain full texts."""

    def get_bytes(self, factory):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content, delta = self._annotate_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())

class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter for deltas from annotated knits to plain full texts."""

    def get_bytes(self, factory):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0]
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_chunks = basis_entry.get_bytes_as('chunked')
        basis_lines = osutils.chunks_to_lines(basis_chunks)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        basis_content._should_strip_eol = factory._build_details[1]
        return ''.join(basis_content.text())

class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to full texts."""

    def get_bytes(self, factory):
        compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        content, delta = self._plain_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())

class DeltaPlainToFullText(KnitAdapter):
    """An adapter for deltas from plain knits to full texts."""

    def get_bytes(self, factory):
        compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0]
        # XXX: string splitting overhead.
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_chunks = basis_entry.get_bytes_as('chunked')
        basis_lines = osutils.chunks_to_lines(basis_chunks)
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        return ''.join(content.text())
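
# Illustrative sketch (not part of the module API): callers normally reach the
# adapters above through adapter_registry rather than constructing them by
# hand.  Assuming 'record' is a knit content factory whose storage_kind is
# 'knit-annotated-ft-gz', a plain full text can be recovered roughly like so:
#
#   adapter_factory = adapter_registry.get(('knit-annotated-ft-gz', 'fulltext'))
#   adapter = adapter_factory(None)  # fulltext adapters need no basis_vf
#   text = adapter.get_bytes(record)
#
# Delta adapters are built the same way, but must be given a basis versioned
# file so they can fetch the text of the compression parent.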

class KnitContentFactory(ContentFactory):
    """Content factory for streaming from knits.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, build_details, sha1, raw_record,
        annotated, knit=None, network_bytes=None):
        """Create a KnitContentFactory for key.

        :param parents: The parents.
        :param build_details: The build details as returned from
            get_build_details.
        :param sha1: The sha1 expected from the full text of this object.
        :param raw_record: The bytes of the knit data from disk.
        :param annotated: True if the raw data is annotated.
        :param network_bytes: None to calculate the network bytes on demand,
            not-none if they are already known.
        """
        ContentFactory.__init__(self)
        self.sha1 = sha1
        self.key = key
        self.parents = parents
        if build_details[0] == 'line-delta':
            kind = 'delta'
        else:
            kind = 'ft'
        if annotated:
            annotated_kind = 'annotated-'
        else:
            annotated_kind = ''
        self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
        self._raw_record = raw_record
        self._network_bytes = network_bytes
        self._build_details = build_details
        self._knit = knit

    def _create_network_bytes(self):
        """Create a fully serialised network version for transmission."""
        # storage_kind, key, parents, Noeol, raw_record
        key_bytes = '\x00'.join(self.key)
        if self.parents is None:
            parent_bytes = 'None:'
        else:
            parent_bytes = '\t'.join('\x00'.join(key) for key in self.parents)
        if self._build_details[1]:
            noeol = 'N'
        else:
            noeol = ' '
        network_bytes = "%s\n%s\n%s\n%s%s" % (self.storage_kind, key_bytes,
            parent_bytes, noeol, self._raw_record)
        self._network_bytes = network_bytes

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._network_bytes is None:
                self._create_network_bytes()
            return self._network_bytes
        if ('-ft-' in self.storage_kind and
            storage_kind in ('chunked', 'fulltext')):
            adapter_key = (self.storage_kind, 'fulltext')
            adapter_factory = adapter_registry.get(adapter_key)
            adapter = adapter_factory(None)
            bytes = adapter.get_bytes(self)
            if storage_kind == 'chunked':
                return [bytes]
            else:
                return bytes
        if self._knit is not None:
            # Not redundant with direct conversion above - that only handles
            # fulltext cases.
            if storage_kind == 'chunked':
                return self._knit.get_lines(self.key[0])
            elif storage_kind == 'fulltext':
                return self._knit.get_text(self.key[0])
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)

class LazyKnitContentFactory(ContentFactory):
    """A ContentFactory which can either generate full text or a wire form.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, generator, first):
        """Create a LazyKnitContentFactory.

        :param key: The key of the record.
        :param parents: The parents of the record.
        :param generator: A _ContentMapGenerator containing the record for this
            key.
        :param first: Is this the first content object returned from generator?
            if it is, its storage kind is knit-delta-closure, otherwise it is
            knit-delta-closure-ref
        """
        self.key = key
        self.parents = parents
        self._generator = generator
        self.storage_kind = "knit-delta-closure"
        if not first:
            self.storage_kind = self.storage_kind + "-ref"
        self._first = first

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._first:
                return self._generator._wire_bytes()
            else:
                # all the keys etc are contained in the bytes returned in the
                # first record.
                return ''
        if storage_kind in ('chunked', 'fulltext'):
            chunks = self._generator._get_one_work(self.key).text()
            if storage_kind == 'chunked':
                return chunks
            else:
                return ''.join(chunks)
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)

def knit_delta_closure_to_records(storage_kind, bytes, line_end):
    """Convert a network record to an iterator over stream records.

    :param storage_kind: The storage kind of the record.
        Must be 'knit-delta-closure'.
    :param bytes: The bytes of the record on the network.
    """
    generator = _NetworkContentMapGenerator(bytes, line_end)
    return generator.get_record_stream()

def knit_network_to_record(storage_kind, bytes, line_end):
    """Convert a network record to a record object.

    :param storage_kind: The storage kind of the record.
    :param bytes: The bytes of the record on the network.
    """
    start = line_end
    line_end = bytes.find('\n', start)
    key = tuple(bytes[start:line_end].split('\x00'))
    start = line_end + 1
    line_end = bytes.find('\n', start)
    parent_line = bytes[start:line_end]
    if parent_line == 'None:':
        parents = None
    else:
        parents = tuple(
            [tuple(segment.split('\x00')) for segment in parent_line.split('\t')
             if segment])
    start = line_end + 1
    noeol = bytes[start] == 'N'
    if 'ft' in storage_kind:
        method = 'fulltext'
    else:
        method = 'line-delta'
    build_details = (method, noeol)
    start = start + 1
    raw_record = bytes[start:]
    annotated = 'annotated' in storage_kind
    return [KnitContentFactory(key, parents, build_details, None, raw_record,
        annotated, network_bytes=bytes)]
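
# Wire layout sketch, derived from KnitContentFactory._create_network_bytes
# above: a serialised record is
#
#   <storage_kind> '\n'
#   <key elements joined by '\x00'> '\n'
#   <parent keys, '\x00' within a key and '\t' between keys, or 'None:'> '\n'
#   <'N' if the text has no trailing EOL, otherwise a space><raw knit record>
#
# knit_network_to_record parses exactly this layout back into a
# KnitContentFactory.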

class KnitContent(object):
    """Content of a knit version to which deltas can be applied.

    This is always stored in memory as a list of lines with \n at the end,
    plus a flag saying if the final ending is really there or not, because that
    corresponds to the on-disk knit representation.
    """

    def __init__(self):
        self._should_strip_eol = False

    def annotate(self):
        """Return a list of (origin, text) tuples."""
        return list(self.annotate_iter())

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        out.extend(lines)

    def annotate(self, knit, key):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate_flat(key)

def make_file_factory(annotated, mapper):
    """Create a factory for creating a file based KnitVersionedFiles.

    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.

    :param annotated: knit annotations are wanted.
    :param mapper: The mapper from keys to paths.
    """
    def factory(transport):
        index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
        access = _KnitKeyAccess(transport, mapper)
        return KnitVersionedFiles(index, access, annotated=annotated)
    return factory
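
# Usage sketch (hypothetical transport and mapper objects):
#
#   factory = make_file_factory(annotated=True, mapper=mapper)
#   vf = factory(transport)
#   vf.add_lines(('file-id', 'rev-1'), (), ['hello\n'])
#
# add_lines is defined on KnitVersionedFiles below; keys are tuples and the
# parents argument is a (possibly empty) sequence of keys.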

def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based VersionedFiles.

    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph or delta
        ref_length = 0
        if graph:
            ref_length += 1
        if delta:
            ref_length += 1
            max_delta_chain = 200
        else:
            max_delta_chain = 0
        graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
            deltas=delta, add_callback=graph_index.add_nodes)
        access = pack_repo._DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = KnitVersionedFiles(index, access,
            max_delta_chain=max_delta_chain)
        result.stream = stream
        result.writer = writer
        return result
    return factory

def cleanup_pack_knit(versioned_files):
    versioned_files.stream.close()
    versioned_files.writer.end()
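
# Pack-backed lifecycle sketch (assuming 'transport' supports write streams):
#
#   factory = make_pack_factory(graph=True, delta=True, keylength=1)
#   vf = factory(transport)
#   vf.add_lines(('rev-1',), (), ['a line\n'])
#   cleanup_pack_knit(vf)  # closes the write stream and ends the container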

def _get_total_build_size(self, keys, positions):
    """Determine the total bytes to build these keys.

    (helper function because _KnitGraphIndex and _KndxIndex work the same, but
    don't inherit from a common base.)

    :param keys: Keys that we want to build
    :param positions: dict of {key: (info, index_memo, comp_parent)} (such
        as returned by _get_components_positions)
    :return: Number of bytes to build those keys
    """
    all_build_index_memos = {}
    build_keys = keys
    while build_keys:
        next_keys = set()
        for key in build_keys:
            # This is mostly for the 'stacked' case
            # Where we will be getting the data from a fallback
            if key not in positions:
                continue
            _, index_memo, compression_parent = positions[key]
            all_build_index_memos[key] = index_memo
            if compression_parent not in all_build_index_memos:
                next_keys.add(compression_parent)
        build_keys = next_keys
    return sum([index_memo[2] for index_memo
                in all_build_index_memos.itervalues()])
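
# Illustrative call with hypothetical data: given positions
#   {('k1',): ('fulltext', (idx, 0, 100), None),
#    ('k2',): ('line-delta', (idx, 100, 40), ('k1',))}
# building [('k2',)] pulls in its compression parent ('k1',) as well, so the
# total is 100 + 40 = 140 bytes (index_memo[2] is the record length).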

class KnitVersionedFiles(VersionedFiles):
    """Storage for many versioned files using knit compression.

    Backend storage is managed by indices and data objects.

    :ivar _index: A _KnitGraphIndex or similar that can describe the
        parents, graph, compression and data location of entries in this
        KnitVersionedFiles.  Note that this is only the index for
        *this* vfs; if there are fallbacks they must be queried separately.
    """

    def __init__(self, index, data_access, max_delta_chain=200,
        annotated=False, reload_func=None):
        """Create a KnitVersionedFiles with index and data_access.

        :param index: The index for the knit data.
        :param data_access: The access object to store and retrieve knit
            records.
        :param max_delta_chain: The maximum number of deltas to permit during
            insertion. Set to 0 to prohibit the use of deltas.
        :param annotated: Set to True to cause annotations to be calculated and
            stored during insertion.
        :param reload_func: A function that can be called if we think we need
            to reload the pack listing and try again. See
            'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
        """
428
def annotate_iter(self, knit, version_id):
429
return annotate_knit(knit, version_id)
432
def make_empty_knit(transport, relpath):
433
"""Construct a empty knit at the specified location."""
434
k = KnitVersionedFile(transport, relpath, 'w', KnitPlainFactory)
437
class KnitVersionedFile(VersionedFile):
438
"""Weave-like structure with faster random access.
440
A knit stores a number of texts and a summary of the relationships
441
between them. Texts are identified by a string version-id. Texts
442
are normally stored and retrieved as a series of lines, but can
443
also be passed as single strings.
445
Lines are stored with the trailing newline (if any) included, to
446
avoid special cases for files with no final newline. Lines are
447
composed of 8-bit characters, not unicode. The combination of
448
these approaches should mean any 'binary' file can be safely
449
stored and retrieved.
452
def __init__(self, relpath, transport, file_mode=None, access_mode=None,
453
factory=None, delta=True, create=False, create_parent_dir=False,
454
delay_create=False, dir_mode=None, index=None, access_method=None):
455
"""Construct a knit at location specified by relpath.
457
:param create: If not True, only open an existing knit.
458
:param create_parent_dir: If True, create the parent directory if
459
creating the file fails. (This is used for stores with
460
hash-prefixes that may not exist yet)
461
:param delay_create: The calling code is aware that the knit won't
462
actually be created until the first data is stored.
463
:param index: An index to use for the knit.
875
self._access = data_access
876
self._max_delta_chain = max_delta_chain
878
self._factory = KnitAnnotateFactory()
880
self._factory = KnitPlainFactory()
881
self._immediate_fallback_vfs = []
882
self._reload_func = reload_func
465
if access_mode is None:
467
super(KnitVersionedFile, self).__init__(access_mode)
468
assert access_mode in ('r', 'w'), "invalid mode specified %r" % access_mode
469
self.transport = transport
470
self.filename = relpath
471
self.factory = factory or KnitAnnotateFactory()
472
self.writable = (access_mode == 'w')
475
self._max_delta_chain = 200
478
self._index = _KnitIndex(transport, relpath + INDEX_SUFFIX,
479
access_mode, create=create, file_mode=file_mode,
480
create_parent_dir=create_parent_dir, delay_create=delay_create,
484
if access_method is None:
485
_access = _KnitAccess(transport, relpath + DATA_SUFFIX, file_mode, dir_mode,
486
((create and not len(self)) and delay_create), create_parent_dir)
488
_access = access_method
489
if create and not len(self) and not delay_create:
491
self._data = _KnitData(_access)
884
493
def __repr__(self):
885
return "%s(%r, %r)" % (
886
self.__class__.__name__,
890
def add_fallback_versioned_files(self, a_versioned_files):
891
"""Add a source of texts for texts not present in this knit.
893
:param a_versioned_files: A VersionedFiles object.
895
self._immediate_fallback_vfs.append(a_versioned_files)
897
def add_lines(self, key, parents, lines, parent_texts=None,
898
left_matching_blocks=None, nostore_sha=None, random_id=False,
900
"""See VersionedFiles.add_lines()."""
901
self._index._check_write_ok()
902
self._check_add(key, lines, random_id, check_content)
904
# The caller might pass None if there is no graph data, but kndx
905
# indexes can't directly store that, so we give them
906
# an empty tuple instead.
908
line_bytes = ''.join(lines)
909
return self._add(key, lines, parents,
910
parent_texts, left_matching_blocks, nostore_sha, random_id,
911
line_bytes=line_bytes)
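
    # Illustrative sketch: add_lines returns (sha1, text_length, content), and
    # when nostore_sha matches the text being added, _add (below) raises
    # errors.ExistingContent rather than storing a duplicate.
    #
    #   sha, length, _ = vf.add_lines(('rev-2',), [('rev-1',)], lines)
    #   try:
    #       vf.add_lines(('rev-3',), [('rev-1',)], lines, nostore_sha=sha)
    #   except errors.ExistingContent:
    #       pass  # identical text already stored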
913
def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
914
"""See VersionedFiles._add_text()."""
915
self._index._check_write_ok()
916
self._check_add(key, None, random_id, check_content=False)
917
if text.__class__ is not str:
918
raise errors.BzrBadParameterUnicode("text")
920
# The caller might pass None if there is no graph data, but kndx
921
# indexes can't directly store that, so we give them
922
# an empty tuple instead.
924
return self._add(key, None, parents,
925
None, None, nostore_sha, random_id,
928
def _add(self, key, lines, parents, parent_texts,
929
left_matching_blocks, nostore_sha, random_id,
931
"""Add a set of lines on top of version specified by parents.
933
Any versions not present will be converted into ghosts.
935
:param lines: A list of strings where each one is a single line (has a
936
single newline at the end of the string) This is now optional
937
(callers can pass None). It is left in its location for backwards
938
compatibility. If supplied, ''.join(lines) must equal line_bytes.
939
:param line_bytes: A single string containing the content
941
We pass both lines and line_bytes because different routes bring the
942
values to this function. And for memory efficiency, we don't want to
943
have to split/join on-demand.
945
# first thing, if the content is something we don't need to store, find
947
digest = sha_string(line_bytes)
948
if nostore_sha == digest:
949
raise errors.ExistingContent
952
if parent_texts is None:
954
# Do a single query to ascertain parent presence; we only compress
955
# against parents in the same kvf.
956
present_parent_map = self._index.get_parent_map(parents)
957
for parent in parents:
958
if parent in present_parent_map:
959
present_parents.append(parent)
961
# Currently we can only compress against the left most present parent.
962
if (len(present_parents) == 0 or
963
present_parents[0] != parents[0]):
966
# To speed the extract of texts the delta chain is limited
967
# to a fixed number of deltas. This should minimize both
968
# I/O and the time spent applying deltas.
969
delta = self._check_should_delta(present_parents[0])
971
text_length = len(line_bytes)
974
# Note: line_bytes is not modified to add a newline, that is tracked
975
# via the no_eol flag. 'lines' *is* modified, because that is the
976
# general values needed by the Content code.
977
if line_bytes and line_bytes[-1] != '\n':
978
options.append('no-eol')
980
# Copy the existing list, or create a new one
982
lines = osutils.split_lines(line_bytes)
985
# Replace the last line with one that ends in a final newline
986
lines[-1] = lines[-1] + '\n'
988
lines = osutils.split_lines(line_bytes)
990
for element in key[:-1]:
991
if type(element) is not str:
992
raise TypeError("key contains non-strings: %r" % (key,))
994
key = key[:-1] + ('sha1:' + digest,)
995
elif type(key[-1]) is not str:
996
raise TypeError("key contains non-strings: %r" % (key,))
997
# Knit hunks are still last-element only
999
content = self._factory.make(lines, version_id)
1001
# Hint to the content object that its text() call should strip the
1003
content._should_strip_eol = True
1004
if delta or (self._factory.annotated and len(present_parents) > 0):
1005
# Merge annotations from parent texts if needed.
1006
delta_hunks = self._merge_annotations(content, present_parents,
1007
parent_texts, delta, self._factory.annotated,
1008
left_matching_blocks)
1011
options.append('line-delta')
1012
store_lines = self._factory.lower_line_delta(delta_hunks)
1013
size, bytes = self._record_to_data(key, digest,
1016
options.append('fulltext')
1017
# isinstance is slower and we have no hierarchy.
1018
if self._factory.__class__ is KnitPlainFactory:
1019
# Use the already joined bytes saving iteration time in
1021
dense_lines = [line_bytes]
1023
dense_lines.append('\n')
1024
size, bytes = self._record_to_data(key, digest,
1027
# get mixed annotation + content and feed it into the
1029
store_lines = self._factory.lower_fulltext(content)
1030
size, bytes = self._record_to_data(key, digest,
1033
access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
1034
self._index.add_records(
1035
((key, options, access_memo, parents),),
1036
random_id=random_id)
1037
return digest, text_length, content
1039
def annotate(self, key):
1040
"""See VersionedFiles.annotate."""
1041
return self._factory.annotate(self, key)
1043
def get_annotator(self):
1044
return _KnitAnnotator(self)
1046
def check(self, progress_bar=None, keys=None):
1047
"""See VersionedFiles.check()."""
1049
return self._logical_check()
1051
# At the moment, check does not extra work over get_record_stream
1052
return self.get_record_stream(keys, 'unordered', True)
1054
def _logical_check(self):
1055
# This doesn't actually test extraction of everything, but that will
1056
# impact 'bzr check' substantially, and needs to be integrated with
1057
# care. However, it does check for the obvious problem of a delta with
1059
keys = self._index.keys()
1060
parent_map = self.get_parent_map(keys)
1062
if self._index.get_method(key) != 'fulltext':
1063
compression_parent = parent_map[key][0]
1064
if compression_parent not in parent_map:
1065
raise errors.KnitCorrupt(self,
1066
"Missing basis parent %s for %s" % (
1067
compression_parent, key))
1068
for fallback_vfs in self._immediate_fallback_vfs:
1069
fallback_vfs.check()
1071
def _check_add(self, key, lines, random_id, check_content):
1072
"""check that version_id and lines are safe to add."""
1073
version_id = key[-1]
1074
if version_id is not None:
1075
if contains_whitespace(version_id):
1076
raise InvalidRevisionId(version_id, self)
1077
self.check_not_reserved_id(version_id)
1078
# TODO: If random_id==False and the key is already present, we should
1079
# probably check that the existing content is identical to what is
1080
# being inserted, and otherwise raise an exception. This would make
1081
# the bundle code simpler.
1083
self._check_lines_not_unicode(lines)
1084
self._check_lines_are_lines(lines)
1086
def _check_header(self, key, line):
1087
rec = self._split_header(line)
1088
self._check_header_version(rec, key[-1])
1091
def _check_header_version(self, rec, version_id):
1092
"""Checks the header version on original format knit records.
1094
These have the last component of the key embedded in the record.
1096
if rec[1] != version_id:
1097
raise KnitCorrupt(self,
1098
'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
1100
def _check_should_delta(self, parent):
494
return '%s(%s)' % (self.__class__.__name__,
495
self.transport.abspath(self.filename))
497
def _check_should_delta(self, first_parents):
1101
498
"""Iterate back through the parent listing, looking for a fulltext.
1103
500
This is used when we want to decide whether to add a delta or a new
1112
509
fulltext_size = None
510
delta_parents = first_parents
1113
511
for count in xrange(self._max_delta_chain):
1115
# Note that this only looks in the index of this particular
1116
# KnitVersionedFiles, not in the fallbacks. This ensures that
1117
# we won't store a delta spanning physical repository
1119
build_details = self._index.get_build_details([parent])
1120
parent_details = build_details[parent]
1121
except (RevisionNotPresent, KeyError), e:
1122
# Some basis is not locally present: always fulltext
1124
index_memo, compression_parent, _, _ = parent_details
1125
_, _, size = index_memo
1126
if compression_parent is None:
512
parent = delta_parents[0]
513
method = self._index.get_method(parent)
514
index, pos, size = self._index.get_position(parent)
515
if method == 'fulltext':
1127
516
fulltext_size = size
1129
518
delta_size += size
1130
# We don't explicitly check for presence because this is in an
1131
# inner loop, and if it's missing it'll fail anyhow.
1132
parent = compression_parent
519
delta_parents = self._index.get_parents(parent)
1134
521
# We couldn't find a fulltext, so we must create a new one
1136
# Simple heuristic - if the total I/O would be greater as a delta than
1137
# the originally installed fulltext, we create a new fulltext.
1138
524
return fulltext_size > delta_size
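
    # Worked example of the heuristic above: with a basis fulltext of 100 bytes
    # and accumulated delta sizes of 30 + 25 = 55 bytes along the chain,
    # 100 > 55, so a delta is stored; once the chain's delta total exceeds the
    # fulltext size (or the chain length hits _max_delta_chain), a new fulltext
    # is written instead.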
1140
def _build_details_to_components(self, build_details):
1141
"""Convert a build_details tuple to a position tuple."""
1142
# record_details, access_memo, compression_parent
1143
return build_details[3], build_details[0], build_details[1]
1145
def _get_components_positions(self, keys, allow_missing=False):
1146
"""Produce a map of position data for the components of keys.
1148
This data is intended to be used for retrieving the knit records.
1150
A dict of key to (record_details, index_memo, next, parents) is
1152
method is the way referenced data should be applied.
1153
index_memo is the handle to pass to the data access to actually get the
1155
next is the build-parent of the version, or None for fulltexts.
1156
parents is the version_ids of the parents of this version
1158
:param allow_missing: If True do not raise an error on a missing component,
1162
pending_components = keys
1163
while pending_components:
1164
build_details = self._index.get_build_details(pending_components)
1165
current_components = set(pending_components)
1166
pending_components = set()
1167
for key, details in build_details.iteritems():
1168
(index_memo, compression_parent, parents,
1169
record_details) = details
1170
method = record_details[0]
1171
if compression_parent is not None:
1172
pending_components.add(compression_parent)
1173
component_data[key] = self._build_details_to_components(details)
1174
missing = current_components.difference(build_details)
1175
if missing and not allow_missing:
1176
raise errors.RevisionNotPresent(missing.pop(), self)
1177
return component_data
1179
def _get_content(self, key, parent_texts={}):
1180
"""Returns a content object that makes up the specified
1182
cached_version = parent_texts.get(key, None)
1183
if cached_version is not None:
1184
# Ensure the cache dict is valid.
1185
if not self.get_parent_map([key]):
1186
raise RevisionNotPresent(key, self)
1187
return cached_version
1188
generator = _VFContentMapGenerator(self, [key])
1189
return generator._get_content(key)
1191
def get_known_graph_ancestry(self, keys):
1192
"""Get a KnownGraph instance with the ancestry of keys."""
1193
parent_map, missing_keys = self._index.find_ancestry(keys)
1194
for fallback in self._transitive_fallbacks():
1195
if not missing_keys:
1197
(f_parent_map, f_missing_keys) = fallback._index.find_ancestry(
1199
parent_map.update(f_parent_map)
1200
missing_keys = f_missing_keys
1201
kg = _mod_graph.KnownGraph(parent_map)
1204
def get_parent_map(self, keys):
1205
"""Get a map of the graph parents of keys.
1207
:param keys: The keys to look up parents for.
1208
:return: A mapping from keys to parents. Absent keys are absent from
1211
return self._get_parent_map_with_sources(keys)[0]
1213
def _get_parent_map_with_sources(self, keys):
1214
"""Get a map of the parents of keys.
1216
:param keys: The keys to look up parents for.
1217
:return: A tuple. The first element is a mapping from keys to parents.
1218
Absent keys are absent from the mapping. The second element is a
1219
list with the locations each key was found in. The first element
1220
is the in-this-knit parents, the second the first fallback source,
1224
sources = [self._index] + self._immediate_fallback_vfs
1227
for source in sources:
1230
new_result = source.get_parent_map(missing)
1231
source_results.append(new_result)
1232
result.update(new_result)
1233
missing.difference_update(set(new_result))
1234
return result, source_results
1236
def _get_record_map(self, keys, allow_missing=False):
1237
"""Produce a dictionary of knit records.
1239
:return: {key:(record, record_details, digest, next)}
1241
data returned from read_records (a KnitContentobject)
1243
opaque information to pass to parse_record
1245
SHA1 digest of the full text after all steps are done
1247
build-parent of the version, i.e. the leftmost ancestor.
1248
Will be None if the record is not a delta.
1249
:param keys: The keys to build a map for
1250
:param allow_missing: If some records are missing, rather than
1251
error, just return the data that could be generated.
1253
raw_map = self._get_record_map_unparsed(keys,
1254
allow_missing=allow_missing)
1255
return self._raw_map_to_record_map(raw_map)
1257
def _raw_map_to_record_map(self, raw_map):
1258
"""Parse the contents of _get_record_map_unparsed.
1260
:return: see _get_record_map.
1264
data, record_details, next = raw_map[key]
1265
content, digest = self._parse_record(key[-1], data)
1266
result[key] = content, record_details, digest, next
1269
def _get_record_map_unparsed(self, keys, allow_missing=False):
1270
"""Get the raw data for reconstructing keys without parsing it.
1272
:return: A dict suitable for parsing via _raw_map_to_record_map.
1273
key-> raw_bytes, (method, noeol), compression_parent
1275
# This retries the whole request if anything fails. Potentially we
1276
# could be a bit more selective. We could track the keys whose records
1277
# we have successfully found, and then only request the new records
1278
# from there. However, _get_components_positions grabs the whole build
1279
# chain, which means we'll likely try to grab the same records again
1280
# anyway. Also, can the build chains change as part of a pack
1281
# operation? We wouldn't want to end up with a broken chain.
1284
position_map = self._get_components_positions(keys,
1285
allow_missing=allow_missing)
1286
# key = component_id, r = record_details, i_m = index_memo,
1288
records = [(key, i_m) for key, (r, i_m, n)
1289
in position_map.iteritems()]
1290
# Sort by the index memo, so that we request records from the
1291
# same pack file together, and in forward-sorted order
1292
records.sort(key=operator.itemgetter(1))
1294
for key, data in self._read_records_iter_unchecked(records):
1295
(record_details, index_memo, next) = position_map[key]
1296
raw_record_map[key] = data, record_details, next
1297
return raw_record_map
1298
except errors.RetryWithNewPacks, e:
1299
self._access.reload_or_raise(e)
1302
def _split_by_prefix(cls, keys):
1303
"""For the given keys, split them up based on their prefix.
1305
To keep memory pressure somewhat under control, split the
1306
requests back into per-file-id requests, otherwise "bzr co"
1307
extracts the full tree into memory before writing it to disk.
1308
This should be revisited if _get_content_maps() can ever cross
1311
The keys for a given file_id are kept in the same relative order.
1312
Ordering between file_ids is not preserved, though prefix_order will return the
1313
order that the key was first seen.
1315
:param keys: An iterable of key tuples
1316
:return: (split_map, prefix_order)
1317
split_map A dictionary mapping prefix => keys
1318
prefix_order The order that we saw the various prefixes
1320
split_by_prefix = {}
1328
if prefix in split_by_prefix:
1329
split_by_prefix[prefix].append(key)
1331
split_by_prefix[prefix] = [key]
1332
prefix_order.append(prefix)
1333
return split_by_prefix, prefix_order
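
    # Illustrative example (the prefix computation elided above groups keys by
    # their file-id component): for keys
    #   [('file-a', 'r1'), ('file-b', 'r1'), ('file-a', 'r2')]
    # the two 'file-a' keys stay together in their original relative order, and
    # prefix_order lists the 'file-a' prefix before 'file-b', matching the
    # order in which each prefix was first seen.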
1335
def _group_keys_for_io(self, keys, non_local_keys, positions,
1336
_min_buffer_size=_STREAM_MIN_BUFFER_SIZE):
1337
"""For the given keys, group them into 'best-sized' requests.
1339
The idea is to avoid making 1 request per file, but to never try to
1340
unpack an entire 1.5GB source tree in a single pass. Also when
1341
possible, we should try to group requests to the same pack file
1344
:return: list of (keys, non_local) tuples that indicate what keys
1345
should be fetched next.
1347
# TODO: Ideally we would group on 2 factors. We want to extract texts
1348
# from the same pack file together, and we want to extract all
1349
# the texts for a given build-chain together. Ultimately it
1350
# probably needs a better global view.
1351
total_keys = len(keys)
1352
prefix_split_keys, prefix_order = self._split_by_prefix(keys)
1353
prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys)
1355
cur_non_local = set()
1359
for prefix in prefix_order:
1360
keys = prefix_split_keys[prefix]
1361
non_local = prefix_split_non_local_keys.get(prefix, [])
1363
this_size = self._index._get_total_build_size(keys, positions)
1364
cur_size += this_size
1365
cur_keys.extend(keys)
1366
cur_non_local.update(non_local)
1367
if cur_size > _min_buffer_size:
1368
result.append((cur_keys, cur_non_local))
1369
sizes.append(cur_size)
1371
cur_non_local = set()
1374
result.append((cur_keys, cur_non_local))
1375
sizes.append(cur_size)
1378
def get_record_stream(self, keys, ordering, include_delta_closure):
1379
"""Get a stream of records for keys.
1381
:param keys: The keys to include.
1382
:param ordering: Either 'unordered' or 'topological'. A topologically
1383
sorted stream has compression parents strictly before their
1385
:param include_delta_closure: If True then the closure across any
1386
compression parents will be included (in the opaque data).
1387
:return: An iterator of ContentFactory objects, each of which is only
1388
valid until the iterator is advanced.
1390
# keys might be a generator
526
def _add_raw_records(self, records, data):
527
"""Add all the records 'records' with data pre-joined in 'data'.
529
:param records: A list of tuples(version_id, options, parents, size).
530
:param data: The data for the records. When it is written, the records
531
are adjusted to have pos pointing into data by the sum of
532
the preceding records sizes.
535
raw_record_sizes = [record[3] for record in records]
536
positions = self._data.add_raw_records(raw_record_sizes, data)
539
for (version_id, options, parents, size), access_memo in zip(
541
index_entries.append((version_id, options, access_memo, parents))
542
if self._data._do_cache:
543
self._data._cache[version_id] = data[offset:offset+size]
545
self._index.add_versions(index_entries)
547
def enable_cache(self):
548
"""Start caching data for this knit"""
549
self._data.enable_cache()
551
def clear_cache(self):
552
"""Clear the data cache only."""
553
self._data.clear_cache()
555
def copy_to(self, name, transport):
556
"""See VersionedFile.copy_to()."""
557
# copy the current index to a temp index to avoid racing with local
559
transport.put_file_non_atomic(name + INDEX_SUFFIX + '.tmp',
560
self.transport.get(self._index._filename))
562
f = self._data._open_file()
564
transport.put_file(name + DATA_SUFFIX, f)
567
# move the copied index into place
568
transport.move(name + INDEX_SUFFIX + '.tmp', name + INDEX_SUFFIX)
570
def create_empty(self, name, transport, mode=None):
571
return KnitVersionedFile(name, transport, factory=self.factory,
572
delta=self.delta, create=True)
574
def get_data_stream(self, required_versions):
575
"""Get a data stream for the specified versions.
577
Versions may be returned in any order, not necessarily the order
578
specified. They are returned in a partial order by compression
579
parent, so that the deltas can be applied as the data stream is
580
inserted; however note that compression parents will not be sent
581
unless they were specifically requested, as the client may already
584
:param required_versions: The exact set of versions to be extracted.
585
Unlike some other knit methods, this is not used to generate a
586
transitive closure, rather it is used precisely as given.
588
:returns: format_signature, list of (version, options, length, parents),
591
required_version_set = frozenset(required_versions)
593
# list of revisions that can just be sent without waiting for their
596
# map from revision to the children based on it
598
# first, read all relevant index data, enough to sort into the right
600
for version_id in required_versions:
601
options = self._index.get_options(version_id)
602
parents = self._index.get_parents_with_ghosts(version_id)
603
index_memo = self._index.get_position(version_id)
604
version_index[version_id] = (index_memo, options, parents)
605
if ('line-delta' in options
606
and parents[0] in required_version_set):
607
# must wait until the parent has been sent
608
deferred.setdefault(parents[0], []). \
611
# either a fulltext, or a delta whose parent the client did
612
# not ask for and presumably already has
613
ready_to_send.append(version_id)
614
# build a list of results to return, plus instructions for data to
616
copy_queue_records = []
617
temp_version_list = []
619
# XXX: pushing and popping lists may be a bit inefficient
620
version_id = ready_to_send.pop(0)
621
(index_memo, options, parents) = version_index[version_id]
622
copy_queue_records.append((version_id, index_memo))
623
none, data_pos, data_size = index_memo
624
temp_version_list.append((version_id, options, data_size,
626
if version_id in deferred:
627
# now we can send all the children of this revision - we could
628
# put them in anywhere, but we hope that sending them soon
629
# after the fulltext will give good locality in the receiver
630
ready_to_send[:0] = deferred.pop(version_id)
631
assert len(deferred) == 0, \
632
"Still have compressed child versions waiting to be sent"
633
# XXX: The stream format is such that we cannot stream it - we have to
634
# know the length of all the data a-priori.
636
result_version_list = []
637
for (version_id, raw_data), \
638
(version_id2, options, _, parents) in \
639
izip(self._data.read_records_iter_raw(copy_queue_records),
641
assert version_id == version_id2, \
642
'logic error, inconsistent results'
643
raw_datum.append(raw_data)
644
result_version_list.append(
645
(version_id, options, len(raw_data), parents))
646
# provide a callback to get data incrementally.
647
pseudo_file = StringIO(''.join(raw_datum))
650
return pseudo_file.read()
652
return pseudo_file.read(length)
653
return (self.get_format_signature(), result_version_list, read)
655
def _extract_blocks(self, version_id, source, target):
656
if self._index.get_method(version_id) != 'line-delta':
658
parent, sha1, noeol, delta = self.get_delta(version_id)
659
return KnitContent.get_line_delta_blocks(delta, source, target)
661
def get_delta(self, version_id):
662
"""Get a delta for constructing version from some other version."""
663
self.check_not_reserved_id(version_id)
664
parents = self.get_parents(version_id)
669
index_memo = self._index.get_position(version_id)
670
data, sha1 = self._data.read_records(((version_id, index_memo),))[version_id]
671
noeol = 'no-eol' in self._index.get_options(version_id)
672
if 'fulltext' == self._index.get_method(version_id):
673
new_content = self.factory.parse_fulltext(data, version_id)
674
if parent is not None:
675
reference_content = self._get_content(parent)
676
old_texts = reference_content.text()
679
new_texts = new_content.text()
680
delta_seq = patiencediff.PatienceSequenceMatcher(None, old_texts,
682
return parent, sha1, noeol, self._make_line_delta(delta_seq, new_content)
684
delta = self.factory.parse_line_delta(data, version_id)
685
return parent, sha1, noeol, delta
687
def get_format_signature(self):
688
"""See VersionedFile.get_format_signature()."""
689
if self.factory.annotated:
690
annotated_part = "annotated"
692
annotated_part = "plain"
693
return "knit-%s" % (annotated_part,)
695
def get_graph_with_ghosts(self):
696
"""See VersionedFile.get_graph_with_ghosts()."""
697
graph_items = self._index.get_graph()
698
return dict(graph_items)
700
def get_sha1(self, version_id):
701
return self.get_sha1s([version_id])[0]
703
def get_sha1s(self, version_ids):
704
"""See VersionedFile.get_sha1()."""
705
record_map = self._get_record_map(version_ids)
706
# record entry 2 is the 'digest'.
707
return [record_map[v][2] for v in version_ids]
711
"""See VersionedFile.get_suffixes()."""
712
return [DATA_SUFFIX, INDEX_SUFFIX]
714
def has_ghost(self, version_id):
715
"""True if there is a ghost reference in the file to version_id."""
717
if self.has_version(version_id):
719
# optimisable if needed by memoising the _ghosts set.
720
items = self._index.get_graph()
721
for node, parents in items:
722
for parent in parents:
723
if parent not in self._index._cache:
724
if parent == version_id:
728
def insert_data_stream(self, (format, data_list, reader_callable)):
729
"""Insert knit records from a data stream into this knit.
731
If a version in the stream is already present in this knit, it will not
732
be inserted a second time. It will be checked for consistency with the
733
stored version however, and may cause a KnitCorrupt error to be raised
734
if the data in the stream disagrees with the already stored data.
736
:seealso: get_data_stream
738
if format != self.get_format_signature():
739
trace.mutter('incompatible format signature inserting to %r', self)
740
source = self._knit_from_datastream(
741
(format, data_list, reader_callable))
1394
if not self._index.has_graph:
1395
# Cannot sort when no graph has been stored.
1396
ordering = 'unordered'
1398
remaining_keys = keys
1401
keys = set(remaining_keys)
1402
for content_factory in self._get_remaining_record_stream(keys,
1403
ordering, include_delta_closure):
1404
remaining_keys.discard(content_factory.key)
1405
yield content_factory
1407
except errors.RetryWithNewPacks, e:
1408
self._access.reload_or_raise(e)
1410
def _get_remaining_record_stream(self, keys, ordering,
1411
include_delta_closure):
1412
"""This function is the 'retry' portion for get_record_stream."""
1413
if include_delta_closure:
1414
positions = self._get_components_positions(keys, allow_missing=True)
1416
build_details = self._index.get_build_details(keys)
1418
# (record_details, access_memo, compression_parent_key)
1419
positions = dict((key, self._build_details_to_components(details))
1420
for key, details in build_details.iteritems())
1421
absent_keys = keys.difference(set(positions))
1422
# There may be more absent keys : if we're missing the basis component
1423
# and are trying to include the delta closure.
1424
# XXX: We should not ever need to examine remote sources because we do
1425
# not permit deltas across versioned files boundaries.
1426
if include_delta_closure:
1427
needed_from_fallback = set()
1428
# Build up reconstructable_keys dict. key:True in this dict means
1429
# the key can be reconstructed.
1430
reconstructable_keys = {}
1434
chain = [key, positions[key][2]]
1436
needed_from_fallback.add(key)
1439
while chain[-1] is not None:
1440
if chain[-1] in reconstructable_keys:
1441
result = reconstructable_keys[chain[-1]]
1445
chain.append(positions[chain[-1]][2])
1447
# missing basis component
1448
needed_from_fallback.add(chain[-1])
1451
for chain_key in chain[:-1]:
1452
reconstructable_keys[chain_key] = result
1454
needed_from_fallback.add(key)
1455
# Double index lookups here : need a unified api ?
1456
global_map, parent_maps = self._get_parent_map_with_sources(keys)
1457
if ordering in ('topological', 'groupcompress'):
1458
if ordering == 'topological':
1459
# Global topological sort
1460
present_keys = tsort.topo_sort(global_map)
1462
present_keys = sort_groupcompress(global_map)
1463
# Now group by source:
1465
current_source = None
1466
for key in present_keys:
1467
for parent_map in parent_maps:
1468
if key in parent_map:
1469
key_source = parent_map
1471
if current_source is not key_source:
1472
source_keys.append((key_source, []))
1473
current_source = key_source
1474
source_keys[-1][1].append(key)
1476
if ordering != 'unordered':
1477
raise AssertionError('valid values for ordering are:'
1478
' "unordered", "groupcompress" or "topological" not: %r'
1480
# Just group by source; remote sources first.
1483
for parent_map in reversed(parent_maps):
1484
source_keys.append((parent_map, []))
1485
for key in parent_map:
1486
present_keys.append(key)
1487
source_keys[-1][1].append(key)
1488
# We have been requested to return these records in an order that
1489
# suits us. So we ask the index to give us an optimally sorted
1491
for source, sub_keys in source_keys:
1492
if source is parent_maps[0]:
1493
# Only sort the keys for this VF
1494
self._index._sort_keys_by_io(sub_keys, positions)
1495
absent_keys = keys - set(global_map)
1496
for key in absent_keys:
1497
yield AbsentContentFactory(key)
1498
# restrict our view to the keys we can answer.
1499
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
1500
# XXX: At that point we need to consider the impact of double reads by
1501
# utilising components multiple times.
1502
if include_delta_closure:
1503
# XXX: get_content_maps performs its own index queries; allow state
1505
non_local_keys = needed_from_fallback - absent_keys
1506
for keys, non_local_keys in self._group_keys_for_io(present_keys,
1509
generator = _VFContentMapGenerator(self, keys, non_local_keys,
1512
for record in generator.get_record_stream():
1515
for source, keys in source_keys:
1516
if source is parent_maps[0]:
1517
# this KnitVersionedFiles
1518
records = [(key, positions[key][1]) for key in keys]
1519
for key, raw_data in self._read_records_iter_unchecked(records):
1520
(record_details, index_memo, _) = positions[key]
1521
yield KnitContentFactory(key, global_map[key],
1522
record_details, None, raw_data, self._factory.annotated, None)
1524
vf = self._immediate_fallback_vfs[parent_maps.index(source) - 1]
1525
for record in vf.get_record_stream(keys, ordering,
1526
include_delta_closure):
1529
def get_sha1s(self, keys):
1530
"""See VersionedFiles.get_sha1s()."""
1532
record_map = self._get_record_map(missing, allow_missing=True)
1534
for key, details in record_map.iteritems():
1535
if key not in missing:
1537
# record entry 2 is the 'digest'.
1538
result[key] = details[2]
1539
missing.difference_update(set(result))
1540
for source in self._immediate_fallback_vfs:
1543
new_result = source.get_sha1s(missing)
1544
result.update(new_result)
1545
missing.difference_update(set(new_result))
1548
def insert_record_stream(self, stream):
1549
"""Insert a record stream into this container.
1551
:param stream: A stream of records to insert.
1553
:seealso VersionedFiles.get_record_stream:
1555
def get_adapter(adapter_key):
1557
return adapters[adapter_key]
1559
adapter_factory = adapter_registry.get(adapter_key)
1560
adapter = adapter_factory(self)
1561
adapters[adapter_key] = adapter
1564
if self._factory.annotated:
1565
# self is annotated, we need annotated knits to use directly.
1566
annotated = "annotated-"
1569
# self is not annotated, but we can strip annotations cheaply.
1571
convertibles = set(["knit-annotated-ft-gz"])
1572
if self._max_delta_chain:
1573
delta_types.add("knit-annotated-delta-gz")
1574
convertibles.add("knit-annotated-delta-gz")
1575
# The set of types we can cheaply adapt without needing basis texts.
1576
native_types = set()
1577
if self._max_delta_chain:
1578
native_types.add("knit-%sdelta-gz" % annotated)
1579
delta_types.add("knit-%sdelta-gz" % annotated)
1580
native_types.add("knit-%sft-gz" % annotated)
1581
knit_types = native_types.union(convertibles)
1583
# Buffer all index entries that we can't add immediately because their
1584
# basis parent is missing. We don't buffer all because generating
1585
# annotations may require access to some of the new records. However we
1586
# can't generate annotations from new deltas until their basis parent
1587
# is present anyway, so we get away with not needing an index that
1588
# includes the new keys.
1590
# See <http://launchpad.net/bugs/300177> about ordering of compression
1591
# parents in the records - to be conservative, we insist that all
1592
# parents must be present to avoid expanding to a fulltext.
1594
# key = basis_parent, value = index entry to add
1595
buffered_index_entries = {}
1596
for record in stream:
1597
kind = record.storage_kind
1598
if kind.startswith('knit-') and kind.endswith('-gz'):
1599
# Check that the ID in the header of the raw knit bytes matches
1600
# the record metadata.
1601
raw_data = record._raw_record
1602
df, rec = self._parse_record_header(record.key, raw_data)
1605
parents = record.parents
1606
if record.storage_kind in delta_types:
1607
# TODO: eventually the record itself should track
1608
# compression_parent
1609
compression_parent = parents[0]
1611
compression_parent = None
1612
# Raise an error when a record is missing.
1613
if record.storage_kind == 'absent':
1614
raise RevisionNotPresent([record.key], self)
1615
elif ((record.storage_kind in knit_types)
1616
and (compression_parent is None
1617
or not self._immediate_fallback_vfs
1618
or self._index.has_key(compression_parent)
1619
or not self.has_key(compression_parent))):
1620
# we can insert the knit record literally if either it has no
1621
# compression parent OR we already have its basis in this kvf
1622
# OR the basis is not present even in the fallbacks. In the
1623
# last case it will either turn up later in the stream and all
1624
# will be well, or it won't turn up at all and we'll raise an
1627
# TODO: self.has_key is somewhat redundant with
1628
# self._index.has_key; we really want something that directly
1629
# asks if it's only present in the fallbacks. -- mbp 20081119
1630
if record.storage_kind not in native_types:
1632
adapter_key = (record.storage_kind, "knit-delta-gz")
1633
adapter = get_adapter(adapter_key)
1635
adapter_key = (record.storage_kind, "knit-ft-gz")
1636
adapter = get_adapter(adapter_key)
1637
bytes = adapter.get_bytes(record)
1639
# It's a knit record, it has a _raw_record field (even if
1640
# it was reconstituted from a network stream).
1641
bytes = record._raw_record
1642
options = [record._build_details[0]]
1643
if record._build_details[1]:
1644
options.append('no-eol')
1645
# Just blat it across.
1646
# Note: This does end up adding data on duplicate keys. As
1647
# modern repositories use atomic insertions this should not
1648
# lead to excessive growth in the event of interrupted fetches.
1649
# 'knit' repositories may suffer excessive growth, but as a
1650
# deprecated format this is tolerable. It can be fixed if
1651
# needed by in the kndx index support raising on a duplicate
1652
# add with identical parents and options.
1653
access_memo = self._access.add_raw_records(
1654
[(record.key, len(bytes))], bytes)[0]
1655
index_entry = (record.key, options, access_memo, parents)
1656
if 'fulltext' not in options:
1657
# Not a fulltext, so we need to make sure the compression
1658
# parent will also be present.
1659
# Note that pack backed knits don't need to buffer here
1660
# because they buffer all writes to the transaction level,
1661
# but we don't expose that difference at the index level. If
1662
# the query here has sufficient cost to show up in
1663
# profiling we should do that.
745
for version_id, options, length, parents in data_list:
746
if self.has_version(version_id):
747
# First check: the list of parents.
748
my_parents = self.get_parents_with_ghosts(version_id)
749
if my_parents != parents:
750
# XXX: KnitCorrupt is not quite the right exception here.
753
'parents list %r from data stream does not match '
754
'already recorded parents %r for %s'
755
% (parents, my_parents, version_id))
757
# Also check the SHA-1 of the fulltext this content will
759
raw_data = reader_callable(length)
760
my_fulltext_sha1 = self.get_sha1(version_id)
761
df, rec = self._data._parse_record_header(version_id, raw_data)
762
stream_fulltext_sha1 = rec[3]
763
if my_fulltext_sha1 != stream_fulltext_sha1:
764
# Actually, we don't know if it's this knit that's corrupt,
765
# or the data stream we're trying to insert.
767
self.filename, 'sha-1 does not match %s' % version_id)
769
if 'line-delta' in options:
770
# Make sure that this knit record is actually useful: a
771
# line-delta is no use unless we have its parent.
772
# Fetching from a broken repository with this problem
773
# shouldn't break the target repository.
1665
# They're required to be physically in this
1666
# KnitVersionedFiles, not in a fallback.
1667
if not self._index.has_key(compression_parent):
1668
pending = buffered_index_entries.setdefault(
1669
compression_parent, [])
1670
pending.append(index_entry)
1673
self._index.add_records([index_entry])
1674
elif record.storage_kind == 'chunked':
1675
self.add_lines(record.key, parents,
1676
osutils.chunks_to_lines(record.get_bytes_as('chunked')))
1678
# Not suitable for direct insertion as a
1679
# delta, either because it's not the right format, or this
1680
# KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
1681
# 0) or because it depends on a base only present in the
1683
self._access.flush()
1685
# Try getting a fulltext directly from the record.
1686
bytes = record.get_bytes_as('fulltext')
1687
except errors.UnavailableRepresentation:
1688
adapter_key = record.storage_kind, 'fulltext'
1689
adapter = get_adapter(adapter_key)
1690
bytes = adapter.get_bytes(record)
1691
lines = split_lines(bytes)
1693
self.add_lines(record.key, parents, lines)
1694
except errors.RevisionAlreadyPresent:
1696
# Add any records whose basis parent is now available.
1698
added_keys = [record.key]
1700
key = added_keys.pop(0)
1701
if key in buffered_index_entries:
1702
index_entries = buffered_index_entries[key]
1703
self._index.add_records(index_entries)
1705
[index_entry[0] for index_entry in index_entries])
1706
del buffered_index_entries[key]
1707
if buffered_index_entries:
1708
# There were index entries buffered at the end of the stream,
1709
# So these need to be added (if the index supports holding such
1710
# entries for later insertion)
1712
for key in buffered_index_entries:
1713
index_entries = buffered_index_entries[key]
1714
all_entries.extend(index_entries)
1715
self._index.add_records(
1716
all_entries, missing_compression_parents=True)
1718
def get_missing_compression_parent_keys(self):
1719
"""Return an iterable of keys of missing compression parents.
1721
Check this after calling insert_record_stream to find out if there are
1722
any missing compression parents. If there are, the records that
1723
depend on them are not able to be inserted safely. For atomic
1724
KnitVersionedFiles built on packs, the transaction should be aborted or
1725
suspended - commit will fail at this point. Nonatomic knits will error
1726
earlier because they have no staging area to put pending entries into.
1728
return self._index.get_missing_compression_parents()
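
    # Typical use (sketch): after streaming records in, check whether any
    # compression parents are still outstanding before committing.
    #
    #   vf.insert_record_stream(stream)
    #   if list(vf.get_missing_compression_parent_keys()):
    #       # abort or suspend the write group; the buffered records cannot
    #       # be reconstructed until their basis texts arrive
    #       ...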
1730
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1731
"""Iterate over the lines in the versioned files from keys.
1733
This may return lines from other keys. Each item the returned
1734
iterator yields is a tuple of a line and a text version that that line
1735
is present in (not introduced in).
1737
Ordering of results is in whatever order is most suitable for the
1738
underlying storage format.
1740
If a progress bar is supplied, it may be used to indicate progress.
1741
The caller is responsible for cleaning up progress bars (because this
1745
* Lines are normalised by the underlying store: they will all have \\n
1747
* Lines are returned in arbitrary order.
1748
* If a requested key did not change any lines (or didn't have any
1749
lines), it may not be mentioned at all in the result.
1751
:param pb: Progress bar supplied by caller.
1752
:return: An iterator over (line, key).
1755
pb = ui.ui_factory.nested_progress_bar()
1761
# we don't care about inclusions, the caller cares.
1762
# but we need to setup a list of records to visit.
1763
# we need key, position, length
1765
build_details = self._index.get_build_details(keys)
1766
for key, details in build_details.iteritems():
1768
key_records.append((key, details[0]))
1769
records_iter = enumerate(self._read_records_iter(key_records))
1770
for (key_idx, (key, data, sha_value)) in records_iter:
1771
pb.update('Walking content', key_idx, total)
1772
compression_parent = build_details[key][1]
1773
if compression_parent is None:
1775
line_iterator = self._factory.get_fulltext_content(data)
1778
line_iterator = self._factory.get_linedelta_content(data)
1779
# Now that we are yielding the data for this key, remove it
1782
# XXX: It might be more efficient to yield (key,
1783
# line_iterator) in the future. However for now, this is a
1784
# simpler change to integrate into the rest of the
1785
# codebase. RBC 20071110
1786
for line in line_iterator:
1789
except errors.RetryWithNewPacks, e:
1790
self._access.reload_or_raise(e)
1791
# If there are still keys we've not yet found, we look in the fallback
1792
# vfs, and hope to find them there. Note that if the keys are found
1793
# but had no changes or no content, the fallback may not return
1795
if keys and not self._immediate_fallback_vfs:
1796
# XXX: strictly the second parameter is meant to be the file id
1797
# but it's not easily accessible here.
1798
raise RevisionNotPresent(keys, repr(self))
1799
for source in self._immediate_fallback_vfs:
1803
for line, key in source.iter_lines_added_or_present_in_keys(keys):
1804
source_keys.add(key)
1806
keys.difference_update(source_keys)
1807
pb.update('Walking content', total, total)
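# Hedged usage sketch ('vf' and 'keys' are hypothetical): the iterator yields
# (line, key) pairs, so a caller can build a reverse map from line content to
# the texts it appears in.
#
#   line_to_keys = {}
#   for line, key in vf.iter_lines_added_or_present_in_keys(keys):
#       line_to_keys.setdefault(line, set()).add(key)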
1809
def _make_line_delta(self, delta_seq, new_content):
1810
"""Generate a line delta from delta_seq and new_content."""
1812
for op in delta_seq.get_opcodes():
1813
if op[0] == 'equal':
1815
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
775
# See https://bugs.launchpad.net/bzr/+bug/164443
776
if not self._index.has_version(parents[0]):
779
'line-delta from stream '
782
'missing parent %s\n'
783
'Try running "bzr check" '
784
'on the source repository, and "bzr reconcile" '
786
(version_id, parents[0]))
787
self._add_raw_records(
788
[(version_id, options, parents, length)],
789
reader_callable(length))
791
def _knit_from_datastream(self, (format, data_list, reader_callable)):
792
"""Create a knit object from a data stream.
794
This method exists to allow conversion of data streams that do not
795
match the signature of this knit. Generally it will be slower and use
796
more memory to use this method to insert data, but it will work.
798
:seealso: get_data_stream for details on datastreams.
799
:return: A knit versioned file which can be used to join the datastream
802
if format == "knit-plain":
803
factory = KnitPlainFactory()
804
elif format == "knit-annotated":
805
factory = KnitAnnotateFactory()
807
raise errors.KnitDataStreamUnknown(format)
808
index = _StreamIndex(data_list)
809
access = _StreamAccess(reader_callable, index, self, factory)
810
return KnitVersionedFile(self.filename, self.transport,
811
factory=factory, index=index, access_method=access)
814
"""See VersionedFile.versions."""
815
if 'evil' in debug.debug_flags:
816
trace.mutter_callsite(2, "versions scales with size of history")
817
return self._index.get_versions()
819
def has_version(self, version_id):
820
"""See VersionedFile.has_version."""
821
if 'evil' in debug.debug_flags:
822
trace.mutter_callsite(2, "has_version is a LBYL scenario")
823
return self._index.has_version(version_id)
825
__contains__ = has_version
1818
827
def _merge_annotations(self, content, parents, parent_texts={},
1819
828
delta=None, annotated=None,
1820
829
left_matching_blocks=None):
1821
"""Merge annotations for content and generate deltas.
1823
This is done by comparing the annotations based on changes to the text
1824
and generating a delta on the resulting full texts. If annotations are
1825
not being created then a simple delta is created.
1827
833
if left_matching_blocks is not None:
1828
834
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1830
836
delta_seq = None
1832
for parent_key in parents:
1833
merge_content = self._get_content(parent_key, parent_texts)
1834
if (parent_key == parents[0] and delta_seq is not None):
1837
843
seq = patiencediff.PatienceSequenceMatcher(
1860
858
None, old_texts, new_texts)
1861
859
return self._make_line_delta(delta_seq, content)
1863
def _parse_record(self, version_id, data):
1864
"""Parse an original format knit record.
1866
These have the last element of the key only present in the stored data.
1868
rec, record_contents = self._parse_record_unchecked(data)
1869
self._check_header_version(rec, version_id)
1870
return record_contents, rec[3]
1872
def _parse_record_header(self, key, raw_data):
1873
"""Parse a record header for consistency.
1875
:return: the header and the decompressor stream,
1876
as (stream, header_record).
1878
df = gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
1881
rec = self._check_header(key, df.readline())
1882
except Exception, e:
1883
raise KnitCorrupt(self,
1884
"While reading {%s} got %s(%s)"
1885
% (key, e.__class__.__name__, str(e)))
1888
def _parse_record_unchecked(self, data):
1890
# 4168 calls in 2880 217 internal
1891
# 4168 calls to _parse_record_header in 2121
1892
# 4168 calls to readlines in 330
1893
df = gzip.GzipFile(mode='rb', fileobj=StringIO(data))
1895
record_contents = df.readlines()
1896
except Exception, e:
1897
raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
1898
(data, e.__class__.__name__, str(e)))
1899
header = record_contents.pop(0)
1900
rec = self._split_header(header)
1901
last_line = record_contents.pop()
1902
if len(record_contents) != int(rec[2]):
1903
raise KnitCorrupt(self,
1904
'incorrect number of lines %s != %s'
1905
' for version {%s} %s'
1906
% (len(record_contents), int(rec[2]),
1907
rec[1], record_contents))
1908
if last_line != 'end %s\n' % rec[1]:
1909
raise KnitCorrupt(self,
1910
'unexpected version end line %r, wanted %r'
1911
% (last_line, rec[1]))
1913
return rec, record_contents
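# Illustrative sketch (not part of the original source; toy values only) of the
# on-disk layout parsed above: a gzip stream holding a "version <id> <count>
# <sha1>" header line, the text lines, and a trailing "end <id>" marker.
import gzip
from cStringIO import StringIO

def _example_record(version_id, digest, lines):
    sio = StringIO()
    gz = gzip.GzipFile(mode='wb', fileobj=sio)
    gz.writelines(["version %s %d %s\n" % (version_id, len(lines), digest)]
                  + lines + ["end %s\n" % version_id])
    gz.close()
    return sio.getvalue()

_data = _example_record('rev-1', '0' * 40, ['a line\n', 'another line\n'])
_recovered = gzip.GzipFile(mode='rb', fileobj=StringIO(_data)).readlines()
assert _recovered[0].startswith('version rev-1 2 ')
assert _recovered[-1] == 'end rev-1\n'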
1915
def _read_records_iter(self, records):
1916
"""Read text records from data file and yield result.
1918
The results are returned in whatever order is fastest to read,
1919
not in the order requested. Also, multiple requests for the same
1920
record will only yield 1 response.
1921
:param records: A list of (key, access_memo) entries
1922
:return: Yields (key, contents, digest) in the order
1923
read, not the order requested
1928
# XXX: This smells wrong, IO may not be getting ordered right.
1929
needed_records = sorted(set(records), key=operator.itemgetter(1))
1930
if not needed_records:
1933
# The transport optimizes the fetching as well
1934
# (ie, reads continuous ranges.)
1935
raw_data = self._access.get_raw_records(
1936
[index_memo for key, index_memo in needed_records])
1938
for (key, index_memo), data in \
1939
izip(iter(needed_records), raw_data):
1940
content, digest = self._parse_record(key[-1], data)
1941
yield key, content, digest
1943
def _read_records_iter_raw(self, records):
1944
"""Read text records from data file and yield raw data.
1946
This unpacks enough of the text record to validate the id is
1947
as expected, but that's all.
1949
Each item the iterator yields is (key, bytes,
1950
expected_sha1_of_full_text).
1952
for key, data in self._read_records_iter_unchecked(records):
1953
# validate the header (note that we can only use the suffix in
1954
# current knit records).
1955
df, rec = self._parse_record_header(key, data)
1957
yield key, data, rec[3]
1959
def _read_records_iter_unchecked(self, records):
1960
"""Read text records from data file and yield raw data.
1962
No validation is done.
1964
Yields tuples of (key, data).
1966
# setup an iterator of the external records:
1967
# uses readv so nice and fast we hope.
1969
# grab the disk data needed.
1970
needed_offsets = [index_memo for key, index_memo
1972
raw_records = self._access.get_raw_records(needed_offsets)
1974
for key, index_memo in records:
1975
data = raw_records.next()
1978
def _record_to_data(self, key, digest, lines, dense_lines=None):
1979
"""Convert key, digest, lines into a raw data block.
1981
:param key: The key of the record. Currently keys are always serialised
1982
using just the trailing component.
1983
:param dense_lines: The bytes of lines but in a denser form. For
1984
instance, if lines is a list of 1000 bytestrings each ending in \n,
1985
dense_lines may be a list with one line in it, containing all the
1986
1000's lines and their \n's. Using dense_lines if it is already
1987
known is a win because the string join to create bytes in this
1988
function spends less time resizing the final string.
1989
:return: (len, a StringIO instance with the raw data ready to read.)
1991
chunks = ["version %s %d %s\n" % (key[-1], len(lines), digest)]
1992
chunks.extend(dense_lines or lines)
1993
chunks.append("end %s\n" % key[-1])
1994
for chunk in chunks:
1995
if type(chunk) is not str:
1996
raise AssertionError(
1997
'data must be plain bytes was %s' % type(chunk))
1998
if lines and lines[-1][-1] != '\n':
1999
raise ValueError('corrupt lines value %r' % lines)
2000
compressed_bytes = tuned_gzip.chunks_to_gzip(chunks)
2001
return len(compressed_bytes), compressed_bytes
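# A minimal sketch of the dense_lines optimisation documented above (values are
# made up): when the caller already holds the joined text it can pass it as a
# one-item list, so the final join concatenates three chunks (header, body,
# end marker) instead of thousands of small lines.
_lines = ['first\n', 'second\n', 'third\n']
_line_bytes = ''.join(_lines)
assert ''.join(_lines) == ''.join([_line_bytes])
# e.g. self._record_to_data(key, digest, _lines, [_line_bytes])  # hypothetical call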
2003
def _split_header(self, line):
2006
raise KnitCorrupt(self,
2007
'unexpected number of elements in record header')
2011
"""See VersionedFiles.keys."""
2012
if 'evil' in debug.debug_flags:
2013
trace.mutter_callsite(2, "keys scales with size of history")
2014
sources = [self._index] + self._immediate_fallback_vfs
2016
for source in sources:
2017
result.update(source.keys())
2021
class _ContentMapGenerator(object):
2022
"""Generate texts or expose raw deltas for a set of texts."""
2024
def __init__(self, ordering='unordered'):
2025
self._ordering = ordering
2027
def _get_content(self, key):
2028
"""Get the content object for key."""
2029
# Note that _get_content is only called when the _ContentMapGenerator
2030
# has been constructed with just one key requested for reconstruction.
2031
if key in self.nonlocal_keys:
2032
record = self.get_record_stream().next()
2033
# Create a content object on the fly
2034
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
2035
return PlainKnitContent(lines, record.key)
861
def _make_line_delta(self, delta_seq, new_content):
862
"""Generate a line delta from delta_seq and new_content."""
864
for op in delta_seq.get_opcodes():
867
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
870
def _get_components_positions(self, version_ids):
871
"""Produce a map of position data for the components of versions.
873
This data is intended to be used for retrieving the knit records.
875
A dict of version_id to (method, data_pos, data_size, next) is returned, where:
877
method is the way referenced data should be applied.
878
data_pos is the position of the data in the knit.
879
data_size is the size of the data in the knit.
880
next is the build-parent of the version, or None for fulltexts.
883
for version_id in version_ids:
886
while cursor is not None and cursor not in component_data:
887
method = self._index.get_method(cursor)
888
if method == 'fulltext':
891
next = self.get_parents_with_ghosts(cursor)[0]
892
index_memo = self._index.get_position(cursor)
893
component_data[cursor] = (method, index_memo, next)
895
return component_data
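# Illustrative sketch (toy data) of consuming the map built above: follow the
# `next` build-parents back to a fulltext, then apply the deltas on top of it
# in reverse discovery order.
_component_data = {
    'rev-3': ('line-delta', 'memo-3', 'rev-2'),
    'rev-2': ('line-delta', 'memo-2', 'rev-1'),
    'rev-1': ('fulltext', 'memo-1', None),
}
_chain = []
_cursor = 'rev-3'
while _cursor is not None:
    _method, _index_memo, _next = _component_data[_cursor]
    _chain.append((_cursor, _method))
    _cursor = _next
assert list(reversed(_chain)) == [
    ('rev-1', 'fulltext'), ('rev-2', 'line-delta'), ('rev-3', 'line-delta')]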
897
def _get_content(self, version_id, parent_texts={}):
898
"""Returns a content object that makes up the specified
900
cached_version = parent_texts.get(version_id, None)
901
if cached_version is not None:
902
if not self.has_version(version_id):
903
raise RevisionNotPresent(version_id, self.filename)
904
return cached_version
906
text_map, contents_map = self._get_content_maps([version_id])
907
return contents_map[version_id]
909
def _check_versions_present(self, version_ids):
910
"""Check that all specified versions are present."""
911
self._index.check_versions_present(version_ids)
913
def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts,
914
nostore_sha, random_id, check_content):
915
"""See VersionedFile.add_lines_with_ghosts()."""
916
self._check_add(version_id, lines, random_id, check_content)
917
return self._add(version_id, lines, parents, self.delta,
918
parent_texts, None, nostore_sha, random_id)
920
def _add_lines(self, version_id, parents, lines, parent_texts,
921
left_matching_blocks, nostore_sha, random_id, check_content):
922
"""See VersionedFile.add_lines."""
923
self._check_add(version_id, lines, random_id, check_content)
924
self._check_versions_present(parents)
925
return self._add(version_id, lines[:], parents, self.delta,
926
parent_texts, left_matching_blocks, nostore_sha, random_id)
928
def _check_add(self, version_id, lines, random_id, check_content):
929
"""check that version_id and lines are safe to add."""
930
if contains_whitespace(version_id):
931
raise InvalidRevisionId(version_id, self.filename)
932
self.check_not_reserved_id(version_id)
933
# Technically this could be avoided if we are happy to allow duplicate
934
# id insertion when other things than bzr core insert texts, but it
935
# seems useful for folk using the knit api directly to have some safety
936
# blanket that we can disable.
937
if not random_id and self.has_version(version_id):
938
raise RevisionAlreadyPresent(version_id, self.filename)
940
self._check_lines_not_unicode(lines)
941
self._check_lines_are_lines(lines)
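# Hedged sketch of the invariants enforced above (toy values): version ids must
# not contain whitespace, and every line must be a plain (non-unicode) string.
_version_id = 'example-rev-1'
_lines = ['first line\n', 'second line\n']
assert all(c not in _version_id for c in ' \t\n')
assert all(isinstance(l, str) for l in _lines)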
943
def _add(self, version_id, lines, parents, delta, parent_texts,
944
left_matching_blocks, nostore_sha, random_id):
945
"""Add a set of lines on top of version specified by parents.
947
If delta is true, compress the text as a line-delta against the leftmost present parent.
950
Any versions not present will be converted into ghosts.
952
# first thing, if the content is something we don't need to store, find
954
line_bytes = ''.join(lines)
955
digest = sha_string(line_bytes)
956
if nostore_sha == digest:
957
raise errors.ExistingContent
960
if parent_texts is None:
962
for parent in parents:
963
if self.has_version(parent):
964
present_parents.append(parent)
966
# can only compress against the left most present parent.
968
(len(present_parents) == 0 or
969
present_parents[0] != parents[0])):
972
text_length = len(line_bytes)
975
if lines[-1][-1] != '\n':
976
# copy the contents of lines.
978
options.append('no-eol')
979
lines[-1] = lines[-1] + '\n'
983
# To speed the extract of texts the delta chain is limited
984
# to a fixed number of deltas. This should minimize both
985
# I/O and the time spend applying deltas.
986
delta = self._check_should_delta(present_parents)
988
assert isinstance(version_id, str)
989
content = self.factory.make(lines, version_id)
990
if delta or (self.factory.annotated and len(present_parents) > 0):
991
# Merge annotations from parent texts if needed.
992
delta_hunks = self._merge_annotations(content, present_parents,
993
parent_texts, delta, self.factory.annotated,
994
left_matching_blocks)
997
options.append('line-delta')
998
store_lines = self.factory.lower_line_delta(delta_hunks)
999
size, bytes = self._data._record_to_data(version_id, digest,
2037
# local keys we can ask for directly
2038
return self._get_one_work(key)
2040
def get_record_stream(self):
2041
"""Get a record stream for the keys requested during __init__."""
2042
for record in self._work():
2046
"""Produce maps of text and KnitContents as dicts.
1002
options.append('fulltext')
1003
# isinstance is slower and we have no hierarchy.
1004
if self.factory.__class__ == KnitPlainFactory:
1005
# Use the already joined bytes saving iteration time in
1007
size, bytes = self._data._record_to_data(version_id, digest,
1008
lines, [line_bytes])
1010
# get mixed annotation + content and feed it into the
1012
store_lines = self.factory.lower_fulltext(content)
1013
size, bytes = self._data._record_to_data(version_id, digest,
1016
access_memo = self._data.add_raw_records([size], bytes)[0]
1017
self._index.add_versions(
1018
((version_id, options, access_memo, parents),),
1019
random_id=random_id)
1020
return digest, text_length, content
1022
def check(self, progress_bar=None):
1023
"""See VersionedFile.check()."""
1025
def _clone_text(self, new_version_id, old_version_id, parents):
1026
"""See VersionedFile.clone_text()."""
1027
# FIXME RBC 20060228 make fast by only inserting an index with null
1029
self.add_lines(new_version_id, parents, self.get_lines(old_version_id))
1031
def get_lines(self, version_id):
1032
"""See VersionedFile.get_lines()."""
1033
return self.get_line_list([version_id])[0]
1035
def _get_record_map(self, version_ids):
1036
"""Produce a dictionary of knit records.
1038
The keys are version_ids, the values are tuples of (method, content,
1040
method is the way the content should be applied.
1041
content is a KnitContent object.
1042
digest is the SHA1 digest of this version id after all steps are done
1043
next is the build-parent of the version, i.e. the leftmost ancestor.
1044
If the method is fulltext, next will be None.
1046
position_map = self._get_components_positions(version_ids)
1047
# c = component_id, m = method, i_m = index_memo, n = next
1048
records = [(c, i_m) for c, (m, i_m, n) in position_map.iteritems()]
1050
for component_id, content, digest in \
1051
self._data.read_records_iter(records):
1052
method, index_memo, next = position_map[component_id]
1053
record_map[component_id] = method, content, digest, next
1057
def get_text(self, version_id):
1058
"""See VersionedFile.get_text"""
1059
return self.get_texts([version_id])[0]
1061
def get_texts(self, version_ids):
1062
return [''.join(l) for l in self.get_line_list(version_ids)]
1064
def get_line_list(self, version_ids):
1065
"""Return the texts of listed versions as a list of strings."""
1066
for version_id in version_ids:
1067
self.check_not_reserved_id(version_id)
1068
text_map, content_map = self._get_content_maps(version_ids)
1069
return [text_map[v] for v in version_ids]
1071
_get_lf_split_line_list = get_line_list
1073
def _get_content_maps(self, version_ids):
1074
"""Produce maps of text and KnitContents
2048
1076
:return: (text_map, content_map) where text_map contains the texts for
2049
the requested versions and content_map contains the KnitContents.
1078
Both dicts take version_ids as their keys.
2051
# NB: By definition we never need to read remote sources unless texts
2052
# are requested from them: we don't delta across stores - and we
2053
# explicitly do not want to to prevent data loss situations.
2054
if self.global_map is None:
2055
self.global_map = self.vf.get_parent_map(self.keys)
2056
nonlocal_keys = self.nonlocal_keys
2058
missing_keys = set(nonlocal_keys)
2059
# Read from remote versioned file instances and provide to our caller.
2060
for source in self.vf._immediate_fallback_vfs:
2061
if not missing_keys:
2063
# Loop over fallback repositories asking them for texts - ignore
2064
# any missing from a particular fallback.
2065
for record in source.get_record_stream(missing_keys,
2066
self._ordering, True):
2067
if record.storage_kind == 'absent':
2068
# Not in this particular stream; it may be in one of the
2069
# other fallback vfs objects.
2071
missing_keys.remove(record.key)
2074
if self._raw_record_map is None:
2075
raise AssertionError('_raw_record_map should have been filled')
2077
for key in self.keys:
2078
if key in self.nonlocal_keys:
2080
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2083
def _get_one_work(self, requested_key):
2084
# Now, if we have calculated everything already, just return the
2086
if requested_key in self._contents_map:
2087
return self._contents_map[requested_key]
2088
# To simplify things, parse everything at once - code that wants one text
2089
# probably wants them all.
2090
1080
# FUTURE: This function could be improved for the 'extract many' case
2091
1081
# by tracking each component and only doing the copy when the number of
2092
1082
# children than need to apply delta's to it is > 1 or it is part of the
2093
1083
# final output.
2094
multiple_versions = len(self.keys) != 1
2095
if self._record_map is None:
2096
self._record_map = self.vf._raw_map_to_record_map(
2097
self._raw_record_map)
2098
record_map = self._record_map
2099
# raw_record_map is key:
2100
# Have read and parsed records at this point.
2101
for key in self.keys:
2102
if key in self.nonlocal_keys:
1084
version_ids = list(version_ids)
1085
multiple_versions = len(version_ids) != 1
1086
record_map = self._get_record_map(version_ids)
1091
for version_id in version_ids:
2105
1092
components = []
2107
1094
while cursor is not None:
2109
record, record_details, digest, next = record_map[cursor]
2111
raise RevisionNotPresent(cursor, self)
2112
components.append((cursor, record, record_details, digest))
2114
if cursor in self._contents_map:
2115
# no need to plan further back
2116
components.append((cursor, None, None, None))
1095
method, data, digest, next = record_map[cursor]
1096
components.append((cursor, method, data, digest))
1097
if cursor in content_map:
2120
for (component_id, record, record_details,
2121
digest) in reversed(components):
2122
if component_id in self._contents_map:
2123
content = self._contents_map[component_id]
1102
for component_id, method, data, digest in reversed(components):
1103
if component_id in content_map:
1104
content = content_map[component_id]
2125
content, delta = self._factory.parse_record(key[-1],
2126
record, record_details, content,
2127
copy_base_content=multiple_versions)
1106
if method == 'fulltext':
1107
assert content is None
1108
content = self.factory.parse_fulltext(data, version_id)
1109
elif method == 'line-delta':
1110
delta = self.factory.parse_line_delta(data, version_id)
1111
if multiple_versions:
1112
# only doing this when we want multiple versions
1113
# output avoids list copies - which reference and
1114
# dereference many strings.
1115
content = content.copy()
1116
content.apply_delta(delta, version_id)
2128
1117
if multiple_versions:
2129
self._contents_map[component_id] = content
1118
content_map[component_id] = content
1120
if 'no-eol' in self._index.get_options(version_id):
1121
if multiple_versions:
1122
content = content.copy()
1123
content.strip_last_line_newline()
1124
final_content[version_id] = content
2131
1126
# digest here is the digest from the last applied component.
2132
1127
text = content.text()
2133
1128
actual_sha = sha_strings(text)
2134
1129
if actual_sha != digest:
2135
raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
2136
if multiple_versions:
2137
return self._contents_map[requested_key]
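# Hedged sketch: the sha check above (osutils.sha_strings) is equivalent to a
# sha1 over the concatenated lines; hashlib is used here purely for
# illustration, with made-up content.
import hashlib
_reconstructed = ['a line\n', 'another line\n']
_expected = hashlib.sha1('a line\nanother line\n').hexdigest()
assert hashlib.sha1(''.join(_reconstructed)).hexdigest() == _expected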
2141
def _wire_bytes(self):
2142
"""Get the bytes to put on the wire for 'key'.
2144
The first collection of bytes asked for returns the serialised
2145
raw_record_map and the additional details (key, parent) for key.
2146
Subsequent calls return just the additional details (key, parent).
2147
The wire storage_kind given for the first key is 'knit-delta-closure',
2148
For subsequent keys it is 'knit-delta-closure-ref'.
2150
:param key: A key from the content generator.
2151
:return: Bytes to put on the wire.
2154
# kind marker for dispatch on the far side,
2155
lines.append('knit-delta-closure')
2157
if self.vf._factory.annotated:
2158
lines.append('annotated')
2161
# then the list of keys
2162
lines.append('\t'.join(['\x00'.join(key) for key in self.keys
2163
if key not in self.nonlocal_keys]))
2164
# then the _raw_record_map in serialised form:
2166
# for each item in the map:
2168
# 1 line with parents if the key is to be yielded (None: for None, '' for ())
2169
# one line with method
2170
# one line with noeol
2171
# one line with next ('' for None)
2172
# one line with byte count of the record bytes
2174
for key, (record_bytes, (method, noeol), next) in \
2175
self._raw_record_map.iteritems():
2176
key_bytes = '\x00'.join(key)
2177
parents = self.global_map.get(key, None)
2179
parent_bytes = 'None:'
2181
parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
2182
method_bytes = method
2188
next_bytes = '\x00'.join(next)
2191
map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
2192
key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
2193
len(record_bytes), record_bytes))
2194
map_bytes = ''.join(map_byte_list)
2195
lines.append(map_bytes)
2196
bytes = '\n'.join(lines)
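# Illustrative sketch (toy values; the exact noeol encoding is defined above
# and left as a placeholder here) of the 'knit-delta-closure' payload built by
# this method: a kind marker, an optional 'annotated' marker, the tab-separated
# key list, then per-record blocks of key, parents, method, noeol, build-parent
# and a byte count followed by the raw record bytes.
_key_bytes = '\x00'.join(('file-id', 'rev-1'))
_record_bytes = 'raw gzipped record bytes'
_block = '%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
    _key_bytes, 'None:', 'fulltext', '<noeol-flag>', '', len(_record_bytes),
    _record_bytes)
_wire = '\n'.join(['knit-delta-closure', _key_bytes, _block])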
2200
class _VFContentMapGenerator(_ContentMapGenerator):
2201
"""Content map generator reading from a VersionedFiles object."""
2203
def __init__(self, versioned_files, keys, nonlocal_keys=None,
2204
global_map=None, raw_record_map=None, ordering='unordered'):
2205
"""Create a _ContentMapGenerator.
2207
:param versioned_files: The versioned files that the texts are being
2209
:param keys: The keys to produce content maps for.
2210
:param nonlocal_keys: An iterable of keys (possibly intersecting keys)
2211
which are known to not be in this knit, but rather in one of the
2213
:param global_map: The result of get_parent_map(keys) (or a supermap).
2214
This is required if get_record_stream() is to be used.
2215
:param raw_record_map: An unparsed raw record map to use for answering
2218
_ContentMapGenerator.__init__(self, ordering=ordering)
2219
# The vf to source data from
2220
self.vf = versioned_files
2222
self.keys = list(keys)
2223
# Keys known to be in fallback vfs objects
2224
if nonlocal_keys is None:
2225
self.nonlocal_keys = set()
2227
self.nonlocal_keys = frozenset(nonlocal_keys)
2228
# Parents data for keys to be returned in get_record_stream
2229
self.global_map = global_map
2230
# The chunked lists for self.keys in text form
2232
# A cache of KnitContent objects used in extracting texts.
2233
self._contents_map = {}
2234
# All the knit records needed to assemble the requested keys as full
2236
self._record_map = None
2237
if raw_record_map is None:
2238
self._raw_record_map = self.vf._get_record_map_unparsed(keys,
2241
self._raw_record_map = raw_record_map
2242
# the factory for parsing records
2243
self._factory = self.vf._factory
2246
class _NetworkContentMapGenerator(_ContentMapGenerator):
2247
"""Content map generator sourced from a network stream."""
2249
def __init__(self, bytes, line_end):
2250
"""Construct a _NetworkContentMapGenerator from a bytes block."""
2252
self.global_map = {}
2253
self._raw_record_map = {}
2254
self._contents_map = {}
2255
self._record_map = None
2256
self.nonlocal_keys = []
2257
# Get access to record parsing facilities
2258
self.vf = KnitVersionedFiles(None, None)
2261
line_end = bytes.find('\n', start)
2262
line = bytes[start:line_end]
2263
start = line_end + 1
2264
if line == 'annotated':
2265
self._factory = KnitAnnotateFactory()
2267
self._factory = KnitPlainFactory()
2268
# list of keys to emit in get_record_stream
2269
line_end = bytes.find('\n', start)
2270
line = bytes[start:line_end]
2271
start = line_end + 1
2273
tuple(segment.split('\x00')) for segment in line.split('\t')
2275
# now a loop until the end. XXX: It would be nice if this was just a
2276
# bunch of the same records as get_record_stream(..., False) gives, but
2277
# there is a decent sized gap stopping that at the moment.
2281
line_end = bytes.find('\n', start)
2282
key = tuple(bytes[start:line_end].split('\x00'))
2283
start = line_end + 1
2284
# 1 line with parents (None: for None, '' for ())
2285
line_end = bytes.find('\n', start)
2286
line = bytes[start:line_end]
2291
[tuple(segment.split('\x00')) for segment in line.split('\t')
2293
self.global_map[key] = parents
2294
start = line_end + 1
2295
# one line with method
2296
line_end = bytes.find('\n', start)
2297
line = bytes[start:line_end]
2299
start = line_end + 1
2300
# one line with noeol
2301
line_end = bytes.find('\n', start)
2302
line = bytes[start:line_end]
2304
start = line_end + 1
2305
# one line with next ('' for None)
2306
line_end = bytes.find('\n', start)
2307
line = bytes[start:line_end]
2311
next = tuple(bytes[start:line_end].split('\x00'))
2312
start = line_end + 1
2313
# one line with byte count of the record bytes
2314
line_end = bytes.find('\n', start)
2315
line = bytes[start:line_end]
2317
start = line_end + 1
2319
record_bytes = bytes[start:start+count]
2320
start = start + count
2322
self._raw_record_map[key] = (record_bytes, (method, noeol), next)
2324
def get_record_stream(self):
2325
"""Get a record stream for for keys requested by the bytestream."""
2327
for key in self.keys:
2328
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2331
def _wire_bytes(self):
2335
class _KndxIndex(object):
2336
"""Manages knit index files
2338
The index is kept in memory and read on startup, to enable
1130
raise KnitCorrupt(self.filename,
1132
'\n of reconstructed text does not match'
1134
'\n for version %s' %
1135
(actual_sha, digest, version_id))
1136
text_map[version_id] = text
1137
return text_map, final_content
1139
def iter_lines_added_or_present_in_versions(self, version_ids=None,
1141
"""See VersionedFile.iter_lines_added_or_present_in_versions()."""
1142
if version_ids is None:
1143
version_ids = self.versions()
1145
pb = progress.DummyProgress()
1146
# we don't care about inclusions, the caller cares.
1147
# but we need to setup a list of records to visit.
1148
# we need version_id, position, length
1149
version_id_records = []
1150
requested_versions = set(version_ids)
1151
# filter for available versions
1152
for version_id in requested_versions:
1153
if not self.has_version(version_id):
1154
raise RevisionNotPresent(version_id, self.filename)
1155
# get a in-component-order queue:
1156
for version_id in self.versions():
1157
if version_id in requested_versions:
1158
index_memo = self._index.get_position(version_id)
1159
version_id_records.append((version_id, index_memo))
1161
total = len(version_id_records)
1162
for version_idx, (version_id, data, sha_value) in \
1163
enumerate(self._data.read_records_iter(version_id_records)):
1164
pb.update('Walking content.', version_idx, total)
1165
method = self._index.get_method(version_id)
1167
assert method in ('fulltext', 'line-delta')
1168
if method == 'fulltext':
1169
line_iterator = self.factory.get_fulltext_content(data)
1171
line_iterator = self.factory.get_linedelta_content(data)
1172
# XXX: It might be more efficient to yield (version_id,
1173
# line_iterator) in the future. However for now, this is a simpler
1174
# change to integrate into the rest of the codebase. RBC 20071110
1175
for line in line_iterator:
1176
yield line, version_id
1178
pb.update('Walking content.', total, total)
1180
def iter_parents(self, version_ids):
1181
"""Iterate through the parents for many version ids.
1183
:param version_ids: An iterable yielding version_ids.
1184
:return: An iterator that yields (version_id, parents). Requested
1185
version_ids not present in the versioned file are simply skipped.
1186
The order is undefined, allowing for different optimisations in
1187
the underlying implementation.
1189
return self._index.iter_parents(version_ids)
1191
def num_versions(self):
1192
"""See VersionedFile.num_versions()."""
1193
return self._index.num_versions()
1195
__len__ = num_versions
1197
def annotate_iter(self, version_id):
1198
"""See VersionedFile.annotate_iter."""
1199
return self.factory.annotate_iter(self, version_id)
1201
def get_parents(self, version_id):
1202
"""See VersionedFile.get_parents."""
1205
# 52554 calls in 1264 872 internal down from 3674
1207
return self._index.get_parents(version_id)
1209
raise RevisionNotPresent(version_id, self.filename)
1211
def get_parents_with_ghosts(self, version_id):
1212
"""See VersionedFile.get_parents."""
1214
return self._index.get_parents_with_ghosts(version_id)
1216
raise RevisionNotPresent(version_id, self.filename)
1218
def get_ancestry(self, versions, topo_sorted=True):
1219
"""See VersionedFile.get_ancestry."""
1220
if isinstance(versions, basestring):
1221
versions = [versions]
1224
return self._index.get_ancestry(versions, topo_sorted)
1226
def get_ancestry_with_ghosts(self, versions):
1227
"""See VersionedFile.get_ancestry_with_ghosts."""
1228
if isinstance(versions, basestring):
1229
versions = [versions]
1232
return self._index.get_ancestry_with_ghosts(versions)
1234
def plan_merge(self, ver_a, ver_b):
1235
"""See VersionedFile.plan_merge."""
1236
ancestors_b = set(self.get_ancestry(ver_b, topo_sorted=False))
1237
ancestors_a = set(self.get_ancestry(ver_a, topo_sorted=False))
1238
annotated_a = self.annotate(ver_a)
1239
annotated_b = self.annotate(ver_b)
1240
return merge._plan_annotate_merge(annotated_a, annotated_b,
1241
ancestors_a, ancestors_b)
1244
class _KnitComponentFile(object):
1245
"""One of the files used to implement a knit database"""
1247
def __init__(self, transport, filename, mode, file_mode=None,
1248
create_parent_dir=False, dir_mode=None):
1249
self._transport = transport
1250
self._filename = filename
1252
self._file_mode = file_mode
1253
self._dir_mode = dir_mode
1254
self._create_parent_dir = create_parent_dir
1255
self._need_to_create = False
1257
def _full_path(self):
1258
"""Return the full path to this file."""
1259
return self._transport.base + self._filename
1261
def check_header(self, fp):
1262
line = fp.readline()
1264
# An empty file can actually be treated as though the file doesn't
1266
raise errors.NoSuchFile(self._full_path())
1267
if line != self.HEADER:
1268
raise KnitHeaderError(badline=line,
1269
filename=self._transport.abspath(self._filename))
1272
return '%s(%s)' % (self.__class__.__name__, self._filename)
1275
class _KnitIndex(_KnitComponentFile):
1276
"""Manages knit index file.
1278
The index is already kept in memory and read on startup, to enable
2339
1279
fast lookups of revision information. The cursor of the index
2340
1280
file is always pointing to the end, making it easy to append
2383
1323
to ensure that records always start on new lines even if the last write was
2384
1324
interrupted. As a result it's normal for the last line in the index to be
2385
1325
missing a trailing newline. One can be added with no harmful effects.
2387
:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
2388
where prefix is e.g. the (fileid,) for .texts instances or () for
2389
constant-mapped things like .revisions, and the old state is
2390
tuple(history_vector, cache_dict). This is used to prevent having an
2391
ABI change with the C extension that reads .kndx files.
2394
1328
HEADER = "# bzr knit index 8\n"
2396
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
2397
"""Create a _KndxIndex on transport using mapper."""
2398
self._transport = transport
2399
self._mapper = mapper
2400
self._get_scope = get_scope
2401
self._allow_writes = allow_writes
2402
self._is_locked = is_locked
2404
self.has_graph = True
2406
def add_records(self, records, random_id=False, missing_compression_parents=False):
2407
"""Add multiple records to the index.
2409
:param records: a list of tuples:
2410
(key, options, access_memo, parents).
2411
:param random_id: If True the ids being added were randomly generated
2412
and no check for existence will be performed.
2413
:param missing_compression_parents: If True the records being added are
2414
only compressed against texts already in the index (or inside
2415
records). If False the records all refer to unavailable texts (or
2416
texts inside records) as compression parents.
2418
if missing_compression_parents:
2419
# It might be nice to get the edge of the records. But keys isn't
2421
keys = sorted(record[0] for record in records)
2422
raise errors.RevisionNotPresent(keys, self)
2424
for record in records:
2427
path = self._mapper.map(key) + '.kndx'
2428
path_keys = paths.setdefault(path, (prefix, []))
2429
path_keys[1].append(record)
2430
for path in sorted(paths):
2431
prefix, path_keys = paths[path]
2432
self._load_prefixes([prefix])
2434
orig_history = self._kndx_cache[prefix][1][:]
2435
orig_cache = self._kndx_cache[prefix][0].copy()
2438
for key, options, (_, pos, size), parents in path_keys:
2440
# kndx indices cannot be parentless.
2442
line = "\n%s %s %s %s %s :" % (
2443
key[-1], ','.join(options), pos, size,
2444
self._dictionary_compress(parents))
2445
if type(line) is not str:
2446
raise AssertionError(
2447
'data must be utf8 was %s' % type(line))
2449
self._cache_key(key, options, pos, size, parents)
2450
if len(orig_history):
2451
self._transport.append_bytes(path, ''.join(lines))
2453
self._init_index(path, lines)
2455
# If any problems happen, restore the original values and re-raise
2456
self._kndx_cache[prefix] = (orig_cache, orig_history)
2459
def scan_unvalidated_index(self, graph_index):
2460
"""See _KnitGraphIndex.scan_unvalidated_index."""
2461
# Because kndx files do not support atomic insertion via separate index
2462
# files, they do not support this method.
2463
raise NotImplementedError(self.scan_unvalidated_index)
2465
def get_missing_compression_parents(self):
2466
"""See _KnitGraphIndex.get_missing_compression_parents."""
2467
# Because kndx files do not support atomic insertion via separate index
2468
# files, they do not support this method.
2469
raise NotImplementedError(self.get_missing_compression_parents)
2471
def _cache_key(self, key, options, pos, size, parent_keys):
1330
# speed of knit parsing went from 280 ms to 280 ms with slots addition.
1331
# __slots__ = ['_cache', '_history', '_transport', '_filename']
1333
def _cache_version(self, version_id, options, pos, size, parents):
2472
1334
"""Cache a version record in the history array and index cache.
2474
1336
This is inlined into _load_data for performance. KEEP IN SYNC.
2475
1337
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
2479
version_id = key[-1]
2480
# last-element only for compatibility with the C load_data.
2481
parents = tuple(parent[-1] for parent in parent_keys)
2482
for parent in parent_keys:
2483
if parent[:-1] != prefix:
2484
raise ValueError("mismatched prefixes for %r, %r" % (
2486
cache, history = self._kndx_cache[prefix]
2487
1340
# only want the _history index to reference the 1st index entry
2488
1341
# for version_id
2489
if version_id not in cache:
2490
index = len(history)
2491
history.append(version_id)
1342
if version_id not in self._cache:
1343
index = len(self._history)
1344
self._history.append(version_id)
2493
index = cache[version_id][5]
2494
cache[version_id] = (version_id,
1346
index = self._cache[version_id][5]
1347
self._cache[version_id] = (version_id,
2501
def check_header(self, fp):
2502
line = fp.readline()
2504
# An empty file can actually be treated as though the file doesn't
2506
raise errors.NoSuchFile(self)
2507
if line != self.HEADER:
2508
raise KnitHeaderError(badline=line, filename=self)
2510
def _check_read(self):
2511
if not self._is_locked():
2512
raise errors.ObjectNotLocked(self)
2513
if self._get_scope() != self._scope:
2516
def _check_write_ok(self):
2517
"""Assert if not writes are permitted."""
2518
if not self._is_locked():
2519
raise errors.ObjectNotLocked(self)
2520
if self._get_scope() != self._scope:
2522
if self._mode != 'w':
2523
raise errors.ReadOnlyObjectDirtiedError(self)
2525
def get_build_details(self, keys):
2526
"""Get the method, index_memo and compression parent for keys.
2528
Ghosts are omitted from the result.
2530
:param keys: An iterable of keys.
2531
:return: A dict of key:(index_memo, compression_parent, parents,
2534
opaque structure to pass to read_records to extract the raw
2537
Content that this record is built upon, may be None
2539
Logical parents of this node
2541
extra information about the content which needs to be passed to
2542
Factory.parse_record
2544
parent_map = self.get_parent_map(keys)
2547
if key not in parent_map:
2549
method = self.get_method(key)
2550
parents = parent_map[key]
2551
if method == 'fulltext':
2552
compression_parent = None
2554
compression_parent = parents[0]
2555
noeol = 'no-eol' in self.get_options(key)
2556
index_memo = self.get_position(key)
2557
result[key] = (index_memo, compression_parent,
2558
parents, (method, noeol))
2561
def get_method(self, key):
2562
"""Return compression method of specified key."""
2563
options = self.get_options(key)
2564
if 'fulltext' in options:
2566
elif 'line-delta' in options:
2569
raise errors.KnitIndexUnknownMethod(self, options)
2571
def get_options(self, key):
2572
"""Return a list representing options.
2576
prefix, suffix = self._split_key(key)
2577
self._load_prefixes([prefix])
1354
def __init__(self, transport, filename, mode, create=False, file_mode=None,
1355
create_parent_dir=False, delay_create=False, dir_mode=None):
1356
_KnitComponentFile.__init__(self, transport, filename, mode,
1357
file_mode=file_mode,
1358
create_parent_dir=create_parent_dir,
1361
# position in _history is the 'official' index for a revision
1362
# but the values may have come from a newer entry.
1363
# so - wc -l of a knit index is != the number of unique names
2579
return self._kndx_cache[prefix][0][suffix][1]
2581
raise RevisionNotPresent(key, self)
2583
def find_ancestry(self, keys):
2584
"""See CombinedGraphIndex.find_ancestry()"""
2585
prefixes = set(key[:-1] for key in keys)
2586
self._load_prefixes(prefixes)
2589
missing_keys = set()
2590
pending_keys = list(keys)
2591
# This assumes that keys will not reference parents in a different
2592
# prefix, which is accurate so far.
2594
key = pending_keys.pop()
2595
if key in parent_map:
2599
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
2601
missing_keys.add(key)
2603
parent_keys = tuple([prefix + (suffix,)
2604
for suffix in suffix_parents])
2605
parent_map[key] = parent_keys
2606
pending_keys.extend([p for p in parent_keys
2607
if p not in parent_map])
2608
return parent_map, missing_keys
2610
def get_parent_map(self, keys):
2611
"""Get a map of the parents of keys.
2613
:param keys: The keys to look up parents for.
2614
:return: A mapping from keys to parents. Absent keys are absent from
1367
fp = self._transport.get(self._filename)
1369
# _load_data may raise NoSuchFile if the target knit is
1371
_load_data(self, fp)
1375
if mode != 'w' or not create:
1378
self._need_to_create = True
1380
self._transport.put_bytes_non_atomic(
1381
self._filename, self.HEADER, mode=self._file_mode)
1383
def get_graph(self):
1384
"""Return a list of the node:parents lists from this knit index."""
1385
return [(vid, idx[4]) for vid, idx in self._cache.iteritems()]
1387
def get_ancestry(self, versions, topo_sorted=True):
1388
"""See VersionedFile.get_ancestry."""
1389
# get a graph of all the mentioned versions:
1391
pending = set(versions)
1394
version = pending.pop()
1397
parents = [p for p in cache[version][4] if p in cache]
1399
raise RevisionNotPresent(version, self._filename)
1400
# if not completed and not a ghost
1401
pending.update([p for p in parents if p not in graph])
1402
graph[version] = parents
1405
return topo_sort(graph.items())
1407
def get_ancestry_with_ghosts(self, versions):
1408
"""See VersionedFile.get_ancestry_with_ghosts."""
1409
# get a graph of all the mentioned versions:
1410
self.check_versions_present(versions)
1413
pending = set(versions)
1415
version = pending.pop()
1417
parents = cache[version][4]
1423
pending.update([p for p in parents if p not in graph])
1424
graph[version] = parents
1425
return topo_sort(graph.items())
1427
def iter_parents(self, version_ids):
1428
"""Iterate through the parents for many version ids.
1430
:param version_ids: An iterable yielding version_ids.
1431
:return: An iterator that yields (version_id, parents). Requested
1432
version_ids not present in the versioned file are simply skipped.
1433
The order is undefined, allowing for different optimisations in
1434
the underlying implementation.
2617
# Parse what we need to up front, this potentially trades off I/O
2618
# locality (.kndx and .knit in the same block group for the same file
2619
# id) for less checking in inner loops.
2620
prefixes = set(key[:-1] for key in keys)
2621
self._load_prefixes(prefixes)
1436
for version_id in version_ids:
2626
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
1438
yield version_id, tuple(self.get_parents(version_id))
2627
1439
except KeyError:
2630
result[key] = tuple(prefix + (suffix,) for
2631
suffix in suffix_parents)
2634
def get_position(self, key):
2635
"""Return details needed to access the version.
2637
:return: a tuple (key, data position, size) to hand to the access
2638
logic to get the record.
2640
prefix, suffix = self._split_key(key)
2641
self._load_prefixes([prefix])
2642
entry = self._kndx_cache[prefix][0][suffix]
2643
return key, entry[2], entry[3]
2645
has_key = _mod_index._has_key_from_parent_map
2647
def _init_index(self, path, extra_lines=[]):
2648
"""Initialize an index."""
2650
sio.write(self.HEADER)
2651
sio.writelines(extra_lines)
2653
self._transport.put_file_non_atomic(path, sio,
2654
create_parent_dir=True)
2655
# self._create_parent_dir)
2656
# mode=self._file_mode,
2657
# dir_mode=self._dir_mode)
2660
"""Get all the keys in the collection.
2662
The keys are not ordered.
2665
# Identify all key prefixes.
2666
# XXX: A bit hacky, needs polish.
2667
if type(self._mapper) is ConstantMapper:
2671
for quoted_relpath in self._transport.iter_files_recursive():
2672
path, ext = os.path.splitext(quoted_relpath)
2674
prefixes = [self._mapper.unmap(path) for path in relpaths]
2675
self._load_prefixes(prefixes)
2676
for prefix in prefixes:
2677
for suffix in self._kndx_cache[prefix][1]:
2678
result.add(prefix + (suffix,))
2681
def _load_prefixes(self, prefixes):
2682
"""Load the indices for prefixes."""
2684
for prefix in prefixes:
2685
if prefix not in self._kndx_cache:
2686
# the load_data interface writes to these variables.
2689
self._filename = prefix
2691
path = self._mapper.map(prefix) + '.kndx'
2692
fp = self._transport.get(path)
2694
# _load_data may raise NoSuchFile if the target knit is
2696
_load_data(self, fp)
2699
self._kndx_cache[prefix] = (self._cache, self._history)
2704
self._kndx_cache[prefix] = ({}, [])
2705
if type(self._mapper) is ConstantMapper:
2706
# preserve behaviour for revisions.kndx etc.
2707
self._init_index(path)
2712
missing_keys = _mod_index._missing_keys_from_parent_map
2714
def _partition_keys(self, keys):
2715
"""Turn keys into a dict of prefix:suffix_list."""
2718
prefix_keys = result.setdefault(key[:-1], [])
2719
prefix_keys.append(key[-1])
2722
def _dictionary_compress(self, keys):
2723
"""Dictionary compress keys.
2725
:param keys: The keys to generate references to.
2726
:return: A string representation of keys. keys which are present are
2727
dictionary compressed, and others are emitted as fulltext with a
1442
def num_versions(self):
1443
return len(self._history)
1445
__len__ = num_versions
1447
def get_versions(self):
1448
"""Get all the versions in the file. not topologically sorted."""
1449
return self._history
1451
def _version_list_to_index(self, versions):
2732
1452
result_list = []
2733
prefix = keys[0][:-1]
2734
cache = self._kndx_cache[prefix][0]
2736
if key[:-1] != prefix:
2737
# kndx indices cannot refer across partitioned storage.
2738
raise ValueError("mismatched prefixes for %r" % keys)
2739
if key[-1] in cache:
1454
for version in versions:
1455
if version in cache:
2740
1456
# -- inlined lookup() --
2741
result_list.append(str(cache[key[-1]][5]))
1457
result_list.append(str(cache[version][5]))
2742
1458
# -- end lookup () --
2744
result_list.append('.' + key[-1])
1460
result_list.append('.' + version)
2745
1461
return ' '.join(result_list)
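# Illustrative sketch (made-up history) of the compression performed above:
# parents already present in the index are written as their position in
# _history, while unknown parents fall back to a ".<id>" fulltext reference.
_history = ['rev-1', 'rev-2']
_parents = ['rev-2', 'ghost-rev']
_compressed = ' '.join(
    str(_history.index(p)) if p in _history else '.' + p for p in _parents)
assert _compressed == '1 .ghost-rev'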
2747
def _reset_cache(self):
2748
# Possibly this should be a LRU cache. A dictionary from key_prefix to
2749
# (cache_dict, history_vector) for parsed kndx files.
2750
self._kndx_cache = {}
2751
self._scope = self._get_scope()
2752
allow_writes = self._allow_writes()
1463
def add_version(self, version_id, options, index_memo, parents):
1464
"""Add a version record to the index."""
1465
self.add_versions(((version_id, options, index_memo, parents),))
1467
def add_versions(self, versions, random_id=False):
1468
"""Add multiple versions to the index.
1470
:param versions: a list of tuples:
1471
(version_id, options, pos, size, parents).
1472
:param random_id: If True the ids being added were randomly generated
1473
and no check for existence will be performed.
1476
orig_history = self._history[:]
1477
orig_cache = self._cache.copy()
1480
for version_id, options, (index, pos, size), parents in versions:
1481
line = "\n%s %s %s %s %s :" % (version_id,
1485
self._version_list_to_index(parents))
1486
assert isinstance(line, str), \
1487
'content must be utf-8 encoded: %r' % (line,)
1489
self._cache_version(version_id, options, pos, size, parents)
1490
if not self._need_to_create:
1491
self._transport.append_bytes(self._filename, ''.join(lines))
1494
sio.write(self.HEADER)
1495
sio.writelines(lines)
1497
self._transport.put_file_non_atomic(self._filename, sio,
1498
create_parent_dir=self._create_parent_dir,
1499
mode=self._file_mode,
1500
dir_mode=self._dir_mode)
1501
self._need_to_create = False
1503
# If any problems happen, restore the original values and re-raise
1504
self._history = orig_history
1505
self._cache = orig_cache
1508
def has_version(self, version_id):
1509
"""True if the version is in the index."""
1510
return version_id in self._cache
1512
def get_position(self, version_id):
1513
"""Return details needed to access the version.
1515
.kndx indices do not support split-out data, so return None for the
1518
:return: a tuple (None, data position, size) to hand to the access
1519
logic to get the record.
1521
entry = self._cache[version_id]
1522
return None, entry[2], entry[3]
1524
def get_method(self, version_id):
1525
"""Return compression method of specified version."""
1527
options = self._cache[version_id][1]
1529
raise RevisionNotPresent(version_id, self._filename)
1530
if 'fulltext' in options:
2758
def _sort_keys_by_io(self, keys, positions):
2759
"""Figure out an optimal order to read the records for the given keys.
2761
Sort keys, grouped by index and sorted by position.
2763
:param keys: A list of keys whose records we want to read. This will be
2765
:param positions: A dict, such as the one returned by
2766
_get_components_positions()
1533
if 'line-delta' not in options:
1534
raise errors.KnitIndexUnknownMethod(self._full_path(), options)
1537
def get_options(self, version_id):
1538
"""Return a list representing options.
2769
def get_sort_key(key):
2770
index_memo = positions[key][1]
2771
# Group by prefix and position. index_memo[0] is the key, so it is
2772
# (file_id, revision_id) and we don't want to sort on revision_id,
2773
# index_memo[1] is the position, and index_memo[2] is the size,
2774
# which doesn't matter for the sort
2775
return index_memo[0][:-1], index_memo[1]
2776
return keys.sort(key=get_sort_key)
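# Hedged sketch (toy positions dict) of the ordering chosen above: keys are
# grouped by prefix and then read in ascending file offset, mirroring
# get_sort_key's (prefix, position) tuple; positions[key][1] is the index_memo.
_positions = {
    ('file-a', 'rev-2'): (None, (('file-a', 'rev-2'), 300, 40)),
    ('file-a', 'rev-1'): (None, (('file-a', 'rev-1'), 100, 50)),
    ('file-b', 'rev-9'): (None, (('file-b', 'rev-9'), 10, 20)),
}
_keys = list(_positions)
_keys.sort(key=lambda k: (_positions[k][1][0][:-1], _positions[k][1][1]))
assert _keys == [('file-a', 'rev-1'), ('file-a', 'rev-2'), ('file-b', 'rev-9')]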
2778
_get_total_build_size = _get_total_build_size
2780
def _split_key(self, key):
2781
"""Split key into a prefix and suffix."""
2782
return key[:-1], key[-1]
2785
class _KnitGraphIndex(object):
2786
"""A KnitVersionedFiles index layered on GraphIndex."""
2788
def __init__(self, graph_index, is_locked, deltas=False, parents=True,
2789
add_callback=None, track_external_parent_refs=False):
1542
return self._cache[version_id][1]
1544
def get_parents(self, version_id):
1545
"""Return parents of specified version ignoring ghosts."""
1546
return [parent for parent in self._cache[version_id][4]
1547
if parent in self._cache]
1549
def get_parents_with_ghosts(self, version_id):
1550
"""Return parents of specified version with ghosts."""
1551
return self._cache[version_id][4]
1553
def check_versions_present(self, version_ids):
1554
"""Check that all specified versions are present."""
1556
for version_id in version_ids:
1557
if version_id not in cache:
1558
raise RevisionNotPresent(version_id, self._filename)
1561
class KnitGraphIndex(object):
1562
"""A knit index that builds on GraphIndex."""
1564
def __init__(self, graph_index, deltas=False, parents=True, add_callback=None):
2790
1565
"""Construct a KnitGraphIndex on a graph_index.
2792
1567
:param graph_index: An implementation of bzrlib.index.GraphIndex.
2793
:param is_locked: A callback to check whether the object should answer
2795
1568
:param deltas: Allow delta-compressed records.
2796
:param parents: If True, record knits parents, if not do not record
2798
1569
:param add_callback: If not None, allow additions to the index and call
2799
1570
this callback with a list of added GraphIndex nodes:
2800
1571
[(node, value, node_refs), ...]
2801
:param is_locked: A callback, returns True if the index is locked and
2803
:param track_external_parent_refs: If True, record all external parent
2804
references parents from added records. These can be retrieved
2805
later by calling get_missing_parents().
1572
:param parents: If True, record knits parents, if not do not record
2807
self._add_callback = add_callback
2808
1575
self._graph_index = graph_index
2809
1576
self._deltas = deltas
1577
self._add_callback = add_callback
2810
1578
self._parents = parents
2811
1579
if deltas and not parents:
2812
# XXX: TODO: Delta tree and parent graph should be conceptually
2814
1580
raise KnitCorrupt(self, "Cannot do delta compression without "
2815
1581
"parent tracking.")
2816
self.has_graph = parents
2817
self._is_locked = is_locked
2818
self._missing_compression_parents = set()
2819
if track_external_parent_refs:
2820
self._key_dependencies = _KeyRefs()
2822
self._key_dependencies = None
2825
return "%s(%r)" % (self.__class__.__name__, self._graph_index)
2827
def add_records(self, records, random_id=False,
2828
missing_compression_parents=False):
2829
"""Add multiple records to the index.
1583
def _get_entries(self, keys, check_present=False):
1584
"""Get the entries for keys.
1586
:param keys: An iterable of index keys, - 1-tuples.
1591
for node in self._graph_index.iter_entries(keys):
1593
found_keys.add(node[1])
1595
# adapt parentless index to the rest of the code.
1596
for node in self._graph_index.iter_entries(keys):
1597
yield node[0], node[1], node[2], ()
1598
found_keys.add(node[1])
1600
missing_keys = keys.difference(found_keys)
1602
raise RevisionNotPresent(missing_keys.pop(), self)
1604
def _present_keys(self, version_ids):
1606
node[1] for node in self._get_entries(version_ids)])
1608
def _parentless_ancestry(self, versions):
1609
"""Honour the get_ancestry API for parentless knit indices."""
1610
wanted_keys = self._version_ids_to_keys(versions)
1611
present_keys = self._present_keys(wanted_keys)
1612
missing = set(wanted_keys).difference(present_keys)
1614
raise RevisionNotPresent(missing.pop(), self)
1615
return list(self._keys_to_version_ids(present_keys))
1617
def get_ancestry(self, versions, topo_sorted=True):
1618
"""See VersionedFile.get_ancestry."""
1619
if not self._parents:
1620
return self._parentless_ancestry(versions)
1621
# XXX: This will do len(history) index calls - perhaps
1622
# it should be altered to be a index core feature?
1623
# get a graph of all the mentioned versions:
1626
versions = self._version_ids_to_keys(versions)
1627
pending = set(versions)
1629
# get all pending nodes
1630
this_iteration = pending
1631
new_nodes = self._get_entries(this_iteration)
1634
for (index, key, value, node_refs) in new_nodes:
1635
# don't ask for ghosts - otherwise
1636
# we can end up looping with pending
1637
# being entirely ghosted.
1638
graph[key] = [parent for parent in node_refs[0]
1639
if parent not in ghosts]
1641
for parent in graph[key]:
1642
# don't examine known nodes again
1647
ghosts.update(this_iteration.difference(found))
1648
if versions.difference(graph):
1649
raise RevisionNotPresent(versions.difference(graph).pop(), self)
1651
result_keys = topo_sort(graph.items())
1653
result_keys = graph.iterkeys()
1654
return [key[0] for key in result_keys]
1656
def get_ancestry_with_ghosts(self, versions):
1657
"""See VersionedFile.get_ancestry."""
1658
if not self._parents:
1659
return self._parentless_ancestry(versions)
1660
# XXX: This will do len(history) index calls - perhaps
1661
# it should be altered to be a index core feature?
1662
# get a graph of all the mentioned versions:
1664
versions = self._version_ids_to_keys(versions)
1665
pending = set(versions)
1667
# get all pending nodes
1668
this_iteration = pending
1669
new_nodes = self._get_entries(this_iteration)
1671
for (index, key, value, node_refs) in new_nodes:
1672
graph[key] = node_refs[0]
1674
for parent in graph[key]:
1675
# don't examine known nodes again
1679
missing_versions = this_iteration.difference(graph)
1680
missing_needed = versions.intersection(missing_versions)
1682
raise RevisionNotPresent(missing_needed.pop(), self)
1683
for missing_version in missing_versions:
1684
# add a key, no parents
1685
graph[missing_version] = []
1686
pending.discard(missing_version) # don't look for it
1687
result_keys = topo_sort(graph.items())
1688
return [key[0] for key in result_keys]
1690
def get_graph(self):
1691
"""Return a list of the node:parents lists from this knit index."""
1692
if not self._parents:
1693
return [(key, ()) for key in self.get_versions()]
1695
for index, key, value, refs in self._graph_index.iter_all_entries():
1696
result.append((key[0], tuple([ref[0] for ref in refs[0]])))
1699
    def iter_parents(self, version_ids):
        """Iterate through the parents for many version ids.

        :param version_ids: An iterable yielding version_ids.
        :return: An iterator that yields (version_id, parents). Requested
            version_ids not present in the versioned file are simply skipped.
            The order is undefined, allowing for different optimisations in
            the underlying implementation.
        """
        if self._parents:
            all_nodes = set(self._get_entries(
                self._version_ids_to_keys(version_ids)))
            all_parents = set()
            present_parents = set()
            for node in all_nodes:
                all_parents.update(node[3][0])
                # any node we are querying must be present
                present_parents.add(node[1])
            unknown_parents = all_parents.difference(present_parents)
            present_parents.update(self._present_keys(unknown_parents))
            for node in all_nodes:
                parents = []
                for parent in node[3][0]:
                    if parent in present_parents:
                        parents.append(parent[0])
                yield node[1][0], tuple(parents)
        else:
            for node in self._get_entries(self._version_ids_to_keys(version_ids)):
                yield node[1][0], ()
1728
    def num_versions(self):
        return len(list(self._graph_index.iter_all_entries()))

    __len__ = num_versions

    def get_versions(self):
        """Get all the versions in the file; not topologically sorted."""
        return [node[1][0] for node in self._graph_index.iter_all_entries()]

    def has_version(self, version_id):
        """True if the version is in the index."""
        return len(self._present_keys(
            self._version_ids_to_keys([version_id]))) == 1

    def _keys_to_version_ids(self, keys):
        return tuple(key[0] for key in keys)
1744
    def get_position(self, version_id):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(version_id)
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
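    # Note added for clarity (format inferred from the parsing above): the
    # index value stored for each node is a string '<flag><offset> <size>',
    # where the leading character is 'N' when the stored text has no trailing
    # newline. A value such as 'N1234 567' therefore decodes to the memo
    # (node_index, 1234, 567) with the 'no-eol' option set.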
1754
    def get_method(self, version_id):
        """Return compression method of specified version."""
        if not self._deltas:
            return 'fulltext'
        return self._parent_compression(self._get_node(version_id)[3][1])

    def _parent_compression(self, reference_list):
        # use the second reference list to decide if this is delta'd or not.
        if len(reference_list):
            return 'line-delta'
        else:
            return 'fulltext'

    def _get_node(self, version_id):
        try:
            return list(self._get_entries(
                self._version_ids_to_keys([version_id])))[0]
        except IndexError:
            raise RevisionNotPresent(version_id, self)
1773
    def get_options(self, version_id):
        """Return a list representing options."""
        node = self._get_node(version_id)
        if not self._deltas:
            options = ['fulltext']
        else:
            options = [self._parent_compression(node[3][1])]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options
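    # Illustrative example (not from the original source): a typical return
    # value is ['fulltext'] for an undeltaed record, or
    # ['line-delta', 'no-eol'] for a deltaed record whose text lacks a final
    # newline.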
1787
    def get_parents(self, version_id):
        """Return parents of specified version ignoring ghosts."""
        parents = list(self.iter_parents([version_id]))
        if not parents:
            # missing key
            raise errors.RevisionNotPresent(version_id, self)
        return parents[0][1]

    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        nodes = list(self._get_entries(self._version_ids_to_keys([version_id]),
            check_present=True))
        if not self._parents:
            return ()
        return self._keys_to_version_ids(nodes[0][3][0])

    def check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        keys = self._version_ids_to_keys(version_ids)
        present = self._present_keys(keys)
        missing = keys.difference(present)
        if missing:
            raise RevisionNotPresent(missing.pop(), self)

    def add_version(self, version_id, options, access_memo, parents):
        """Add a version record to the index."""
        return self.add_versions(((version_id, options, access_memo, parents),))
1815
def add_versions(self, versions, random_id=False):
1816
"""Add multiple versions to the index.
2831
1818
This function does not insert data into the Immutable GraphIndex
2832
1819
backing the KnitGraphIndex, instead it prepares data for insertion by
2833
1820
the caller and checks that it is safe to insert then calls
2834
1821
self._add_callback with the prepared GraphIndex nodes.
2836
:param records: a list of tuples:
2837
(key, options, access_memo, parents).
1823
:param versions: a list of tuples:
1824
(version_id, options, pos, size, parents).
2838
1825
:param random_id: If True the ids being added were randomly generated
2839
1826
and no check for existence will be performed.
2840
:param missing_compression_parents: If True the records being added are
2841
only compressed against texts already in the index (or inside
2842
records). If False the records all refer to unavailable texts (or
2843
texts inside records) as compression parents.
2845
1828
if not self._add_callback:
2846
1829
raise errors.ReadOnlyError(self)
2847
1830
# we hope there are no repositories with inconsistent parentage
2851
compression_parents = set()
2852
key_dependencies = self._key_dependencies
2853
for (key, options, access_memo, parents) in records:
2855
parents = tuple(parents)
2856
if key_dependencies is not None:
2857
key_dependencies.add_references(key, parents)
1835
for (version_id, options, access_memo, parents) in versions:
2858
1836
index, pos, size = access_memo
1837
key = (version_id, )
1838
parents = tuple((parent, ) for parent in parents)
2859
1839
if 'no-eol' in options:
2902
1873
for key, (value, node_refs) in keys.iteritems():
2903
1874
result.append((key, value))
2904
1875
self._add_callback(result)
2905
if missing_compression_parents:
2906
# This may appear to be incorrect (it does not check for
2907
# compression parents that are in the existing graph index),
2908
# but such records won't have been buffered, so this is
2909
# actually correct: every entry when
2910
# missing_compression_parents==True either has a missing parent, or
2911
# a parent that is one of the keys in records.
2912
compression_parents.difference_update(keys)
2913
self._missing_compression_parents.update(compression_parents)
2914
# Adding records may have satisfied missing compression parents.
2915
self._missing_compression_parents.difference_update(keys)
2917
    def scan_unvalidated_index(self, graph_index):
        """Inform this _KnitGraphIndex that there is an unvalidated index.

        This allows this _KnitGraphIndex to keep track of any missing
        compression parents we may want to have filled in to make those
        indices valid.

        :param graph_index: A GraphIndex
        """
        new_missing = graph_index.external_references(ref_list_num=1)
        new_missing.difference_update(self.get_parent_map(new_missing))
        self._missing_compression_parents.update(new_missing)
        if self._key_dependencies is not None:
            # Add parent refs from graph_index (and discard parent refs that
            # the graph_index has).
            for node in graph_index.iter_all_entries():
                self._key_dependencies.add_references(node[1], node[3][0])

    def get_missing_compression_parents(self):
        """Return the keys of missing compression parents.

        Missing compression parents occur when a record stream was missing
        basis texts, or an index was scanned that had missing basis texts.
        """
        return frozenset(self._missing_compression_parents)

    def get_missing_parents(self):
        """Return the keys of missing parents."""
        # If updating this, you should also update
        # groupcompress._GCGraphIndex.get_missing_parents
        # We may have false positives, so filter those out.
        self._key_dependencies.satisfy_refs_for_keys(
            self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
        return frozenset(self._key_dependencies.get_unsatisfied_refs())
2953
    def _check_read(self):
        """Raise if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        if len(compression_parents) != 1:
            raise AssertionError(
                "Too many compression parents: %r" % compression_parents)
        return compression_parents[0]
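    # Note added for clarity: entries yielded by _get_entries() are tuples of
    # (index, key, value, references); with deltas enabled the references
    # field holds two lists, (parents, compression_parents), which is why
    # an_entry[3][1] above is the compression parent list.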
2974
    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        result = {}
        entries = self._get_entries(keys, False)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = ()
            else:
                parents = entry[3][0]
            if not self._deltas:
                compression_parent_key = None
            else:
                compression_parent_key = self._compression_parent(entry)
            noeol = (entry[2][0] == 'N')
            if compression_parent_key:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[key] = (self._node_to_position(entry),
                           compression_parent_key, parents,
                           (method, noeol))
        return result
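    # Illustrative sketch (keys and offsets are assumed, not from the source):
    # an entry of the returned dict might look like
    #   details[('rev-2',)] == ((gidx, 1234, 567), ('rev-1',), (('rev-1',),),
    #                           ('line-delta', False))
    # i.e. (index_memo, compression_parent, parents, (method, noeol)).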
3016
def _get_entries(self, keys, check_present=False):
3017
"""Get the entries for keys.
3019
:param keys: An iterable of index key tuples.
3024
for node in self._graph_index.iter_entries(keys):
3026
found_keys.add(node[1])
3028
# adapt parentless index to the rest of the code.
3029
for node in self._graph_index.iter_entries(keys):
3030
yield node[0], node[1], node[2], ()
3031
found_keys.add(node[1])
3033
missing_keys = keys.difference(found_keys)
3035
raise RevisionNotPresent(missing_keys.pop(), self)
3037
def get_method(self, key):
3038
"""Return compression method of specified key."""
3039
return self._get_method(self._get_node(key))
3041
def _get_method(self, node):
3042
if not self._deltas:
3044
if self._compression_parent(node):
3049
def _get_node(self, key):
3051
return list(self._get_entries([key]))[0]
3053
raise RevisionNotPresent(key, self)
3055
def get_options(self, key):
3056
"""Return a list representing options.
3060
node = self._get_node(key)
3061
options = [self._get_method(node)]
3062
if node[2][0] == 'N':
3063
options.append('no-eol')
3066
def find_ancestry(self, keys):
3067
"""See CombinedGraphIndex.find_ancestry()"""
3068
return self._graph_index.find_ancestry(keys, 0)
3070
def get_parent_map(self, keys):
3071
"""Get a map of the parents of keys.
3073
:param keys: The keys to look up parents for.
3074
:return: A mapping from keys to parents. Absent keys are absent from
3078
nodes = self._get_entries(keys)
3082
result[node[1]] = node[3][0]
3085
result[node[1]] = None
3088
def get_position(self, key):
3089
"""Return details needed to access the version.
3091
:return: a tuple (index, data position, size) to hand to the access
3092
logic to get the record.
3094
node = self._get_node(key)
3095
return self._node_to_position(node)
3097
has_key = _mod_index._has_key_from_parent_map
3100
"""Get all the keys in the collection.
3102
The keys are not ordered.
3105
return [node[1] for node in self._graph_index.iter_all_entries()]
3107
missing_keys = _mod_index._missing_keys_from_parent_map
3109
    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])

    def _sort_keys_by_io(self, keys, positions):
        """Figure out an optimal order to read the records for the given keys.

        Sort keys, grouped by index and sorted by position.

        :param keys: A list of keys whose records we want to read. This will be
            sorted in-place.
        :param positions: A dict, such as the one returned by
            _get_components_positions()
        :return: None
        """
        def get_index_memo(key):
            # index_memo is at offset [1]. It is made up of (GraphIndex,
            # position, size). GI is an object, which will be unique for each
            # pack file. This causes us to group by pack file, then sort by
            # position. Size doesn't matter, but it isn't worth breaking up the
            # string.
            return positions[key][1]
        return keys.sort(key=get_index_memo)
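    # Illustrative sketch (names are assumed, not from the original source):
    # with positions[k][1] being the (graph_index, offset, size) memo, e.g.
    #   positions = {k1: (..., (idx_a, 300, 10), ...),
    #                k2: (..., (idx_a, 100, 10), ...)}
    # _sort_keys_by_io([k1, k2], positions) reorders the list in place to
    # [k2, k1]: records are grouped by pack file and read in ascending offset
    # order to minimise seeks.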
3134
_get_total_build_size = _get_total_build_size
3137
class _KnitKeyAccess(object):
3138
"""Access to records in .knit files."""
3140
def __init__(self, transport, mapper):
3141
"""Create a _KnitKeyAccess with transport and mapper.
3143
:param transport: The transport the access object is rooted at.
3144
:param mapper: The mapper used to map keys to .knit files.
1877
def _version_ids_to_keys(self, version_ids):
1878
return set((version_id, ) for version_id in version_ids)
1881
class _KnitAccess(object):
1882
"""Access to knit records in a .knit file."""
1884
def __init__(self, transport, filename, _file_mode, _dir_mode,
1885
_need_to_create, _create_parent_dir):
1886
"""Create a _KnitAccess for accessing and inserting data.
1888
:param transport: The transport the .knit is located on.
1889
:param filename: The filename of the .knit.
3146
1891
self._transport = transport
3147
self._mapper = mapper
1892
self._filename = filename
1893
self._file_mode = _file_mode
1894
self._dir_mode = _dir_mode
1895
self._need_to_create = _need_to_create
1896
self._create_parent_dir = _create_parent_dir
3149
def add_raw_records(self, key_sizes, raw_data):
1898
def add_raw_records(self, sizes, raw_data):
3150
1899
"""Add raw knit bytes to a storage area.
3152
The data is spooled to the container writer in one bytes-record per
1901
The data is spooled to whereever the access method is storing data.
3155
:param sizes: An iterable of tuples containing the key and size of each
1903
:param sizes: An iterable containing the size of each raw data segment.
3157
1904
:param raw_data: A bytestring containing the data.
3158
:return: A list of memos to retrieve the record later. Each memo is an
3159
opaque index memo. For _KnitKeyAccess the memo is (key, pos,
3160
length), where the key is the record key.
1905
:return: A list of memos to retrieve the record later. Each memo is a
1906
tuple - (index, pos, length), where the index field is always None
1907
for the .knit access method.
3162
if type(raw_data) is not str:
3163
raise AssertionError(
3164
'data must be plain bytes was %s' % type(raw_data))
1909
assert type(raw_data) == str, \
1910
'data must be plain bytes was %s' % type(raw_data)
1911
if not self._need_to_create:
1912
base = self._transport.append_bytes(self._filename, raw_data)
1914
self._transport.put_bytes_non_atomic(self._filename, raw_data,
1915
create_parent_dir=self._create_parent_dir,
1916
mode=self._file_mode,
1917
dir_mode=self._dir_mode)
1918
self._need_to_create = False
3167
# TODO: This can be tuned for writing to sftp and other servers where
3168
# append() is relatively expensive by grouping the writes to each key
3170
for key, size in key_sizes:
3171
path = self._mapper.map(key)
3173
base = self._transport.append_bytes(path + '.knit',
3174
raw_data[offset:offset+size])
3175
except errors.NoSuchFile:
3176
self._transport.mkdir(osutils.dirname(path))
3177
base = self._transport.append_bytes(path + '.knit',
3178
raw_data[offset:offset+size])
3182
result.append((key, base, size))
1922
result.append((None, base, size))
3186
"""Flush pending writes on this access object.
3188
For .knit files this is a no-op.
1927
"""IFF this data access has its own storage area, initialise it.
1931
self._transport.put_bytes_non_atomic(self._filename, '',
1932
mode=self._file_mode)
1934
def open_file(self):
1935
"""IFF this data access can be represented as a single file, open it.
1937
For knits that are not mapped to a single file on disk this will
1940
:return: None or a file handle.
1943
return self._transport.get(self._filename)
3192
1948
def get_raw_records(self, memos_for_retrieval):
3193
1949
"""Get the raw bytes for a records.
3195
:param memos_for_retrieval: An iterable containing the access memo for
3196
retrieving the bytes.
1951
:param memos_for_retrieval: An iterable containing the (index, pos,
1952
length) memo for retrieving the bytes. The .knit method ignores
1953
the index as there is always only a single file.
3197
1954
:return: An iterator over the bytes of the records.
3199
# first pass, group into same-index request to minimise readv's issued.
3201
current_prefix = None
3202
for (key, offset, length) in memos_for_retrieval:
3203
if current_prefix == key[:-1]:
3204
current_list.append((offset, length))
3206
if current_prefix is not None:
3207
request_lists.append((current_prefix, current_list))
3208
current_prefix = key[:-1]
3209
current_list = [(offset, length)]
3210
# handle the last entry
3211
if current_prefix is not None:
3212
request_lists.append((current_prefix, current_list))
3213
for prefix, read_vector in request_lists:
3214
path = self._mapper.map(prefix) + '.knit'
3215
for pos, data in self._transport.readv(path, read_vector):
3219
class _DirectPackAccess(object):
3220
"""Access to data in one or more packs with less translation."""
3222
def __init__(self, index_to_packs, reload_func=None, flush_func=None):
3223
"""Create a _DirectPackAccess object.
1956
read_vector = [(pos, size) for (index, pos, size) in memos_for_retrieval]
1957
for pos, data in self._transport.readv(self._filename, read_vector):
1961
class _PackAccess(object):
1962
"""Access to knit records via a collection of packs."""
1964
def __init__(self, index_to_packs, writer=None):
1965
"""Create a _PackAccess object.
3225
1967
:param index_to_packs: A dict mapping index objects to the transport
3226
1968
and file names for obtaining data.
3227
:param reload_func: A function to call if we determine that the pack
3228
files have moved and we need to reload our caches. See
3229
bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
1969
:param writer: A tuple (pack.ContainerWriter, write_index) which
1970
contains the pack to write, and the index that reads from it will
3231
self._container_writer = None
3232
self._write_index = None
3233
self._indices = index_to_packs
3234
self._reload_func = reload_func
3235
self._flush_func = flush_func
1974
self.container_writer = writer[0]
1975
self.write_index = writer[1]
1977
self.container_writer = None
1978
self.write_index = None
1979
self.indices = index_to_packs
3237
def add_raw_records(self, key_sizes, raw_data):
1981
def add_raw_records(self, sizes, raw_data):
3238
1982
"""Add raw knit bytes to a storage area.
3240
1984
The data is spooled to the container writer in one bytes-record per
3243
:param sizes: An iterable of tuples containing the key and size of each
1987
:param sizes: An iterable containing the size of each raw data segment.
3245
1988
:param raw_data: A bytestring containing the data.
3246
:return: A list of memos to retrieve the record later. Each memo is an
3247
opaque index memo. For _DirectPackAccess the memo is (index, pos,
3248
length), where the index field is the write_index object supplied
3249
to the PackAccess object.
1989
:return: A list of memos to retrieve the record later. Each memo is a
1990
tuple - (index, pos, length), where the index field is the
1991
write_index object supplied to the PackAccess object.
3251
if type(raw_data) is not str:
3252
raise AssertionError(
3253
'data must be plain bytes was %s' % type(raw_data))
1993
assert type(raw_data) == str, \
1994
'data must be plain bytes was %s' % type(raw_data)
3256
for key, size in key_sizes:
3257
p_offset, p_length = self._container_writer.add_bytes_record(
1998
p_offset, p_length = self.container_writer.add_bytes_record(
3258
1999
raw_data[offset:offset+size], [])
3260
result.append((self._write_index, p_offset, p_length))
2001
result.append((self.write_index, p_offset, p_length))
3264
"""Flush pending writes on this access object.
2005
"""Pack based knits do not get individually created."""
3266
This will flush any buffered writes to a NewPack.
3268
if self._flush_func is not None:
3271
2007
def get_raw_records(self, memos_for_retrieval):
3272
2008
"""Get the raw bytes for a records.
3274
:param memos_for_retrieval: An iterable containing the (index, pos,
2010
:param memos_for_retrieval: An iterable containing the (index, pos,
3275
2011
length) memo for retrieving the bytes. The Pack access method
3276
2012
looks up the pack to use for a given record in its index_to_pack
3292
2028
if current_index is not None:
3293
2029
request_lists.append((current_index, current_list))
3294
2030
for index, offsets in request_lists:
3296
transport, path = self._indices[index]
3298
# A KeyError here indicates that someone has triggered an index
3299
# reload, and this index has gone missing, we need to start
3301
if self._reload_func is None:
3302
# If we don't have a _reload_func there is nothing that can
3305
raise errors.RetryWithNewPacks(index,
3306
reload_occurred=True,
3307
exc_info=sys.exc_info())
3309
reader = pack.make_readv_reader(transport, path, offsets)
3310
for names, read_func in reader.iter_records():
3311
yield read_func(None)
3312
except errors.NoSuchFile:
3313
# A NoSuchFile error indicates that a pack file has gone
3314
# missing on disk, we need to trigger a reload, and start over.
3315
if self._reload_func is None:
3317
raise errors.RetryWithNewPacks(transport.abspath(path),
3318
reload_occurred=False,
3319
exc_info=sys.exc_info())
3321
def set_writer(self, writer, index, transport_packname):
2031
transport, path = self.indices[index]
2032
reader = pack.make_readv_reader(transport, path, offsets)
2033
for names, read_func in reader.iter_records():
2034
yield read_func(None)
2036
def open_file(self):
2037
"""Pack based knits have no single file."""
2040
def set_writer(self, writer, index, (transport, packname)):
3322
2041
"""Set a writer to use for adding data."""
3323
2042
if index is not None:
3324
self._indices[index] = transport_packname
3325
self._container_writer = writer
3326
self._write_index = index
3328
def reload_or_raise(self, retry_exc):
3329
"""Try calling the reload function, or re-raise the original exception.
3331
This should be called after _DirectPackAccess raises a
3332
RetryWithNewPacks exception. This function will handle the common logic
3333
of determining when the error is fatal versus being temporary.
3334
It will also make sure that the original exception is raised, rather
3335
than the RetryWithNewPacks exception.
3337
If this function returns, then the calling function should retry
3338
whatever operation was being performed. Otherwise an exception will
3341
:param retry_exc: A RetryWithNewPacks exception.
3344
if self._reload_func is None:
3346
elif not self._reload_func():
3347
# The reload claimed that nothing changed
3348
if not retry_exc.reload_occurred:
3349
# If there wasn't an earlier reload, then we really were
3350
# expecting to find changes. We didn't find them, so this is a
3354
exc_class, exc_value, exc_traceback = retry_exc.exc_info
3355
raise exc_class, exc_value, exc_traceback
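    # Illustrative calling pattern (a sketch, not from the original source):
    # callers typically loop, catching RetryWithNewPacks and delegating to
    # this method, e.g.
    #   while True:
    #       try:
    #           return do_read(access)
    #       except errors.RetryWithNewPacks, e:
    #           access.reload_or_raise(e)
    # so a pack-file rename triggers at most one reload before retrying.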
2043
self.indices[index] = (transport, packname)
2044
self.container_writer = writer
2045
self.write_index = index
2048
class _StreamAccess(object):
2049
"""A Knit Access object that provides data from a datastream.
2051
It also provides a fallback to present as unannotated data, annotated data
2052
from a *backing* access object.
2054
This is triggered by a index_memo which is pointing to a different index
2055
than this was constructed with, and is used to allow extracting full
2056
unannotated texts for insertion into annotated knits.
2059
def __init__(self, reader_callable, stream_index, backing_knit,
2061
"""Create a _StreamAccess object.
2063
:param reader_callable: The reader_callable from the datastream.
2064
This is called to buffer all the data immediately, for
2066
:param stream_index: The index the data stream this provides access to
2067
which will be present in native index_memo's.
2068
:param backing_knit: The knit object that will provide access to
2069
annotated texts which are not available in the stream, so as to
2070
create unannotated texts.
2071
:param orig_factory: The original content factory used to generate the
2072
stream. This is used for checking whether the thunk code for
2073
supporting _copy_texts will generate the correct form of data.
2075
self.data = reader_callable(None)
2076
self.stream_index = stream_index
2077
self.backing_knit = backing_knit
2078
self.orig_factory = orig_factory
2080
def get_raw_records(self, memos_for_retrieval):
2081
"""Get the raw bytes for a records.
2083
:param memos_for_retrieval: An iterable containing the (thunk_flag,
2084
index, start, end) memo for retrieving the bytes.
2085
:return: An iterator over the bytes of the records.
2087
# use a generator for memory friendliness
2088
for thunk_flag, version_id, start, end in memos_for_retrieval:
2089
if version_id is self.stream_index:
2090
yield self.data[start:end]
2092
# we have been asked to thunk. This thunking only occurs when
2093
# we are obtaining plain texts from an annotated backing knit
2094
# so that _copy_texts will work.
2095
# We could improve performance here by scanning for where we need
2096
# to do this and using get_line_list, then interleaving the output
2097
# as desired. However, for now, this is sufficient.
2098
if self.orig_factory.__class__ != KnitPlainFactory:
2099
raise errors.KnitCorrupt(
2100
self, 'Bad thunk request %r' % version_id)
2101
lines = self.backing_knit.get_lines(version_id)
2102
line_bytes = ''.join(lines)
2103
digest = sha_string(line_bytes)
2105
if lines[-1][-1] != '\n':
2106
lines[-1] = lines[-1] + '\n'
2108
orig_options = list(self.backing_knit._index.get_options(version_id))
2109
if 'fulltext' not in orig_options:
2110
if 'line-delta' not in orig_options:
2111
raise errors.KnitCorrupt(self,
2112
'Unknown compression method %r' % orig_options)
2113
orig_options.remove('line-delta')
2114
orig_options.append('fulltext')
2115
# We want plain data, because we expect to thunk only to allow text
2117
size, bytes = self.backing_knit._data._record_to_data(version_id,
2118
digest, lines, line_bytes)
2122
class _StreamIndex(object):
2123
"""A Knit Index object that uses the data map from a datastream."""
2125
def __init__(self, data_list):
2126
"""Create a _StreamIndex object.
2128
:param data_list: The data_list from the datastream.
2130
self.data_list = data_list
2131
self._by_version = {}
2133
for key, options, length, parents in data_list:
2134
self._by_version[key] = options, (pos, pos + length), parents
2137
def get_ancestry(self, versions, topo_sorted):
2138
"""Get an ancestry list for versions."""
2140
# Not needed for basic joins
2141
raise NotImplementedError(self.get_ancestry)
2142
# get a graph of all the mentioned versions:
2143
# Little ugly - basically copied from KnitIndex, but don't want to
2144
# accidentally incorporate too much of that index's code.
2146
pending = set(versions)
2147
cache = self._by_version
2149
version = pending.pop()
2152
parents = [p for p in cache[version][2] if p in cache]
2154
raise RevisionNotPresent(version, self)
2155
# if not completed and not a ghost
2156
pending.update([p for p in parents if p not in ancestry])
2157
ancestry.add(version)
2158
return list(ancestry)
2160
def get_method(self, version_id):
2161
"""Return compression method of specified version."""
2163
options = self._by_version[version_id][0]
2165
# Strictly speaking this should check in the backing knit, but
2166
# until we have a test to discriminate, this will do.
2168
if 'fulltext' in options:
2170
elif 'line-delta' in options:
2173
raise errors.KnitIndexUnknownMethod(self, options)
2175
def get_options(self, version_id):
2176
"""Return a list representing options.
2180
return self._by_version[version_id][0]
2182
def get_parents_with_ghosts(self, version_id):
2183
"""Return parents of specified version with ghosts."""
2184
return self._by_version[version_id][2]
2186
def get_position(self, version_id):
2187
"""Return details needed to access the version.
2189
_StreamAccess has the data as a big array, so we return slice
2190
coordinates into that (as index_memo's are opaque outside the
2191
index and matching access class).
2193
:return: a tuple (thunk_flag, index, start, end). If thunk_flag is
2194
False, index will be self, otherwise it will be a version id.
2197
start, end = self._by_version[version_id][1]
2198
return False, self, start, end
2200
# Signal to the access object to handle this from the backing knit.
2201
return (True, version_id, None, None)
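    # Note added for clarity: the two memo shapes returned above are
    #   (False, self, start, end)       - slice the buffered stream data, or
    #   (True, version_id, None, None)  - thunk to the backing knit;
    # _StreamAccess.get_raw_records() yields a slice of its buffered data when
    # the memo points at its own stream index, otherwise it rebuilds the text
    # from the backing knit.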
2203
def get_versions(self):
2204
"""Get all the versions in the stream."""
2205
return self._by_version.keys()
2207
def iter_parents(self, version_ids):
2208
"""Iterate through the parents for many version ids.
2210
:param version_ids: An iterable yielding version_ids.
2211
:return: An iterator that yields (version_id, parents). Requested
2212
version_ids not present in the versioned file are simply skipped.
2213
The order is undefined, allowing for different optimisations in
2214
the underlying implementation.
2217
for version in version_ids:
2219
result.append((version, self._by_version[version][2]))
2225
class _KnitData(object):
2226
"""Manage extraction of data from a KnitAccess, caching and decompressing.
2228
The KnitData class provides the logic for parsing and using knit records,
2229
making use of an access method for the low level read and write operations.
2232
def __init__(self, access):
2233
"""Create a KnitData object.
2235
:param access: The access method to use. Access methods such as
2236
_KnitAccess manage the insertion of raw records and the subsequent
2237
retrieval of the same.
2239
self._access = access
2240
self._checked = False
2241
# TODO: jam 20060713 conceptually, this could spill to disk
2242
# if the cached size gets larger than a certain amount
2243
# but it complicates the model a bit, so for now just use
2244
# a simple dictionary
2246
self._do_cache = False
2248
def enable_cache(self):
2249
"""Enable caching of reads."""
2250
self._do_cache = True
2252
def clear_cache(self):
2253
"""Clear the record cache."""
2254
self._do_cache = False
2257
def _open_file(self):
2258
return self._access.open_file()
2260
def _record_to_data(self, version_id, digest, lines, dense_lines=None):
2261
"""Convert version_id, digest, lines into a raw data block.
2263
:param dense_lines: The bytes of lines but in a denser form. For
2264
instance, if lines is a list of 1000 bytestrings each ending in \n,
2265
dense_lines may be a list with one line in it, containing all the
2266
1000's lines and their \n's. Using dense_lines if it is already
2267
known is a win because the string join to create bytes in this
2268
function spends less time resizing the final string.
2269
:return: (len, a StringIO instance with the raw data ready to read.)
2271
# Note: using a string copy here increases memory pressure with e.g.
2272
# ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
2273
# when doing the initial commit of a mozilla tree. RBC 20070921
2274
bytes = ''.join(chain(
2275
["version %s %d %s\n" % (version_id,
2278
dense_lines or lines,
2279
["end %s\n" % version_id]))
2280
assert bytes.__class__ == str
2281
compressed_bytes = bytes_to_gzip(bytes)
2282
return len(compressed_bytes), compressed_bytes
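    # Note added for clarity (layout taken from the assembly above): each
    # record is the gzipped form of
    #   version <version_id> <line-count> <sha1>\n
    #   <content lines...>
    #   end <version_id>\n
    # and the returned length is that of the compressed block, which is what
    # gets handed to add_raw_records().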
2284
def add_raw_records(self, sizes, raw_data):
2285
"""Append a prepared record to the data file.
2287
:param sizes: An iterable containing the size of each raw data segment.
2288
:param raw_data: A bytestring containing the data.
2289
:return: a list of index data for the way the data was stored.
2290
See the access method add_raw_records documentation for more
2293
return self._access.add_raw_records(sizes, raw_data)
2295
def _parse_record_header(self, version_id, raw_data):
2296
"""Parse a record header for consistency.
2298
:return: the header and the decompressor stream.
2299
as (stream, header_record)
2301
df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
2303
rec = self._check_header(version_id, df.readline())
2304
except Exception, e:
2305
raise KnitCorrupt(self._access,
2306
"While reading {%s} got %s(%s)"
2307
% (version_id, e.__class__.__name__, str(e)))
2310
def _check_header(self, version_id, line):
2313
raise KnitCorrupt(self._access,
2314
'unexpected number of elements in record header')
2315
if rec[1] != version_id:
2316
raise KnitCorrupt(self._access,
2317
'unexpected version, wanted %r, got %r'
2318
% (version_id, rec[1]))
2321
def _parse_record(self, version_id, data):
2323
# 4168 calls in 2880 217 internal
2324
# 4168 calls to _parse_record_header in 2121
2325
# 4168 calls to readlines in 330
2326
df = GzipFile(mode='rb', fileobj=StringIO(data))
2329
record_contents = df.readlines()
2330
except Exception, e:
2331
raise KnitCorrupt(self._access,
2332
"While reading {%s} got %s(%s)"
2333
% (version_id, e.__class__.__name__, str(e)))
2334
header = record_contents.pop(0)
2335
rec = self._check_header(version_id, header)
2337
last_line = record_contents.pop()
2338
if len(record_contents) != int(rec[2]):
2339
raise KnitCorrupt(self._access,
2340
'incorrect number of lines %s != %s'
2342
% (len(record_contents), int(rec[2]),
2344
if last_line != 'end %s\n' % rec[1]:
2345
raise KnitCorrupt(self._access,
2346
'unexpected version end line %r, wanted %r'
2347
% (last_line, version_id))
2349
return record_contents, rec[3]
2351
def read_records_iter_raw(self, records):
2352
"""Read text records from data file and yield raw data.
2354
This unpacks enough of the text record to validate the id is
2355
as expected but thats all.
2357
# setup an iterator of the external records:
2358
# uses readv so nice and fast we hope.
2360
# grab the disk data needed.
2362
# Don't check _cache if it is empty
2363
needed_offsets = [index_memo for version_id, index_memo
2365
if version_id not in self._cache]
2367
needed_offsets = [index_memo for version_id, index_memo
2370
raw_records = self._access.get_raw_records(needed_offsets)
2372
for version_id, index_memo in records:
2373
if version_id in self._cache:
2374
# This data has already been validated
2375
data = self._cache[version_id]
2377
data = raw_records.next()
2379
self._cache[version_id] = data
2381
# validate the header
2382
df, rec = self._parse_record_header(version_id, data)
2384
yield version_id, data
2386
def read_records_iter(self, records):
2387
"""Read text records from data file and yield result.
2389
The result will be returned in whatever is the fastest to read.
2390
Not by the order requested. Also, multiple requests for the same
2391
record will only yield 1 response.
2392
:param records: A list of (version_id, pos, len) entries
2393
:return: Yields (version_id, contents, digest) in the order
2394
read, not the order requested
2400
# Skip records we have alread seen
2401
yielded_records = set()
2402
needed_records = set()
2403
for record in records:
2404
if record[0] in self._cache:
2405
if record[0] in yielded_records:
2407
yielded_records.add(record[0])
2408
data = self._cache[record[0]]
2409
content, digest = self._parse_record(record[0], data)
2410
yield (record[0], content, digest)
2412
needed_records.add(record)
2413
needed_records = sorted(needed_records, key=operator.itemgetter(1))
2415
needed_records = sorted(set(records), key=operator.itemgetter(1))
2417
if not needed_records:
2420
# The transport optimizes the fetching as well
2421
# (ie, reads continuous ranges.)
2422
raw_data = self._access.get_raw_records(
2423
[index_memo for version_id, index_memo in needed_records])
2425
for (version_id, index_memo), data in \
2426
izip(iter(needed_records), raw_data):
2427
content, digest = self._parse_record(version_id, data)
2429
self._cache[version_id] = data
2430
yield version_id, content, digest
2432
def read_records(self, records):
2433
"""Read records into a dictionary."""
2435
for record_id, content, digest in \
2436
self.read_records_iter(records):
2437
components[record_id] = (content, digest)
2441
class InterKnit(InterVersionedFile):
2442
"""Optimised code paths for knit to knit operations."""
2444
_matching_file_from_factory = KnitVersionedFile
2445
_matching_file_to_factory = KnitVersionedFile
2448
def is_compatible(source, target):
2449
"""Be compatible with knits. """
2451
return (isinstance(source, KnitVersionedFile) and
2452
isinstance(target, KnitVersionedFile))
2453
except AttributeError:
2456
def _copy_texts(self, pb, msg, version_ids, ignore_missing=False):
2457
"""Copy texts to the target by extracting and adding them one by one.
2459
see join() for the parameter definitions.
2461
version_ids = self._get_source_version_ids(version_ids, ignore_missing)
2462
graph = self.source.get_graph(version_ids)
2463
order = topo_sort(graph.items())
2465
def size_of_content(content):
2466
return sum(len(line) for line in content.text())
2467
# Cache at most 10MB of parent texts
2468
parent_cache = lru_cache.LRUSizeCache(max_size=10*1024*1024,
2469
compute_size=size_of_content)
2470
# TODO: jam 20071116 It would be nice to have a streaming interface to
2471
# get multiple texts from a source. The source could be smarter
2472
# about how it handled intermediate stages.
2473
# get_line_list() or make_mpdiffs() seem like a possibility, but
2474
# at the moment they extract all full texts into memory, which
2475
# causes us to store more than our 3x fulltext goal.
2476
# Repository.iter_files_bytes() may be another possibility
2477
to_process = [version for version in order
2478
if version not in self.target]
2479
total = len(to_process)
2480
pb = ui.ui_factory.nested_progress_bar()
2482
for index, version in enumerate(to_process):
2483
pb.update('Converting versioned data', index, total)
2484
sha1, num_bytes, parent_text = self.target.add_lines(version,
2485
self.source.get_parents_with_ghosts(version),
2486
self.source.get_lines(version),
2487
parent_texts=parent_cache)
2488
parent_cache[version] = parent_text
2493
def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
2494
"""See InterVersionedFile.join."""
2495
assert isinstance(self.source, KnitVersionedFile)
2496
assert isinstance(self.target, KnitVersionedFile)
2498
# If the source and target are mismatched w.r.t. annotations vs
2499
# plain, the data needs to be converted accordingly
2500
if self.source.factory.annotated == self.target.factory.annotated:
2502
elif self.source.factory.annotated:
2503
converter = self._anno_to_plain_converter
2505
# We're converting from a plain to an annotated knit. Copy them
2506
# across by full texts.
2507
return self._copy_texts(pb, msg, version_ids, ignore_missing)
2509
version_ids = self._get_source_version_ids(version_ids, ignore_missing)
2513
pb = ui.ui_factory.nested_progress_bar()
2515
version_ids = list(version_ids)
2516
if None in version_ids:
2517
version_ids.remove(None)
2519
self.source_ancestry = set(self.source.get_ancestry(version_ids,
2521
this_versions = set(self.target._index.get_versions())
2522
# XXX: For efficiency we should not look at the whole index,
2523
# we only need to consider the referenced revisions - they
2524
# must all be present, or the method must be full-text.
2525
# TODO, RBC 20070919
2526
needed_versions = self.source_ancestry - this_versions
2528
if not needed_versions:
2530
full_list = topo_sort(self.source.get_graph())
2532
version_list = [i for i in full_list if (not self.target.has_version(i)
2533
and i in needed_versions)]
2537
copy_queue_records = []
2539
for version_id in version_list:
2540
options = self.source._index.get_options(version_id)
2541
parents = self.source._index.get_parents_with_ghosts(version_id)
2542
# check that its will be a consistent copy:
2543
for parent in parents:
2544
# if source has the parent, we must :
2545
# * already have it or
2546
# * have it scheduled already
2547
# otherwise we don't care
2548
assert (self.target.has_version(parent) or
2549
parent in copy_set or
2550
not self.source.has_version(parent))
2551
index_memo = self.source._index.get_position(version_id)
2552
copy_queue_records.append((version_id, index_memo))
2553
copy_queue.append((version_id, options, parents))
2554
copy_set.add(version_id)
2556
# data suck the join:
2558
total = len(version_list)
2561
for (version_id, raw_data), \
2562
(version_id2, options, parents) in \
2563
izip(self.source._data.read_records_iter_raw(copy_queue_records),
2565
assert version_id == version_id2, 'logic error, inconsistent results'
2567
pb.update("Joining knit", count, total)
2569
size, raw_data = converter(raw_data, version_id, options,
2572
size = len(raw_data)
2573
raw_records.append((version_id, options, parents, size))
2574
raw_datum.append(raw_data)
2575
self.target._add_raw_records(raw_records, ''.join(raw_datum))
2580
def _anno_to_plain_converter(self, raw_data, version_id, options,
2582
"""Convert annotated content to plain content."""
2583
data, digest = self.source._data._parse_record(version_id, raw_data)
2584
if 'fulltext' in options:
2585
content = self.source.factory.parse_fulltext(data, version_id)
2586
lines = self.target.factory.lower_fulltext(content)
2588
delta = self.source.factory.parse_line_delta(data, version_id,
2590
lines = self.target.factory.lower_line_delta(delta)
2591
return self.target._data._record_to_data(version_id, digest, lines)
2594
InterVersionedFile.register_optimiser(InterKnit)
2597
class WeaveToKnit(InterVersionedFile):
2598
"""Optimised code paths for weave to knit operations."""
2600
_matching_file_from_factory = bzrlib.weave.WeaveFile
2601
_matching_file_to_factory = KnitVersionedFile
2604
def is_compatible(source, target):
2605
"""Be compatible with weaves to knits."""
2607
return (isinstance(source, bzrlib.weave.Weave) and
2608
isinstance(target, KnitVersionedFile))
2609
except AttributeError:
2612
def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
2613
"""See InterVersionedFile.join."""
2614
assert isinstance(self.source, bzrlib.weave.Weave)
2615
assert isinstance(self.target, KnitVersionedFile)
2617
version_ids = self._get_source_version_ids(version_ids, ignore_missing)
2622
pb = ui.ui_factory.nested_progress_bar()
2624
version_ids = list(version_ids)
2626
self.source_ancestry = set(self.source.get_ancestry(version_ids))
2627
this_versions = set(self.target._index.get_versions())
2628
needed_versions = self.source_ancestry - this_versions
2630
if not needed_versions:
2632
full_list = topo_sort(self.source.get_graph())
2634
version_list = [i for i in full_list if (not self.target.has_version(i)
2635
and i in needed_versions)]
2639
total = len(version_list)
2640
for version_id in version_list:
2641
pb.update("Converting to knit", count, total)
2642
parents = self.source.get_parents(version_id)
2643
# check that its will be a consistent copy:
2644
for parent in parents:
2645
# if source has the parent, we must already have it
2646
assert (self.target.has_version(parent))
2647
self.target.add_lines(
2648
version_id, parents, self.source.get_lines(version_id))
2655
InterVersionedFile.register_optimiser(WeaveToKnit)
2658
# Deprecated, use PatienceSequenceMatcher instead
2659
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher
3358
2662
def annotate_knit(knit, revision_id):
3362
2666
It will work for knits with cached annotations, but this is not
3365
annotator = _KnitAnnotator(knit)
3366
return iter(annotator.annotate_flat(revision_id))
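# Illustrative usage sketch (not from the original source; 'knit' and the
# revision id are assumptions):
#   for origin, line in annotate_knit(knit, 'rev-2'):
#       print origin, line
# yielding one (origin, line) pair per line of the requested text.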
3369
class _KnitAnnotator(annotate.Annotator):
3370
"""Build up the annotations for a text."""
3372
def __init__(self, vf):
3373
annotate.Annotator.__init__(self, vf)
3375
# TODO: handle Nodes which cannot be extracted
3376
# self._ghosts = set()
3378
# Map from (key, parent_key) => matching_blocks, should be 'use once'
3379
self._matching_blocks = {}
3381
# KnitContent objects
3382
self._content_objects = {}
3383
# The number of children that depend on this fulltext content object
3384
self._num_compression_children = {}
3385
# Delta records that need their compression parent before they can be
3387
self._pending_deltas = {}
3388
# Fulltext records that are waiting for their parents fulltexts before
3389
# they can be yielded for annotation
3390
self._pending_annotation = {}
3392
self._all_build_details = {}
3394
def _get_build_graph(self, key):
3395
"""Get the graphs for building texts and annotations.
3397
The data you need for creating a full text may be different than the
3398
data you need to annotate that text. (At a minimum, you need both
3399
parents to create an annotation, but only need 1 parent to generate the
3402
:return: A list of (key, index_memo) records, suitable for
3403
passing to read_records_iter to start reading in the raw data from
3406
pending = set([key])
3409
self._num_needed_children[key] = 1
3411
# get all pending nodes
3412
this_iteration = pending
3413
build_details = self._vf._index.get_build_details(this_iteration)
3414
self._all_build_details.update(build_details)
3415
# new_nodes = self._vf._index._get_entries(this_iteration)
3417
for key, details in build_details.iteritems():
3418
(index_memo, compression_parent, parent_keys,
3419
record_details) = details
3420
self._parent_map[key] = parent_keys
3421
self._heads_provider = None
3422
records.append((key, index_memo))
3423
# Do we actually need to check _annotated_lines?
3424
pending.update([p for p in parent_keys
3425
if p not in self._all_build_details])
3427
for parent_key in parent_keys:
3428
if parent_key in self._num_needed_children:
3429
self._num_needed_children[parent_key] += 1
3431
self._num_needed_children[parent_key] = 1
3432
if compression_parent:
3433
if compression_parent in self._num_compression_children:
3434
self._num_compression_children[compression_parent] += 1
3436
self._num_compression_children[compression_parent] = 1
3438
missing_versions = this_iteration.difference(build_details.keys())
3439
if missing_versions:
3440
for key in missing_versions:
3441
if key in self._parent_map and key in self._text_cache:
3442
# We already have this text ready, we just need to
3443
# yield it later so we get it annotated
3445
parent_keys = self._parent_map[key]
3446
for parent_key in parent_keys:
3447
if parent_key in self._num_needed_children:
3448
self._num_needed_children[parent_key] += 1
3450
self._num_needed_children[parent_key] = 1
3451
pending.update([p for p in parent_keys
3452
if p not in self._all_build_details])
3454
raise errors.RevisionNotPresent(key, self._vf)
3455
# Generally we will want to read the records in reverse order, because
3456
# we find the parent nodes after the children
3458
return records, ann_keys
3460
def _get_needed_texts(self, key, pb=None):
3461
# if True or len(self._vf._immediate_fallback_vfs) > 0:
3462
if len(self._vf._immediate_fallback_vfs) > 0:
3463
# If we have fallbacks, go to the generic path
3464
for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
3469
records, ann_keys = self._get_build_graph(key)
3470
for idx, (sub_key, text, num_lines) in enumerate(
3471
self._extract_texts(records)):
3473
pb.update('annotating', idx, len(records))
3474
yield sub_key, text, num_lines
3475
for sub_key in ann_keys:
3476
text = self._text_cache[sub_key]
3477
num_lines = len(text) # bad assumption
3478
yield sub_key, text, num_lines
3480
except errors.RetryWithNewPacks, e:
3481
self._vf._access.reload_or_raise(e)
3482
# The cached build_details are no longer valid
3483
self._all_build_details.clear()
3485
def _cache_delta_blocks(self, key, compression_parent, delta, lines):
3486
parent_lines = self._text_cache[compression_parent]
3487
blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
3488
self._matching_blocks[(key, compression_parent)] = blocks
3490
def _expand_record(self, key, parent_keys, compression_parent, record,
3493
if compression_parent:
3494
if compression_parent not in self._content_objects:
3495
# Waiting for the parent
3496
self._pending_deltas.setdefault(compression_parent, []).append(
3497
(key, parent_keys, record, record_details))
3499
# We have the basis parent, so expand the delta
3500
num = self._num_compression_children[compression_parent]
3503
base_content = self._content_objects.pop(compression_parent)
3504
self._num_compression_children.pop(compression_parent)
3506
self._num_compression_children[compression_parent] = num
3507
base_content = self._content_objects[compression_parent]
3508
# It is tempting to want to copy_base_content=False for the last
3509
# child object. However, whenever noeol=False,
3510
# self._text_cache[parent_key] is content._lines. So mutating it
3511
# gives very bad results.
3512
# The alternative is to copy the lines into text cache, but then we
3513
# are copying anyway, so just do it here.
3514
content, delta = self._vf._factory.parse_record(
3515
key, record, record_details, base_content,
3516
copy_base_content=True)
2669
ancestry = knit.get_ancestry(revision_id)
2670
fulltext = dict(zip(ancestry, knit.get_line_list(ancestry)))
2672
for candidate in ancestry:
2673
if candidate in annotations:
2675
parents = knit.get_parents(candidate)
2676
if len(parents) == 0:
2678
elif knit._index.get_method(candidate) != 'line-delta':
3519
content, _ = self._vf._factory.parse_record(
3520
key, record, record_details, None)
3521
if self._num_compression_children.get(key, 0) > 0:
3522
self._content_objects[key] = content
3523
lines = content.text()
3524
self._text_cache[key] = lines
3525
if delta is not None:
3526
self._cache_delta_blocks(key, compression_parent, delta, lines)
3529
def _get_parent_annotations_and_matches(self, key, text, parent_key):
3530
"""Get the list of annotations for the parent, and the matching lines.
3532
:param text: The opaque value given by _get_needed_texts
3533
:param parent_key: The key for the parent text
3534
:return: (parent_annotations, matching_blocks)
3535
parent_annotations is a list as long as the number of lines in
3537
matching_blocks is a list of (parent_idx, text_idx, len) tuples
3538
indicating which lines match between the two texts
3540
block_key = (key, parent_key)
3541
if block_key in self._matching_blocks:
3542
blocks = self._matching_blocks.pop(block_key)
3543
parent_annotations = self._annotations_cache[parent_key]
3544
return parent_annotations, blocks
3545
return annotate.Annotator._get_parent_annotations_and_matches(self,
3546
key, text, parent_key)
3548
def _process_pending(self, key):
3549
"""The content for 'key' was just processed.
3551
Determine if there is any more pending work to be processed.
3554
if key in self._pending_deltas:
3555
compression_parent = key
3556
children = self._pending_deltas.pop(key)
3557
for child_key, parent_keys, record, record_details in children:
3558
lines = self._expand_record(child_key, parent_keys,
3560
record, record_details)
3561
if self._check_ready_for_annotations(child_key, parent_keys):
3562
to_return.append(child_key)
3563
# Also check any children that are waiting for this parent to be
3565
if key in self._pending_annotation:
3566
children = self._pending_annotation.pop(key)
3567
to_return.extend([c for c, p_keys in children
3568
if self._check_ready_for_annotations(c, p_keys)])
3571
def _check_ready_for_annotations(self, key, parent_keys):
3572
"""return true if this text is ready to be yielded.
3574
Otherwise, this will return False, and queue the text into
3575
self._pending_annotation
3577
for parent_key in parent_keys:
3578
if parent_key not in self._annotations_cache:
3579
# still waiting on at least one parent text, so queue it up
3580
# Note that if there are multiple parents, we need to wait
3582
self._pending_annotation.setdefault(parent_key,
3583
[]).append((key, parent_keys))
3587
def _extract_texts(self, records):
3588
"""Extract the various texts needed based on records"""
3589
# We iterate in the order read, rather than a strict order requested
3590
# However, process what we can, and put off to the side things that
3591
# still need parents, cleaning them up when those parents are
3594
# 1) As 'records' are read, see if we can expand these records into
3595
# Content objects (and thus lines)
3596
# 2) If a given line-delta is waiting on its compression parent, it
3597
# gets queued up into self._pending_deltas, otherwise we expand
3598
# it, and put it into self._text_cache and self._content_objects
3599
# 3) If we expanded the text, we will then check to see if all
3600
# parents have also been processed. If so, this text gets yielded,
3601
# else this record gets set aside into pending_annotation
3602
# 4) Further, if we expanded the text in (2), we will then check to
3603
# see if there are any children in self._pending_deltas waiting to
3604
# also be processed. If so, we go back to (2) for those
3605
# 5) Further again, if we yielded the text, we can then check if that
3606
# 'unlocks' any of the texts in pending_annotations, which should
3607
# then get yielded as well
3608
# Note that both steps 4 and 5 are 'recursive' in that unlocking one
3609
# compression child could unlock yet another, and yielding a fulltext
3610
# will also 'unlock' the children that are waiting on that annotation.
3611
# (Though also, unlocking 1 parent's fulltext, does not unlock a child
3612
# if other parents are also waiting.)
3613
# We want to yield content before expanding child content objects, so
3614
# that we know when we can re-use the content lines, and the annotation
3615
# code can know when it can stop caching fulltexts, as well.
3617
# Children that are missing their compression parent
3619
for (key, record, digest) in self._vf._read_records_iter(records):
3621
details = self._all_build_details[key]
3622
(_, compression_parent, parent_keys, record_details) = details
3623
lines = self._expand_record(key, parent_keys, compression_parent,
3624
record, record_details)
3626
# Pending delta should be queued up
3628
# At this point, we may be able to yield this content, if all
3629
# parents are also finished
3630
yield_this_text = self._check_ready_for_annotations(key,
3633
# All parents present
3634
yield key, lines, len(lines)
3635
to_process = self._process_pending(key)
3637
this_process = to_process
3639
for key in this_process:
3640
lines = self._text_cache[key]
3641
yield key, lines, len(lines)
3642
to_process.extend(self._process_pending(key))
2681
parent, sha1, noeol, delta = knit.get_delta(candidate)
2682
blocks = KnitContent.get_line_delta_blocks(delta,
2683
fulltext[parents[0]], fulltext[candidate])
2684
annotations[candidate] = list(annotate.reannotate([annotations[p]
2685
for p in parents], fulltext[candidate], candidate, blocks))
2686
return iter(annotations[revision_id])
try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._knit_load_data_py import _load_data_py as _load_data