DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'


class KnitAdapter(object):
    """Base class for knit record adaption."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas from.
            May be None for adapters that do not need to access basis texts.
        """
        self._data = KnitVersionedFiles(None, None)
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._basis_vf = basis_vf

class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content = self._annotate_factory.parse_fulltext(contents, rec[1])
        size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
        return bytes

class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        contents = self._plain_factory.lower_line_delta(delta)
        size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
        return bytes

class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to full texts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content, delta = self._annotate_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())

class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter from annotated deltas to full texts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0]
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        basis_content._should_strip_eol = factory._build_details[1]
        return ''.join(basis_content.text())

class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to full texts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        content, delta = self._plain_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())

class DeltaPlainToFullText(KnitAdapter):
    """An adapter from plain deltas to full texts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0]
        # XXX: string splitting overhead.
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        return ''.join(content.text())
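

# Illustrative sketch (not part of the original module): how one of the
# adapters above might be driven by hand.  'source_vf' is assumed to be a
# populated annotated KnitVersionedFiles and 'key' a key whose record is
# stored as an annotated fulltext; real callers go through adapter_registry
# and insert_record_stream instead.
def _example_adapt_record_to_fulltext(source_vf, key):
    record = source_vf.get_record_stream([key], 'unordered', False).next()
    if record.storage_kind == 'knit-annotated-ft-gz':
        adapter = FTAnnotatedToFullText(None)
        return adapter.get_bytes(record,
            record.get_bytes_as(record.storage_kind))
    # otherwise fall back to whatever representation the record offers
    return record.get_bytes_as('fulltext')
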
class KnitContentFactory(ContentFactory):
    """Content factory for streaming from knits.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, build_details, sha1, raw_record,
        annotated, knit=None):
        """Create a KnitContentFactory for key.

        :param key: The key.
        :param parents: The parents.
        :param build_details: The build details as returned from
            get_build_details.
        :param sha1: The sha1 expected from the full text of this object.
        :param raw_record: The bytes of the knit data from disk.
        :param annotated: True if the raw data is annotated.
        """
        ContentFactory.__init__(self)
        self.sha1 = sha1
        self.key = key
        self.parents = parents
        if build_details[0] == 'line-delta':
            kind = 'delta'
        else:
            kind = 'ft'
        if annotated:
            annotated_kind = 'annotated-'
        else:
            annotated_kind = ''
        self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
        self._raw_record = raw_record
        self._build_details = build_details
        self._knit = knit

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            return self._raw_record
        if storage_kind == 'fulltext' and self._knit is not None:
            return self._knit.get_text(self.key[0])
        else:
            raise errors.UnavailableRepresentation(self.key, storage_kind,
                self.storage_kind)
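

# Illustrative sketch (not part of the original module): the storage_kind
# computed above is one of 'knit-ft-gz', 'knit-delta-gz',
# 'knit-annotated-ft-gz' or 'knit-annotated-delta-gz'; a consumer can
# dispatch on it.  'vf' is assumed to be any VersionedFiles implementation.
def _example_classify_records(vf, keys):
    kinds = {}
    for record in vf.get_record_stream(keys, 'unordered', False):
        # 'absent' marks keys that could not be found at all.
        kinds[record.key] = record.storage_kind
    return kinds
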
class KnitContent(object):
    """Content of a knit version to which deltas can be applied.

    This is always stored in memory as a list of lines with \n at the end,
    plus a flag saying if the final ending is really there or not, because that
    corresponds to the on-disk knit representation.
    """

    def __init__(self):
        self._should_strip_eol = False
    def annotate(self, knit, key):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate(key)

def make_file_factory(annotated, mapper):
    """Create a factory for creating a file based KnitVersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param annotated: knit annotations are wanted.
    :param mapper: The mapper from keys to paths.
    """
    def factory(transport):
        index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
        access = _KnitKeyAccess(transport, mapper)
        return KnitVersionedFiles(index, access, annotated=annotated)
    return factory
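

# Illustrative usage sketch (not part of the original module; it assumes
# bzrlib's MemoryTransport and ConstantMapper are available): build a
# throwaway annotated knit store and add one text to it.
def _example_make_file_knit_store():
    from bzrlib.transport.memory import MemoryTransport
    from bzrlib.versionedfile import ConstantMapper
    factory = make_file_factory(annotated=True, mapper=ConstantMapper('test'))
    vf = factory(MemoryTransport())
    vf.add_lines(('rev-1',), (), ['hello\n', 'world\n'])
    return vf
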
def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based VersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph or delta
        ref_length = 0
        if graph:
            ref_length += 1
        if delta:
            ref_length += 1
            max_delta_chain = 200
        else:
            max_delta_chain = 0
        graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
            deltas=delta, add_callback=graph_index.add_nodes)
        access = _DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = KnitVersionedFiles(index, access,
            max_delta_chain=max_delta_chain)
        result.stream = stream
        result.writer = writer
        return result
    return factory
def cleanup_pack_knit(versioned_files):
    versioned_files.stream.close()
    versioned_files.writer.end()
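

# Illustrative usage sketch (not part of the original module; assumes
# bzrlib's MemoryTransport): a pack based store from make_pack_factory must
# be torn down with cleanup_pack_knit so its container stream is finalised.
def _example_pack_knit_round_trip():
    from bzrlib.transport.memory import MemoryTransport
    factory = make_pack_factory(graph=True, delta=True, keylength=1)
    vf = factory(MemoryTransport())
    try:
        vf.add_lines(('rev-1',), (), ['hello\n'])
    finally:
        cleanup_pack_knit(vf)
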
class KnitVersionedFiles(VersionedFiles):
    """Storage for many versioned files using knit compression.

    Backend storage is managed by indices and data objects.

    :ivar _index: A _KnitGraphIndex or similar that can describe the
        parents, graph, compression and data location of entries in this
        KnitVersionedFiles.  Note that this is only the index for
        *this* vfs; if there are fallbacks they must be queried separately.
    """

    def __init__(self, index, data_access, max_delta_chain=200,
        annotated=False):
        """Create a KnitVersionedFiles with index and data_access.

        :param index: The index for the knit data.
        :param data_access: The access object to store and retrieve knit
            records.
        :param max_delta_chain: The maximum number of deltas to permit during
            insertion. Set to 0 to prohibit the use of deltas.
        :param annotated: Set to True to cause annotations to be calculated
            and stored during insertion.
        """
        self._index = index
        self._access = data_access
        self._max_delta_chain = max_delta_chain
        if annotated:
            self._factory = KnitAnnotateFactory()
        else:
            self._factory = KnitPlainFactory()
        self._fallback_vfs = []

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._fallback_vfs.append(a_versioned_files)

def make_empty_knit(transport, relpath):
    """Construct an empty knit at the specified location."""
    k = make_file_knit(transport, relpath, 'w', KnitPlainFactory)

def make_file_knit(name, transport, file_mode=None, access_mode='w',
    factory=None, delta=True, create=False, create_parent_dir=False,
    delay_create=False, dir_mode=None, get_scope=None):
    """Factory to create a KnitVersionedFile for a .knit/.kndx file pair."""
    if factory is None:
        factory = KnitAnnotateFactory()
    if get_scope is None:
        get_scope = lambda:None
    index = _KnitIndex(transport, name + INDEX_SUFFIX,
        access_mode, create=create, file_mode=file_mode,
        create_parent_dir=create_parent_dir, delay_create=delay_create,
        dir_mode=dir_mode, get_scope=get_scope)
    access = _KnitAccess(transport, name + DATA_SUFFIX, file_mode,
        dir_mode, ((create and not len(index)) and delay_create),
        create_parent_dir)
    return KnitVersionedFile(name, transport, factory=factory,
        create=create, delay_create=delay_create, index=index,
        access_method=access)


def get_suffixes():
    """Return the suffixes used by file based knits."""
    return [DATA_SUFFIX, INDEX_SUFFIX]
make_file_knit.get_suffixes = get_suffixes

class KnitVersionedFile(VersionedFile):
    """Weave-like structure with faster random access.

    A knit stores a number of texts and a summary of the relationships
    between them.  Texts are identified by a string version-id.  Texts
    are normally stored and retrieved as a series of lines, but can
    also be passed as single strings.

    Lines are stored with the trailing newline (if any) included, to
    avoid special cases for files with no final newline.  Lines are
    composed of 8-bit characters, not unicode.  The combination of
    these approaches should mean any 'binary' file can be safely
    stored and retrieved.
    """
    def __init__(self, relpath, transport, file_mode=None,
        factory=None, delta=True, create=False, create_parent_dir=False,
        delay_create=False, dir_mode=None, index=None, access_method=None):
        """Construct a knit at location specified by relpath.

        :param create: If not True, only open an existing knit.
        :param create_parent_dir: If True, create the parent directory if
            creating the file fails. (This is used for stores with
            hash-prefixes that may not exist yet)
        :param delay_create: The calling code is aware that the knit won't
            actually be created until the first data is stored.
        :param index: An index to use for the knit.
        """
        super(KnitVersionedFile, self).__init__()
        self.transport = transport
        self.filename = relpath
        self.factory = factory or KnitAnnotateFactory()
        self.delta = delta

        self._max_delta_chain = 200

        if None in (access_method, index):
            raise ValueError("No default access_method or index any more")
        self._index = index
        _access = access_method
        if create and not len(self) and not delay_create:
            _access.create()
        self._data = _KnitData(_access)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
            self.transport.abspath(self.filename))
    def _check_should_delta(self, first_parents):
        """Iterate back through the parent listing, looking for a fulltext.

        This is used when we want to decide whether to add a delta or a new
        fulltext. It searches for _max_delta_chain parents. When it finds a
        fulltext parent, it sees if the total size of the deltas leading up to
        it is large enough to indicate that we want a new full text anyway.

        Return True if we should create a new delta, False if we should use a
        full text.
        """
        delta_size = 0
        fulltext_size = None
        delta_parents = first_parents
        for count in xrange(self._max_delta_chain):
            parent = delta_parents[0]
            method = self._index.get_method(parent)
            index, pos, size = self._index.get_position(parent)
            if method == 'fulltext':
                fulltext_size = size
                break
            delta_size += size
            delta_parents = self._index.get_parent_map([parent])[parent]
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False

        return fulltext_size > delta_size
    def _check_write_ok(self):
        return self._index._check_write_ok()
    def _add_raw_records(self, records, data):
        """Add all the records 'records' with data pre-joined in 'data'.

        :param records: A list of tuples(version_id, options, parents, size).
        :param data: The data for the records. When it is written, the records
                     are adjusted to have pos pointing into data by the sum of
                     the preceding records sizes.
        """
        # write all the data
        raw_record_sizes = [record[3] for record in records]
        positions = self._data.add_raw_records(raw_record_sizes, data)
        index_entries = []
        for (version_id, options, parents, size), access_memo in zip(
            records, positions):
            index_entries.append((version_id, options, access_memo, parents))
        self._index.add_versions(index_entries)
    def copy_to(self, name, transport):
        """See VersionedFile.copy_to()."""
        # copy the current index to a temp index to avoid racing with local
        # writes
        transport.put_file_non_atomic(name + INDEX_SUFFIX + '.tmp',
                self.transport.get(self._index._filename))
        # copy the data file
        f = self._data._open_file()
        try:
            transport.put_file(name + DATA_SUFFIX, f)
        finally:
            f.close()
        # move the copied index into place
        transport.move(name + INDEX_SUFFIX + '.tmp', name + INDEX_SUFFIX)
    def get_data_stream(self, required_versions):
        """Get a data stream for the specified versions.

        Versions may be returned in any order, not necessarily the order
        specified.  They are returned in a partial order by compression
        parent, so that the deltas can be applied as the data stream is
        inserted; however note that compression parents will not be sent
        unless they were specifically requested, as the client may already
        have them.

        :param required_versions: The exact set of versions to be extracted.
            Unlike some other knit methods, this is not used to generate a
            transitive closure, rather it is used precisely as given.

        :returns: format_signature, list of (version, options, length, parents),
            reader_callable.
        """
        required_version_set = frozenset(required_versions)
        version_index = {}
        # list of revisions that can just be sent without waiting for their
        # compression parent
        ready_to_send = []
        # map from revision to the children based on it
        deferred = {}
        # first, read all relevant index data, enough to sort into the right
        # order to return
        for version_id in required_versions:
            options = self._index.get_options(version_id)
            parents = self._index.get_parents_with_ghosts(version_id)
            index_memo = self._index.get_position(version_id)
            version_index[version_id] = (index_memo, options, parents)
            if ('line-delta' in options
                and parents[0] in required_version_set):
                # must wait until the parent has been sent
                deferred.setdefault(parents[0], []). \
                    append(version_id)
            else:
                # either a fulltext, or a delta whose parent the client did
                # not ask for and presumably already has
                ready_to_send.append(version_id)
        # build a list of results to return, plus instructions for data to
        # read from the file
        copy_queue_records = []
        temp_version_list = []
        while ready_to_send:
            # XXX: pushing and popping lists may be a bit inefficient
            version_id = ready_to_send.pop(0)
            (index_memo, options, parents) = version_index[version_id]
            copy_queue_records.append((version_id, index_memo))
            none, data_pos, data_size = index_memo
            temp_version_list.append((version_id, options, data_size,
                parents))
            if version_id in deferred:
                # now we can send all the children of this revision - we could
                # put them in anywhere, but we hope that sending them soon
                # after the fulltext will give good locality in the receiver
                ready_to_send[:0] = deferred.pop(version_id)
        if not (len(deferred) == 0):
            raise AssertionError("Still have compressed child versions waiting to be sent")
        # XXX: The stream format is such that we cannot stream it - we have to
        # know the length of all the data a-priori.
        raw_datum = []
        result_version_list = []
        for (version_id, raw_data), \
            (version_id2, options, _, parents) in \
            izip(self._data.read_records_iter_raw(copy_queue_records),
                 temp_version_list):
            if not (version_id == version_id2):
                raise AssertionError('logic error, inconsistent results')
            raw_datum.append(raw_data)
            result_version_list.append(
                (version_id, options, len(raw_data), parents))
        # provide a callback to get data incrementally.
        pseudo_file = StringIO(''.join(raw_datum))
        def read(length):
            if length is None:
                return pseudo_file.read()
            else:
                return pseudo_file.read(length)
        return (self.get_format_signature(), result_version_list, read)
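
    # Illustrative sketch (comment only, not part of the original module):
    # a consumer of the triple returned above reads each record's bytes in
    # list order, then the callable is exhausted:
    #   format, version_list, read = knit.get_data_stream(['rev-1'])
    #   for version_id, options, length, parents in version_list:
    #       raw_bytes = read(length)    # exactly 'length' bytes per record
    #   read(None)                      # drains anything that remains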
    def _extract_blocks(self, version_id, source, target):
        if self._index.get_method(version_id) != 'line-delta':
            return None
        parent, sha1, noeol, delta = self.get_delta(version_id)
        return KnitContent.get_line_delta_blocks(delta, source, target)
    def get_delta(self, version_id):
        """Get a delta for constructing version from some other version."""
        self.check_not_reserved_id(version_id)
        parents = self.get_parent_map([version_id])[version_id]
        if len(parents):
            parent = parents[0]
        else:
            parent = None
        index_memo = self._index.get_position(version_id)
        data, sha1 = self._data.read_records(((version_id, index_memo),))[version_id]
        noeol = 'no-eol' in self._index.get_options(version_id)
        if 'fulltext' == self._index.get_method(version_id):
            new_content = self.factory.parse_fulltext(data, version_id)
            if parent is not None:
                reference_content = self._get_content(parent)
                old_texts = reference_content.text()
            else:
                old_texts = []
            new_texts = new_content.text()
            delta_seq = patiencediff.PatienceSequenceMatcher(None, old_texts,
                new_texts)
            return parent, sha1, noeol, self._make_line_delta(delta_seq, new_content)
        else:
            delta = self.factory.parse_line_delta(data, version_id)
            return parent, sha1, noeol, delta
    def get_format_signature(self):
        """See VersionedFile.get_format_signature()."""
        if self.factory.annotated:
            annotated_part = "annotated"
        else:
            annotated_part = "plain"
        return "knit-%s" % (annotated_part,)
    @deprecated_method(one_four)
    def get_graph_with_ghosts(self):
        """See VersionedFile.get_graph_with_ghosts()."""
        return self.get_parent_map(self.versions())
    def get_sha1s(self, version_ids):
        """See VersionedFile.get_sha1s()."""
        record_map = self._get_record_map(version_ids)
        # record entry 2 is the 'digest'.
        return [record_map[v][2] for v in version_ids]
    @deprecated_method(one_four)
    def has_ghost(self, version_id):
        """True if there is a ghost reference in the file to version_id."""
        # maybe we have it
        if self.has_version(version_id):
            return False
        # optimisable if needed by memoising the _ghosts set.
        items = self.get_parent_map(self.versions())
        for parents in items.itervalues():
            for parent in parents:
                if parent == version_id and parent not in items:
                    return True
        return False
    def insert_data_stream(self, (format, data_list, reader_callable)):
        """Insert knit records from a data stream into this knit.

        If a version in the stream is already present in this knit, it will not
        be inserted a second time. It will be checked for consistency with the
        stored version however, and may cause a KnitCorrupt error to be raised
        if the data in the stream disagrees with the already stored data.

        :seealso: get_data_stream
        """
        if format != self.get_format_signature():
            if 'knit' in debug.debug_flags:
                trace.mutter(
                    'incompatible format signature inserting to %r', self)
            source = self._knit_from_datastream(
                (format, data_list, reader_callable))
            stream = source.get_record_stream(source.versions(), 'unordered', False)
            self.insert_record_stream(stream)
            return

        for version_id, options, length, parents in data_list:
            if self.has_version(version_id):
                # First check: the list of parents.
                my_parents = self.get_parents_with_ghosts(version_id)
                if tuple(my_parents) != tuple(parents):
                    # XXX: KnitCorrupt is not quite the right exception here.
                    raise KnitCorrupt(
                        self.filename,
                        'parents list %r from data stream does not match '
                        'already recorded parents %r for %s'
                        % (parents, my_parents, version_id))

                # Also check the SHA-1 of the fulltext this content will
                # produce.
                raw_data = reader_callable(length)
                my_fulltext_sha1 = self.get_sha1s([version_id])[0]
                df, rec = self._data._parse_record_header(version_id, raw_data)
                stream_fulltext_sha1 = rec[3]
                if my_fulltext_sha1 != stream_fulltext_sha1:
                    # Actually, we don't know if it's this knit that's corrupt,
                    # or the data stream we're trying to insert.
                    raise KnitCorrupt(
                        self.filename, 'sha-1 does not match %s' % version_id)
            else:
                if 'line-delta' in options:
                    # Make sure that this knit record is actually useful: a
                    # line-delta is no use unless we have its parent.
                    # Fetching from a broken repository with this problem
                    # shouldn't break the target repository.
                    #
                    # See https://bugs.launchpad.net/bzr/+bug/164443
                    if not self._index.has_version(parents[0]):
                        raise KnitCorrupt(
                            self.filename,
                            'line-delta from stream '
                            'for version %s '
                            'references '
                            'missing parent %s\n'
                            'Try running "bzr check" '
                            'on the source repository, and "bzr reconcile" '
                            'if necessary.' %
                            (version_id, parents[0]))
                self._add_raw_records(
                    [(version_id, options, parents, length)],
                    reader_callable(length))
    def _knit_from_datastream(self, (format, data_list, reader_callable)):
        """Create a knit object from a data stream.

        This method exists to allow conversion of data streams that do not
        match the signature of this knit. Generally it will be slower and use
        more memory to use this method to insert data, but it will work.

        :seealso: get_data_stream for details on datastreams.
        :return: A knit versioned file which can be used to join the datastream
            into self.
        """
        if format == "knit-plain":
            factory = KnitPlainFactory()
        elif format == "knit-annotated":
            factory = KnitAnnotateFactory()
        else:
            raise errors.KnitDataStreamUnknown(format)
        index = _StreamIndex(data_list, self._index)
        access = _StreamAccess(reader_callable, index, self, factory)
        return KnitVersionedFile(self.filename, self.transport,
            factory=factory, index=index, access_method=access)
    def versions(self):
        """See VersionedFile.versions."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "versions scales with size of history")
        return self._index.get_versions()
    def has_version(self, version_id):
        """See VersionedFile.has_version."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "has_version is a LBYL scenario")
        return self._index.has_version(version_id)

    __contains__ = has_version
    def _merge_annotations(self, content, parents, parent_texts={},
                           delta=None, annotated=None,
                           left_matching_blocks=None):
        """Merge annotations for content.

        This is done by comparing the annotations based on changes to the
        text.
        """
        if left_matching_blocks is not None:
            delta_seq = diff._PrematchedMatcher(left_matching_blocks)
        else:
            delta_seq = None
        for parent_id in parents:
            merge_content = self._get_content(parent_id, parent_texts)
            if (parent_id == parents[0] and delta_seq is not None):
                seq = delta_seq
            else:
                seq = patiencediff.PatienceSequenceMatcher(
                    None, merge_content.text(), content.text())
            for i, j, n in seq.get_matching_blocks():
                if n == 0:
                    continue
                # this appears to copy (origin, text) pairs across to the
                # new content for any line that matches the last-checked
                # parent.
                content._lines[j:j+n] = merge_content._lines[i:i+n]
        if delta:
            if delta_seq is None:
                reference_content = self._get_content(parents[0], parent_texts)
                new_texts = content.text()
                old_texts = reference_content.text()
                delta_seq = patiencediff.PatienceSequenceMatcher(
                    None, old_texts, new_texts)
            return self._make_line_delta(delta_seq, content)
    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks
    def _get_components_positions(self, version_ids):
        """Produce a map of position data for the components of versions.

        This data is intended to be used for retrieving the knit records.

        A dict of version_id to (record_details, index_memo, next, parents) is
        returned.
        method is the way referenced data should be applied.
        index_memo is the handle to pass to the data access to actually get the
            data
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version
        """
        component_data = {}
        pending_components = version_ids
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for version_id, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[version_id] = (record_details, index_memo,
                                              compression_parent)
            missing = current_components.difference(build_details)
            if missing:
                raise errors.RevisionNotPresent(missing.pop(), self.filename)
        return component_data

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """See VersionedFiles.add_lines()."""
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        return self._add(key, lines, parents,
            parent_texts, left_matching_blocks, nostore_sha, random_id)
    def _get_content(self, version_id, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        cached_version = parent_texts.get(version_id, None)
        if cached_version is not None:
            if not self.has_version(version_id):
                raise RevisionNotPresent(version_id, self.filename)
            return cached_version
        text_map, contents_map = self._get_content_maps([version_id])
        return contents_map[version_id]
    def _check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        self._index.check_versions_present(version_ids)
    def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts,
        nostore_sha, random_id, check_content, left_matching_blocks):
        """See VersionedFile.add_lines_with_ghosts()."""
        self._check_add(version_id, lines, random_id, check_content)
        return self._add(version_id, lines, parents, self.delta,
            parent_texts, left_matching_blocks, nostore_sha, random_id)
    def _add_lines(self, version_id, parents, lines, parent_texts,
        left_matching_blocks, nostore_sha, random_id, check_content):
        """See VersionedFile.add_lines."""
        self._check_add(version_id, lines, random_id, check_content)
        self._check_versions_present(parents)
        return self._add(version_id, lines[:], parents, self.delta,
            parent_texts, left_matching_blocks, nostore_sha, random_id)
    def _check_add(self, version_id, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id, self.filename)
        self.check_not_reserved_id(version_id)
        # Technically this could be avoided if we are happy to allow duplicate
        # id insertion when other things than bzr core insert texts, but it
        # seems useful for folk using the knit api directly to have some safety
        # blanket that we can disable.
        if not random_id and self.has_version(version_id):
            raise RevisionAlreadyPresent(version_id, self.filename)
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)
    def _add(self, version_id, lines, parents, delta, parent_texts,
        left_matching_blocks, nostore_sha, random_id):
        """Add a set of lines on top of version specified by parents.

        If delta is true, compress the text as a line-delta against
        the first parent.

        Any versions not present will be converted into ghosts.
        """
        # first thing, if the content is something we don't need to store, find
        # that out.
        line_bytes = ''.join(lines)
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent

        present_parents = []
        if parent_texts is None:
            parent_texts = {}
        for parent in parents:
            if self.has_version(parent):
                present_parents.append(parent)

        # can only compress against the left most present parent.
        if (delta and
            (len(present_parents) == 0 or
             present_parents[0] != parents[0])):
            delta = False

        text_length = len(line_bytes)
        options = []
        if lines:
            if lines[-1][-1] != '\n':
                # copy the contents of lines.
                lines = lines[:]
                options.append('no-eol')
                lines[-1] = lines[-1] + '\n'
                line_bytes += '\n'

        if delta:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas. This should minimize both
            # I/O and the time spend applying deltas.
            delta = self._check_should_delta(present_parents)

        content = self.factory.make(lines, version_id)
        if 'no-eol' in options:
            # Hint to the content object that its text() call should strip the
            # EOL.
            content._should_strip_eol = True
        if delta or (self.factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(content, present_parents,
                parent_texts, delta, self.factory.annotated,
                left_matching_blocks)

        if delta:
            options.append('line-delta')
            store_lines = self.factory.lower_line_delta(delta_hunks)
            size, bytes = self._data._record_to_data(version_id, digest,
                store_lines)
        else:
            options.append('fulltext')
            # isinstance is slower and we have no hierarchy.
            if self.factory.__class__ == KnitPlainFactory:
                # Use the already joined bytes saving iteration time in
                # _record_to_data.
                size, bytes = self._data._record_to_data(version_id, digest,
                    lines, [line_bytes])
            else:
                # get mixed annotation + content and feed it into the
                # serialiser.
                store_lines = self.factory.lower_fulltext(content)
                size, bytes = self._data._record_to_data(version_id, digest,
                    store_lines)

        access_memo = self._data.add_raw_records([size], bytes)[0]
        self._index.add_versions(
            ((version_id, options, access_memo, parents),),
            random_id=random_id)
        return digest, text_length, content
    def annotate(self, key):
        """See VersionedFiles.annotate."""
        return self._factory.annotate(self, key)
    def check(self, progress_bar=None):
        """See VersionedFiles.check()."""
        # This doesn't actually test extraction of everything, but that will
        # impact 'bzr check' substantially, and needs to be integrated with
        # care. However, it does check for the obvious problem of a delta with
        # no basis.
        keys = self._index.keys()
        parent_map = self.get_parent_map(keys)
        for key in keys:
            if self._index.get_method(key) != 'fulltext':
                compression_parent = parent_map[key][0]
                if compression_parent not in parent_map:
                    raise errors.KnitCorrupt(self,
                        "Missing basis parent %s for %s" % (
                        compression_parent, key))
        for fallback_vfs in self._fallback_vfs:
            fallback_vfs.check()
    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id, self)
        self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception. This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)
    def _check_header(self, key, line):
        rec = self._split_header(line)
        self._check_header_version(rec, key[-1])
        return rec

    def _check_header_version(self, rec, version_id):
        """Checks the header version on original format knit records.

        These have the last component of the key embedded in the record.
        """
        if rec[1] != version_id:
            raise KnitCorrupt(self,
                'unexpected version, wanted %r, got %r' % (version_id, rec[1]))

    def get_lines(self, version_id):
        """See VersionedFile.get_lines()."""
        return self.get_line_list([version_id])[0]
    def _check_should_delta(self, parent):
        """Iterate back through the parent listing, looking for a fulltext.

        This is used when we want to decide whether to add a delta or a new
        fulltext. It searches for _max_delta_chain parents. When it finds a
        fulltext parent, it sees if the total size of the deltas leading up to
        it is large enough to indicate that we want a new full text anyway.

        Return True if we should create a new delta, False if we should use a
        full text.
        """
        delta_size = 0
        fulltext_size = None
        for count in xrange(self._max_delta_chain):
            # XXX: Collapse these two queries:
            try:
                # Note that this only looks in the index of this particular
                # KnitVersionedFiles, not in the fallbacks. This ensures that
                # we won't store a delta spanning physical repository
                # boundaries.
                method = self._index.get_method(parent)
            except RevisionNotPresent:
                # Some basis is not locally present: always delta
                return False
            index, pos, size = self._index.get_position(parent)
            if method == 'fulltext':
                fulltext_size = size
                break
            delta_size += size
            # We don't explicitly check for presence because this is in an
            # inner loop, and if it's missing it'll fail anyhow.
            # TODO: This should be asking for compression parent, not graph
            # parent.
            parent = self._index.get_parent_map([parent])[parent][0]
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False
        # Simple heuristic - if the total I/O would be greater as a delta than
        # the originally installed fulltext, we create a new fulltext.
        return fulltext_size > delta_size
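
    # Worked example (comment only, not part of the original module): given
    # a chain fulltext(1000 bytes) <- delta(300 bytes) <- delta(400 bytes),
    # a new candidate delta sees delta_size = 700 and fulltext_size = 1000;
    # 1000 > 700 is True, so the delta is stored. Once accumulated deltas
    # outweigh the basis fulltext this returns False and a fresh fulltext is
    # stored instead, capping extraction I/O.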
    def _build_details_to_components(self, build_details):
        """Convert a build_details tuple to a position tuple."""
        # record_details, access_memo, compression_parent
        return build_details[3], build_details[0], build_details[1]
    def _get_components_positions(self, keys, allow_missing=False):
        """Produce a map of position data for the components of keys.

        This data is intended to be used for retrieving the knit records.

        A dict of key to (record_details, index_memo, next, parents) is
        returned.
        method is the way referenced data should be applied.
        index_memo is the handle to pass to the data access to actually get the
            data
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version

        :param allow_missing: If True do not raise an error on a missing component,
            just ignore it.
        """
        component_data = {}
        pending_components = keys
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[key] = self._build_details_to_components(details)
            missing = current_components.difference(build_details)
            if missing and not allow_missing:
                raise errors.RevisionNotPresent(missing.pop(), self)
        return component_data
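
    # Illustrative sketch (comment only, not part of the original module):
    # for a chain C -> B -> A where A is stored as a fulltext, the returned
    # map has the shape
    #   {C: (record_details, index_memo, B),
    #    B: (record_details, index_memo, A),
    #    A: (record_details, index_memo, None)}
    # where the third element is the compression parent ('next').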
    def _get_content(self, key, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        cached_version = parent_texts.get(key, None)
        if cached_version is not None:
            # Ensure the cache dict is valid.
            if not self.get_parent_map([key]):
                raise RevisionNotPresent(key, self)
            return cached_version
        text_map, contents_map = self._get_content_maps([key])
        return contents_map[key]
    def _get_record_map(self, version_ids):
        """Produce a dictionary of knit records.

        :return: {version_id:(record, record_details, digest, next)}
            record
                data returned from read_records
            record_details
                opaque information to pass to parse_record
            digest
                SHA1 digest of the full text after all steps are done
            next
                build-parent of the version, i.e. the leftmost ancestor.
                Will be None if the record is not a delta.
        """
        position_map = self._get_components_positions(version_ids)
        # c = component_id, r = record_details, i_m = index_memo, n = next
        records = [(c, i_m) for c, (r, i_m, n)
                             in position_map.iteritems()]
        record_map = {}
        for component_id, record, digest in \
                self._data.read_records_iter(records):
            (record_details, index_memo, next) = position_map[component_id]
            record_map[component_id] = record, record_details, digest, next
        return record_map

    def get_text(self, version_id):
        """See VersionedFile.get_text"""
        return self.get_texts([version_id])[0]

    def get_texts(self, version_ids):
        return [''.join(l) for l in self.get_line_list(version_ids)]

    def get_line_list(self, version_ids):
        """Return the texts of listed versions as a list of strings."""
        for version_id in version_ids:
            self.check_not_reserved_id(version_id)
        text_map, content_map = self._get_content_maps(version_ids)
        return [text_map[v] for v in version_ids]

    _get_lf_split_line_list = get_line_list
    def _get_content_maps(self, keys, nonlocal_keys=None):
        """Produce maps of text and KnitContents

        :param keys: The keys to produce content maps for.
        :param nonlocal_keys: An iterable of keys(possibly intersecting keys)
            which are known to not be in this knit, but rather in one of the
            fallback knits.
        :return: (text_map, content_map) where text_map contains the texts for
            the requested versions and content_map contains the KnitContents.
        """
        # FUTURE: This function could be improved for the 'extract many' case
        # by tracking each component and only doing the copy when the number of
        # children that need to apply delta's to it is > 1 or it is part of the
        # final output.
        keys = list(keys)
        multiple_versions = len(keys) != 1
        record_map = self._get_record_map(keys, allow_missing=True)

        text_map = {}
        content_map = {}
        final_content = {}
        if nonlocal_keys is None:
            nonlocal_keys = set()
        else:
            nonlocal_keys = frozenset(nonlocal_keys)
        missing_keys = set(nonlocal_keys)
        for source in self._fallback_vfs:
            if not missing_keys:
                break
            for record in source.get_record_stream(missing_keys,
                'unordered', True):
                if record.storage_kind == 'absent':
                    continue
                missing_keys.remove(record.key)
                lines = split_lines(record.get_bytes_as('fulltext'))
                text_map[record.key] = lines
                content_map[record.key] = PlainKnitContent(lines, record.key)
                if record.key in keys:
                    final_content[record.key] = content_map[record.key]
        for key in keys:
            if key in nonlocal_keys:
                # already handled
                continue
            components = []
            cursor = key
            while cursor is not None:
                try:
                    record, record_details, digest, next = record_map[cursor]
                except KeyError:
                    raise RevisionNotPresent(cursor, self)
                components.append((cursor, record, record_details, digest))
                cursor = next
                if cursor in content_map:
                    # no need to plan further back
                    components.append((cursor, None, None, None))
                    break

            content = None
            for (component_id, record, record_details,
                 digest) in reversed(components):
                if component_id in content_map:
                    content = content_map[component_id]
                else:
                    content, delta = self._factory.parse_record(key[-1],
                        record, record_details, content,
                        copy_base_content=multiple_versions)
                    if multiple_versions:
                        content_map[component_id] = content

            content.cleanup_eol(copy_on_mutate=multiple_versions)
            final_content[key] = content

            # digest here is the digest from the last applied component.
            text = content.text()
            actual_sha = sha_strings(text)
            if actual_sha != digest:
                raise KnitCorrupt(self,
                    '\n  sha-1 %s'
                    '\n  of reconstructed text does not match'
                    '\n  expected %s'
                    '\n  for version %s' %
                    (actual_sha, digest, key))
            text_map[key] = text
        return text_map, final_content
    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]
    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results
    def _get_record_map(self, keys, allow_missing=False):
        """Produce a dictionary of knit records.

        :return: {key:(record, record_details, digest, next)}
            record
                data returned from read_records
            record_details
                opaque information to pass to parse_record
            digest
                SHA1 digest of the full text after all steps are done
            next
                build-parent of the version, i.e. the leftmost ancestor.
                Will be None if the record is not a delta.
        :param keys: The keys to build a map for
        :param allow_missing: If some records are missing, rather than
            error, just return the data that could be generated.
        """
        position_map = self._get_components_positions(keys,
            allow_missing=allow_missing)
        # key = component_id, r = record_details, i_m = index_memo, n = next
        records = [(key, i_m) for key, (r, i_m, n)
                             in position_map.iteritems()]
        record_map = {}
        for key, record, digest in \
                self._read_records_iter(records):
            (record_details, index_memo, next) = position_map[key]
            record_map[key] = record, record_details, digest, next
        return record_map
    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # keys might be a generator
        keys = set(keys)
        if not keys:
            return
        if not self._index.has_graph:
            # Cannot topological order when no graph has been stored.
            ordering = 'unordered'
        if include_delta_closure:
            positions = self._get_components_positions(keys, allow_missing=True)
        else:
            build_details = self._index.get_build_details(keys)
            # map from key to
            # (record_details, access_memo, compression_parent_key)
            positions = dict((key, self._build_details_to_components(details))
                for key, details in build_details.iteritems())
        absent_keys = keys.difference(set(positions))
        # There may be more absent keys : if we're missing the basis component
        # and are trying to include the delta closure.
        if include_delta_closure:
            needed_from_fallback = set()
            # Build up reconstructable_keys dict. key:True in this dict means
            # the key can be reconstructed.
            reconstructable_keys = {}
            for key in keys:
                # the delta chain
                try:
                    chain = [key, positions[key][2]]
                except KeyError:
                    needed_from_fallback.add(key)
                    continue
                result = True
                while chain[-1] is not None:
                    if chain[-1] in reconstructable_keys:
                        result = reconstructable_keys[chain[-1]]
                        break
                    else:
                        try:
                            chain.append(positions[chain[-1]][2])
                        except KeyError:
                            # missing basis component
                            needed_from_fallback.add(chain[-1])
                            result = True
                            break
                for chain_key in chain[:-1]:
                    reconstructable_keys[chain_key] = result
                if not result:
                    needed_from_fallback.add(key)
        # Double index lookups here : need a unified api ?
        global_map, parent_maps = self._get_parent_map_with_sources(keys)
        if ordering == 'topological':
            # Global topological sort
            present_keys = tsort.topo_sort(global_map)
            # Now group by source:
            source_keys = []
            current_source = None
            for key in present_keys:
                for parent_map in parent_maps:
                    if key in parent_map:
                        key_source = parent_map
                        break
                if current_source is not key_source:
                    source_keys.append((key_source, []))
                    current_source = key_source
                source_keys[-1][1].append(key)
        else:
            if ordering != 'unordered':
                raise AssertionError('valid values for ordering are:'
                    ' "unordered" or "topological" not: %r'
                    % (ordering,))
            # Just group by source; remote sources first.
            present_keys = []
            source_keys = []
            for parent_map in reversed(parent_maps):
                source_keys.append((parent_map, []))
                for key in parent_map:
                    present_keys.append(key)
                    source_keys[-1][1].append(key)
        absent_keys = keys - set(global_map)
        for key in absent_keys:
            yield AbsentContentFactory(key)
        # restrict our view to the keys we can answer.
        # XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
        # XXX: At that point we need to consider the impact of double reads by
        # utilising components multiple times.
        if include_delta_closure:
            # XXX: get_content_maps performs its own index queries; allow state
            # to be passed in.
            text_map, _ = self._get_content_maps(present_keys,
                needed_from_fallback - absent_keys)
            for key in present_keys:
                yield FulltextContentFactory(key, global_map[key], None,
                    ''.join(text_map[key]))
        else:
            for source, keys in source_keys:
                if source is parent_maps[0]:
                    # this KnitVersionedFiles
                    records = [(key, positions[key][1]) for key in keys]
                    for key, raw_data, sha1 in self._read_records_iter_raw(records):
                        (record_details, index_memo, _) = positions[key]
                        yield KnitContentFactory(key, global_map[key],
                            record_details, sha1, raw_data, self._factory.annotated, None)
                else:
                    vf = self._fallback_vfs[parent_maps.index(source) - 1]
                    for record in vf.get_record_stream(keys, ordering,
                        include_delta_closure):
                        yield record
def get_sha1s(self, keys):
1241
"""See VersionedFiles.get_sha1s()."""
1243
record_map = self._get_record_map(missing, allow_missing=True)
1245
for key, details in record_map.iteritems():
1246
if key not in missing:
1248
# record entry 2 is the 'digest'.
1249
result[key] = details[2]
1250
missing.difference_update(set(result))
1251
for source in self._fallback_vfs:
1254
new_result = source.get_sha1s(missing)
1255
result.update(new_result)
1256
missing.difference_update(set(new_result))
1259
    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        if self._factory.annotated:
            # self is annotated, we need annotated knits to use directly.
            annotated = "annotated-"
            convertibles = []
        else:
            # self is not annotated, but we can strip annotations cheaply.
            annotated = ""
            convertibles = set(["knit-annotated-ft-gz"])
            if self._max_delta_chain:
                convertibles.add("knit-annotated-delta-gz")
        # The set of types we can cheaply adapt without needing basis texts.
        native_types = set()
        if self._max_delta_chain:
            native_types.add("knit-%sdelta-gz" % annotated)
        native_types.add("knit-%sft-gz" % annotated)
        knit_types = native_types.union(convertibles)
        adapters = {}
        # Buffer all index entries that we can't add immediately because their
        # basis parent is missing. We don't buffer all because generating
        # annotations may require access to some of the new records. However we
        # can't generate annotations from new deltas until their basis parent
        # is present anyway, so we get away with not needing an index that
        # includes the new keys.
        # key = basis_parent, value = index entry to add
        buffered_index_entries = {}
        for record in stream:
            parents = record.parents
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise RevisionNotPresent([record.key], self)
            if record.storage_kind in knit_types:
                if record.storage_kind not in native_types:
                    try:
                        adapter_key = (record.storage_kind, "knit-delta-gz")
                        adapter = get_adapter(adapter_key)
                    except KeyError:
                        adapter_key = (record.storage_kind, "knit-ft-gz")
                        adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(
                        record, record.get_bytes_as(record.storage_kind))
                else:
                    bytes = record.get_bytes_as(record.storage_kind)
                options = [record._build_details[0]]
                if record._build_details[1]:
                    options.append('no-eol')
                # Just blat it across.
                # Note: This does end up adding data on duplicate keys. As
                # modern repositories use atomic insertions this should not
                # lead to excessive growth in the event of interrupted fetches.
                # 'knit' repositories may suffer excessive growth, but as a
                # deprecated format this is tolerable. It can be fixed if
                # needed by in the kndx index support raising on a duplicate
                # add with identical parents and options.
                access_memo = self._access.add_raw_records(
                    [(record.key, len(bytes))], bytes)[0]
                index_entry = (record.key, options, access_memo, parents)
                buffered = False
                if 'fulltext' not in options:
                    basis_parent = parents[0]
                    # Note that pack backed knits don't need to buffer here
                    # because they buffer all writes to the transaction level,
                    # but we don't expose that difference at the index level. If
                    # the query here has sufficient cost to show up in
                    # profiling we should do that.
                    if basis_parent not in self.get_parent_map([basis_parent]):
                        pending = buffered_index_entries.setdefault(
                            basis_parent, [])
                        pending.append(index_entry)
                        buffered = True
                if not buffered:
                    self._index.add_records([index_entry])
            elif record.storage_kind == 'fulltext':
                self.add_lines(record.key, parents,
                    split_lines(record.get_bytes_as('fulltext')))
            else:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                lines = split_lines(adapter.get_bytes(
                    record, record.get_bytes_as(record.storage_kind)))
                try:
                    self.add_lines(record.key, parents, lines)
                except errors.RevisionAlreadyPresent:
                    pass
            # Add any records whose basis parent is now available.
            added_keys = [record.key]
            while added_keys:
                key = added_keys.pop(0)
                if key in buffered_index_entries:
                    index_entries = buffered_index_entries[key]
                    self._index.add_records(index_entries)
                    added_keys.extend(
                        [index_entry[0] for index_entry in index_entries])
                    del buffered_index_entries[key]
        # If there were any deltas which had a missing basis parent, error.
        if buffered_index_entries:
            raise errors.RevisionNotPresent(buffered_index_entries.keys()[0],
                self)
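
    # Illustrative sketch (comment only, not part of the original module):
    # the usual fetch idiom pairs this method with get_record_stream on a
    # source store, streaming raw knit records straight across:
    #   target.insert_record_stream(
    #       source.get_record_stream(keys, 'unordered', False))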
    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        if pb is None:
            pb = progress.DummyProgress()
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need key, position, length
        key_records = []
        build_details = self._index.get_build_details(keys)
        for key, details in build_details.iteritems():
            if key in keys:
                key_records.append((key, details[0]))
                keys.remove(key)
        records_iter = enumerate(self._read_records_iter(key_records))
        for (key_idx, (key, data, sha_value)) in records_iter:
            pb.update('Walking content.', key_idx, total)
            compression_parent = build_details[key][1]
            if compression_parent is None:
                # fulltext
                line_iterator = self._factory.get_fulltext_content(data)
            else:
                # delta
                line_iterator = self._factory.get_linedelta_content(data)
            # XXX: It might be more efficient to yield (key,
            # line_iterator) in the future. However for now, this is a simpler
            # change to integrate into the rest of the codebase. RBC 20071110
            for line in line_iterator:
                yield line, key
        for source in self._fallback_vfs:
            if not keys:
                break
            source_keys = set()
            for line, key in source.iter_lines_added_or_present_in_keys(keys):
                source_keys.add(key)
                yield line, key
            keys.difference_update(source_keys)
        if keys:
            raise RevisionNotPresent(keys, self.filename)
        pb.update('Walking content.', total, total)

    def iter_lines_added_or_present_in_versions(self, version_ids=None,
                                                pb=None):
        """See VersionedFile.iter_lines_added_or_present_in_versions()."""
        if version_ids is None:
            version_ids = self.versions()
        if pb is None:
            pb = progress.DummyProgress()
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need version_id, position, length
        version_id_records = []
        requested_versions = set(version_ids)
        # filter for available versions
        for version_id in requested_versions:
            if not self.has_version(version_id):
                raise RevisionNotPresent(version_id, self.filename)
        # get a in-component-order queue:
        for version_id in self.versions():
            if version_id in requested_versions:
                index_memo = self._index.get_position(version_id)
                version_id_records.append((version_id, index_memo))

        total = len(version_id_records)
        for version_idx, (version_id, data, sha_value) in \
            enumerate(self._data.read_records_iter(version_id_records)):
            pb.update('Walking content.', version_idx, total)
            method = self._index.get_method(version_id)
            if method == 'fulltext':
                line_iterator = self.factory.get_fulltext_content(data)
            elif method == 'line-delta':
                line_iterator = self.factory.get_linedelta_content(data)
            else:
                raise ValueError('invalid method %r' % (method,))
            # XXX: It might be more efficient to yield (version_id,
            # line_iterator) in the future. However for now, this is a simpler
            # change to integrate into the rest of the codebase. RBC 20071110
            for line in line_iterator:
                yield line, version_id
        pb.update('Walking content.', total, total)
1434
"""Generate a line delta from delta_seq and new_content."""
1436
for op in delta_seq.get_opcodes():
1437
if op[0] == 'equal':
1439
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
1442
def _merge_annotations(self, content, parents, parent_texts={},
1443
delta=None, annotated=None,
1444
left_matching_blocks=None):
1445
"""Merge annotations for content and generate deltas.
1447
This is done by comparing the annotations based on changes to the text
1448
and generating a delta on the resulting full texts. If annotations are
1449
not being created then a simple delta is created.
1451
if left_matching_blocks is not None:
1452
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1456
for parent_key in parents:
1457
merge_content = self._get_content(parent_key, parent_texts)
1458
if (parent_key == parents[0] and delta_seq is not None):
1461
seq = patiencediff.PatienceSequenceMatcher(
1462
None, merge_content.text(), content.text())
1463
for i, j, n in seq.get_matching_blocks():
1466
# this copies (origin, text) pairs across to the new
1467
# content for any line that matches the last-checked
1469
content._lines[j:j+n] = merge_content._lines[i:i+n]
1470
# XXX: Robert says the following block is a workaround for a
1471
# now-fixed bug and it can probably be deleted. -- mbp 20080618
1472
if content._lines and content._lines[-1][1][-1] != '\n':
1473
# The copied annotation was from a line without a trailing EOL,
1474
# reinstate one for the content object, to ensure correct
1476
line = content._lines[-1][1] + '\n'
1477
content._lines[-1] = (content._lines[-1][0], line)
1479
if delta_seq is None:
1480
reference_content = self._get_content(parents[0], parent_texts)
1481
new_texts = content.text()
1482
old_texts = reference_content.text()
1483
delta_seq = patiencediff.PatienceSequenceMatcher(
1484
None, old_texts, new_texts)
1485
return self._make_line_delta(delta_seq, content)
1487
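    # A minimal sketch of the copy step above (assumed example, not in the
    # original source): if the left parent's annotated lines are
    # [('rev-1', 'a\n'), ('rev-1', 'b\n')] and the new text is
    # ['a\n', 'x\n'], the matching block (i=0, j=0, n=1) executes
    # content._lines[0:1] = merge_content._lines[0:1], so 'a\n' keeps its
    # 'rev-1' origin while 'x\n' stays attributed to the new revision.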
    def _parse_record(self, version_id, data):
        """Parse an original format knit record.

        These have the last element of the key only present in the stored data.
        """
        rec, record_contents = self._parse_record_unchecked(data)
        self._check_header_version(rec, version_id)
        return record_contents, rec[3]
    def _parse_record_header(self, key, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream.
            as (stream, header_record)
        """
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            rec = self._check_header(key, df.readline())
        except Exception, e:
            raise KnitCorrupt(self,
                              "While reading {%s} got %s(%s)"
                              % (key, e.__class__.__name__, str(e)))
        return df, rec
    def _parse_record_unchecked(self, data):
        # profiling notes:
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self,
                              'incorrect number of lines %s != %s'
                              ' for version {%s} %s'
                              % (len(record_contents), int(rec[2]),
                                 rec[1], record_contents))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self,
                              'unexpected version end line %r, wanted %r'
                              % (last_line, rec[1]))
        df.close()
        return rec, record_contents
    def _read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read.
        Not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.

        :param records: A list of (key, access_memo) entries
        :return: Yields (key, contents, digest) in the order
                 read, not the order requested
        """
        if not records:
            return

        # XXX: This smells wrong, IO may not be getting ordered right.
        needed_records = sorted(set(records), key=operator.itemgetter(1))
        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (ie, reads continuous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for key, index_memo in needed_records])

        for (key, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(key[-1], data)
            yield key, content, digest
    def _read_records_iter_raw(self, records):
        """Read text records from data file and yield raw data.

        This unpacks enough of the text record to validate the id is
        as expected but that's all.

        Each item the iterator yields is (key, bytes, sha1_of_full_text).
        """
        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(records):
            # grab the disk data needed.
            needed_offsets = [index_memo for key, index_memo
                                           in records]
            raw_records = self._access.get_raw_records(needed_offsets)

        for key, index_memo in records:
            data = raw_records.next()
            # validate the header (note that we can only use the suffix in
            # current knit records).
            df, rec = self._parse_record_header(key, data)
            df.close()
            yield key, data, rec[3]
    def _record_to_data(self, key, digest, lines, dense_lines=None):
        """Convert key, digest, lines into a raw data block.

        :param key: The key of the record. Currently keys are always serialised
            using just the trailing component.
        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in \n,
            dense_lines may be a list with one line in it, containing all the
            1000's lines and their \n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len, a gzipped bytestring with the raw data ready to read).
        """
        # Note: using a string copy here increases memory pressure with e.g.
        # ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
        # when doing the initial commit of a mozilla tree. RBC 20070921
        bytes = ''.join(chain(
            ["version %s %d %s\n" % (key[-1],
                                     len(lines),
                                     digest)],
            dense_lines or lines,
            ["end %s\n" % key[-1]]))
        if type(bytes) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(bytes))
        if lines and lines[-1][-1] != '\n':
            raise ValueError('corrupt lines value %r' % lines)
        compressed_bytes = tuned_gzip.bytes_to_gzip(bytes)
        return len(compressed_bytes), compressed_bytes
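    # Hedged sketch of the wire format implied by the code above (not part of
    # the original source).  Before gzip compression a record is framed as:
    #
    #   version <key-suffix> <line-count> <sha1>\n
    #   <the content lines>
    #   end <key-suffix>\n
    #
    # so _record_to_data(('rev-1',), digest, ['hello\n']) yields the gzipped
    # form of "version rev-1 1 <digest>\nhello\nend rev-1\n", and
    # _parse_record_unchecked() reverses exactly this framing.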
    def _split_header(self, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self,
                              'unexpected number of elements in record header')
        return rec

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result
class _KndxIndex(object):
    """Manages knit index files

    The index is kept in memory and read on startup, to enable
    fast lookups of revision information.  The cursor of the index
    file is always pointing to the end, making it easy to append
    entries.  Each new record is written starting with a newline
    to ensure that records always start on new lines even if the last write
    was interrupted.  As a result its normal for the last line in the index
    to be missing a trailing newline.  One can be added with no harmful
    effects.

    :ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
        where prefix is e.g. the (fileid,) for .texts instances or () for
        constant-mapped things like .revisions, and the old state is
        tuple(history_vector, cache_dict).  This is used to prevent having an
        ABI change with the C extension that reads .kndx files.
    """

    HEADER = "# bzr knit index 8\n"
    def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
        """Create a _KndxIndex on transport using mapper."""
        self._transport = transport
        self._mapper = mapper
        self._get_scope = get_scope
        self._allow_writes = allow_writes
        self._is_locked = is_locked
        self._reset_cache()
        self.has_graph = True
    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        :param records: a list of tuples:
                         (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        paths = {}
        for record in records:
            key = record[0]
            prefix = key[:-1]
            path = self._mapper.map(key) + '.kndx'
            path_keys = paths.setdefault(path, (prefix, []))
            path_keys[1].append(record)
        for path in sorted(paths):
            prefix, path_keys = paths[path]
            self._load_prefixes([prefix])
            lines = []
            orig_history = self._kndx_cache[prefix][1][:]
            orig_cache = self._kndx_cache[prefix][0].copy()

            try:
                for key, options, (_, pos, size), parents in path_keys:
                    if parents is None:
                        # kndx indices cannot be parentless.
                        parents = ()
                    line = "\n%s %s %s %s %s :" % (
                        key[-1], ','.join(options), pos, size,
                        self._dictionary_compress(parents))
                    if type(line) != str:
                        raise AssertionError(
                            'data must be utf8 was %s' % type(line))
                    lines.append(line)
                    self._cache_key(key, options, pos, size, parents)
                if len(orig_history):
                    self._transport.append_bytes(path, ''.join(lines))
                else:
                    self._init_index(path, lines)
            except:
                # If any problems happen, restore the original values and re-raise
                self._kndx_cache[prefix] = (orig_cache, orig_history)
                raise
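    # Illustrative example, not in the original source: a record with key
    # ('rev-2',), options ['line-delta'], pos 123, size 104 and one parent
    # already cached at history index 0 serialises as the index line
    # "\nrev-2 line-delta 123 104 0 :", while an uncached (ghost) parent
    # would be written as ".ghost-id" instead of a history offset.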
    def _cache_key(self, key, options, pos, size, parent_keys):
        """Cache a version record in the history array and index cache.

        This is inlined into _load_data for performance. KEEP IN SYNC.
        (It saves 60ms, 25% of the __init__ overhead on local 4000 record
         indexes).
        """
        prefix = key[:-1]
        version_id = key[-1]
        # last-element only for compatibilty with the C load_data.
        parents = tuple(parent[-1] for parent in parent_keys)
        for parent in parent_keys:
            if parent[:-1] != prefix:
                raise ValueError("mismatched prefixes for %r, %r" % (
                    key, parent))
        cache, history = self._kndx_cache[prefix]
        # only want the _history index to reference the 1st index entry
        # for version_id
        if version_id not in cache:
            index = len(history)
            history.append(version_id)
        else:
            index = cache[version_id][5]
        cache[version_id] = (version_id,
                             options,
                             pos,
                             size,
                             parents,
                             index)
    def check_header(self, fp):
        line = fp.readline()
        if line == '':
            # An empty file can actually be treated as though the file doesn't
            # exist yet.
            raise errors.NoSuchFile(self)
        if line != self.HEADER:
            raise KnitHeaderError(badline=line, filename=self)

    def _check_read(self):
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            raise errors.OutSideTransaction()

    def _check_write_ok(self):
        """Assert if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            raise errors.OutSideTransaction()
        if self._mode != 'w':
            raise errors.ReadOnlyObjectDirtiedError(self)
    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:(index_memo, compression_parent, parents,
            record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        prefixes = self._partition_keys(keys)
        parent_map = self.get_parent_map(keys)
        result = {}
        for key in keys:
            if key not in parent_map:
                continue # Ghost
            method = self.get_method(key)
            parents = parent_map[key]
            if method == 'fulltext':
                compression_parent = None
            else:
                compression_parent = parents[0]
            noeol = 'no-eol' in self.get_options(key)
            index_memo = self.get_position(key)
            result[key] = (index_memo, compression_parent,
                           parents, (method, noeol))
        return result
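    # Hedged example of the resulting mapping (assumed values, not in the
    # original source): for a delta-compressed record,
    #   result[('rev-2',)] == ((('rev-2',), 123, 104),  # index_memo
    #                          ('rev-1',),              # compression_parent
    #                          (('rev-1',),),           # parents
    #                          ('line-delta', False))   # (method, noeol)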
    def get_method(self, key):
        """Return compression method of specified key."""
        options = self.get_options(key)
        if 'fulltext' in options:
            return 'fulltext'
        elif 'line-delta' in options:
            return 'line-delta'
        else:
            raise errors.KnitIndexUnknownMethod(self, options)

    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        prefix, suffix = self._split_key(key)
        self._load_prefixes([prefix])
        try:
            return self._kndx_cache[prefix][0][suffix][1]
        except KeyError:
            raise RevisionNotPresent(key, self)
    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        # Parse what we need to up front, this potentially trades off I/O
        # locality (.kndx and .knit in the same block group for the same file
        # id) for less checking in inner loops.
        prefixes = set(key[:-1] for key in keys)
        self._load_prefixes(prefixes)
        result = {}
        for key in keys:
            prefix = key[:-1]
            try:
                suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
            except KeyError:
                pass
            else:
                result[key] = tuple(prefix + (suffix,) for
                    suffix in suffix_parents)
        return result
    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (key, data position, size) to hand to the access
            logic to get the record.
        """
        prefix, suffix = self._split_key(key)
        self._load_prefixes([prefix])
        entry = self._kndx_cache[prefix][0][suffix]
        return key, entry[2], entry[3]
    def _init_index(self, path, extra_lines=[]):
        """Initialize an index."""
        sio = StringIO()
        sio.write(self.HEADER)
        sio.writelines(extra_lines)
        sio.seek(0)
        self._transport.put_file_non_atomic(path, sio,
                            create_parent_dir=True)
                           # self._create_parent_dir)
                           # mode=self._file_mode,
                           # dir_mode=self._dir_mode)
"""Get all the keys in the collection.
1918
The keys are not ordered.
1921
# Identify all key prefixes.
1922
# XXX: A bit hacky, needs polish.
1923
if type(self._mapper) == ConstantMapper:
1927
for quoted_relpath in self._transport.iter_files_recursive():
1928
path, ext = os.path.splitext(quoted_relpath)
1930
prefixes = [self._mapper.unmap(path) for path in relpaths]
1931
self._load_prefixes(prefixes)
1932
for prefix in prefixes:
1933
for suffix in self._kndx_cache[prefix][1]:
1934
result.add(prefix + (suffix,))
1937
    def _load_prefixes(self, prefixes):
        """Load the indices for prefixes."""
        self._check_read()
        for prefix in prefixes:
            if prefix not in self._kndx_cache:
                # the load_data interface writes to these variables.
                self._cache = {}
                self._history = []
                self._filename = prefix
                try:
                    path = self._mapper.map(prefix) + '.kndx'
                    fp = self._transport.get(path)
                    try:
                        # _load_data may raise NoSuchFile if the target knit is
                        # completely empty.
                        _load_data(self, fp)
                    finally:
                        fp.close()
                    self._kndx_cache[prefix] = (self._cache, self._history)
                except errors.NoSuchFile:
                    self._kndx_cache[prefix] = ({}, [])
                    if type(self._mapper) == ConstantMapper:
                        # preserve behaviour for revisions.kndx etc.
                        self._init_index(path)
                del self._cache
                del self._filename
                del self._history
    def _partition_keys(self, keys):
        """Turn keys into a dict of prefix:suffix_list."""
        result = {}
        for key in keys:
            prefix_keys = result.setdefault(key[:-1], [])
            prefix_keys.append(key[-1])
        return result
    def _dictionary_compress(self, keys):
        """Dictionary compress keys.

        :param keys: The keys to generate references to.
        :return: A string representation of keys. keys which are present are
            dictionary compressed, and others are emitted as fulltext with a
            '.' prefix.
        """
        if not keys:
            return ''
        result_list = []
        prefix = keys[0][:-1]
        cache = self._kndx_cache[prefix][0]
        for key in keys:
            if key[:-1] != prefix:
                # kndx indices cannot refer across partitioned storage.
                raise ValueError("mismatched prefixes for %r" % keys)
            if key[-1] in cache:
                # -- inlined lookup() --
                result_list.append(str(cache[key[-1]][5]))
                # -- end lookup () --
            else:
                result_list.append('.' + key[-1])
        return ' '.join(result_list)
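    # Illustrative example, not in the original source: with 'rev-1' cached
    # at history index 0, the parent keys (('rev-1',), ('ghost',)) compress
    # to the string "0 .ghost" - present parents become their history
    # offset, absent ones are emitted as fulltext with a '.' prefix.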
    def _reset_cache(self):
        # Possibly this should be a LRU cache. A dictionary from key_prefix to
        # (cache_dict, history_vector) for parsed kndx files.
        self._kndx_cache = {}
        self._scope = self._get_scope()
        allow_writes = self._allow_writes()
        if allow_writes:
            self._mode = 'w'
        else:
            self._mode = 'r'
    def _split_key(self, key):
        """Split key into a prefix and suffix."""
        return key[:-1], key[-1]
class _KnitGraphIndex(object):
    """A KnitVersionedFiles index layered on GraphIndex."""

    def __init__(self, graph_index, is_locked, deltas=False, parents=True,
        add_callback=None):
        """Construct a KnitGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback to check whether the object should answer
            queries.
        :param deltas: Allow delta-compressed records.
        :param parents: If True, record knits parents, if not do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._deltas = deltas
        self._parents = parents
        if deltas and not parents:
            # XXX: TODO: Delta tree and parent graph should be conceptually
            # separate.
            raise KnitCorrupt(self, "Cannot do delta compression without "
                "parent tracking.")
        self.has_graph = parents
        self._is_locked = is_locked

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._graph_index)
    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
                         (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # Turn each record into a GraphIndex node: the value encodes the
        # no-eol flag plus the position and size of the raw record, and the
        # node references carry the parents (and the compression parent when
        # deltas are in use).
        keys = {}
        for (key, options, access_memo, parents) in records:
            if self._parents:
                parents = tuple(parents)
            index, pos, size = access_memo
            if 'no-eol' in options:
                value = 'N'
            else:
                value = ' '
            value += "%d %d" % (pos, size)
            if self._parents:
                if self._deltas:
                    if 'line-delta' in options:
                        node_refs = (parents, (parents[0],))
                    else:
                        node_refs = (parents, ())
                else:
                    node_refs = (parents, )
            else:
                if parents:
                    raise KnitCorrupt(self, "attempt to add node with parents "
                        "in parentless index.")
                node_refs = ()
            keys[key] = (value, node_refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if (value[0] != keys[key][0][0] or
                    node_refs != keys[key][1]):
                    raise KnitCorrupt(self, "inconsistent details in add_records"
                        ": %s %s" % ((value, node_refs), keys[key]))
                del keys[key]
        result = []
        if self._parents:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value, node_refs))
        else:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value))
        self._add_callback(result)
    def _check_read(self):
        """Raise if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Assert if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        if len(compression_parents) != 1:
            raise AssertionError(
                "Too many compression parents: %r" % compression_parents)
        return compression_parents[0]
    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys, False)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = ()
            else:
                parents = entry[3][0]
            if not self._deltas:
                compression_parent_key = None
            else:
                compression_parent_key = self._compression_parent(entry)
            noeol = (entry[2][0] == 'N')
            if compression_parent_key:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[key] = (self._node_to_position(entry),
                           compression_parent_key, parents,
                           (method, noeol))
        return result
    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise RevisionNotPresent(missing_keys.pop(), self)
    def get_method(self, key):
        """Return compression method of specified key."""
        return self._get_method(self._get_node(key))

    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'
    def _get_node(self, key):
        try:
            return list(self._get_entries([key]))[0]
        except IndexError:
            raise RevisionNotPresent(key, self)
    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        node = self._get_node(key)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options
    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result
    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(key)
        return self._node_to_position(node)

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]
    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
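    # Hedged example, not in the original source: a node value such as
    # 'N0 4083' decodes as flag byte 'N' (no trailing EOL) followed by
    # "<offset> <size>", so _node_to_position returns
    # (node[0], 0, 4083) - the owning GraphIndex plus a byte range.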
class _KnitKeyAccess(object):
    """Access to records in .knit files."""

    def __init__(self, transport, mapper):
        """Create a _KnitKeyAccess with transport and mapper.

        :param transport: The transport the access object is rooted at.
        :param mapper: The mapper used to map keys to .knit files.
        """
        self._transport = transport
        self._mapper = mapper
    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the storage in one bytes-record per raw data
        item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _KnitKeyAccess the memo is (key, pos,
            length), where the key is the record key.
        """
        if type(raw_data) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        # TODO: This can be tuned for writing to sftp and other servers where
        # append() is relatively expensive by grouping the writes to each key
        # prefix.
        for key, size in key_sizes:
            path = self._mapper.map(key)
            try:
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            except errors.NoSuchFile:
                self._transport.mkdir(osutils.dirname(path))
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            offset += size
            result.append((key, base, size))
        return result
    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for a records.

        :param memos_for_retrieval: An iterable containing the access memo for
            retrieving the bytes.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-prefix requests to minimise readv's issued.
        request_lists = []
        current_prefix = None
        for (key, offset, length) in memos_for_retrieval:
            if current_prefix == key[:-1]:
                current_list.append((offset, length))
            else:
                if current_prefix is not None:
                    request_lists.append((current_prefix, current_list))
                current_prefix = key[:-1]
                current_list = [(offset, length)]
        # handle the last entry
        if current_prefix is not None:
            request_lists.append((current_prefix, current_list))
        for prefix, read_vector in request_lists:
            path = self._mapper.map(prefix) + '.knit'
            for pos, data in self._transport.readv(path, read_vector):
                yield data
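    # Illustrative sketch, not in the original source: memos
    # [(('f1', 'a'), 0, 10), (('f1', 'b'), 10, 20), (('f2', 'c'), 0, 5)]
    # group into request_lists = [(('f1',), [(0, 10), (10, 20)]),
    # (('f2',), [(0, 5)])], so each .knit file is read with a single readv().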
class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result
"""Pack based knits do not get individually created."""
2382
2083
def get_raw_records(self, memos_for_retrieval):
2383
2084
"""Get the raw bytes for a records.
2403
2104
if current_index is not None:
2404
2105
request_lists.append((current_index, current_list))
2405
2106
for index, offsets in request_lists:
2406
transport, path = self._indices[index]
2107
transport, path = self.indices[index]
2407
2108
reader = pack.make_readv_reader(transport, path, offsets)
2408
2109
for names, read_func in reader.iter_records():
2409
2110
yield read_func(None)
2411
    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index
class _StreamAccess(object):
    """A Knit Access object that provides data from a datastream.

    It also provides a fallback to present as unannotated data, annotated data
    from a *backing* access object.

    This is triggered by a index_memo which is pointing to a different index
    than this was constructed with, and is used to allow extracting full
    unannotated texts for insertion into annotated knits.
    """

    def __init__(self, reader_callable, stream_index, backing_knit,
        orig_factory):
        """Create a _StreamAccess object.

        :param reader_callable: The reader_callable from the datastream.
            This is called to buffer all the data immediately, for
            random access.
        :param stream_index: The index the data stream this provides access to
            which will be present in native index_memo's.
        :param backing_knit: The knit object that will provide access to
            annotated texts which are not available in the stream, so as to
            create unannotated texts.
        :param orig_factory: The original content factory used to generate the
            stream. This is used for checking whether the thunk code for
            supporting _copy_texts will generate the correct form of data.
        """
        self.data = reader_callable(None)
        self.stream_index = stream_index
        self.backing_knit = backing_knit
        self.orig_factory = orig_factory
    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for a records.

        :param memos_for_retrieval: An iterable of memos from the
            _StreamIndex object identifying bytes to read; for these classes
            they are (from_backing_knit, index, start, end) and can point to
            either the backing knit or streamed data.
        :return: An iterator yielding a byte string for each record in
            memos_for_retrieval.
        """
        # use a generator for memory friendliness
        for from_backing_knit, version_id, start, end in memos_for_retrieval:
            if not from_backing_knit:
                if version_id is not self.stream_index:
                    raise AssertionError()
                yield self.data[start:end]
                continue
            # we have been asked to thunk. This thunking only occurs when
            # we are obtaining plain texts from an annotated backing knit
            # so that _copy_texts will work.
            # We could improve performance here by scanning for where we need
            # to do this and using get_line_list, then interleaving the output
            # as desired. However, for now, this is sufficient.
            if self.orig_factory.__class__ != KnitPlainFactory:
                raise errors.KnitCorrupt(
                    self, 'Bad thunk request %r cannot be backed by %r' %
                        (version_id, self.orig_factory))
            lines = self.backing_knit.get_lines(version_id)
            line_bytes = ''.join(lines)
            digest = sha_string(line_bytes)
            # the packed form of the fulltext always has a trailing newline,
            # even if the actual text does not, unless the file is empty.  the
            # record options including the noeol flag are passed through by
            # _StreamIndex, so this is safe.
            if lines:
                if lines[-1][-1] != '\n':
                    lines[-1] = lines[-1] + '\n'
                    line_bytes += '\n'
            # We want plain data, because we expect to thunk only to allow text
            # extraction.
            size, bytes = self.backing_knit._data._record_to_data(version_id,
                digest, lines, line_bytes)
            yield bytes
class _StreamIndex(object):
    """A Knit Index object that uses the data map from a datastream."""

    def __init__(self, data_list, backing_index):
        """Create a _StreamIndex object.

        :param data_list: The data_list from the datastream.
        :param backing_index: The index which will supply values for nodes
            referenced outside of this stream.
        """
        self.data_list = data_list
        self.backing_index = backing_index
        self._by_version = {}
        pos = 0
        for key, options, length, parents in data_list:
            self._by_version[key] = options, (pos, pos + length), parents
            pos += length
    def get_ancestry(self, versions, topo_sorted):
        """Get an ancestry list for versions."""
        if topo_sorted:
            # Not needed for basic joins
            raise NotImplementedError(self.get_ancestry)
        # get a graph of all the mentioned versions:
        # Little ugly - basically copied from KnitIndex, but don't want to
        # accidentally incorporate too much of that index's code.
        ancestry = set()
        pending = set(versions)
        cache = self._by_version
        while pending:
            version = pending.pop()
            # trim ghosts
            try:
                parents = [p for p in cache[version][2] if p in cache]
            except KeyError:
                raise RevisionNotPresent(version, self)
            # if not completed and not a ghost
            pending.update([p for p in parents if p not in ancestry])
            ancestry.add(version)
        return list(ancestry)
    def get_build_details(self, version_ids):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param version_ids: An iterable of version_ids.
        :return: A dict of version_id:(index_memo, compression_parent,
            parents, record_details).
            index_memo
                opaque memo that can be passed to _StreamAccess.read_records
                to extract the raw data; for these classes it is
                (from_backing_knit, index, start, end)
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        result = {}
        for version_id in version_ids:
            try:
                method = self.get_method(version_id)
            except errors.RevisionNotPresent:
                # ghosts are omitted
                continue
            parent_ids = self.get_parents_with_ghosts(version_id)
            noeol = ('no-eol' in self.get_options(version_id))
            index_memo = self.get_position(version_id)
            from_backing_knit = index_memo[0]
            if from_backing_knit:
                # texts retrieved from the backing knit are always full texts
                method = 'fulltext'
            if method == 'fulltext':
                compression_parent = None
            else:
                compression_parent = parent_ids[0]
            result[version_id] = (index_memo, compression_parent,
                                  parent_ids, (method, noeol))
        return result
    def get_method(self, version_id):
        """Return compression method of specified version."""
        options = self.get_options(version_id)
        if 'fulltext' in options:
            return 'fulltext'
        elif 'line-delta' in options:
            return 'line-delta'
        else:
            raise errors.KnitIndexUnknownMethod(self, options)
    def get_options(self, version_id):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        try:
            return self._by_version[version_id][0]
        except KeyError:
            options = list(self.backing_index.get_options(version_id))
            if 'fulltext' in options:
                pass
            elif 'line-delta' in options:
                # Texts from the backing knit are always returned from the
                # stream as full texts.
                options.remove('line-delta')
                options.append('fulltext')
            else:
                raise errors.KnitIndexUnknownMethod(self, options)
            return tuple(options)
    def get_parent_map(self, version_ids):
        """Passed through to by KnitVersionedFile.get_parent_map."""
        result = {}
        pending_ids = set()
        for version_id in version_ids:
            try:
                result[version_id] = self._by_version[version_id][2]
            except KeyError:
                pending_ids.add(version_id)
        result.update(self.backing_index.get_parent_map(pending_ids))
        return result

    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        try:
            return self.get_parent_map([version_id])[version_id]
        except KeyError:
            raise RevisionNotPresent(version_id, self)
    def get_position(self, version_id):
        """Return details needed to access the version.

        _StreamAccess has the data as a big array, so we return slice
        coordinates into that (as index_memo's are opaque outside the
        index and matching access class).

        :return: a tuple (from_backing_knit, index, start, end) that can
            be passed e.g. to get_raw_records.
            If from_backing_knit is False, index will be self, otherwise it
            will be a version id.
        """
        try:
            start, end = self._by_version[version_id][1]
            return False, self, start, end
        except KeyError:
            # Signal to the access object to handle this from the backing knit.
            return (True, version_id, None, None)

    def get_versions(self):
        """Get all the versions in the stream."""
        return self._by_version.keys()
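    # Hedged example, not in the original source: a version present in the
    # stream yields (False, self, start, end) - a slice into the buffered
    # byte array - while an absent one yields (True, version_id, None, None),
    # signalling _StreamAccess to thunk to the backing knit.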
class _KnitData(object):
    """Manage extraction of data from a KnitAccess, caching and decompressing.

    The KnitData class provides the logic for parsing and using knit records,
    making use of an access method for the low level read and write operations.
    """

    def __init__(self, access):
        """Create a KnitData object.

        :param access: The access method to use. Access methods such as
            _KnitAccess manage the insertion of raw records and the subsequent
            retrieval of the same.
        """
        self._access = access
        self._checked = False

    def _open_file(self):
        return self._access.open_file()
    def _record_to_data(self, version_id, digest, lines, dense_lines=None):
        """Convert version_id, digest, lines into a raw data block.

        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in \n,
            dense_lines may be a list with one line in it, containing all the
            1000's lines and their \n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len, a gzipped bytestring with the raw data ready to read).
        """
        # Note: using a string copy here increases memory pressure with e.g.
        # ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
        # when doing the initial commit of a mozilla tree. RBC 20070921
        bytes = ''.join(chain(
            ["version %s %d %s\n" % (version_id,
                                     len(lines),
                                     digest)],
            dense_lines or lines,
            ["end %s\n" % version_id]))
        compressed_bytes = bytes_to_gzip(bytes)
        return len(compressed_bytes), compressed_bytes
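    # Illustrative note, not in the original source: callers that already
    # hold the joined text can pass it as dense_lines to skip a second join,
    # e.g. _record_to_data('rev-1', digest, ['a\n', 'b\n'],
    # dense_lines=['a\nb\n']) - both forms serialise to identical bytes.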
    def add_raw_records(self, sizes, raw_data):
        """Append a prepared record to the data file.

        :param sizes: An iterable containing the size of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: a list of index data for the way the data was stored.
            See the access method add_raw_records documentation for more
            details.
        """
        return self._access.add_raw_records(sizes, raw_data)
    def _parse_record_header(self, version_id, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream.
            as (stream, header_record)
        """
        df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            rec = self._check_header(version_id, df.readline())
        except Exception, e:
            raise KnitCorrupt(self._access,
                              "While reading {%s} got %s(%s)"
                              % (version_id, e.__class__.__name__, str(e)))
        return df, rec

    def _check_header(self, version_id, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self._access,
                              'unexpected number of elements in record header')
        if rec[1] != version_id:
            raise KnitCorrupt(self._access,
                              'unexpected version, wanted %r, got %r'
                              % (version_id, rec[1]))
        return rec
    def _parse_record(self, version_id, data):
        # profiling notes:
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self._access,
                              "While reading {%s} got %s(%s)"
                              % (version_id, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._check_header(version_id, header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self._access,
                              'incorrect number of lines %s != %s'
                              ' for version {%s}'
                              % (len(record_contents), int(rec[2]),
                                 version_id))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self._access,
                              'unexpected version end line %r, wanted %r'
                              % (last_line, version_id))
        df.close()
        return record_contents, rec[3]
def read_records_iter_raw(self, records):
2468
"""Read text records from data file and yield raw data.
2470
This unpacks enough of the text record to validate the id is
2471
as expected but thats all.
2473
# setup an iterator of the external records:
2474
# uses readv so nice and fast we hope.
2476
# grab the disk data needed.
2477
needed_offsets = [index_memo for version_id, index_memo
2479
raw_records = self._access.get_raw_records(needed_offsets)
2481
for version_id, index_memo in records:
2482
data = raw_records.next()
2483
# validate the header
2484
df, rec = self._parse_record_header(version_id, data)
2486
yield version_id, data
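
    # Usage sketch (hedged): because the yielded data is the raw gzipped
    # record, callers such as InterKnit.join below can append it to another
    # knit without recompressing, e.g.:
    #
    #   >>> for version_id, raw_bytes in data.read_records_iter_raw(records):
    #   ...     raw_datum.append(raw_bytes)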

    def read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read.
        Not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.

        :param records: A list of (version_id, pos, len) entries
        :return: Yields (version_id, contents, digest) in the order
                 read, not the order requested
        """
        if not records:
            return

        needed_records = sorted(set(records), key=operator.itemgetter(1))
        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (i.e. reads contiguous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for version_id, index_memo in needed_records])

        for (version_id, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(version_id, data)
            yield version_id, content, digest
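
    # The sorted(set(records)) above both de-duplicates requests and orders
    # the reads by index memo, letting the transport coalesce adjacent
    # ranges. Illustration with made-up (version_id, index_memo) entries:
    #
    #   >>> records = [('b', (30, 10)), ('a', (0, 10)), ('b', (30, 10))]
    #   >>> sorted(set(records), key=operator.itemgetter(1))
    #   [('a', (0, 10)), ('b', (30, 10))]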

    def read_records(self, records):
        """Read records into a dictionary."""
        components = {}
        for record_id, content, digest in \
                self.read_records_iter(records):
            components[record_id] = (content, digest)
        return components
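
    # e.g. (made-up ids and index memos):
    #
    #   >>> components = data.read_records([('rev-1', memo1), ('rev-2', memo2)])
    #   >>> content, digest = components['rev-1']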


class InterKnit(InterVersionedFile):
    """Optimised code paths for knit to knit operations."""

    _matching_file_from_factory = staticmethod(make_file_knit)
    _matching_file_to_factory = staticmethod(make_file_knit)

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with knits."""
        try:
            return (isinstance(source, KnitVersionedFile) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False

    def _copy_texts(self, pb, msg, version_ids, ignore_missing=False):
        """Copy texts to the target by extracting and adding them one by one.

        See join() for the parameter definitions.
        """
        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        # --- the below is factorable out with VersionedFile.join, but wait for
        #     VersionedFiles, it may all be simpler then.
        graph = Graph(self.source)
        search = graph._make_breadth_first_searcher(version_ids)
        transitive_ids = set()
        map(transitive_ids.update, list(search))
        parent_map = self.source.get_parent_map(transitive_ids)
        order = topo_sort(parent_map.items())

        def size_of_content(content):
            return sum(len(line) for line in content.text())
        # Cache at most 10MB of parent texts
        parent_cache = lru_cache.LRUSizeCache(max_size=10*1024*1024,
                                              compute_size=size_of_content)
        # TODO: jam 20071116 It would be nice to have a streaming interface to
        #       get multiple texts from a source. The source could be smarter
        #       about how it handled intermediate stages.
        #       get_line_list() or make_mpdiffs() seem like a possibility, but
        #       at the moment they extract all full texts into memory, which
        #       causes us to store more than our 3x fulltext goal.
        #       Repository.iter_files_bytes() may be another possibility.
        to_process = [version for version in order
                      if version not in self.target]
        total = len(to_process)
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for index, version in enumerate(to_process):
                pb.update('Converting versioned data', index, total)
                sha1, num_bytes, parent_text = self.target.add_lines(version,
                    self.source.get_parents_with_ghosts(version),
                    self.source.get_lines(version),
                    parent_texts=parent_cache)
                parent_cache[version] = parent_text
        finally:
            pb.finished()
        return total
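
    # A hedged sketch of the size-capped cache pattern used above: entries
    # are weighed by compute_size and least-recently-used entries are evicted
    # once the total weight exceeds max_size (values here are made up):
    #
    #   >>> cache = lru_cache.LRUSizeCache(max_size=10, compute_size=len)
    #   >>> cache['a'] = 'xxxx'        # total weight 4
    #   >>> cache['b'] = 'xxxxxxxx'    # total would be 12; 'a' may be evicted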

    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        # If the source and target are mismatched w.r.t. annotations vs
        # plain, the data needs to be converted accordingly.
        if self.source.factory.annotated == self.target.factory.annotated:
            converter = None
        elif self.source.factory.annotated:
            converter = self._anno_to_plain_converter
        else:
            # We're converting from a plain to an annotated knit. Copy them
            # across by full texts.
            return self._copy_texts(pb, msg, version_ids, ignore_missing)

        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)
            if None in version_ids:
                version_ids.remove(None)

            self.source_ancestry = set(self.source.get_ancestry(version_ids,
                topo_sorted=False))
            this_versions = set(self.target._index.get_versions())
            # XXX: For efficiency we should not look at the whole index,
            #      we only need to consider the referenced revisions - they
            #      must all be present, or the method must be full-text.
            #      TODO, RBC 20070919
            needed_versions = self.source_ancestry - this_versions

            if not needed_versions:
                return 0
            full_list = topo_sort(
                self.source.get_parent_map(self.source.versions()))

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # plan the join:
            copy_queue_records = []
            copy_queue = []
            copy_set = set()
            for version_id in version_list:
                options = self.source._index.get_options(version_id)
                parents = self.source._index.get_parents_with_ghosts(version_id)
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must:
                    # * already have it or
                    # * have it scheduled already
                    # otherwise we don't care
                    if not (self.target.has_version(parent) or
                            parent in copy_set or
                            not self.source.has_version(parent)):
                        raise AssertionError("problem joining parent %r "
                            "from source %r into target %r"
                            % (parent, self.source, self.target))
                index_memo = self.source._index.get_position(version_id)
                copy_queue_records.append((version_id, index_memo))
                copy_queue.append((version_id, options, parents))
                copy_set.add(version_id)

            # data suck the join:
            count = 0
            total = len(version_list)
            raw_datum = []
            raw_records = []
            for (version_id, raw_data), \
                (version_id2, options, parents) in \
                izip(self.source._data.read_records_iter_raw(copy_queue_records),
                     copy_queue):
                if not (version_id == version_id2):
                    raise AssertionError('logic error, inconsistent results')
                count = count + 1
                pb.update("Joining knit", count, total)
                if converter:
                    size, raw_data = converter(raw_data, version_id, options,
                                               parents)
                else:
                    size = len(raw_data)
                raw_records.append((version_id, options, parents, size))
                raw_datum.append(raw_data)
            self.target._add_raw_records(raw_records, ''.join(raw_datum))
            return count
        finally:
            pb.finished()

    def _anno_to_plain_converter(self, raw_data, version_id, options,
                                 parents):
        """Convert annotated content to plain content."""
        data, digest = self.source._data._parse_record(version_id, raw_data)
        if 'fulltext' in options:
            content = self.source.factory.parse_fulltext(data, version_id)
            lines = self.target.factory.lower_fulltext(content)
        else:
            delta = self.source.factory.parse_line_delta(data, version_id,
                                                         plain=True)
            lines = self.target.factory.lower_line_delta(delta)
        return self.target._data._record_to_data(version_id, digest, lines)
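
    # For illustration: annotated knit content carries the origin revision at
    # the start of every line, while plain content stores only the text, so
    # (made-up sketch) the annotated line 'rev-1 hello\n' lowers to the plain
    # line 'hello\n'. The conversion above therefore re-parses the record,
    # re-serialises it without the origin column, and recompresses it via
    # _record_to_data.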


InterVersionedFile.register_optimiser(InterKnit)


class WeaveToKnit(InterVersionedFile):
    """Optimised code paths for weave to knit operations."""

    _matching_file_from_factory = bzrlib.weave.WeaveFile
    _matching_file_to_factory = staticmethod(make_file_knit)

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with weaves to knits."""
        try:
            return (isinstance(source, bzrlib.weave.Weave) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False

    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)

            self.source_ancestry = set(self.source.get_ancestry(version_ids))
            this_versions = set(self.target._index.get_versions())
            needed_versions = self.source_ancestry - this_versions

            if not needed_versions:
                return 0
            full_list = topo_sort(
                self.source.get_parent_map(self.source.versions()))

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # do the join:
            count = 0
            total = len(version_list)
            parent_map = self.source.get_parent_map(version_list)
            for version_id in version_list:
                pb.update("Converting to knit", count, total)
                parents = parent_map[version_id]
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must already have it
                    if not self.target.has_version(parent):
                        raise AssertionError("%r does not have parent %r"
                            % (self.target, parent))
                self.target.add_lines(
                    version_id, parents, self.source.get_lines(version_id))
                count = count + 1
            return count
        finally:
            pb.finished()


InterVersionedFile.register_optimiser(WeaveToKnit)
# Deprecated, use PatienceSequenceMatcher instead