1
# Copyright (C) 2005, 2006, 2007 Canonical Ltd
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
# GNU General Public License for more details.
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17
"""Knit versionedfile implementation.
19
A knit is a versioned file implementation that supports efficient append only
23
lifeless: the data file is made up of "delta records". each delta record has a delta header
24
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
25
the -expanded data- (ie, the delta applied to the parent). the delta also ends with a
26
end-marker; simply "end VERSION"
28
delta can be line or full contents.a
29
... the 8's there are the index number of the annotation.
30
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
34
8 e.set('executable', 'yes')
36
8 if elt.get('executable') == 'yes':
37
8 ie.executable = True
38
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad
42
09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
43
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
44
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
45
09:33 < lifeless> right
46
09:33 < jrydberg> lifeless: the position and size is the range in the data file
49
so the index sequence is the dictionary compressed sequence number used
50
in the deltas to provide line annotation
55
# 10:16 < lifeless> make partial index writes safe
56
# 10:16 < lifeless> implement 'knit.check()' like weave.check()
57
# 10:17 < lifeless> record known ghosts so we can detect when they are filled in rather than the current 'reweave
59
# move sha1 out of the content so that join is faster at verifying parents
60
# record content length ?
63
from cStringIO import StringIO
64
from itertools import izip, chain
68
from bzrlib.lazy_import import lazy_import
69
lazy_import(globals(), """
89
from bzrlib.errors import (
97
RevisionAlreadyPresent,
99
from bzrlib.osutils import (
106
from bzrlib.versionedfile import (
107
AbsentContentFactory,
111
FulltextContentFactory,
117
# TODO: Split out code specific to this format into an associated object.
119
# TODO: Can we put in some kind of value to check that the index and data
120
# files belong together?
122
# TODO: accommodate binaries, perhaps by storing a byte count
124
# TODO: function to check whole file
126
# TODO: atomically append data, then measure backwards from the cursor
127
# position after writing to work out where it was located. we may need to
128
# bypass python file buffering.
130
DATA_SUFFIX = '.knit'
131
INDEX_SUFFIX = '.kndx'
134
class KnitAdapter(object):
135
"""Base class for knit record adaption."""
137
def __init__(self, basis_vf):
138
"""Create an adapter which accesses full texts from basis_vf.
140
:param basis_vf: A versioned file to access basis texts of deltas from.
141
May be None for adapters that do not need to access basis texts.
143
self._data = KnitVersionedFiles(None, None)
144
self._annotate_factory = KnitAnnotateFactory()
145
self._plain_factory = KnitPlainFactory()
146
self._basis_vf = basis_vf
149
class FTAnnotatedToUnannotated(KnitAdapter):
150
"""An adapter from FT annotated knits to unannotated ones."""
152
def get_bytes(self, factory, annotated_compressed_bytes):
154
self._data._parse_record_unchecked(annotated_compressed_bytes)
155
content = self._annotate_factory.parse_fulltext(contents, rec[1])
156
size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
160
class DeltaAnnotatedToUnannotated(KnitAdapter):
161
"""An adapter for deltas from annotated to unannotated."""
163
def get_bytes(self, factory, annotated_compressed_bytes):
165
self._data._parse_record_unchecked(annotated_compressed_bytes)
166
delta = self._annotate_factory.parse_line_delta(contents, rec[1],
168
contents = self._plain_factory.lower_line_delta(delta)
169
size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
173
class FTAnnotatedToFullText(KnitAdapter):
174
"""An adapter from FT annotated knits to unannotated ones."""
176
def get_bytes(self, factory, annotated_compressed_bytes):
178
self._data._parse_record_unchecked(annotated_compressed_bytes)
179
content, delta = self._annotate_factory.parse_record(factory.key[-1],
180
contents, factory._build_details, None)
181
return ''.join(content.text())
184
class DeltaAnnotatedToFullText(KnitAdapter):
185
"""An adapter for deltas from annotated to unannotated."""
187
def get_bytes(self, factory, annotated_compressed_bytes):
189
self._data._parse_record_unchecked(annotated_compressed_bytes)
190
delta = self._annotate_factory.parse_line_delta(contents, rec[1],
192
compression_parent = factory.parents[0]
193
basis_entry = self._basis_vf.get_record_stream(
194
[compression_parent], 'unordered', True).next()
195
if basis_entry.storage_kind == 'absent':
196
raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
197
basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
198
# Manually apply the delta because we have one annotated content and
200
basis_content = PlainKnitContent(basis_lines, compression_parent)
201
basis_content.apply_delta(delta, rec[1])
202
basis_content._should_strip_eol = factory._build_details[1]
203
return ''.join(basis_content.text())
206
class FTPlainToFullText(KnitAdapter):
207
"""An adapter from FT plain knits to unannotated ones."""
209
def get_bytes(self, factory, compressed_bytes):
211
self._data._parse_record_unchecked(compressed_bytes)
212
content, delta = self._plain_factory.parse_record(factory.key[-1],
213
contents, factory._build_details, None)
214
return ''.join(content.text())
217
class DeltaPlainToFullText(KnitAdapter):
218
"""An adapter for deltas from annotated to unannotated."""
220
def get_bytes(self, factory, compressed_bytes):
222
self._data._parse_record_unchecked(compressed_bytes)
223
delta = self._plain_factory.parse_line_delta(contents, rec[1])
224
compression_parent = factory.parents[0]
225
# XXX: string splitting overhead.
226
basis_entry = self._basis_vf.get_record_stream(
227
[compression_parent], 'unordered', True).next()
228
if basis_entry.storage_kind == 'absent':
229
raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
230
basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
231
basis_content = PlainKnitContent(basis_lines, compression_parent)
232
# Manually apply the delta because we have one annotated content and
234
content, _ = self._plain_factory.parse_record(rec[1], contents,
235
factory._build_details, basis_content)
236
return ''.join(content.text())
239
class KnitContentFactory(ContentFactory):
240
"""Content factory for streaming from knits.
242
:seealso ContentFactory:
245
def __init__(self, key, parents, build_details, sha1, raw_record,
246
annotated, knit=None):
247
"""Create a KnitContentFactory for key.
250
:param parents: The parents.
251
:param build_details: The build details as returned from
253
:param sha1: The sha1 expected from the full text of this object.
254
:param raw_record: The bytes of the knit data from disk.
255
:param annotated: True if the raw data is annotated.
257
ContentFactory.__init__(self)
260
self.parents = parents
261
if build_details[0] == 'line-delta':
266
annotated_kind = 'annotated-'
269
self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
270
self._raw_record = raw_record
271
self._build_details = build_details
274
def get_bytes_as(self, storage_kind):
275
if storage_kind == self.storage_kind:
276
return self._raw_record
277
if storage_kind == 'fulltext' and self._knit is not None:
278
return self._knit.get_text(self.key[0])
280
raise errors.UnavailableRepresentation(self.key, storage_kind,
284
class KnitContent(object):
285
"""Content of a knit version to which deltas can be applied.
287
This is always stored in memory as a list of lines with \n at the end,
288
plus a flag saying if the final ending is really there or not, because that
289
corresponds to the on-disk knit representation.
293
self._should_strip_eol = False
295
def apply_delta(self, delta, new_version_id):
296
"""Apply delta to this object to become new_version_id."""
297
raise NotImplementedError(self.apply_delta)
299
def line_delta_iter(self, new_lines):
300
"""Generate line-based delta from this content to new_lines."""
301
new_texts = new_lines.text()
302
old_texts = self.text()
303
s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
304
for tag, i1, i2, j1, j2 in s.get_opcodes():
307
# ofrom, oto, length, data
308
yield i1, i2, j2 - j1, new_lines._lines[j1:j2]
310
def line_delta(self, new_lines):
311
return list(self.line_delta_iter(new_lines))
314
def get_line_delta_blocks(knit_delta, source, target):
315
"""Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
316
target_len = len(target)
319
for s_begin, s_end, t_len, new_text in knit_delta:
320
true_n = s_begin - s_pos
323
# knit deltas do not provide reliable info about whether the
324
# last line of a file matches, due to eol handling.
325
if source[s_pos + n -1] != target[t_pos + n -1]:
328
yield s_pos, t_pos, n
329
t_pos += t_len + true_n
331
n = target_len - t_pos
333
if source[s_pos + n -1] != target[t_pos + n -1]:
336
yield s_pos, t_pos, n
337
yield s_pos + (target_len - t_pos), target_len, 0
340
class AnnotatedKnitContent(KnitContent):
341
"""Annotated content."""
343
def __init__(self, lines):
344
KnitContent.__init__(self)
348
"""Return a list of (origin, text) for each content line."""
349
lines = self._lines[:]
350
if self._should_strip_eol:
351
origin, last_line = lines[-1]
352
lines[-1] = (origin, last_line.rstrip('\n'))
355
def apply_delta(self, delta, new_version_id):
356
"""Apply delta to this object to become new_version_id."""
359
for start, end, count, delta_lines in delta:
360
lines[offset+start:offset+end] = delta_lines
361
offset = offset + (start - end) + count
365
lines = [text for origin, text in self._lines]
366
except ValueError, e:
367
# most commonly (only?) caused by the internal form of the knit
368
# missing annotation information because of a bug - see thread
370
raise KnitCorrupt(self,
371
"line in annotated knit missing annotation information: %s"
373
if self._should_strip_eol:
374
lines[-1] = lines[-1].rstrip('\n')
378
return AnnotatedKnitContent(self._lines[:])
381
class PlainKnitContent(KnitContent):
382
"""Unannotated content.
384
When annotate[_iter] is called on this content, the same version is reported
385
for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent
389
def __init__(self, lines, version_id):
390
KnitContent.__init__(self)
392
self._version_id = version_id
395
"""Return a list of (origin, text) for each content line."""
396
return [(self._version_id, line) for line in self._lines]
398
def apply_delta(self, delta, new_version_id):
399
"""Apply delta to this object to become new_version_id."""
402
for start, end, count, delta_lines in delta:
403
lines[offset+start:offset+end] = delta_lines
404
offset = offset + (start - end) + count
405
self._version_id = new_version_id
408
return PlainKnitContent(self._lines[:], self._version_id)
412
if self._should_strip_eol:
414
lines[-1] = lines[-1].rstrip('\n')
418
class _KnitFactory(object):
419
"""Base class for common Factory functions."""
421
def parse_record(self, version_id, record, record_details,
422
base_content, copy_base_content=True):
423
"""Parse a record into a full content object.
425
:param version_id: The official version id for this content
426
:param record: The data returned by read_records_iter()
427
:param record_details: Details about the record returned by
429
:param base_content: If get_build_details returns a compression_parent,
430
you must return a base_content here, else use None
431
:param copy_base_content: When building from the base_content, decide
432
you can either copy it and return a new object, or modify it in
434
:return: (content, delta) A Content object and possibly a line-delta,
437
method, noeol = record_details
438
if method == 'line-delta':
439
if copy_base_content:
440
content = base_content.copy()
442
content = base_content
443
delta = self.parse_line_delta(record, version_id)
444
content.apply_delta(delta, version_id)
446
content = self.parse_fulltext(record, version_id)
448
content._should_strip_eol = noeol
449
return (content, delta)
452
class KnitAnnotateFactory(_KnitFactory):
453
"""Factory for creating annotated Content objects."""
457
def make(self, lines, version_id):
458
num_lines = len(lines)
459
return AnnotatedKnitContent(zip([version_id] * num_lines, lines))
461
def parse_fulltext(self, content, version_id):
462
"""Convert fulltext to internal representation
464
fulltext content is of the format
465
revid(utf8) plaintext\n
466
internal representation is of the format:
469
# TODO: jam 20070209 The tests expect this to be returned as tuples,
470
# but the code itself doesn't really depend on that.
471
# Figure out a way to not require the overhead of turning the
472
# list back into tuples.
473
lines = [tuple(line.split(' ', 1)) for line in content]
474
return AnnotatedKnitContent(lines)
476
def parse_line_delta_iter(self, lines):
477
return iter(self.parse_line_delta(lines))
479
def parse_line_delta(self, lines, version_id, plain=False):
480
"""Convert a line based delta into internal representation.
482
line delta is in the form of:
483
intstart intend intcount
485
revid(utf8) newline\n
486
internal representation is
487
(start, end, count, [1..count tuples (revid, newline)])
489
:param plain: If True, the lines are returned as a plain
490
list without annotations, not as a list of (origin, content) tuples, i.e.
491
(start, end, count, [1..count newline])
498
def cache_and_return(line):
499
origin, text = line.split(' ', 1)
500
return cache.setdefault(origin, origin), text
502
# walk through the lines parsing.
503
# Note that the plain test is explicitly pulled out of the
504
# loop to minimise any performance impact
507
start, end, count = [int(n) for n in header.split(',')]
508
contents = [next().split(' ', 1)[1] for i in xrange(count)]
509
result.append((start, end, count, contents))
512
start, end, count = [int(n) for n in header.split(',')]
513
contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
514
result.append((start, end, count, contents))
517
def get_fulltext_content(self, lines):
518
"""Extract just the content lines from a fulltext."""
519
return (line.split(' ', 1)[1] for line in lines)
521
def get_linedelta_content(self, lines):
522
"""Extract just the content from a line delta.
524
This doesn't return all of the extra information stored in a delta.
525
Only the actual content lines.
530
header = header.split(',')
531
count = int(header[2])
532
for i in xrange(count):
533
origin, text = next().split(' ', 1)
536
def lower_fulltext(self, content):
537
"""convert a fulltext content record into a serializable form.
539
see parse_fulltext which this inverts.
541
# TODO: jam 20070209 We only do the caching thing to make sure that
542
# the origin is a valid utf-8 line, eventually we could remove it
543
return ['%s %s' % (o, t) for o, t in content._lines]
545
def lower_line_delta(self, delta):
546
"""convert a delta into a serializable form.
548
See parse_line_delta which this inverts.
550
# TODO: jam 20070209 We only do the caching thing to make sure that
551
# the origin is a valid utf-8 line, eventually we could remove it
553
for start, end, c, lines in delta:
554
out.append('%d,%d,%d\n' % (start, end, c))
555
out.extend(origin + ' ' + text
556
for origin, text in lines)
559
def annotate(self, knit, key):
560
content = knit._get_content(key)
561
# adjust for the fact that serialised annotations are only key suffixes
563
if type(key) == tuple:
565
origins = content.annotate()
567
for origin, line in origins:
568
result.append((prefix + (origin,), line))
571
# XXX: This smells a bit. Why would key ever be a non-tuple here?
572
# Aren't keys defined to be tuples? -- spiv 20080618
573
return content.annotate()
576
class KnitPlainFactory(_KnitFactory):
577
"""Factory for creating plain Content objects."""
581
def make(self, lines, version_id):
582
return PlainKnitContent(lines, version_id)
584
def parse_fulltext(self, content, version_id):
585
"""This parses an unannotated fulltext.
587
Note that this is not a noop - the internal representation
588
has (versionid, line) - its just a constant versionid.
590
return self.make(content, version_id)
592
def parse_line_delta_iter(self, lines, version_id):
594
num_lines = len(lines)
595
while cur < num_lines:
598
start, end, c = [int(n) for n in header.split(',')]
599
yield start, end, c, lines[cur:cur+c]
602
def parse_line_delta(self, lines, version_id):
603
return list(self.parse_line_delta_iter(lines, version_id))
605
def get_fulltext_content(self, lines):
606
"""Extract just the content lines from a fulltext."""
609
def get_linedelta_content(self, lines):
610
"""Extract just the content from a line delta.
612
This doesn't return all of the extra information stored in a delta.
613
Only the actual content lines.
618
header = header.split(',')
619
count = int(header[2])
620
for i in xrange(count):
623
def lower_fulltext(self, content):
624
return content.text()
626
def lower_line_delta(self, delta):
628
for start, end, c, lines in delta:
629
out.append('%d,%d,%d\n' % (start, end, c))
633
def annotate(self, knit, key):
634
annotator = _KnitAnnotator(knit)
635
return annotator.annotate(key)
639
def make_file_factory(annotated, mapper):
640
"""Create a factory for creating a file based KnitVersionedFiles.
642
This is only functional enough to run interface tests, it doesn't try to
643
provide a full pack environment.
645
:param annotated: knit annotations are wanted.
646
:param mapper: The mapper from keys to paths.
648
def factory(transport):
649
index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
650
access = _KnitKeyAccess(transport, mapper)
651
return KnitVersionedFiles(index, access, annotated=annotated)
655
def make_pack_factory(graph, delta, keylength):
656
"""Create a factory for creating a pack based VersionedFiles.
658
This is only functional enough to run interface tests, it doesn't try to
659
provide a full pack environment.
661
:param graph: Store a graph.
662
:param delta: Delta compress contents.
663
:param keylength: How long should keys be.
665
def factory(transport):
666
parents = graph or delta
672
max_delta_chain = 200
675
graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
676
key_elements=keylength)
677
stream = transport.open_write_stream('newpack')
678
writer = pack.ContainerWriter(stream.write)
680
index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
681
deltas=delta, add_callback=graph_index.add_nodes)
682
access = _DirectPackAccess({})
683
access.set_writer(writer, graph_index, (transport, 'newpack'))
684
result = KnitVersionedFiles(index, access,
685
max_delta_chain=max_delta_chain)
686
result.stream = stream
687
result.writer = writer
692
def cleanup_pack_knit(versioned_files):
693
versioned_files.stream.close()
694
versioned_files.writer.end()
697
class KnitVersionedFiles(VersionedFiles):
698
"""Storage for many versioned files using knit compression.
700
Backend storage is managed by indices and data objects.
702
:ivar _index: A _KnitGraphIndex or similar that can describe the
703
parents, graph, compression and data location of entries in this
704
KnitVersionedFiles. Note that this is only the index for
705
*this* vfs; if there are fallbacks they must be queried separately.
708
def __init__(self, index, data_access, max_delta_chain=200,
710
"""Create a KnitVersionedFiles with index and data_access.
712
:param index: The index for the knit data.
713
:param data_access: The access object to store and retrieve knit
715
:param max_delta_chain: The maximum number of deltas to permit during
716
insertion. Set to 0 to prohibit the use of deltas.
717
:param annotated: Set to True to cause annotations to be calculated and
718
stored during insertion.
721
self._access = data_access
722
self._max_delta_chain = max_delta_chain
724
self._factory = KnitAnnotateFactory()
726
self._factory = KnitPlainFactory()
727
self._fallback_vfs = []
730
return "%s(%r, %r)" % (
731
self.__class__.__name__,
735
def add_fallback_versioned_files(self, a_versioned_files):
736
"""Add a source of texts for texts not present in this knit.
738
:param a_versioned_files: A VersionedFiles object.
740
self._fallback_vfs.append(a_versioned_files)
742
def add_lines(self, key, parents, lines, parent_texts=None,
743
left_matching_blocks=None, nostore_sha=None, random_id=False,
745
"""See VersionedFiles.add_lines()."""
746
self._index._check_write_ok()
747
self._check_add(key, lines, random_id, check_content)
749
# The caller might pass None if there is no graph data, but kndx
750
# indexes can't directly store that, so we give them
751
# an empty tuple instead.
753
return self._add(key, lines, parents,
754
parent_texts, left_matching_blocks, nostore_sha, random_id)
756
def _add(self, key, lines, parents, parent_texts,
757
left_matching_blocks, nostore_sha, random_id):
758
"""Add a set of lines on top of version specified by parents.
760
Any versions not present will be converted into ghosts.
762
# first thing, if the content is something we don't need to store, find
764
line_bytes = ''.join(lines)
765
digest = sha_string(line_bytes)
766
if nostore_sha == digest:
767
raise errors.ExistingContent
770
if parent_texts is None:
772
# Do a single query to ascertain parent presence.
773
present_parent_map = self.get_parent_map(parents)
774
for parent in parents:
775
if parent in present_parent_map:
776
present_parents.append(parent)
778
# Currently we can only compress against the left most present parent.
779
if (len(present_parents) == 0 or
780
present_parents[0] != parents[0]):
783
# To speed the extract of texts the delta chain is limited
784
# to a fixed number of deltas. This should minimize both
785
# I/O and the time spend applying deltas.
786
delta = self._check_should_delta(present_parents[0])
788
text_length = len(line_bytes)
791
if lines[-1][-1] != '\n':
792
# copy the contents of lines.
794
options.append('no-eol')
795
lines[-1] = lines[-1] + '\n'
799
if type(element) != str:
800
raise TypeError("key contains non-strings: %r" % (key,))
801
# Knit hunks are still last-element only
803
content = self._factory.make(lines, version_id)
804
if 'no-eol' in options:
805
# Hint to the content object that its text() call should strip the
807
content._should_strip_eol = True
808
if delta or (self._factory.annotated and len(present_parents) > 0):
809
# Merge annotations from parent texts if needed.
810
delta_hunks = self._merge_annotations(content, present_parents,
811
parent_texts, delta, self._factory.annotated,
812
left_matching_blocks)
815
options.append('line-delta')
816
store_lines = self._factory.lower_line_delta(delta_hunks)
817
size, bytes = self._record_to_data(key, digest,
820
options.append('fulltext')
821
# isinstance is slower and we have no hierarchy.
822
if self._factory.__class__ == KnitPlainFactory:
823
# Use the already joined bytes saving iteration time in
825
size, bytes = self._record_to_data(key, digest,
828
# get mixed annotation + content and feed it into the
830
store_lines = self._factory.lower_fulltext(content)
831
size, bytes = self._record_to_data(key, digest,
834
access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
835
self._index.add_records(
836
((key, options, access_memo, parents),),
838
return digest, text_length, content
840
def annotate(self, key):
841
"""See VersionedFiles.annotate."""
842
return self._factory.annotate(self, key)
844
def check(self, progress_bar=None):
845
"""See VersionedFiles.check()."""
846
# This doesn't actually test extraction of everything, but that will
847
# impact 'bzr check' substantially, and needs to be integrated with
848
# care. However, it does check for the obvious problem of a delta with
850
keys = self._index.keys()
851
parent_map = self.get_parent_map(keys)
853
if self._index.get_method(key) != 'fulltext':
854
compression_parent = parent_map[key][0]
855
if compression_parent not in parent_map:
856
raise errors.KnitCorrupt(self,
857
"Missing basis parent %s for %s" % (
858
compression_parent, key))
859
for fallback_vfs in self._fallback_vfs:
862
def _check_add(self, key, lines, random_id, check_content):
863
"""check that version_id and lines are safe to add."""
865
if contains_whitespace(version_id):
866
raise InvalidRevisionId(version_id, self)
867
self.check_not_reserved_id(version_id)
868
# TODO: If random_id==False and the key is already present, we should
869
# probably check that the existing content is identical to what is
870
# being inserted, and otherwise raise an exception. This would make
871
# the bundle code simpler.
873
self._check_lines_not_unicode(lines)
874
self._check_lines_are_lines(lines)
876
def _check_header(self, key, line):
877
rec = self._split_header(line)
878
self._check_header_version(rec, key[-1])
881
def _check_header_version(self, rec, version_id):
882
"""Checks the header version on original format knit records.
884
These have the last component of the key embedded in the record.
886
if rec[1] != version_id:
887
raise KnitCorrupt(self,
888
'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
890
def _check_should_delta(self, parent):
891
"""Iterate back through the parent listing, looking for a fulltext.
893
This is used when we want to decide whether to add a delta or a new
894
fulltext. It searches for _max_delta_chain parents. When it finds a
895
fulltext parent, it sees if the total size of the deltas leading up to
896
it is large enough to indicate that we want a new full text anyway.
898
Return True if we should create a new delta, False if we should use a
903
for count in xrange(self._max_delta_chain):
904
# XXX: Collapse these two queries:
906
# Note that this only looks in the index of this particular
907
# KnitVersionedFiles, not in the fallbacks. This ensures that
908
# we won't store a delta spanning physical repository
910
method = self._index.get_method(parent)
911
except RevisionNotPresent:
912
# Some basis is not locally present: always delta
914
index, pos, size = self._index.get_position(parent)
915
if method == 'fulltext':
919
# We don't explicitly check for presence because this is in an
920
# inner loop, and if it's missing it'll fail anyhow.
921
# TODO: This should be asking for compression parent, not graph
923
parent = self._index.get_parent_map([parent])[parent][0]
925
# We couldn't find a fulltext, so we must create a new one
927
# Simple heuristic - if the total I/O wold be greater as a delta than
928
# the originally installed fulltext, we create a new fulltext.
929
return fulltext_size > delta_size
931
def _build_details_to_components(self, build_details):
932
"""Convert a build_details tuple to a position tuple."""
933
# record_details, access_memo, compression_parent
934
return build_details[3], build_details[0], build_details[1]
936
def _get_components_positions(self, keys, allow_missing=False):
937
"""Produce a map of position data for the components of keys.
939
This data is intended to be used for retrieving the knit records.
941
A dict of key to (record_details, index_memo, next, parents) is
943
method is the way referenced data should be applied.
944
index_memo is the handle to pass to the data access to actually get the
946
next is the build-parent of the version, or None for fulltexts.
947
parents is the version_ids of the parents of this version
949
:param allow_missing: If True do not raise an error on a missing component,
953
pending_components = keys
954
while pending_components:
955
build_details = self._index.get_build_details(pending_components)
956
current_components = set(pending_components)
957
pending_components = set()
958
for key, details in build_details.iteritems():
959
(index_memo, compression_parent, parents,
960
record_details) = details
961
method = record_details[0]
962
if compression_parent is not None:
963
pending_components.add(compression_parent)
964
component_data[key] = self._build_details_to_components(details)
965
missing = current_components.difference(build_details)
966
if missing and not allow_missing:
967
raise errors.RevisionNotPresent(missing.pop(), self)
968
return component_data
970
def _get_content(self, key, parent_texts={}):
971
"""Returns a content object that makes up the specified
973
cached_version = parent_texts.get(key, None)
974
if cached_version is not None:
975
# Ensure the cache dict is valid.
976
if not self.get_parent_map([key]):
977
raise RevisionNotPresent(key, self)
978
return cached_version
979
text_map, contents_map = self._get_content_maps([key])
980
return contents_map[key]
982
def _get_content_maps(self, keys, nonlocal_keys=None):
983
"""Produce maps of text and KnitContents
985
:param keys: The keys to produce content maps for.
986
:param nonlocal_keys: An iterable of keys(possibly intersecting keys)
987
which are known to not be in this knit, but rather in one of the
989
:return: (text_map, content_map) where text_map contains the texts for
990
the requested versions and content_map contains the KnitContents.
992
# FUTURE: This function could be improved for the 'extract many' case
993
# by tracking each component and only doing the copy when the number of
994
# children than need to apply delta's to it is > 1 or it is part of the
997
multiple_versions = len(keys) != 1
998
record_map = self._get_record_map(keys, allow_missing=True)
1003
if nonlocal_keys is None:
1004
nonlocal_keys = set()
1006
nonlocal_keys = frozenset(nonlocal_keys)
1007
missing_keys = set(nonlocal_keys)
1008
for source in self._fallback_vfs:
1009
if not missing_keys:
1011
for record in source.get_record_stream(missing_keys,
1013
if record.storage_kind == 'absent':
1015
missing_keys.remove(record.key)
1016
lines = split_lines(record.get_bytes_as('fulltext'))
1017
text_map[record.key] = lines
1018
content_map[record.key] = PlainKnitContent(lines, record.key)
1019
if record.key in keys:
1020
final_content[record.key] = content_map[record.key]
1022
if key in nonlocal_keys:
1027
while cursor is not None:
1029
record, record_details, digest, next = record_map[cursor]
1031
raise RevisionNotPresent(cursor, self)
1032
components.append((cursor, record, record_details, digest))
1034
if cursor in content_map:
1035
# no need to plan further back
1036
components.append((cursor, None, None, None))
1040
for (component_id, record, record_details,
1041
digest) in reversed(components):
1042
if component_id in content_map:
1043
content = content_map[component_id]
1045
content, delta = self._factory.parse_record(key[-1],
1046
record, record_details, content,
1047
copy_base_content=multiple_versions)
1048
if multiple_versions:
1049
content_map[component_id] = content
1051
final_content[key] = content
1053
# digest here is the digest from the last applied component.
1054
text = content.text()
1055
actual_sha = sha_strings(text)
1056
if actual_sha != digest:
1057
raise KnitCorrupt(self,
1059
'\n of reconstructed text does not match'
1061
'\n for version %s' %
1062
(actual_sha, digest, key))
1063
text_map[key] = text
1064
return text_map, final_content
1066
def get_parent_map(self, keys):
1067
"""Get a map of the graph parents of keys.
1069
:param keys: The keys to look up parents for.
1070
:return: A mapping from keys to parents. Absent keys are absent from
1073
return self._get_parent_map_with_sources(keys)[0]
1075
def _get_parent_map_with_sources(self, keys):
1076
"""Get a map of the parents of keys.
1078
:param keys: The keys to look up parents for.
1079
:return: A tuple. The first element is a mapping from keys to parents.
1080
Absent keys are absent from the mapping. The second element is a
1081
list with the locations each key was found in. The first element
1082
is the in-this-knit parents, the second the first fallback source,
1086
sources = [self._index] + self._fallback_vfs
1089
for source in sources:
1092
new_result = source.get_parent_map(missing)
1093
source_results.append(new_result)
1094
result.update(new_result)
1095
missing.difference_update(set(new_result))
1096
return result, source_results
1098
def _get_record_map(self, keys, allow_missing=False):
1099
"""Produce a dictionary of knit records.
1101
:return: {key:(record, record_details, digest, next)}
1103
data returned from read_records
1105
opaque information to pass to parse_record
1107
SHA1 digest of the full text after all steps are done
1109
build-parent of the version, i.e. the leftmost ancestor.
1110
Will be None if the record is not a delta.
1111
:param keys: The keys to build a map for
1112
:param allow_missing: If some records are missing, rather than
1113
error, just return the data that could be generated.
1115
position_map = self._get_components_positions(keys,
1116
allow_missing=allow_missing)
1117
# key = component_id, r = record_details, i_m = index_memo, n = next
1118
records = [(key, i_m) for key, (r, i_m, n)
1119
in position_map.iteritems()]
1121
for key, record, digest in \
1122
self._read_records_iter(records):
1123
(record_details, index_memo, next) = position_map[key]
1124
record_map[key] = record, record_details, digest, next
1127
def _split_by_prefix(self, keys):
1128
"""For the given keys, split them up based on their prefix.
1130
To keep memory pressure somewhat under control, split the
1131
requests back into per-file-id requests, otherwise "bzr co"
1132
extracts the full tree into memory before writing it to disk.
1133
This should be revisited if _get_content_maps() can ever cross
1136
:param keys: An iterable of key tuples
1137
:return: A dict of {prefix: [key_list]}
1139
split_by_prefix = {}
1142
split_by_prefix.setdefault('', []).append(key)
1144
split_by_prefix.setdefault(key[0], []).append(key)
1145
return split_by_prefix
1147
def get_record_stream(self, keys, ordering, include_delta_closure):
1148
"""Get a stream of records for keys.
1150
:param keys: The keys to include.
1151
:param ordering: Either 'unordered' or 'topological'. A topologically
1152
sorted stream has compression parents strictly before their
1154
:param include_delta_closure: If True then the closure across any
1155
compression parents will be included (in the opaque data).
1156
:return: An iterator of ContentFactory objects, each of which is only
1157
valid until the iterator is advanced.
1159
# keys might be a generator
1163
if not self._index.has_graph:
1164
# Cannot topological order when no graph has been stored.
1165
ordering = 'unordered'
1166
if include_delta_closure:
1167
positions = self._get_components_positions(keys, allow_missing=True)
1169
build_details = self._index.get_build_details(keys)
1171
# (record_details, access_memo, compression_parent_key)
1172
positions = dict((key, self._build_details_to_components(details))
1173
for key, details in build_details.iteritems())
1174
absent_keys = keys.difference(set(positions))
1175
# There may be more absent keys : if we're missing the basis component
1176
# and are trying to include the delta closure.
1177
if include_delta_closure:
1178
needed_from_fallback = set()
1179
# Build up reconstructable_keys dict. key:True in this dict means
1180
# the key can be reconstructed.
1181
reconstructable_keys = {}
1185
chain = [key, positions[key][2]]
1187
needed_from_fallback.add(key)
1190
while chain[-1] is not None:
1191
if chain[-1] in reconstructable_keys:
1192
result = reconstructable_keys[chain[-1]]
1196
chain.append(positions[chain[-1]][2])
1198
# missing basis component
1199
needed_from_fallback.add(chain[-1])
1202
for chain_key in chain[:-1]:
1203
reconstructable_keys[chain_key] = result
1205
needed_from_fallback.add(key)
1206
# Double index lookups here : need a unified api ?
1207
global_map, parent_maps = self._get_parent_map_with_sources(keys)
1208
if ordering == 'topological':
1209
# Global topological sort
1210
present_keys = tsort.topo_sort(global_map)
1211
# Now group by source:
1213
current_source = None
1214
for key in present_keys:
1215
for parent_map in parent_maps:
1216
if key in parent_map:
1217
key_source = parent_map
1219
if current_source is not key_source:
1220
source_keys.append((key_source, []))
1221
current_source = key_source
1222
source_keys[-1][1].append(key)
1224
if ordering != 'unordered':
1225
raise AssertionError('valid values for ordering are:'
1226
' "unordered" or "topological" not: %r'
1228
# Just group by source; remote sources first.
1231
for parent_map in reversed(parent_maps):
1232
source_keys.append((parent_map, []))
1233
for key in parent_map:
1234
present_keys.append(key)
1235
source_keys[-1][1].append(key)
1236
absent_keys = keys - set(global_map)
1237
for key in absent_keys:
1238
yield AbsentContentFactory(key)
1239
# restrict our view to the keys we can answer.
1240
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
1241
# XXX: At that point we need to consider the impact of double reads by
1242
# utilising components multiple times.
1243
if include_delta_closure:
1244
# XXX: get_content_maps performs its own index queries; allow state
1246
non_local_keys = needed_from_fallback - absent_keys
1247
prefix_split_keys = self._split_by_prefix(present_keys)
1248
prefix_split_non_local_keys = self._split_by_prefix(non_local_keys)
1249
for prefix, keys in prefix_split_keys.iteritems():
1250
non_local = prefix_split_non_local_keys.get(prefix, [])
1251
non_local = set(non_local)
1252
text_map, _ = self._get_content_maps(keys, non_local)
1254
lines = text_map.pop(key)
1255
text = ''.join(lines)
1256
yield FulltextContentFactory(key, global_map[key], None,
1259
for source, keys in source_keys:
1260
if source is parent_maps[0]:
1261
# this KnitVersionedFiles
1262
records = [(key, positions[key][1]) for key in keys]
1263
for key, raw_data, sha1 in self._read_records_iter_raw(records):
1264
(record_details, index_memo, _) = positions[key]
1265
yield KnitContentFactory(key, global_map[key],
1266
record_details, sha1, raw_data, self._factory.annotated, None)
1268
vf = self._fallback_vfs[parent_maps.index(source) - 1]
1269
for record in vf.get_record_stream(keys, ordering,
1270
include_delta_closure):
1273
def get_sha1s(self, keys):
1274
"""See VersionedFiles.get_sha1s()."""
1276
record_map = self._get_record_map(missing, allow_missing=True)
1278
for key, details in record_map.iteritems():
1279
if key not in missing:
1281
# record entry 2 is the 'digest'.
1282
result[key] = details[2]
1283
missing.difference_update(set(result))
1284
for source in self._fallback_vfs:
1287
new_result = source.get_sha1s(missing)
1288
result.update(new_result)
1289
missing.difference_update(set(new_result))
1292
def insert_record_stream(self, stream):
1293
"""Insert a record stream into this container.
1295
:param stream: A stream of records to insert.
1297
:seealso VersionedFiles.get_record_stream:
1299
def get_adapter(adapter_key):
1301
return adapters[adapter_key]
1303
adapter_factory = adapter_registry.get(adapter_key)
1304
adapter = adapter_factory(self)
1305
adapters[adapter_key] = adapter
1307
if self._factory.annotated:
1308
# self is annotated, we need annotated knits to use directly.
1309
annotated = "annotated-"
1312
# self is not annotated, but we can strip annotations cheaply.
1314
convertibles = set(["knit-annotated-ft-gz"])
1315
if self._max_delta_chain:
1316
convertibles.add("knit-annotated-delta-gz")
1317
# The set of types we can cheaply adapt without needing basis texts.
1318
native_types = set()
1319
if self._max_delta_chain:
1320
native_types.add("knit-%sdelta-gz" % annotated)
1321
native_types.add("knit-%sft-gz" % annotated)
1322
knit_types = native_types.union(convertibles)
1324
# Buffer all index entries that we can't add immediately because their
1325
# basis parent is missing. We don't buffer all because generating
1326
# annotations may require access to some of the new records. However we
1327
# can't generate annotations from new deltas until their basis parent
1328
# is present anyway, so we get away with not needing an index that
1329
# includes the new keys.
1330
# key = basis_parent, value = index entry to add
1331
buffered_index_entries = {}
1332
for record in stream:
1333
parents = record.parents
1334
# Raise an error when a record is missing.
1335
if record.storage_kind == 'absent':
1336
raise RevisionNotPresent([record.key], self)
1337
if record.storage_kind in knit_types:
1338
if record.storage_kind not in native_types:
1340
adapter_key = (record.storage_kind, "knit-delta-gz")
1341
adapter = get_adapter(adapter_key)
1343
adapter_key = (record.storage_kind, "knit-ft-gz")
1344
adapter = get_adapter(adapter_key)
1345
bytes = adapter.get_bytes(
1346
record, record.get_bytes_as(record.storage_kind))
1348
bytes = record.get_bytes_as(record.storage_kind)
1349
options = [record._build_details[0]]
1350
if record._build_details[1]:
1351
options.append('no-eol')
1352
# Just blat it across.
1353
# Note: This does end up adding data on duplicate keys. As
1354
# modern repositories use atomic insertions this should not
1355
# lead to excessive growth in the event of interrupted fetches.
1356
# 'knit' repositories may suffer excessive growth, but as a
1357
# deprecated format this is tolerable. It can be fixed if
1358
# needed by in the kndx index support raising on a duplicate
1359
# add with identical parents and options.
1360
access_memo = self._access.add_raw_records(
1361
[(record.key, len(bytes))], bytes)[0]
1362
index_entry = (record.key, options, access_memo, parents)
1364
if 'fulltext' not in options:
1365
basis_parent = parents[0]
1366
# Note that pack backed knits don't need to buffer here
1367
# because they buffer all writes to the transaction level,
1368
# but we don't expose that difference at the index level. If
1369
# the query here has sufficient cost to show up in
1370
# profiling we should do that.
1371
if basis_parent not in self.get_parent_map([basis_parent]):
1372
pending = buffered_index_entries.setdefault(
1374
pending.append(index_entry)
1377
self._index.add_records([index_entry])
1378
elif record.storage_kind == 'fulltext':
1379
self.add_lines(record.key, parents,
1380
split_lines(record.get_bytes_as('fulltext')))
1382
adapter_key = record.storage_kind, 'fulltext'
1383
adapter = get_adapter(adapter_key)
1384
lines = split_lines(adapter.get_bytes(
1385
record, record.get_bytes_as(record.storage_kind)))
1387
self.add_lines(record.key, parents, lines)
1388
except errors.RevisionAlreadyPresent:
1390
# Add any records whose basis parent is now available.
1391
added_keys = [record.key]
1393
key = added_keys.pop(0)
1394
if key in buffered_index_entries:
1395
index_entries = buffered_index_entries[key]
1396
self._index.add_records(index_entries)
1398
[index_entry[0] for index_entry in index_entries])
1399
del buffered_index_entries[key]
1400
# If there were any deltas which had a missing basis parent, error.
1401
if buffered_index_entries:
1402
raise errors.RevisionNotPresent(buffered_index_entries.keys()[0],
1405
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1406
"""Iterate over the lines in the versioned files from keys.
1408
This may return lines from other keys. Each item the returned
1409
iterator yields is a tuple of a line and a text version that that line
1410
is present in (not introduced in).
1412
Ordering of results is in whatever order is most suitable for the
1413
underlying storage format.
1415
If a progress bar is supplied, it may be used to indicate progress.
1416
The caller is responsible for cleaning up progress bars (because this
1420
* Lines are normalised by the underlying store: they will all have \n
1422
* Lines are returned in arbitrary order.
1424
:return: An iterator over (line, key).
1427
pb = progress.DummyProgress()
1430
# we don't care about inclusions, the caller cares.
1431
# but we need to setup a list of records to visit.
1432
# we need key, position, length
1434
build_details = self._index.get_build_details(keys)
1435
for key, details in build_details.iteritems():
1437
key_records.append((key, details[0]))
1439
records_iter = enumerate(self._read_records_iter(key_records))
1440
for (key_idx, (key, data, sha_value)) in records_iter:
1441
pb.update('Walking content.', key_idx, total)
1442
compression_parent = build_details[key][1]
1443
if compression_parent is None:
1445
line_iterator = self._factory.get_fulltext_content(data)
1448
line_iterator = self._factory.get_linedelta_content(data)
1449
# XXX: It might be more efficient to yield (key,
1450
# line_iterator) in the future. However for now, this is a simpler
1451
# change to integrate into the rest of the codebase. RBC 20071110
1452
for line in line_iterator:
1454
for source in self._fallback_vfs:
1458
for line, key in source.iter_lines_added_or_present_in_keys(keys):
1459
source_keys.add(key)
1461
keys.difference_update(source_keys)
1463
# XXX: strictly the second parameter is meant to be the file id
1464
# but it's not easily accessible here.
1465
raise RevisionNotPresent(keys, repr(self))
1466
pb.update('Walking content.', total, total)
1468
def _make_line_delta(self, delta_seq, new_content):
1469
"""Generate a line delta from delta_seq and new_content."""
1471
for op in delta_seq.get_opcodes():
1472
if op[0] == 'equal':
1474
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
1477
def _merge_annotations(self, content, parents, parent_texts={},
1478
delta=None, annotated=None,
1479
left_matching_blocks=None):
1480
"""Merge annotations for content and generate deltas.
1482
This is done by comparing the annotations based on changes to the text
1483
and generating a delta on the resulting full texts. If annotations are
1484
not being created then a simple delta is created.
1486
if left_matching_blocks is not None:
1487
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1491
for parent_key in parents:
1492
merge_content = self._get_content(parent_key, parent_texts)
1493
if (parent_key == parents[0] and delta_seq is not None):
1496
seq = patiencediff.PatienceSequenceMatcher(
1497
None, merge_content.text(), content.text())
1498
for i, j, n in seq.get_matching_blocks():
1501
# this copies (origin, text) pairs across to the new
1502
# content for any line that matches the last-checked
1504
content._lines[j:j+n] = merge_content._lines[i:i+n]
1505
# XXX: Robert says the following block is a workaround for a
1506
# now-fixed bug and it can probably be deleted. -- mbp 20080618
1507
if content._lines and content._lines[-1][1][-1] != '\n':
1508
# The copied annotation was from a line without a trailing EOL,
1509
# reinstate one for the content object, to ensure correct
1511
line = content._lines[-1][1] + '\n'
1512
content._lines[-1] = (content._lines[-1][0], line)
1514
if delta_seq is None:
1515
reference_content = self._get_content(parents[0], parent_texts)
1516
new_texts = content.text()
1517
old_texts = reference_content.text()
1518
delta_seq = patiencediff.PatienceSequenceMatcher(
1519
None, old_texts, new_texts)
1520
return self._make_line_delta(delta_seq, content)
1522
def _parse_record(self, version_id, data):
1523
"""Parse an original format knit record.
1525
These have the last element of the key only present in the stored data.
1527
rec, record_contents = self._parse_record_unchecked(data)
1528
self._check_header_version(rec, version_id)
1529
return record_contents, rec[3]
1531
def _parse_record_header(self, key, raw_data):
1532
"""Parse a record header for consistency.
1534
:return: the header and the decompressor stream.
1535
as (stream, header_record)
1537
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
1540
rec = self._check_header(key, df.readline())
1541
except Exception, e:
1542
raise KnitCorrupt(self,
1543
"While reading {%s} got %s(%s)"
1544
% (key, e.__class__.__name__, str(e)))
1547
def _parse_record_unchecked(self, data):
1549
# 4168 calls in 2880 217 internal
1550
# 4168 calls to _parse_record_header in 2121
1551
# 4168 calls to readlines in 330
1552
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
1554
record_contents = df.readlines()
1555
except Exception, e:
1556
raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
1557
(data, e.__class__.__name__, str(e)))
1558
header = record_contents.pop(0)
1559
rec = self._split_header(header)
1560
last_line = record_contents.pop()
1561
if len(record_contents) != int(rec[2]):
1562
raise KnitCorrupt(self,
1563
'incorrect number of lines %s != %s'
1564
' for version {%s} %s'
1565
% (len(record_contents), int(rec[2]),
1566
rec[1], record_contents))
1567
if last_line != 'end %s\n' % rec[1]:
1568
raise KnitCorrupt(self,
1569
'unexpected version end line %r, wanted %r'
1570
% (last_line, rec[1]))
1572
return rec, record_contents
1574
def _read_records_iter(self, records):
1575
"""Read text records from data file and yield result.
1577
The result will be returned in whatever is the fastest to read.
1578
Not by the order requested. Also, multiple requests for the same
1579
record will only yield 1 response.
1580
:param records: A list of (key, access_memo) entries
1581
:return: Yields (key, contents, digest) in the order
1582
read, not the order requested
1587
# XXX: This smells wrong, IO may not be getting ordered right.
1588
needed_records = sorted(set(records), key=operator.itemgetter(1))
1589
if not needed_records:
1592
# The transport optimizes the fetching as well
1593
# (ie, reads continuous ranges.)
1594
raw_data = self._access.get_raw_records(
1595
[index_memo for key, index_memo in needed_records])
1597
for (key, index_memo), data in \
1598
izip(iter(needed_records), raw_data):
1599
content, digest = self._parse_record(key[-1], data)
1600
yield key, content, digest
1602
def _read_records_iter_raw(self, records):
1603
"""Read text records from data file and yield raw data.
1605
This unpacks enough of the text record to validate the id is
1606
as expected but thats all.
1608
Each item the iterator yields is (key, bytes, sha1_of_full_text).
1610
# setup an iterator of the external records:
1611
# uses readv so nice and fast we hope.
1613
# grab the disk data needed.
1614
needed_offsets = [index_memo for key, index_memo
1616
raw_records = self._access.get_raw_records(needed_offsets)
1618
for key, index_memo in records:
1619
data = raw_records.next()
1620
# validate the header (note that we can only use the suffix in
1621
# current knit records).
1622
df, rec = self._parse_record_header(key, data)
1624
yield key, data, rec[3]
1626
def _record_to_data(self, key, digest, lines, dense_lines=None):
1627
"""Convert key, digest, lines into a raw data block.
1629
:param key: The key of the record. Currently keys are always serialised
1630
using just the trailing component.
1631
:param dense_lines: The bytes of lines but in a denser form. For
1632
instance, if lines is a list of 1000 bytestrings each ending in \n,
1633
dense_lines may be a list with one line in it, containing all the
1634
1000's lines and their \n's. Using dense_lines if it is already
1635
known is a win because the string join to create bytes in this
1636
function spends less time resizing the final string.
1637
:return: (len, a StringIO instance with the raw data ready to read.)
1639
# Note: using a string copy here increases memory pressure with e.g.
1640
# ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
1641
# when doing the initial commit of a mozilla tree. RBC 20070921
1642
bytes = ''.join(chain(
1643
["version %s %d %s\n" % (key[-1],
1646
dense_lines or lines,
1647
["end %s\n" % key[-1]]))
1648
if type(bytes) != str:
1649
raise AssertionError(
1650
'data must be plain bytes was %s' % type(bytes))
1651
if lines and lines[-1][-1] != '\n':
1652
raise ValueError('corrupt lines value %r' % lines)
1653
compressed_bytes = tuned_gzip.bytes_to_gzip(bytes)
1654
return len(compressed_bytes), compressed_bytes
1656
def _split_header(self, line):
1659
raise KnitCorrupt(self,
1660
'unexpected number of elements in record header')
1664
"""See VersionedFiles.keys."""
1665
if 'evil' in debug.debug_flags:
1666
trace.mutter_callsite(2, "keys scales with size of history")
1667
sources = [self._index] + self._fallback_vfs
1669
for source in sources:
1670
result.update(source.keys())
1675
class _KndxIndex(object):
1676
"""Manages knit index files
1678
The index is kept in memory and read on startup, to enable
1679
fast lookups of revision information. The cursor of the index
1680
file is always pointing to the end, making it easy to append
1683
_cache is a cache for fast mapping from version id to a Index
1686
_history is a cache for fast mapping from indexes to version ids.
1688
The index data format is dictionary compressed when it comes to
1689
parent references; a index entry may only have parents that with a
1690
lover index number. As a result, the index is topological sorted.
1692
Duplicate entries may be written to the index for a single version id
1693
if this is done then the latter one completely replaces the former:
1694
this allows updates to correct version and parent information.
1695
Note that the two entries may share the delta, and that successive
1696
annotations and references MUST point to the first entry.
1698
The index file on disc contains a header, followed by one line per knit
1699
record. The same revision can be present in an index file more than once.
1700
The first occurrence gets assigned a sequence number starting from 0.
1702
The format of a single line is
1703
REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
1704
REVISION_ID is a utf8-encoded revision id
1705
FLAGS is a comma separated list of flags about the record. Values include
1706
no-eol, line-delta, fulltext.
1707
BYTE_OFFSET is the ascii representation of the byte offset in the data file
1708
that the the compressed data starts at.
1709
LENGTH is the ascii representation of the length of the data file.
1710
PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
1712
PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
1713
revision id already in the knit that is a parent of REVISION_ID.
1714
The ' :' marker is the end of record marker.
1717
when a write is interrupted to the index file, it will result in a line
1718
that does not end in ' :'. If the ' :' is not present at the end of a line,
1719
or at the end of the file, then the record that is missing it will be
1720
ignored by the parser.
1722
When writing new records to the index file, the data is preceded by '\n'
1723
to ensure that records always start on new lines even if the last write was
1724
interrupted. As a result its normal for the last line in the index to be
1725
missing a trailing newline. One can be added with no harmful effects.
1727
:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
1728
where prefix is e.g. the (fileid,) for .texts instances or () for
1729
constant-mapped things like .revisions, and the old state is
1730
tuple(history_vector, cache_dict). This is used to prevent having an
1731
ABI change with the C extension that reads .kndx files.
1734
HEADER = "# bzr knit index 8\n"
1736
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
1737
"""Create a _KndxIndex on transport using mapper."""
1738
self._transport = transport
1739
self._mapper = mapper
1740
self._get_scope = get_scope
1741
self._allow_writes = allow_writes
1742
self._is_locked = is_locked
1744
self.has_graph = True
1746
def add_records(self, records, random_id=False):
1747
"""Add multiple records to the index.
1749
:param records: a list of tuples:
1750
(key, options, access_memo, parents).
1751
:param random_id: If True the ids being added were randomly generated
1752
and no check for existence will be performed.
1755
for record in records:
1758
path = self._mapper.map(key) + '.kndx'
1759
path_keys = paths.setdefault(path, (prefix, []))
1760
path_keys[1].append(record)
1761
for path in sorted(paths):
1762
prefix, path_keys = paths[path]
1763
self._load_prefixes([prefix])
1765
orig_history = self._kndx_cache[prefix][1][:]
1766
orig_cache = self._kndx_cache[prefix][0].copy()
1769
for key, options, (_, pos, size), parents in path_keys:
1771
# kndx indices cannot be parentless.
1773
line = "\n%s %s %s %s %s :" % (
1774
key[-1], ','.join(options), pos, size,
1775
self._dictionary_compress(parents))
1776
if type(line) != str:
1777
raise AssertionError(
1778
'data must be utf8 was %s' % type(line))
1780
self._cache_key(key, options, pos, size, parents)
1781
if len(orig_history):
1782
self._transport.append_bytes(path, ''.join(lines))
1784
self._init_index(path, lines)
1786
# If any problems happen, restore the original values and re-raise
1787
self._kndx_cache[prefix] = (orig_cache, orig_history)
1790
def _cache_key(self, key, options, pos, size, parent_keys):
1791
"""Cache a version record in the history array and index cache.
1793
This is inlined into _load_data for performance. KEEP IN SYNC.
1794
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
1798
version_id = key[-1]
1799
# last-element only for compatibilty with the C load_data.
1800
parents = tuple(parent[-1] for parent in parent_keys)
1801
for parent in parent_keys:
1802
if parent[:-1] != prefix:
1803
raise ValueError("mismatched prefixes for %r, %r" % (
1805
cache, history = self._kndx_cache[prefix]
1806
# only want the _history index to reference the 1st index entry
1808
if version_id not in cache:
1809
index = len(history)
1810
history.append(version_id)
1812
index = cache[version_id][5]
1813
cache[version_id] = (version_id,
1820
def check_header(self, fp):
1821
line = fp.readline()
1823
# An empty file can actually be treated as though the file doesn't
1825
raise errors.NoSuchFile(self)
1826
if line != self.HEADER:
1827
raise KnitHeaderError(badline=line, filename=self)
1829
def _check_read(self):
1830
if not self._is_locked():
1831
raise errors.ObjectNotLocked(self)
1832
if self._get_scope() != self._scope:
1835
def _check_write_ok(self):
1836
"""Assert if not writes are permitted."""
1837
if not self._is_locked():
1838
raise errors.ObjectNotLocked(self)
1839
if self._get_scope() != self._scope:
1841
if self._mode != 'w':
1842
raise errors.ReadOnlyObjectDirtiedError(self)
1844
def get_build_details(self, keys):
1845
"""Get the method, index_memo and compression parent for keys.
1847
Ghosts are omitted from the result.
1849
:param keys: An iterable of keys.
1850
:return: A dict of key:(index_memo, compression_parent, parents,
1853
opaque structure to pass to read_records to extract the raw
1856
Content that this record is built upon, may be None
1858
Logical parents of this node
1860
extra information about the content which needs to be passed to
1861
Factory.parse_record
1863
prefixes = self._partition_keys(keys)
1864
parent_map = self.get_parent_map(keys)
1867
if key not in parent_map:
1869
method = self.get_method(key)
1870
parents = parent_map[key]
1871
if method == 'fulltext':
1872
compression_parent = None
1874
compression_parent = parents[0]
1875
noeol = 'no-eol' in self.get_options(key)
1876
index_memo = self.get_position(key)
1877
result[key] = (index_memo, compression_parent,
1878
parents, (method, noeol))
1881
def get_method(self, key):
1882
"""Return compression method of specified key."""
1883
options = self.get_options(key)
1884
if 'fulltext' in options:
1886
elif 'line-delta' in options:
1889
raise errors.KnitIndexUnknownMethod(self, options)
1891
def get_options(self, key):
1892
"""Return a list representing options.
1896
prefix, suffix = self._split_key(key)
1897
self._load_prefixes([prefix])
1899
return self._kndx_cache[prefix][0][suffix][1]
1901
raise RevisionNotPresent(key, self)
1903
def get_parent_map(self, keys):
1904
"""Get a map of the parents of keys.
1906
:param keys: The keys to look up parents for.
1907
:return: A mapping from keys to parents. Absent keys are absent from
1910
# Parse what we need to up front, this potentially trades off I/O
1911
# locality (.kndx and .knit in the same block group for the same file
1912
# id) for less checking in inner loops.
1913
prefixes = set(key[:-1] for key in keys)
1914
self._load_prefixes(prefixes)
1919
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
1923
result[key] = tuple(prefix + (suffix,) for
1924
suffix in suffix_parents)
1927
def get_position(self, key):
1928
"""Return details needed to access the version.
1930
:return: a tuple (key, data position, size) to hand to the access
1931
logic to get the record.
1933
prefix, suffix = self._split_key(key)
1934
self._load_prefixes([prefix])
1935
entry = self._kndx_cache[prefix][0][suffix]
1936
return key, entry[2], entry[3]
1938
def _init_index(self, path, extra_lines=[]):
1939
"""Initialize an index."""
1941
sio.write(self.HEADER)
1942
sio.writelines(extra_lines)
1944
self._transport.put_file_non_atomic(path, sio,
1945
create_parent_dir=True)
1946
# self._create_parent_dir)
1947
# mode=self._file_mode,
1948
# dir_mode=self._dir_mode)
1951
"""Get all the keys in the collection.
1953
The keys are not ordered.
1956
# Identify all key prefixes.
1957
# XXX: A bit hacky, needs polish.
1958
if type(self._mapper) == ConstantMapper:
1962
for quoted_relpath in self._transport.iter_files_recursive():
1963
path, ext = os.path.splitext(quoted_relpath)
1965
prefixes = [self._mapper.unmap(path) for path in relpaths]
1966
self._load_prefixes(prefixes)
1967
for prefix in prefixes:
1968
for suffix in self._kndx_cache[prefix][1]:
1969
result.add(prefix + (suffix,))
1972
def _load_prefixes(self, prefixes):
1973
"""Load the indices for prefixes."""
1975
for prefix in prefixes:
1976
if prefix not in self._kndx_cache:
1977
# the load_data interface writes to these variables.
1980
self._filename = prefix
1982
path = self._mapper.map(prefix) + '.kndx'
1983
fp = self._transport.get(path)
1985
# _load_data may raise NoSuchFile if the target knit is
1987
_load_data(self, fp)
1990
self._kndx_cache[prefix] = (self._cache, self._history)
1995
self._kndx_cache[prefix] = ({}, [])
1996
if type(self._mapper) == ConstantMapper:
1997
# preserve behaviour for revisions.kndx etc.
1998
self._init_index(path)
2003
def _partition_keys(self, keys):
2004
"""Turn keys into a dict of prefix:suffix_list."""
2007
prefix_keys = result.setdefault(key[:-1], [])
2008
prefix_keys.append(key[-1])
2011
def _dictionary_compress(self, keys):
2012
"""Dictionary compress keys.
2014
:param keys: The keys to generate references to.
2015
:return: A string representation of keys. keys which are present are
2016
dictionary compressed, and others are emitted as fulltext with a
2022
prefix = keys[0][:-1]
2023
cache = self._kndx_cache[prefix][0]
2025
if key[:-1] != prefix:
2026
# kndx indices cannot refer across partitioned storage.
2027
raise ValueError("mismatched prefixes for %r" % keys)
2028
if key[-1] in cache:
2029
# -- inlined lookup() --
2030
result_list.append(str(cache[key[-1]][5]))
2031
# -- end lookup () --
2033
result_list.append('.' + key[-1])
2034
return ' '.join(result_list)
2036
def _reset_cache(self):
2037
# Possibly this should be a LRU cache. A dictionary from key_prefix to
2038
# (cache_dict, history_vector) for parsed kndx files.
2039
self._kndx_cache = {}
2040
self._scope = self._get_scope()
2041
allow_writes = self._allow_writes()
2047
def _split_key(self, key):
2048
"""Split key into a prefix and suffix."""
2049
return key[:-1], key[-1]
2052
class _KnitGraphIndex(object):
2053
"""A KnitVersionedFiles index layered on GraphIndex."""
2055
def __init__(self, graph_index, is_locked, deltas=False, parents=True,
2057
"""Construct a KnitGraphIndex on a graph_index.
2059
:param graph_index: An implementation of bzrlib.index.GraphIndex.
2060
:param is_locked: A callback to check whether the object should answer
2062
:param deltas: Allow delta-compressed records.
2063
:param parents: If True, record knits parents, if not do not record
2065
:param add_callback: If not None, allow additions to the index and call
2066
this callback with a list of added GraphIndex nodes:
2067
[(node, value, node_refs), ...]
2068
:param is_locked: A callback, returns True if the index is locked and
2071
self._add_callback = add_callback
2072
self._graph_index = graph_index
2073
self._deltas = deltas
2074
self._parents = parents
2075
if deltas and not parents:
2076
# XXX: TODO: Delta tree and parent graph should be conceptually
2078
raise KnitCorrupt(self, "Cannot do delta compression without "
2080
self.has_graph = parents
2081
self._is_locked = is_locked
2084
return "%s(%r)" % (self.__class__.__name__, self._graph_index)
2086
def add_records(self, records, random_id=False):
2087
"""Add multiple records to the index.
2089
This function does not insert data into the Immutable GraphIndex
2090
backing the KnitGraphIndex, instead it prepares data for insertion by
2091
the caller and checks that it is safe to insert then calls
2092
self._add_callback with the prepared GraphIndex nodes.
2094
:param records: a list of tuples:
2095
(key, options, access_memo, parents).
2096
:param random_id: If True the ids being added were randomly generated
2097
and no check for existence will be performed.
2099
if not self._add_callback:
2100
raise errors.ReadOnlyError(self)
2101
# we hope there are no repositories with inconsistent parentage
2105
for (key, options, access_memo, parents) in records:
2107
parents = tuple(parents)
2108
index, pos, size = access_memo
2109
if 'no-eol' in options:
2113
value += "%d %d" % (pos, size)
2114
if not self._deltas:
2115
if 'line-delta' in options:
2116
raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
2119
if 'line-delta' in options:
2120
node_refs = (parents, (parents[0],))
2122
node_refs = (parents, ())
2124
node_refs = (parents, )
2127
raise KnitCorrupt(self, "attempt to add node with parents "
2128
"in parentless index.")
2130
keys[key] = (value, node_refs)
2133
present_nodes = self._get_entries(keys)
2134
for (index, key, value, node_refs) in present_nodes:
2135
if (value[0] != keys[key][0][0] or
2136
node_refs != keys[key][1]):
2137
raise KnitCorrupt(self, "inconsistent details in add_records"
2138
": %s %s" % ((value, node_refs), keys[key]))
2142
for key, (value, node_refs) in keys.iteritems():
2143
result.append((key, value, node_refs))
2145
for key, (value, node_refs) in keys.iteritems():
2146
result.append((key, value))
2147
self._add_callback(result)
2149
def _check_read(self):
2150
"""raise if reads are not permitted."""
2151
if not self._is_locked():
2152
raise errors.ObjectNotLocked(self)
2154
def _check_write_ok(self):
2155
"""Assert if writes are not permitted."""
2156
if not self._is_locked():
2157
raise errors.ObjectNotLocked(self)
2159
def _compression_parent(self, an_entry):
2160
# return the key that an_entry is compressed against, or None
2161
# Grab the second parent list (as deltas implies parents currently)
2162
compression_parents = an_entry[3][1]
2163
if not compression_parents:
2165
if len(compression_parents) != 1:
2166
raise AssertionError(
2167
"Too many compression parents: %r" % compression_parents)
2168
return compression_parents[0]
2170
def get_build_details(self, keys):
2171
"""Get the method, index_memo and compression parent for version_ids.
2173
Ghosts are omitted from the result.
2175
:param keys: An iterable of keys.
2176
:return: A dict of key:
2177
(index_memo, compression_parent, parents, record_details).
2179
opaque structure to pass to read_records to extract the raw
2182
Content that this record is built upon, may be None
2184
Logical parents of this node
2186
extra information about the content which needs to be passed to
2187
Factory.parse_record
2191
entries = self._get_entries(keys, False)
2192
for entry in entries:
2194
if not self._parents:
2197
parents = entry[3][0]
2198
if not self._deltas:
2199
compression_parent_key = None
2201
compression_parent_key = self._compression_parent(entry)
2202
noeol = (entry[2][0] == 'N')
2203
if compression_parent_key:
2204
method = 'line-delta'
2207
result[key] = (self._node_to_position(entry),
2208
compression_parent_key, parents,
2212
def _get_entries(self, keys, check_present=False):
2213
"""Get the entries for keys.
2215
:param keys: An iterable of index key tuples.
2220
for node in self._graph_index.iter_entries(keys):
2222
found_keys.add(node[1])
2224
# adapt parentless index to the rest of the code.
2225
for node in self._graph_index.iter_entries(keys):
2226
yield node[0], node[1], node[2], ()
2227
found_keys.add(node[1])
2229
missing_keys = keys.difference(found_keys)
2231
raise RevisionNotPresent(missing_keys.pop(), self)
2233
def get_method(self, key):
2234
"""Return compression method of specified key."""
2235
return self._get_method(self._get_node(key))
2237
def _get_method(self, node):
2238
if not self._deltas:
2240
if self._compression_parent(node):
2245
def _get_node(self, key):
2247
return list(self._get_entries([key]))[0]
2249
raise RevisionNotPresent(key, self)
2251
def get_options(self, key):
2252
"""Return a list representing options.
2256
node = self._get_node(key)
2257
options = [self._get_method(node)]
2258
if node[2][0] == 'N':
2259
options.append('no-eol')
2262
def get_parent_map(self, keys):
2263
"""Get a map of the parents of keys.
2265
:param keys: The keys to look up parents for.
2266
:return: A mapping from keys to parents. Absent keys are absent from
2270
nodes = self._get_entries(keys)
2274
result[node[1]] = node[3][0]
2277
result[node[1]] = None
2280
def get_position(self, key):
2281
"""Return details needed to access the version.
2283
:return: a tuple (index, data position, size) to hand to the access
2284
logic to get the record.
2286
node = self._get_node(key)
2287
return self._node_to_position(node)
2290
"""Get all the keys in the collection.
2292
The keys are not ordered.
2295
return [node[1] for node in self._graph_index.iter_all_entries()]
2297
def _node_to_position(self, node):
2298
"""Convert an index value to position details."""
2299
bits = node[2][1:].split(' ')
2300
return node[0], int(bits[0]), int(bits[1])
2303
class _KnitKeyAccess(object):
2304
"""Access to records in .knit files."""
2306
def __init__(self, transport, mapper):
2307
"""Create a _KnitKeyAccess with transport and mapper.
2309
:param transport: The transport the access object is rooted at.
2310
:param mapper: The mapper used to map keys to .knit files.
2312
self._transport = transport
2313
self._mapper = mapper
2315
def add_raw_records(self, key_sizes, raw_data):
2316
"""Add raw knit bytes to a storage area.
2318
The data is spooled to the container writer in one bytes-record per
2321
:param sizes: An iterable of tuples containing the key and size of each
2323
:param raw_data: A bytestring containing the data.
2324
:return: A list of memos to retrieve the record later. Each memo is an
2325
opaque index memo. For _KnitKeyAccess the memo is (key, pos,
2326
length), where the key is the record key.
2328
if type(raw_data) != str:
2329
raise AssertionError(
2330
'data must be plain bytes was %s' % type(raw_data))
2333
# TODO: This can be tuned for writing to sftp and other servers where
2334
# append() is relatively expensive by grouping the writes to each key
2336
for key, size in key_sizes:
2337
path = self._mapper.map(key)
2339
base = self._transport.append_bytes(path + '.knit',
2340
raw_data[offset:offset+size])
2341
except errors.NoSuchFile:
2342
self._transport.mkdir(osutils.dirname(path))
2343
base = self._transport.append_bytes(path + '.knit',
2344
raw_data[offset:offset+size])
2348
result.append((key, base, size))
2351
def get_raw_records(self, memos_for_retrieval):
2352
"""Get the raw bytes for a records.
2354
:param memos_for_retrieval: An iterable containing the access memo for
2355
retrieving the bytes.
2356
:return: An iterator over the bytes of the records.
2358
# first pass, group into same-index request to minimise readv's issued.
2360
current_prefix = None
2361
for (key, offset, length) in memos_for_retrieval:
2362
if current_prefix == key[:-1]:
2363
current_list.append((offset, length))
2365
if current_prefix is not None:
2366
request_lists.append((current_prefix, current_list))
2367
current_prefix = key[:-1]
2368
current_list = [(offset, length)]
2369
# handle the last entry
2370
if current_prefix is not None:
2371
request_lists.append((current_prefix, current_list))
2372
for prefix, read_vector in request_lists:
2373
path = self._mapper.map(prefix) + '.knit'
2374
for pos, data in self._transport.readv(path, read_vector):
2378
class _DirectPackAccess(object):
2379
"""Access to data in one or more packs with less translation."""
2381
def __init__(self, index_to_packs):
2382
"""Create a _DirectPackAccess object.
2384
:param index_to_packs: A dict mapping index objects to the transport
2385
and file names for obtaining data.
2387
self._container_writer = None
2388
self._write_index = None
2389
self._indices = index_to_packs
2391
def add_raw_records(self, key_sizes, raw_data):
2392
"""Add raw knit bytes to a storage area.
2394
The data is spooled to the container writer in one bytes-record per
2397
:param sizes: An iterable of tuples containing the key and size of each
2399
:param raw_data: A bytestring containing the data.
2400
:return: A list of memos to retrieve the record later. Each memo is an
2401
opaque index memo. For _DirectPackAccess the memo is (index, pos,
2402
length), where the index field is the write_index object supplied
2403
to the PackAccess object.
2405
if type(raw_data) != str:
2406
raise AssertionError(
2407
'data must be plain bytes was %s' % type(raw_data))
2410
for key, size in key_sizes:
2411
p_offset, p_length = self._container_writer.add_bytes_record(
2412
raw_data[offset:offset+size], [])
2414
result.append((self._write_index, p_offset, p_length))
2417
def get_raw_records(self, memos_for_retrieval):
2418
"""Get the raw bytes for a records.
2420
:param memos_for_retrieval: An iterable containing the (index, pos,
2421
length) memo for retrieving the bytes. The Pack access method
2422
looks up the pack to use for a given record in its index_to_pack
2424
:return: An iterator over the bytes of the records.
2426
# first pass, group into same-index requests
2428
current_index = None
2429
for (index, offset, length) in memos_for_retrieval:
2430
if current_index == index:
2431
current_list.append((offset, length))
2433
if current_index is not None:
2434
request_lists.append((current_index, current_list))
2435
current_index = index
2436
current_list = [(offset, length)]
2437
# handle the last entry
2438
if current_index is not None:
2439
request_lists.append((current_index, current_list))
2440
for index, offsets in request_lists:
2441
transport, path = self._indices[index]
2442
reader = pack.make_readv_reader(transport, path, offsets)
2443
for names, read_func in reader.iter_records():
2444
yield read_func(None)
2446
def set_writer(self, writer, index, transport_packname):
2447
"""Set a writer to use for adding data."""
2448
if index is not None:
2449
self._indices[index] = transport_packname
2450
self._container_writer = writer
2451
self._write_index = index
2454
# Deprecated, use PatienceSequenceMatcher instead
2455
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher
2458
def annotate_knit(knit, revision_id):
2459
"""Annotate a knit with no cached annotations.
2461
This implementation is for knits with no cached annotations.
2462
It will work for knits with cached annotations, but this is not
2465
annotator = _KnitAnnotator(knit)
2466
return iter(annotator.annotate(revision_id))
2469
class _KnitAnnotator(object):
2470
"""Build up the annotations for a text."""
2472
def __init__(self, knit):
2475
# Content objects, differs from fulltexts because of how final newlines
2476
# are treated by knits. the content objects here will always have a
2478
self._fulltext_contents = {}
2480
# Annotated lines of specific revisions
2481
self._annotated_lines = {}
2483
# Track the raw data for nodes that we could not process yet.
2484
# This maps the revision_id of the base to a list of children that will
2485
# annotated from it.
2486
self._pending_children = {}
2488
# Nodes which cannot be extracted
2489
self._ghosts = set()
2491
# Track how many children this node has, so we know if we need to keep
2493
self._annotate_children = {}
2494
self._compression_children = {}
2496
self._all_build_details = {}
2497
# The children => parent revision_id graph
2498
self._revision_id_graph = {}
2500
self._heads_provider = None
2502
self._nodes_to_keep_annotations = set()
2503
self._generations_until_keep = 100
2505
def set_generations_until_keep(self, value):
2506
"""Set the number of generations before caching a node.
2508
Setting this to -1 will cache every merge node, setting this higher
2509
will cache fewer nodes.
2511
self._generations_until_keep = value
2513
def _add_fulltext_content(self, revision_id, content_obj):
2514
self._fulltext_contents[revision_id] = content_obj
2515
# TODO: jam 20080305 It might be good to check the sha1digest here
2516
return content_obj.text()
2518
def _check_parents(self, child, nodes_to_annotate):
2519
"""Check if all parents have been processed.
2521
:param child: A tuple of (rev_id, parents, raw_content)
2522
:param nodes_to_annotate: If child is ready, add it to
2523
nodes_to_annotate, otherwise put it back in self._pending_children
2525
for parent_id in child[1]:
2526
if (parent_id not in self._annotated_lines):
2527
# This parent is present, but another parent is missing
2528
self._pending_children.setdefault(parent_id,
2532
# This one is ready to be processed
2533
nodes_to_annotate.append(child)
2535
def _add_annotation(self, revision_id, fulltext, parent_ids,
2536
left_matching_blocks=None):
2537
"""Add an annotation entry.
2539
All parents should already have been annotated.
2540
:return: A list of children that now have their parents satisfied.
2542
a = self._annotated_lines
2543
annotated_parent_lines = [a[p] for p in parent_ids]
2544
annotated_lines = list(annotate.reannotate(annotated_parent_lines,
2545
fulltext, revision_id, left_matching_blocks,
2546
heads_provider=self._get_heads_provider()))
2547
self._annotated_lines[revision_id] = annotated_lines
2548
for p in parent_ids:
2549
ann_children = self._annotate_children[p]
2550
ann_children.remove(revision_id)
2551
if (not ann_children
2552
and p not in self._nodes_to_keep_annotations):
2553
del self._annotated_lines[p]
2554
del self._all_build_details[p]
2555
if p in self._fulltext_contents:
2556
del self._fulltext_contents[p]
2557
# Now that we've added this one, see if there are any pending
2558
# deltas to be done, certainly this parent is finished
2559
nodes_to_annotate = []
2560
for child in self._pending_children.pop(revision_id, []):
2561
self._check_parents(child, nodes_to_annotate)
2562
return nodes_to_annotate
2564
def _get_build_graph(self, key):
2565
"""Get the graphs for building texts and annotations.
2567
The data you need for creating a full text may be different than the
2568
data you need to annotate that text. (At a minimum, you need both
2569
parents to create an annotation, but only need 1 parent to generate the
2572
:return: A list of (key, index_memo) records, suitable for
2573
passing to read_records_iter to start reading in the raw data fro/
2576
if key in self._annotated_lines:
2579
pending = set([key])
2584
# get all pending nodes
2586
this_iteration = pending
2587
build_details = self._knit._index.get_build_details(this_iteration)
2588
self._all_build_details.update(build_details)
2589
# new_nodes = self._knit._index._get_entries(this_iteration)
2591
for key, details in build_details.iteritems():
2592
(index_memo, compression_parent, parents,
2593
record_details) = details
2594
self._revision_id_graph[key] = parents
2595
records.append((key, index_memo))
2596
# Do we actually need to check _annotated_lines?
2597
pending.update(p for p in parents
2598
if p not in self._all_build_details)
2599
if compression_parent:
2600
self._compression_children.setdefault(compression_parent,
2603
for parent in parents:
2604
self._annotate_children.setdefault(parent,
2606
num_gens = generation - kept_generation
2607
if ((num_gens >= self._generations_until_keep)
2608
and len(parents) > 1):
2609
kept_generation = generation
2610
self._nodes_to_keep_annotations.add(key)
2612
missing_versions = this_iteration.difference(build_details.keys())
2613
self._ghosts.update(missing_versions)
2614
for missing_version in missing_versions:
2615
# add a key, no parents
2616
self._revision_id_graph[missing_version] = ()
2617
pending.discard(missing_version) # don't look for it
2618
if self._ghosts.intersection(self._compression_children):
2620
"We cannot have nodes which have a ghost compression parent:\n"
2622
"compression children: %r"
2623
% (self._ghosts, self._compression_children))
2624
# Cleanout anything that depends on a ghost so that we don't wait for
2625
# the ghost to show up
2626
for node in self._ghosts:
2627
if node in self._annotate_children:
2628
# We won't be building this node
2629
del self._annotate_children[node]
2630
# Generally we will want to read the records in reverse order, because
2631
# we find the parent nodes after the children
2635
def _annotate_records(self, records):
2636
"""Build the annotations for the listed records."""
2637
# We iterate in the order read, rather than a strict order requested
2638
# However, process what we can, and put off to the side things that
2639
# still need parents, cleaning them up when those parents are
2641
for (rev_id, record,
2642
digest) in self._knit._read_records_iter(records):
2643
if rev_id in self._annotated_lines:
2645
parent_ids = self._revision_id_graph[rev_id]
2646
parent_ids = [p for p in parent_ids if p not in self._ghosts]
2647
details = self._all_build_details[rev_id]
2648
(index_memo, compression_parent, parents,
2649
record_details) = details
2650
nodes_to_annotate = []
2651
# TODO: Remove the punning between compression parents, and
2652
# parent_ids, we should be able to do this without assuming
2654
if len(parent_ids) == 0:
2655
# There are no parents for this node, so just add it
2656
# TODO: This probably needs to be decoupled
2657
fulltext_content, delta = self._knit._factory.parse_record(
2658
rev_id, record, record_details, None)
2659
fulltext = self._add_fulltext_content(rev_id, fulltext_content)
2660
nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
2661
parent_ids, left_matching_blocks=None))
2663
child = (rev_id, parent_ids, record)
2664
# Check if all the parents are present
2665
self._check_parents(child, nodes_to_annotate)
2666
while nodes_to_annotate:
2667
# Should we use a queue here instead of a stack?
2668
(rev_id, parent_ids, record) = nodes_to_annotate.pop()
2669
(index_memo, compression_parent, parents,
2670
record_details) = self._all_build_details[rev_id]
2671
if compression_parent is not None:
2672
comp_children = self._compression_children[compression_parent]
2673
if rev_id not in comp_children:
2674
raise AssertionError("%r not in compression children %r"
2675
% (rev_id, comp_children))
2676
# If there is only 1 child, it is safe to reuse this
2678
reuse_content = (len(comp_children) == 1
2679
and compression_parent not in
2680
self._nodes_to_keep_annotations)
2682
# Remove it from the cache since it will be changing
2683
parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
2684
# Make sure to copy the fulltext since it might be
2686
parent_fulltext = list(parent_fulltext_content.text())
2688
parent_fulltext_content = self._fulltext_contents[compression_parent]
2689
parent_fulltext = parent_fulltext_content.text()
2690
comp_children.remove(rev_id)
2691
fulltext_content, delta = self._knit._factory.parse_record(
2692
rev_id, record, record_details,
2693
parent_fulltext_content,
2694
copy_base_content=(not reuse_content))
2695
fulltext = self._add_fulltext_content(rev_id,
2697
blocks = KnitContent.get_line_delta_blocks(delta,
2698
parent_fulltext, fulltext)
2700
fulltext_content = self._knit._factory.parse_fulltext(
2702
fulltext = self._add_fulltext_content(rev_id,
2705
nodes_to_annotate.extend(
2706
self._add_annotation(rev_id, fulltext, parent_ids,
2707
left_matching_blocks=blocks))
2709
def _get_heads_provider(self):
2710
"""Create a heads provider for resolving ancestry issues."""
2711
if self._heads_provider is not None:
2712
return self._heads_provider
2713
parent_provider = _mod_graph.DictParentsProvider(
2714
self._revision_id_graph)
2715
graph_obj = _mod_graph.Graph(parent_provider)
2716
head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
2717
self._heads_provider = head_cache
2720
def annotate(self, key):
2721
"""Return the annotated fulltext at the given key.
2723
:param key: The key to annotate.
2725
if True or len(self._knit._fallback_vfs) > 0:
2726
# stacked knits can't use the fast path at present.
2727
return self._simple_annotate(key)
2728
records = self._get_build_graph(key)
2729
if key in self._ghosts:
2730
raise errors.RevisionNotPresent(key, self._knit)
2731
self._annotate_records(records)
2732
return self._annotated_lines[key]
2734
def _simple_annotate(self, key):
2735
"""Return annotated fulltext, rediffing from the full texts.
2737
This is slow but makes no assumptions about the repository
2738
being able to produce line deltas.
2740
# TODO: this code generates a parent maps of present ancestors; it
2741
# could be split out into a separate method, and probably should use
2742
# iter_ancestry instead. -- mbp and robertc 20080704
2743
graph = _mod_graph.Graph(self._knit)
2744
head_cache = _mod_graph.FrozenHeadsCache(graph)
2745
search = graph._make_breadth_first_searcher([key])
2749
present, ghosts = search.next_with_ghosts()
2750
except StopIteration:
2752
keys.update(present)
2753
parent_map = self._knit.get_parent_map(keys)
2755
reannotate = annotate.reannotate
2756
for record in self._knit.get_record_stream(keys, 'topological', True):
2758
fulltext = split_lines(record.get_bytes_as('fulltext'))
2759
parents = parent_map[key]
2760
if parents is not None:
2761
parent_lines = [parent_cache[parent] for parent in parent_map[key]]
2764
parent_cache[key] = list(
2765
reannotate(parent_lines, fulltext, key, None, head_cache))
2767
return parent_cache[key]
2769
raise errors.RevisionNotPresent(key, self._knit)
2773
from bzrlib._knit_load_data_c import _load_data_c as _load_data
2775
from bzrlib._knit_load_data_py import _load_data_py as _load_data