# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Knit versionedfile implementation.
19
A knit is a versioned file implementation that supports efficient append only
23
lifeless: the data file is made up of "delta records". each delta record has a delta header
24
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
25
the -expanded data- (ie, the delta applied to the parent). the delta also ends with a
26
end-marker; simply "end VERSION"
28
delta can be line or full contents.a
29
... the 8's there are the index number of the annotation.
30
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
34
8 e.set('executable', 'yes')
36
8 if elt.get('executable') == 'yes':
37
8 ie.executable = True
38
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad
42
09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
43
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
44
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
45
09:33 < lifeless> right
46
09:33 < jrydberg> lifeless: the position and size is the range in the data file
49
so the index sequence is the dictionary compressed sequence number used
50
in the deltas to provide line annotation
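# Illustrative sketch (not part of the original file): for the example record
# above, the corresponding index entry is conceptually the tuple jrydberg
# describes - roughly
#
#   ('robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad',  # version id
#    ['line-delta'],                                               # options
#    position, size,                       # byte range in the .knit data file
#    [parent_sequence_number])             # dictionary-compressed parents
#
# with parents referred to by their sequence number in the index rather than
# by repeating their full version ids.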
from cStringIO import StringIO
from itertools import izip

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
""")
from bzrlib.errors import (
    InvalidRevisionId,
    KnitCorrupt,
    RevisionNotPresent,
    RevisionAlreadyPresent,
    )
from bzrlib.osutils import (
    contains_whitespace,
    sha_string,
    split_lines,
    )
from bzrlib.versionedfile import (
    AbsentContentFactory,
    adapter_registry,
    ChunkedContentFactory,
    ContentFactory,
    VersionedFiles,
    )
# TODO: Split out code specific to this format into an associated object.

# TODO: Can we put in some kind of value to check that the index and data
# files belong together?

# TODO: accommodate binaries, perhaps by storing a byte count

# TODO: function to check whole file

# TODO: atomically append data, then measure backwards from the cursor
# position after writing to work out where it was located.  we may need to
# bypass python file buffering.

DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
_STREAM_MIN_BUFFER_SIZE = 5*1024*1024

class KnitAdapter(object):
    """Base class for knit record adaptation."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas from.
            May be None for adapters that do not need to access basis texts.
        """
        self._data = KnitVersionedFiles(None, None)
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._basis_vf = basis_vf
145
class FTAnnotatedToUnannotated(KnitAdapter):
146
"""An adapter from FT annotated knits to unannotated ones."""
148
def get_bytes(self, factory):
149
annotated_compressed_bytes = factory._raw_record
151
self._data._parse_record_unchecked(annotated_compressed_bytes)
152
content = self._annotate_factory.parse_fulltext(contents, rec[1])
153
size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
157
class DeltaAnnotatedToUnannotated(KnitAdapter):
158
"""An adapter for deltas from annotated to unannotated."""
160
def get_bytes(self, factory):
161
annotated_compressed_bytes = factory._raw_record
163
self._data._parse_record_unchecked(annotated_compressed_bytes)
164
delta = self._annotate_factory.parse_line_delta(contents, rec[1],
166
contents = self._plain_factory.lower_line_delta(delta)
167
size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)

class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to full texts."""
174
def get_bytes(self, factory):
175
annotated_compressed_bytes = factory._raw_record
177
self._data._parse_record_unchecked(annotated_compressed_bytes)
178
content, delta = self._annotate_factory.parse_record(factory.key[-1],
179
contents, factory._build_details, None)
180
return ''.join(content.text())

class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter from annotated deltas to full texts."""
186
def get_bytes(self, factory):
187
annotated_compressed_bytes = factory._raw_record
189
self._data._parse_record_unchecked(annotated_compressed_bytes)
190
delta = self._annotate_factory.parse_line_delta(contents, rec[1],
192
compression_parent = factory.parents[0]
193
basis_entry = self._basis_vf.get_record_stream(
194
[compression_parent], 'unordered', True).next()
195
if basis_entry.storage_kind == 'absent':
196
raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
197
basis_chunks = basis_entry.get_bytes_as('chunked')
198
basis_lines = osutils.chunks_to_lines(basis_chunks)
199
# Manually apply the delta because we have one annotated content and
201
basis_content = PlainKnitContent(basis_lines, compression_parent)
202
basis_content.apply_delta(delta, rec[1])
203
basis_content._should_strip_eol = factory._build_details[1]
204
return ''.join(basis_content.text())

class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to full texts."""
210
def get_bytes(self, factory):
211
compressed_bytes = factory._raw_record
213
self._data._parse_record_unchecked(compressed_bytes)
214
content, delta = self._plain_factory.parse_record(factory.key[-1],
215
contents, factory._build_details, None)
216
return ''.join(content.text())

class DeltaPlainToFullText(KnitAdapter):
    """An adapter from plain deltas to full texts."""
222
def get_bytes(self, factory):
223
compressed_bytes = factory._raw_record
225
self._data._parse_record_unchecked(compressed_bytes)
226
delta = self._plain_factory.parse_line_delta(contents, rec[1])
227
compression_parent = factory.parents[0]
228
# XXX: string splitting overhead.
229
basis_entry = self._basis_vf.get_record_stream(
230
[compression_parent], 'unordered', True).next()
231
if basis_entry.storage_kind == 'absent':
232
raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
233
basis_chunks = basis_entry.get_bytes_as('chunked')
234
basis_lines = osutils.chunks_to_lines(basis_chunks)
235
basis_content = PlainKnitContent(basis_lines, compression_parent)
236
# Manually apply the delta because we have one annotated content and
238
content, _ = self._plain_factory.parse_record(rec[1], contents,
239
factory._build_details, basis_content)
240
return ''.join(content.text())
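# Illustrative sketch (not part of the original module): adapters such as the
# classes above are normally looked up through adapter_registry, keyed by
# (source storage kind, target form), and then applied to a record factory,
# e.g.:
#
#   adapter_factory = adapter_registry.get(('knit-annotated-ft-gz', 'fulltext'))
#   adapter = adapter_factory(basis_vf)  # basis_vf may be None for fulltexts
#   text = adapter.get_bytes(record)
#
# This mirrors the lookup performed in KnitContentFactory.get_bytes_as below.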
243
class KnitContentFactory(ContentFactory):
244
"""Content factory for streaming from knits.
246
:seealso ContentFactory:
249
def __init__(self, key, parents, build_details, sha1, raw_record,
250
annotated, knit=None, network_bytes=None):
251
"""Create a KnitContentFactory for key.
254
:param parents: The parents.
255
:param build_details: The build details as returned from
257
:param sha1: The sha1 expected from the full text of this object.
258
:param raw_record: The bytes of the knit data from disk.
259
:param annotated: True if the raw data is annotated.
260
:param network_bytes: None to calculate the network bytes on demand,
261
not-none if they are already known.
263
ContentFactory.__init__(self)
266
self.parents = parents
267
if build_details[0] == 'line-delta':
272
annotated_kind = 'annotated-'
275
self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
276
self._raw_record = raw_record
277
self._network_bytes = network_bytes
278
self._build_details = build_details
281
def _create_network_bytes(self):
282
"""Create a fully serialised network version for transmission."""
283
# storage_kind, key, parents, Noeol, raw_record
284
key_bytes = '\x00'.join(self.key)
285
if self.parents is None:
286
parent_bytes = 'None:'
288
parent_bytes = '\t'.join('\x00'.join(key) for key in self.parents)
289
if self._build_details[1]:
293
network_bytes = "%s\n%s\n%s\n%s%s" % (self.storage_kind, key_bytes,
294
parent_bytes, noeol, self._raw_record)
295
self._network_bytes = network_bytes
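        # Illustrative sketch (not part of the original code): for a record
        # with key ('file-id', 'rev-2') and one parent ('file-id', 'rev-1'),
        # the serialised form built above is roughly
        #
        #   'knit-annotated-ft-gz\nfile-id\x00rev-2\nfile-id\x00rev-1\n'
        #   + <flag byte> + <raw gzipped record bytes>
        #
        # where the flag byte is 'N' only for no-eol records and the raw
        # record follows immediately, with no separator.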
297
def get_bytes_as(self, storage_kind):
298
if storage_kind == self.storage_kind:
299
if self._network_bytes is None:
300
self._create_network_bytes()
301
return self._network_bytes
302
if ('-ft-' in self.storage_kind and
303
storage_kind in ('chunked', 'fulltext')):
304
adapter_key = (self.storage_kind, 'fulltext')
305
adapter_factory = adapter_registry.get(adapter_key)
306
adapter = adapter_factory(None)
307
bytes = adapter.get_bytes(self)
308
if storage_kind == 'chunked':
312
if self._knit is not None:
313
# Not redundant with direct conversion above - that only handles
315
if storage_kind == 'chunked':
316
return self._knit.get_lines(self.key[0])
317
elif storage_kind == 'fulltext':
318
return self._knit.get_text(self.key[0])
319
raise errors.UnavailableRepresentation(self.key, storage_kind,
323
class LazyKnitContentFactory(ContentFactory):
324
"""A ContentFactory which can either generate full text or a wire form.
326
:seealso ContentFactory:
329
def __init__(self, key, parents, generator, first):
330
"""Create a LazyKnitContentFactory.
332
:param key: The key of the record.
333
:param parents: The parents of the record.
334
:param generator: A _ContentMapGenerator containing the record for this
336
:param first: Is this the first content object returned from generator?
337
if it is, its storage kind is knit-delta-closure, otherwise it is
338
knit-delta-closure-ref
341
self.parents = parents
343
self._generator = generator
344
self.storage_kind = "knit-delta-closure"
346
self.storage_kind = self.storage_kind + "-ref"
349
def get_bytes_as(self, storage_kind):
350
if storage_kind == self.storage_kind:
352
return self._generator._wire_bytes()
354
# all the keys etc are contained in the bytes returned in the
357
if storage_kind in ('chunked', 'fulltext'):
358
chunks = self._generator._get_one_work(self.key).text()
359
if storage_kind == 'chunked':
362
return ''.join(chunks)
363
raise errors.UnavailableRepresentation(self.key, storage_kind,
367
def knit_delta_closure_to_records(storage_kind, bytes, line_end):
    """Convert a network record to an iterator over stream records.
370
:param storage_kind: The storage kind of the record.
371
Must be 'knit-delta-closure'.
372
:param bytes: The bytes of the record on the network.
374
generator = _NetworkContentMapGenerator(bytes, line_end)
375
return generator.get_record_stream()
378
def knit_network_to_record(storage_kind, bytes, line_end):
379
"""Convert a network record to a record object.
381
:param storage_kind: The storage kind of the record.
382
:param bytes: The bytes of the record on the network.
385
line_end = bytes.find('\n', start)
386
key = tuple(bytes[start:line_end].split('\x00'))
388
line_end = bytes.find('\n', start)
389
parent_line = bytes[start:line_end]
390
if parent_line == 'None:':
394
[tuple(segment.split('\x00')) for segment in parent_line.split('\t')
397
noeol = bytes[start] == 'N'
398
if 'ft' in storage_kind:
401
method = 'line-delta'
402
build_details = (method, noeol)
404
raw_record = bytes[start:]
405
annotated = 'annotated' in storage_kind
406
return [KnitContentFactory(key, parents, build_details, None, raw_record,
407
annotated, network_bytes=bytes)]
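# Illustrative sketch (not part of the original module): given network bytes
# shaped like
#
#   'knit-ft-gz\nfile-id\x00rev-2\nfile-id\x00rev-1\nN<gzipped record>'
#
# the parsing above yields a single KnitContentFactory with key
# ('file-id', 'rev-2'), parents (('file-id', 'rev-1'),) and build_details
# ('fulltext', True) - the 'N' flag marking a no-eol text.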
410
class KnitContent(object):
    """Content of a knit version to which deltas can be applied.

    This is always stored in memory as a list of lines with \n at the end,
    plus a flag saying if the final newline is really there or not, because
    that corresponds to the on-disk knit representation.
    """
419
self._should_strip_eol = False
421
def apply_delta(self, delta, new_version_id):
422
"""Apply delta to this object to become new_version_id."""
423
raise NotImplementedError(self.apply_delta)
425
def line_delta_iter(self, new_lines):
426
"""Generate line-based delta from this content to new_lines."""
427
new_texts = new_lines.text()
428
old_texts = self.text()
429
s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
430
for tag, i1, i2, j1, j2 in s.get_opcodes():
433
# ofrom, oto, length, data
434
yield i1, i2, j2 - j1, new_lines._lines[j1:j2]
436
def line_delta(self, new_lines):
437
return list(self.line_delta_iter(new_lines))
440
def get_line_delta_blocks(knit_delta, source, target):
441
"""Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
442
target_len = len(target)
445
for s_begin, s_end, t_len, new_text in knit_delta:
446
true_n = s_begin - s_pos
449
# knit deltas do not provide reliable info about whether the
450
# last line of a file matches, due to eol handling.
451
if source[s_pos + n -1] != target[t_pos + n -1]:
454
yield s_pos, t_pos, n
455
t_pos += t_len + true_n
457
n = target_len - t_pos
459
if source[s_pos + n -1] != target[t_pos + n -1]:
462
yield s_pos, t_pos, n
463
yield s_pos + (target_len - t_pos), target_len, 0
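    # Illustrative sketch (not part of the original code): for a delta with
    # the single hunk (2, 4, 1, ['new line\n']) applied to a 6 line source
    # giving a 5 line target, the blocks yielded are roughly
    #
    #   (0, 0, 2)   # the unchanged region before the hunk
    #   (4, 3, 2)   # the unchanged tail (possibly shortened by one line,
    #               # since knit deltas are unreliable about the last EOL)
    #   (6, 5, 0)   # the terminating empty block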
466
class AnnotatedKnitContent(KnitContent):
467
"""Annotated content."""
469
def __init__(self, lines):
470
KnitContent.__init__(self)
474
"""Return a list of (origin, text) for each content line."""
475
lines = self._lines[:]
476
if self._should_strip_eol:
477
origin, last_line = lines[-1]
478
lines[-1] = (origin, last_line.rstrip('\n'))
481
def apply_delta(self, delta, new_version_id):
482
"""Apply delta to this object to become new_version_id."""
485
for start, end, count, delta_lines in delta:
486
lines[offset+start:offset+end] = delta_lines
487
offset = offset + (start - end) + count
491
lines = [text for origin, text in self._lines]
492
except ValueError, e:
493
# most commonly (only?) caused by the internal form of the knit
494
# missing annotation information because of a bug - see thread
496
raise KnitCorrupt(self,
497
"line in annotated knit missing annotation information: %s"
499
if self._should_strip_eol:
500
lines[-1] = lines[-1].rstrip('\n')
504
return AnnotatedKnitContent(self._lines[:])
507
class PlainKnitContent(KnitContent):
508
"""Unannotated content.
510
When annotate[_iter] is called on this content, the same version is reported
511
for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent
515
def __init__(self, lines, version_id):
516
KnitContent.__init__(self)
518
self._version_id = version_id
521
"""Return a list of (origin, text) for each content line."""
522
return [(self._version_id, line) for line in self._lines]
524
def apply_delta(self, delta, new_version_id):
525
"""Apply delta to this object to become new_version_id."""
528
for start, end, count, delta_lines in delta:
529
lines[offset+start:offset+end] = delta_lines
530
offset = offset + (start - end) + count
531
self._version_id = new_version_id
534
return PlainKnitContent(self._lines[:], self._version_id)
538
if self._should_strip_eol:
540
lines[-1] = lines[-1].rstrip('\n')
544
class _KnitFactory(object):
545
"""Base class for common Factory functions."""
547
def parse_record(self, version_id, record, record_details,
548
base_content, copy_base_content=True):
549
"""Parse a record into a full content object.
551
:param version_id: The official version id for this content
552
:param record: The data returned by read_records_iter()
553
:param record_details: Details about the record returned by
555
:param base_content: If get_build_details returns a compression_parent,
556
you must return a base_content here, else use None
557
:param copy_base_content: When building from the base_content, decide
558
you can either copy it and return a new object, or modify it in
560
:return: (content, delta) A Content object and possibly a line-delta,
563
method, noeol = record_details
564
if method == 'line-delta':
565
if copy_base_content:
566
content = base_content.copy()
568
content = base_content
569
delta = self.parse_line_delta(record, version_id)
570
content.apply_delta(delta, version_id)
572
content = self.parse_fulltext(record, version_id)
574
content._should_strip_eol = noeol
575
return (content, delta)
578
class KnitAnnotateFactory(_KnitFactory):
579
"""Factory for creating annotated Content objects."""
583
def make(self, lines, version_id):
584
num_lines = len(lines)
585
return AnnotatedKnitContent(zip([version_id] * num_lines, lines))
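
    # Illustrative sketch (not part of the original code):
    #   KnitAnnotateFactory().make(['a\n', 'b\n'], 'rev-1')
    # produces an AnnotatedKnitContent whose internal lines are
    #   [('rev-1', 'a\n'), ('rev-1', 'b\n')]
    # i.e. each line starts out annotated with the version that introduces it.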
587
def parse_fulltext(self, content, version_id):
588
"""Convert fulltext to internal representation
590
fulltext content is of the format
591
revid(utf8) plaintext\n
592
internal representation is of the format:
595
# TODO: jam 20070209 The tests expect this to be returned as tuples,
596
# but the code itself doesn't really depend on that.
597
# Figure out a way to not require the overhead of turning the
598
# list back into tuples.
599
lines = [tuple(line.split(' ', 1)) for line in content]
600
return AnnotatedKnitContent(lines)
602
def parse_line_delta_iter(self, lines):
603
return iter(self.parse_line_delta(lines))
605
def parse_line_delta(self, lines, version_id, plain=False):
606
"""Convert a line based delta into internal representation.
608
line delta is in the form of:
609
intstart intend intcount
611
revid(utf8) newline\n
612
internal representation is
613
(start, end, count, [1..count tuples (revid, newline)])
615
:param plain: If True, the lines are returned as a plain
616
list without annotations, not as a list of (origin, content) tuples, i.e.
617
(start, end, count, [1..count newline])
624
def cache_and_return(line):
625
origin, text = line.split(' ', 1)
626
return cache.setdefault(origin, origin), text
628
# walk through the lines parsing.
629
# Note that the plain test is explicitly pulled out of the
630
# loop to minimise any performance impact
633
start, end, count = [int(n) for n in header.split(',')]
634
contents = [next().split(' ', 1)[1] for i in xrange(count)]
635
result.append((start, end, count, contents))
638
start, end, count = [int(n) for n in header.split(',')]
639
contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
640
result.append((start, end, count, contents))
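        # Illustrative sketch (not part of the original code): the serialised
        # delta
        #
        #   2,4,1
        #   rev-9 replacement line
        #
        # parses to [(2, 4, 1, [('rev-9', 'replacement line\n')])], or to
        # [(2, 4, 1, ['replacement line\n'])] when plain=True.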
643
def get_fulltext_content(self, lines):
644
"""Extract just the content lines from a fulltext."""
645
return (line.split(' ', 1)[1] for line in lines)
647
def get_linedelta_content(self, lines):
648
"""Extract just the content from a line delta.
650
This doesn't return all of the extra information stored in a delta.
651
Only the actual content lines.
656
header = header.split(',')
657
count = int(header[2])
658
for i in xrange(count):
659
origin, text = next().split(' ', 1)
662
def lower_fulltext(self, content):
663
"""convert a fulltext content record into a serializable form.
665
see parse_fulltext which this inverts.
667
return ['%s %s' % (o, t) for o, t in content._lines]
669
def lower_line_delta(self, delta):
670
"""convert a delta into a serializable form.
672
See parse_line_delta which this inverts.
674
# TODO: jam 20070209 We only do the caching thing to make sure that
675
# the origin is a valid utf-8 line, eventually we could remove it
677
for start, end, c, lines in delta:
678
out.append('%d,%d,%d\n' % (start, end, c))
679
out.extend(origin + ' ' + text
680
for origin, text in lines)
683
def annotate(self, knit, key):
684
content = knit._get_content(key)
685
# adjust for the fact that serialised annotations are only key suffixes
687
if type(key) is tuple:
689
origins = content.annotate()
691
for origin, line in origins:
692
result.append((prefix + (origin,), line))
695
# XXX: This smells a bit. Why would key ever be a non-tuple here?
696
# Aren't keys defined to be tuples? -- spiv 20080618
697
return content.annotate()
700
class KnitPlainFactory(_KnitFactory):
701
"""Factory for creating plain Content objects."""
705
def make(self, lines, version_id):
706
return PlainKnitContent(lines, version_id)
708
def parse_fulltext(self, content, version_id):
        """This parses an unannotated fulltext.

        Note that this is not a noop - the internal representation
        has (versionid, line) - it's just a constant versionid.
        """
        return self.make(content, version_id)
716
def parse_line_delta_iter(self, lines, version_id):
718
num_lines = len(lines)
719
while cur < num_lines:
722
start, end, c = [int(n) for n in header.split(',')]
723
yield start, end, c, lines[cur:cur+c]
726
def parse_line_delta(self, lines, version_id):
727
return list(self.parse_line_delta_iter(lines, version_id))
729
def get_fulltext_content(self, lines):
730
"""Extract just the content lines from a fulltext."""
733
def get_linedelta_content(self, lines):
734
"""Extract just the content from a line delta.
736
This doesn't return all of the extra information stored in a delta.
737
Only the actual content lines.
742
header = header.split(',')
743
count = int(header[2])
744
for i in xrange(count):
747
def lower_fulltext(self, content):
748
return content.text()
750
def lower_line_delta(self, delta):
752
for start, end, c, lines in delta:
753
out.append('%d,%d,%d\n' % (start, end, c))
757
def annotate(self, knit, key):
758
annotator = _KnitAnnotator(knit)
759
return annotator.annotate_flat(key)
763
def make_file_factory(annotated, mapper):
764
"""Create a factory for creating a file based KnitVersionedFiles.
766
This is only functional enough to run interface tests, it doesn't try to
767
provide a full pack environment.
769
:param annotated: knit annotations are wanted.
770
:param mapper: The mapper from keys to paths.
772
def factory(transport):
773
index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
774
access = _KnitKeyAccess(transport, mapper)
775
return KnitVersionedFiles(index, access, annotated=annotated)
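    # Illustrative sketch (not part of the original code), assuming a
    # transport object and a mapper such as
    # bzrlib.versionedfile.ConstantMapper are available:
    #
    #   factory = make_file_factory(True, ConstantMapper('filename'))
    #   vf = factory(transport)
    #   vf.add_lines(('rev-1',), [], ['hello\n'])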
779
def make_pack_factory(graph, delta, keylength):
780
"""Create a factory for creating a pack based VersionedFiles.
782
This is only functional enough to run interface tests, it doesn't try to
783
provide a full pack environment.
785
:param graph: Store a graph.
786
:param delta: Delta compress contents.
    :param keylength: How long keys should be.
789
def factory(transport):
790
parents = graph or delta
796
max_delta_chain = 200
799
graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
800
key_elements=keylength)
801
stream = transport.open_write_stream('newpack')
802
writer = pack.ContainerWriter(stream.write)
804
index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
805
deltas=delta, add_callback=graph_index.add_nodes)
806
access = _DirectPackAccess({})
807
access.set_writer(writer, graph_index, (transport, 'newpack'))
808
result = KnitVersionedFiles(index, access,
809
max_delta_chain=max_delta_chain)
810
result.stream = stream
811
result.writer = writer
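    # Illustrative sketch (not part of the original code): a pack backed
    # store for 1-tuple keys with graph and delta support might be obtained
    # with
    #
    #   vf = make_pack_factory(graph=True, delta=True, keylength=1)(transport)
    #   vf.add_lines(('rev-1',), (), ['one line\n'])
    #   cleanup_pack_knit(vf)   # flush and close the write stream when done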
816
def cleanup_pack_knit(versioned_files):
817
versioned_files.stream.close()
818
versioned_files.writer.end()
821
def _get_total_build_size(self, keys, positions):
822
"""Determine the total bytes to build these keys.
824
(helper function because _KnitGraphIndex and _KndxIndex work the same, but
825
don't inherit from a common base.)
827
:param keys: Keys that we want to build
828
:param positions: dict of {key, (info, index_memo, comp_parent)} (such
829
as returned by _get_components_positions)
830
:return: Number of bytes to build those keys
832
all_build_index_memos = {}
836
for key in build_keys:
837
# This is mostly for the 'stacked' case
838
# Where we will be getting the data from a fallback
839
if key not in positions:
841
_, index_memo, compression_parent = positions[key]
842
all_build_index_memos[key] = index_memo
843
if compression_parent not in all_build_index_memos:
844
next_keys.add(compression_parent)
845
build_keys = next_keys
846
return sum([index_memo[2] for index_memo
847
in all_build_index_memos.itervalues()])
850
class KnitVersionedFiles(VersionedFiles):
851
"""Storage for many versioned files using knit compression.
853
Backend storage is managed by indices and data objects.
855
:ivar _index: A _KnitGraphIndex or similar that can describe the
856
parents, graph, compression and data location of entries in this
857
KnitVersionedFiles. Note that this is only the index for
858
*this* vfs; if there are fallbacks they must be queried separately.
861
def __init__(self, index, data_access, max_delta_chain=200,
862
annotated=False, reload_func=None):
863
"""Create a KnitVersionedFiles with index and data_access.
865
:param index: The index for the knit data.
866
:param data_access: The access object to store and retrieve knit
868
:param max_delta_chain: The maximum number of deltas to permit during
869
insertion. Set to 0 to prohibit the use of deltas.
870
:param annotated: Set to True to cause annotations to be calculated and
871
stored during insertion.
        :param reload_func: A function that can be called if we think we need
873
to reload the pack listing and try again. See
874
'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
877
self._access = data_access
878
self._max_delta_chain = max_delta_chain
880
self._factory = KnitAnnotateFactory()
882
self._factory = KnitPlainFactory()
883
self._fallback_vfs = []
884
self._reload_func = reload_func
887
return "%s(%r, %r)" % (
888
self.__class__.__name__,
892
def add_fallback_versioned_files(self, a_versioned_files):
893
"""Add a source of texts for texts not present in this knit.
895
:param a_versioned_files: A VersionedFiles object.
897
self._fallback_vfs.append(a_versioned_files)
899
def add_lines(self, key, parents, lines, parent_texts=None,
900
left_matching_blocks=None, nostore_sha=None, random_id=False,
902
"""See VersionedFiles.add_lines()."""
903
self._index._check_write_ok()
904
self._check_add(key, lines, random_id, check_content)
906
# The caller might pass None if there is no graph data, but kndx
907
# indexes can't directly store that, so we give them
908
# an empty tuple instead.
910
line_bytes = ''.join(lines)
911
return self._add(key, lines, parents,
912
parent_texts, left_matching_blocks, nostore_sha, random_id,
913
line_bytes=line_bytes)
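    # Illustrative sketch (not part of the original code):
    #   sha1, length, _ = vf.add_lines(('rev-2',), [('rev-1',)], ['new text\n'])
    # The returned sha1 and length describe the stored fulltext; whether a
    # delta against the leftmost present parent is used is decided internally
    # (see _add and _check_should_delta below).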
915
def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
916
"""See VersionedFiles._add_text()."""
917
self._index._check_write_ok()
918
self._check_add(key, None, random_id, check_content=False)
919
if text.__class__ is not str:
920
raise errors.BzrBadParameterUnicode("text")
922
# The caller might pass None if there is no graph data, but kndx
923
# indexes can't directly store that, so we give them
924
# an empty tuple instead.
926
return self._add(key, None, parents,
927
None, None, nostore_sha, random_id,
930
def _add(self, key, lines, parents, parent_texts,
931
left_matching_blocks, nostore_sha, random_id,
933
"""Add a set of lines on top of version specified by parents.
935
Any versions not present will be converted into ghosts.
        :param lines: A list of strings where each one is a single line (has a
            single newline at the end of the string).  This is now optional
            (callers can pass None).  It is left in its location for backwards
            compatibility.  If provided, ''.join(lines) must equal line_bytes.
941
:param line_bytes: A single string containing the content
943
We pass both lines and line_bytes because different routes bring the
944
values to this function. And for memory efficiency, we don't want to
945
have to split/join on-demand.
947
# first thing, if the content is something we don't need to store, find
949
digest = sha_string(line_bytes)
950
if nostore_sha == digest:
951
raise errors.ExistingContent
954
if parent_texts is None:
956
# Do a single query to ascertain parent presence; we only compress
957
# against parents in the same kvf.
958
present_parent_map = self._index.get_parent_map(parents)
959
for parent in parents:
960
if parent in present_parent_map:
961
present_parents.append(parent)
963
# Currently we can only compress against the left most present parent.
964
if (len(present_parents) == 0 or
965
present_parents[0] != parents[0]):
968
# To speed the extract of texts the delta chain is limited
969
# to a fixed number of deltas. This should minimize both
            # I/O and the time spent applying deltas.
971
delta = self._check_should_delta(present_parents[0])
973
text_length = len(line_bytes)
976
# Note: line_bytes is not modified to add a newline, that is tracked
977
# via the no_eol flag. 'lines' *is* modified, because that is the
978
# general values needed by the Content code.
979
if line_bytes and line_bytes[-1] != '\n':
980
options.append('no-eol')
982
# Copy the existing list, or create a new one
984
lines = osutils.split_lines(line_bytes)
987
# Replace the last line with one that ends in a final newline
988
lines[-1] = lines[-1] + '\n'
990
lines = osutils.split_lines(line_bytes)
992
for element in key[:-1]:
993
if type(element) is not str:
994
raise TypeError("key contains non-strings: %r" % (key,))
996
key = key[:-1] + ('sha1:' + digest,)
997
elif type(key[-1]) is not str:
998
raise TypeError("key contains non-strings: %r" % (key,))
999
# Knit hunks are still last-element only
1000
version_id = key[-1]
1001
content = self._factory.make(lines, version_id)
1003
# Hint to the content object that its text() call should strip the
1005
content._should_strip_eol = True
1006
if delta or (self._factory.annotated and len(present_parents) > 0):
1007
# Merge annotations from parent texts if needed.
1008
delta_hunks = self._merge_annotations(content, present_parents,
1009
parent_texts, delta, self._factory.annotated,
1010
left_matching_blocks)
1013
options.append('line-delta')
1014
store_lines = self._factory.lower_line_delta(delta_hunks)
1015
size, bytes = self._record_to_data(key, digest,
1018
options.append('fulltext')
1019
# isinstance is slower and we have no hierarchy.
1020
if self._factory.__class__ is KnitPlainFactory:
1021
# Use the already joined bytes saving iteration time in
1023
dense_lines = [line_bytes]
1025
dense_lines.append('\n')
1026
size, bytes = self._record_to_data(key, digest,
1029
# get mixed annotation + content and feed it into the
1031
store_lines = self._factory.lower_fulltext(content)
1032
size, bytes = self._record_to_data(key, digest,
1035
access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
1036
self._index.add_records(
1037
((key, options, access_memo, parents),),
1038
random_id=random_id)
1039
return digest, text_length, content
1041
def annotate(self, key):
1042
"""See VersionedFiles.annotate."""
1043
return self._factory.annotate(self, key)
1045
def get_annotator(self):
1046
return _KnitAnnotator(self)
1048
def check(self, progress_bar=None, keys=None):
1049
"""See VersionedFiles.check()."""
1051
return self._logical_check()
1053
# At the moment, check does not extra work over get_record_stream
1054
return self.get_record_stream(keys, 'unordered', True)
1056
def _logical_check(self):
1057
# This doesn't actually test extraction of everything, but that will
1058
# impact 'bzr check' substantially, and needs to be integrated with
1059
# care. However, it does check for the obvious problem of a delta with
1061
keys = self._index.keys()
1062
parent_map = self.get_parent_map(keys)
1064
if self._index.get_method(key) != 'fulltext':
1065
compression_parent = parent_map[key][0]
1066
if compression_parent not in parent_map:
1067
raise errors.KnitCorrupt(self,
1068
"Missing basis parent %s for %s" % (
1069
compression_parent, key))
1070
for fallback_vfs in self._fallback_vfs:
1071
fallback_vfs.check()
1073
def _check_add(self, key, lines, random_id, check_content):
1074
"""check that version_id and lines are safe to add."""
1075
version_id = key[-1]
1076
if version_id is not None:
1077
if contains_whitespace(version_id):
1078
raise InvalidRevisionId(version_id, self)
1079
self.check_not_reserved_id(version_id)
1080
# TODO: If random_id==False and the key is already present, we should
1081
# probably check that the existing content is identical to what is
1082
# being inserted, and otherwise raise an exception. This would make
1083
# the bundle code simpler.
1085
self._check_lines_not_unicode(lines)
1086
self._check_lines_are_lines(lines)
1088
def _check_header(self, key, line):
1089
rec = self._split_header(line)
1090
self._check_header_version(rec, key[-1])
1093
def _check_header_version(self, rec, version_id):
1094
"""Checks the header version on original format knit records.
1096
These have the last component of the key embedded in the record.
1098
if rec[1] != version_id:
1099
raise KnitCorrupt(self,
1100
'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
1102
def _check_should_delta(self, parent):
1103
"""Iterate back through the parent listing, looking for a fulltext.
1105
This is used when we want to decide whether to add a delta or a new
1106
fulltext. It searches for _max_delta_chain parents. When it finds a
1107
fulltext parent, it sees if the total size of the deltas leading up to
1108
it is large enough to indicate that we want a new full text anyway.
1110
Return True if we should create a new delta, False if we should use a
1114
fulltext_size = None
1115
for count in xrange(self._max_delta_chain):
1117
# Note that this only looks in the index of this particular
1118
# KnitVersionedFiles, not in the fallbacks. This ensures that
1119
# we won't store a delta spanning physical repository
1121
build_details = self._index.get_build_details([parent])
1122
parent_details = build_details[parent]
1123
except (RevisionNotPresent, KeyError), e:
1124
# Some basis is not locally present: always fulltext
1126
index_memo, compression_parent, _, _ = parent_details
1127
_, _, size = index_memo
1128
if compression_parent is None:
1129
fulltext_size = size
1132
# We don't explicitly check for presence because this is in an
1133
# inner loop, and if it's missing it'll fail anyhow.
1134
parent = compression_parent
1136
# We couldn't find a fulltext, so we must create a new one
        # Simple heuristic - if the total I/O would be greater as a delta than
1139
# the originally installed fulltext, we create a new fulltext.
1140
return fulltext_size > delta_size
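        # Illustrative sketch (not part of the original code): with a 10000
        # byte fulltext at the head of the chain and accumulated delta sizes
        # of 300 + 200 + 150 bytes, 10000 > 650 so a new delta is preferred;
        # once the summed deltas exceed the fulltext size this returns False
        # and a fresh fulltext is stored instead.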
1142
def _build_details_to_components(self, build_details):
1143
"""Convert a build_details tuple to a position tuple."""
1144
# record_details, access_memo, compression_parent
1145
return build_details[3], build_details[0], build_details[1]
1147
def _get_components_positions(self, keys, allow_missing=False):
1148
"""Produce a map of position data for the components of keys.
1150
This data is intended to be used for retrieving the knit records.
1152
A dict of key to (record_details, index_memo, next, parents) is
1154
method is the way referenced data should be applied.
1155
index_memo is the handle to pass to the data access to actually get the
1157
next is the build-parent of the version, or None for fulltexts.
1158
parents is the version_ids of the parents of this version
1160
:param allow_missing: If True do not raise an error on a missing component,
1164
pending_components = keys
1165
while pending_components:
1166
build_details = self._index.get_build_details(pending_components)
1167
current_components = set(pending_components)
1168
pending_components = set()
1169
for key, details in build_details.iteritems():
1170
(index_memo, compression_parent, parents,
1171
record_details) = details
1172
method = record_details[0]
1173
if compression_parent is not None:
1174
pending_components.add(compression_parent)
1175
component_data[key] = self._build_details_to_components(details)
1176
missing = current_components.difference(build_details)
1177
if missing and not allow_missing:
1178
raise errors.RevisionNotPresent(missing.pop(), self)
1179
return component_data
1181
def _get_content(self, key, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
1184
cached_version = parent_texts.get(key, None)
1185
if cached_version is not None:
1186
# Ensure the cache dict is valid.
1187
if not self.get_parent_map([key]):
1188
raise RevisionNotPresent(key, self)
1189
return cached_version
1190
generator = _VFContentMapGenerator(self, [key])
1191
return generator._get_content(key)
1193
def get_parent_map(self, keys):
1194
"""Get a map of the graph parents of keys.
1196
:param keys: The keys to look up parents for.
1197
:return: A mapping from keys to parents. Absent keys are absent from
1200
return self._get_parent_map_with_sources(keys)[0]
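        # Illustrative sketch (not part of the original code):
        #   vf.get_parent_map([('rev-2',), ('ghost',)])
        # might return {('rev-2',): (('rev-1',),)} - the unknown key is simply
        # absent from the result rather than raising an error.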
1202
def _get_parent_map_with_sources(self, keys):
1203
"""Get a map of the parents of keys.
1205
:param keys: The keys to look up parents for.
1206
:return: A tuple. The first element is a mapping from keys to parents.
1207
Absent keys are absent from the mapping. The second element is a
1208
list with the locations each key was found in. The first element
1209
is the in-this-knit parents, the second the first fallback source,
1213
sources = [self._index] + self._fallback_vfs
1216
for source in sources:
1219
new_result = source.get_parent_map(missing)
1220
source_results.append(new_result)
1221
result.update(new_result)
1222
missing.difference_update(set(new_result))
1223
return result, source_results
1225
def _get_record_map(self, keys, allow_missing=False):
1226
"""Produce a dictionary of knit records.
1228
:return: {key:(record, record_details, digest, next)}
                data returned from read_records (a KnitContent object)
1232
opaque information to pass to parse_record
1234
SHA1 digest of the full text after all steps are done
1236
build-parent of the version, i.e. the leftmost ancestor.
1237
Will be None if the record is not a delta.
1238
:param keys: The keys to build a map for
1239
:param allow_missing: If some records are missing, rather than
1240
error, just return the data that could be generated.
1242
raw_map = self._get_record_map_unparsed(keys,
1243
allow_missing=allow_missing)
1244
return self._raw_map_to_record_map(raw_map)
1246
def _raw_map_to_record_map(self, raw_map):
1247
"""Parse the contents of _get_record_map_unparsed.
1249
:return: see _get_record_map.
1253
data, record_details, next = raw_map[key]
1254
content, digest = self._parse_record(key[-1], data)
1255
result[key] = content, record_details, digest, next
1258
def _get_record_map_unparsed(self, keys, allow_missing=False):
1259
"""Get the raw data for reconstructing keys without parsing it.
1261
:return: A dict suitable for parsing via _raw_map_to_record_map.
1262
key-> raw_bytes, (method, noeol), compression_parent
1264
# This retries the whole request if anything fails. Potentially we
1265
# could be a bit more selective. We could track the keys whose records
1266
# we have successfully found, and then only request the new records
1267
# from there. However, _get_components_positions grabs the whole build
1268
# chain, which means we'll likely try to grab the same records again
1269
# anyway. Also, can the build chains change as part of a pack
1270
# operation? We wouldn't want to end up with a broken chain.
1273
position_map = self._get_components_positions(keys,
1274
allow_missing=allow_missing)
1275
# key = component_id, r = record_details, i_m = index_memo,
1277
records = [(key, i_m) for key, (r, i_m, n)
1278
in position_map.iteritems()]
1279
# Sort by the index memo, so that we request records from the
1280
# same pack file together, and in forward-sorted order
1281
records.sort(key=operator.itemgetter(1))
1283
for key, data in self._read_records_iter_unchecked(records):
1284
(record_details, index_memo, next) = position_map[key]
1285
raw_record_map[key] = data, record_details, next
1286
return raw_record_map
1287
except errors.RetryWithNewPacks, e:
1288
self._access.reload_or_raise(e)
1291
def _split_by_prefix(cls, keys):
1292
"""For the given keys, split them up based on their prefix.
1294
To keep memory pressure somewhat under control, split the
1295
requests back into per-file-id requests, otherwise "bzr co"
1296
extracts the full tree into memory before writing it to disk.
1297
This should be revisited if _get_content_maps() can ever cross
1300
The keys for a given file_id are kept in the same relative order.
1301
Ordering between file_ids is not, though prefix_order will return the
1302
order that the key was first seen.
1304
:param keys: An iterable of key tuples
1305
:return: (split_map, prefix_order)
1306
split_map A dictionary mapping prefix => keys
1307
prefix_order The order that we saw the various prefixes
1309
split_by_prefix = {}
1317
if prefix in split_by_prefix:
1318
split_by_prefix[prefix].append(key)
1320
split_by_prefix[prefix] = [key]
1321
prefix_order.append(prefix)
1322
return split_by_prefix, prefix_order
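        # Illustrative sketch (not part of the original code): for keys
        #   [('file-a', 'r1'), ('file-b', 'r1'), ('file-a', 'r2')]
        # the split map groups [('file-a', 'r1'), ('file-a', 'r2')] under the
        # 'file-a' prefix and [('file-b', 'r1')] under 'file-b', while
        # prefix_order records the prefixes in first-seen order.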
1324
def _group_keys_for_io(self, keys, non_local_keys, positions,
1325
_min_buffer_size=_STREAM_MIN_BUFFER_SIZE):
1326
"""For the given keys, group them into 'best-sized' requests.
1328
The idea is to avoid making 1 request per file, but to never try to
1329
unpack an entire 1.5GB source tree in a single pass. Also when
1330
possible, we should try to group requests to the same pack file
1333
:return: list of (keys, non_local) tuples that indicate what keys
1334
should be fetched next.
1336
# TODO: Ideally we would group on 2 factors. We want to extract texts
1337
# from the same pack file together, and we want to extract all
1338
# the texts for a given build-chain together. Ultimately it
1339
# probably needs a better global view.
1340
total_keys = len(keys)
1341
prefix_split_keys, prefix_order = self._split_by_prefix(keys)
1342
prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys)
1344
cur_non_local = set()
1348
for prefix in prefix_order:
1349
keys = prefix_split_keys[prefix]
1350
non_local = prefix_split_non_local_keys.get(prefix, [])
1352
this_size = self._index._get_total_build_size(keys, positions)
1353
cur_size += this_size
1354
cur_keys.extend(keys)
1355
cur_non_local.update(non_local)
1356
if cur_size > _min_buffer_size:
1357
result.append((cur_keys, cur_non_local))
1358
sizes.append(cur_size)
1360
cur_non_local = set()
1363
result.append((cur_keys, cur_non_local))
1364
sizes.append(cur_size)
1367
def get_record_stream(self, keys, ordering, include_delta_closure):
1368
"""Get a stream of records for keys.
1370
:param keys: The keys to include.
1371
:param ordering: Either 'unordered' or 'topological'. A topologically
1372
sorted stream has compression parents strictly before their
1374
:param include_delta_closure: If True then the closure across any
1375
compression parents will be included (in the opaque data).
1376
:return: An iterator of ContentFactory objects, each of which is only
1377
valid until the iterator is advanced.
1379
# keys might be a generator
1383
if not self._index.has_graph:
1384
# Cannot sort when no graph has been stored.
1385
ordering = 'unordered'
1387
remaining_keys = keys
1390
keys = set(remaining_keys)
1391
for content_factory in self._get_remaining_record_stream(keys,
1392
ordering, include_delta_closure):
1393
remaining_keys.discard(content_factory.key)
1394
yield content_factory
1396
except errors.RetryWithNewPacks, e:
1397
self._access.reload_or_raise(e)
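    # Illustrative sketch (not part of the original code): typical consumption
    # of the stream, assuming all requested keys are expected to be present:
    #
    #   for record in vf.get_record_stream(keys, 'topological', True):
    #       if record.storage_kind == 'absent':
    #           raise errors.RevisionNotPresent(record.key, vf)
    #       text = record.get_bytes_as('fulltext')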
1399
def _get_remaining_record_stream(self, keys, ordering,
1400
include_delta_closure):
1401
"""This function is the 'retry' portion for get_record_stream."""
1402
if include_delta_closure:
1403
positions = self._get_components_positions(keys, allow_missing=True)
1405
build_details = self._index.get_build_details(keys)
1407
# (record_details, access_memo, compression_parent_key)
1408
positions = dict((key, self._build_details_to_components(details))
1409
for key, details in build_details.iteritems())
1410
absent_keys = keys.difference(set(positions))
1411
# There may be more absent keys : if we're missing the basis component
1412
# and are trying to include the delta closure.
1413
# XXX: We should not ever need to examine remote sources because we do
1414
# not permit deltas across versioned files boundaries.
1415
if include_delta_closure:
1416
needed_from_fallback = set()
1417
# Build up reconstructable_keys dict. key:True in this dict means
1418
# the key can be reconstructed.
1419
reconstructable_keys = {}
1423
chain = [key, positions[key][2]]
1425
needed_from_fallback.add(key)
1428
while chain[-1] is not None:
1429
if chain[-1] in reconstructable_keys:
1430
result = reconstructable_keys[chain[-1]]
1434
chain.append(positions[chain[-1]][2])
1436
# missing basis component
1437
needed_from_fallback.add(chain[-1])
1440
for chain_key in chain[:-1]:
1441
reconstructable_keys[chain_key] = result
1443
needed_from_fallback.add(key)
1444
# Double index lookups here : need a unified api ?
1445
global_map, parent_maps = self._get_parent_map_with_sources(keys)
1446
if ordering in ('topological', 'groupcompress'):
1447
if ordering == 'topological':
1448
# Global topological sort
1449
present_keys = tsort.topo_sort(global_map)
1451
present_keys = sort_groupcompress(global_map)
1452
# Now group by source:
1454
current_source = None
1455
for key in present_keys:
1456
for parent_map in parent_maps:
1457
if key in parent_map:
1458
key_source = parent_map
1460
if current_source is not key_source:
1461
source_keys.append((key_source, []))
1462
current_source = key_source
1463
source_keys[-1][1].append(key)
1465
if ordering != 'unordered':
1466
raise AssertionError('valid values for ordering are:'
1467
' "unordered", "groupcompress" or "topological" not: %r'
1469
# Just group by source; remote sources first.
1472
for parent_map in reversed(parent_maps):
1473
source_keys.append((parent_map, []))
1474
for key in parent_map:
1475
present_keys.append(key)
1476
source_keys[-1][1].append(key)
1477
# We have been requested to return these records in an order that
1478
# suits us. So we ask the index to give us an optimally sorted
1480
for source, sub_keys in source_keys:
1481
if source is parent_maps[0]:
1482
# Only sort the keys for this VF
1483
self._index._sort_keys_by_io(sub_keys, positions)
1484
absent_keys = keys - set(global_map)
1485
for key in absent_keys:
1486
yield AbsentContentFactory(key)
1487
# restrict our view to the keys we can answer.
1488
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
1489
# XXX: At that point we need to consider the impact of double reads by
1490
# utilising components multiple times.
1491
if include_delta_closure:
1492
# XXX: get_content_maps performs its own index queries; allow state
1494
non_local_keys = needed_from_fallback - absent_keys
1495
for keys, non_local_keys in self._group_keys_for_io(present_keys,
1498
generator = _VFContentMapGenerator(self, keys, non_local_keys,
1501
for record in generator.get_record_stream():
1504
for source, keys in source_keys:
1505
if source is parent_maps[0]:
1506
# this KnitVersionedFiles
1507
records = [(key, positions[key][1]) for key in keys]
1508
for key, raw_data, sha1 in self._read_records_iter_raw(records):
1509
(record_details, index_memo, _) = positions[key]
1510
yield KnitContentFactory(key, global_map[key],
1511
record_details, sha1, raw_data, self._factory.annotated, None)
1513
vf = self._fallback_vfs[parent_maps.index(source) - 1]
1514
for record in vf.get_record_stream(keys, ordering,
1515
include_delta_closure):
1518
def get_sha1s(self, keys):
1519
"""See VersionedFiles.get_sha1s()."""
1521
record_map = self._get_record_map(missing, allow_missing=True)
1523
for key, details in record_map.iteritems():
1524
if key not in missing:
1526
# record entry 2 is the 'digest'.
1527
result[key] = details[2]
1528
missing.difference_update(set(result))
1529
for source in self._fallback_vfs:
1532
new_result = source.get_sha1s(missing)
1533
result.update(new_result)
1534
missing.difference_update(set(new_result))
1537
def insert_record_stream(self, stream):
1538
"""Insert a record stream into this container.
1540
:param stream: A stream of records to insert.
1542
:seealso VersionedFiles.get_record_stream:
1544
def get_adapter(adapter_key):
1546
return adapters[adapter_key]
1548
adapter_factory = adapter_registry.get(adapter_key)
1549
adapter = adapter_factory(self)
1550
adapters[adapter_key] = adapter
1553
if self._factory.annotated:
1554
# self is annotated, we need annotated knits to use directly.
1555
annotated = "annotated-"
1558
# self is not annotated, but we can strip annotations cheaply.
1560
convertibles = set(["knit-annotated-ft-gz"])
1561
if self._max_delta_chain:
1562
delta_types.add("knit-annotated-delta-gz")
1563
convertibles.add("knit-annotated-delta-gz")
1564
# The set of types we can cheaply adapt without needing basis texts.
1565
native_types = set()
1566
if self._max_delta_chain:
1567
native_types.add("knit-%sdelta-gz" % annotated)
1568
delta_types.add("knit-%sdelta-gz" % annotated)
1569
native_types.add("knit-%sft-gz" % annotated)
1570
knit_types = native_types.union(convertibles)
1572
# Buffer all index entries that we can't add immediately because their
1573
# basis parent is missing. We don't buffer all because generating
1574
# annotations may require access to some of the new records. However we
1575
# can't generate annotations from new deltas until their basis parent
1576
# is present anyway, so we get away with not needing an index that
1577
# includes the new keys.
1579
# See <http://launchpad.net/bugs/300177> about ordering of compression
1580
# parents in the records - to be conservative, we insist that all
1581
# parents must be present to avoid expanding to a fulltext.
1583
# key = basis_parent, value = index entry to add
1584
buffered_index_entries = {}
1585
for record in stream:
1587
parents = record.parents
1588
if record.storage_kind in delta_types:
1589
# TODO: eventually the record itself should track
1590
# compression_parent
1591
compression_parent = parents[0]
1593
compression_parent = None
1594
# Raise an error when a record is missing.
1595
if record.storage_kind == 'absent':
1596
raise RevisionNotPresent([record.key], self)
1597
elif ((record.storage_kind in knit_types)
1598
and (compression_parent is None
1599
or not self._fallback_vfs
1600
or self._index.has_key(compression_parent)
1601
or not self.has_key(compression_parent))):
1602
# we can insert the knit record literally if either it has no
1603
# compression parent OR we already have its basis in this kvf
1604
# OR the basis is not present even in the fallbacks. In the
1605
# last case it will either turn up later in the stream and all
1606
# will be well, or it won't turn up at all and we'll raise an
1609
# TODO: self.has_key is somewhat redundant with
1610
# self._index.has_key; we really want something that directly
1611
# asks if it's only present in the fallbacks. -- mbp 20081119
1612
if record.storage_kind not in native_types:
1614
adapter_key = (record.storage_kind, "knit-delta-gz")
1615
adapter = get_adapter(adapter_key)
1617
adapter_key = (record.storage_kind, "knit-ft-gz")
1618
adapter = get_adapter(adapter_key)
1619
bytes = adapter.get_bytes(record)
1621
# It's a knit record, it has a _raw_record field (even if
1622
# it was reconstituted from a network stream).
1623
bytes = record._raw_record
1624
options = [record._build_details[0]]
1625
if record._build_details[1]:
1626
options.append('no-eol')
1627
# Just blat it across.
1628
# Note: This does end up adding data on duplicate keys. As
1629
# modern repositories use atomic insertions this should not
1630
# lead to excessive growth in the event of interrupted fetches.
1631
# 'knit' repositories may suffer excessive growth, but as a
1632
# deprecated format this is tolerable. It can be fixed if
1633
# needed by in the kndx index support raising on a duplicate
1634
# add with identical parents and options.
1635
access_memo = self._access.add_raw_records(
1636
[(record.key, len(bytes))], bytes)[0]
1637
index_entry = (record.key, options, access_memo, parents)
1638
if 'fulltext' not in options:
1639
# Not a fulltext, so we need to make sure the compression
1640
# parent will also be present.
1641
# Note that pack backed knits don't need to buffer here
1642
# because they buffer all writes to the transaction level,
1643
# but we don't expose that difference at the index level. If
1644
# the query here has sufficient cost to show up in
1645
# profiling we should do that.
1647
# They're required to be physically in this
1648
# KnitVersionedFiles, not in a fallback.
1649
if not self._index.has_key(compression_parent):
1650
pending = buffered_index_entries.setdefault(
1651
compression_parent, [])
1652
pending.append(index_entry)
1655
self._index.add_records([index_entry])
1656
elif record.storage_kind == 'chunked':
1657
self.add_lines(record.key, parents,
1658
osutils.chunks_to_lines(record.get_bytes_as('chunked')))
1660
# Not suitable for direct insertion as a
1661
# delta, either because it's not the right format, or this
1662
# KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
1663
# 0) or because it depends on a base only present in the
1665
self._access.flush()
1667
# Try getting a fulltext directly from the record.
1668
bytes = record.get_bytes_as('fulltext')
1669
except errors.UnavailableRepresentation:
1670
adapter_key = record.storage_kind, 'fulltext'
1671
adapter = get_adapter(adapter_key)
1672
bytes = adapter.get_bytes(record)
1673
lines = split_lines(bytes)
1675
self.add_lines(record.key, parents, lines)
1676
except errors.RevisionAlreadyPresent:
1678
# Add any records whose basis parent is now available.
1680
added_keys = [record.key]
1682
key = added_keys.pop(0)
1683
if key in buffered_index_entries:
1684
index_entries = buffered_index_entries[key]
1685
self._index.add_records(index_entries)
1687
[index_entry[0] for index_entry in index_entries])
1688
del buffered_index_entries[key]
1689
if buffered_index_entries:
1690
# There were index entries buffered at the end of the stream,
1691
# So these need to be added (if the index supports holding such
1692
# entries for later insertion)
1693
for key in buffered_index_entries:
1694
index_entries = buffered_index_entries[key]
1695
self._index.add_records(index_entries,
1696
missing_compression_parents=True)
1698
def get_missing_compression_parent_keys(self):
1699
"""Return an iterable of keys of missing compression parents.
1701
Check this after calling insert_record_stream to find out if there are
1702
any missing compression parents. If there are, the records that
1703
depend on them are not able to be inserted safely. For atomic
1704
KnitVersionedFiles built on packs, the transaction should be aborted or
1705
suspended - commit will fail at this point. Nonatomic knits will error
1706
earlier because they have no staging area to put pending entries into.
1708
return self._index.get_missing_compression_parents()
1710
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1711
"""Iterate over the lines in the versioned files from keys.
1713
This may return lines from other keys. Each item the returned
1714
iterator yields is a tuple of a line and a text version that that line
1715
is present in (not introduced in).
1717
Ordering of results is in whatever order is most suitable for the
1718
underlying storage format.
1720
If a progress bar is supplied, it may be used to indicate progress.
1721
The caller is responsible for cleaning up progress bars (because this
1725
* Lines are normalised by the underlying store: they will all have \\n
1727
* Lines are returned in arbitrary order.
1728
* If a requested key did not change any lines (or didn't have any
1729
lines), it may not be mentioned at all in the result.
1731
:param pb: Progress bar supplied by caller.
1732
:return: An iterator over (line, key).
1735
pb = progress.DummyProgress()
1741
# we don't care about inclusions, the caller cares.
1742
# but we need to setup a list of records to visit.
1743
# we need key, position, length
1745
build_details = self._index.get_build_details(keys)
1746
for key, details in build_details.iteritems():
1748
key_records.append((key, details[0]))
1749
records_iter = enumerate(self._read_records_iter(key_records))
1750
for (key_idx, (key, data, sha_value)) in records_iter:
1751
pb.update('Walking content', key_idx, total)
1752
compression_parent = build_details[key][1]
1753
if compression_parent is None:
1755
line_iterator = self._factory.get_fulltext_content(data)
1758
line_iterator = self._factory.get_linedelta_content(data)
1759
# Now that we are yielding the data for this key, remove it
1762
# XXX: It might be more efficient to yield (key,
1763
# line_iterator) in the future. However for now, this is a
1764
# simpler change to integrate into the rest of the
1765
# codebase. RBC 20071110
1766
for line in line_iterator:
1769
except errors.RetryWithNewPacks, e:
1770
self._access.reload_or_raise(e)
1771
# If there are still keys we've not yet found, we look in the fallback
1772
# vfs, and hope to find them there. Note that if the keys are found
1773
# but had no changes or no content, the fallback may not return
1775
if keys and not self._fallback_vfs:
1776
# XXX: strictly the second parameter is meant to be the file id
1777
# but it's not easily accessible here.
1778
raise RevisionNotPresent(keys, repr(self))
1779
for source in self._fallback_vfs:
1783
for line, key in source.iter_lines_added_or_present_in_keys(keys):
1784
source_keys.add(key)
1786
keys.difference_update(source_keys)
1787
pb.update('Walking content', total, total)
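# An illustrative use of the iterator above (hypothetical names 'vf',
# 'keys', 'needle' and 'candidates'); note that lines belonging to other
# keys may also be yielded, as described in the docstring:
#
#   for line, key in vf.iter_lines_added_or_present_in_keys(keys):
#       if line == needle:
#           candidates.add(key)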
1789
def _make_line_delta(self, delta_seq, new_content):
1790
"""Generate a line delta from delta_seq and new_content."""
1792
for op in delta_seq.get_opcodes():
1793
if op[0] == 'equal':
1795
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
1798
def _merge_annotations(self, content, parents, parent_texts={},
1799
delta=None, annotated=None,
1800
left_matching_blocks=None):
1801
"""Merge annotations for content and generate deltas.
1803
This is done by comparing the annotations based on changes to the text
1804
and generating a delta on the resulting full texts. If annotations are
1805
not being created then a simple delta is created.
1807
if left_matching_blocks is not None:
1808
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1812
for parent_key in parents:
1813
merge_content = self._get_content(parent_key, parent_texts)
1814
if (parent_key == parents[0] and delta_seq is not None):
1817
seq = patiencediff.PatienceSequenceMatcher(
1818
None, merge_content.text(), content.text())
1819
for i, j, n in seq.get_matching_blocks():
1822
# this copies (origin, text) pairs across to the new
1823
# content for any line that matches the last-checked
1825
content._lines[j:j+n] = merge_content._lines[i:i+n]
1826
# XXX: Robert says the following block is a workaround for a
1827
# now-fixed bug and it can probably be deleted. -- mbp 20080618
1828
if content._lines and content._lines[-1][1][-1] != '\n':
1829
# The copied annotation was from a line without a trailing EOL,
1830
# reinstate one for the content object, to ensure correct
1832
line = content._lines[-1][1] + '\n'
1833
content._lines[-1] = (content._lines[-1][0], line)
1835
if delta_seq is None:
1836
reference_content = self._get_content(parents[0], parent_texts)
1837
new_texts = content.text()
1838
old_texts = reference_content.text()
1839
delta_seq = patiencediff.PatienceSequenceMatcher(
1840
None, old_texts, new_texts)
1841
return self._make_line_delta(delta_seq, content)
1843
def _parse_record(self, version_id, data):
1844
"""Parse an original format knit record.
1846
These have the last element of the key only present in the stored data.
1848
rec, record_contents = self._parse_record_unchecked(data)
1849
self._check_header_version(rec, version_id)
1850
return record_contents, rec[3]
1852
def _parse_record_header(self, key, raw_data):
1853
"""Parse a record header for consistency.
1855
:return: the header and the decompressor stream.
1856
as (stream, header_record)
1858
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
1861
rec = self._check_header(key, df.readline())
1862
except Exception, e:
1863
raise KnitCorrupt(self,
1864
"While reading {%s} got %s(%s)"
1865
% (key, e.__class__.__name__, str(e)))
1868
def _parse_record_unchecked(self, data):
1870
# 4168 calls in 2880 217 internal
1871
# 4168 calls to _parse_record_header in 2121
1872
# 4168 calls to readlines in 330
1873
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
1875
record_contents = df.readlines()
1876
except Exception, e:
1877
raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
1878
(data, e.__class__.__name__, str(e)))
1879
header = record_contents.pop(0)
1880
rec = self._split_header(header)
1881
last_line = record_contents.pop()
1882
if len(record_contents) != int(rec[2]):
1883
raise KnitCorrupt(self,
1884
'incorrect number of lines %s != %s'
1885
' for version {%s} %s'
1886
% (len(record_contents), int(rec[2]),
1887
rec[1], record_contents))
1888
if last_line != 'end %s\n' % rec[1]:
1889
raise KnitCorrupt(self,
1890
'unexpected version end line %r, wanted %r'
1891
% (last_line, rec[1]))
1893
return rec, record_contents
1895
def _read_records_iter(self, records):
1896
"""Read text records from data file and yield result.
1898
The result will be returned in whatever order is fastest to read,
not the order requested. Also, multiple requests for the same
record will only yield one response.
1901
:param records: A list of (key, access_memo) entries
1902
:return: Yields (key, contents, digest) in the order
1903
read, not the order requested
1908
# XXX: This smells wrong, IO may not be getting ordered right.
1909
needed_records = sorted(set(records), key=operator.itemgetter(1))
1910
if not needed_records:
1913
# The transport optimizes the fetching as well
1914
# (ie, reads continuous ranges.)
1915
raw_data = self._access.get_raw_records(
1916
[index_memo for key, index_memo in needed_records])
1918
for (key, index_memo), data in \
1919
izip(iter(needed_records), raw_data):
1920
content, digest = self._parse_record(key[-1], data)
1921
yield key, content, digest
1923
def _read_records_iter_raw(self, records):
1924
"""Read text records from data file and yield raw data.
1926
This unpacks enough of the text record to validate that the id is
as expected, but that's all.
1929
Each item the iterator yields is (key, bytes,
1930
expected_sha1_of_full_text).
1932
for key, data in self._read_records_iter_unchecked(records):
1933
# validate the header (note that we can only use the suffix in
1934
# current knit records).
1935
df, rec = self._parse_record_header(key, data)
1937
yield key, data, rec[3]
1939
def _read_records_iter_unchecked(self, records):
1940
"""Read text records from data file and yield raw data.
1942
No validation is done.
1944
Yields tuples of (key, data).
1946
# setup an iterator of the external records:
1947
# uses readv so nice and fast we hope.
1949
# grab the disk data needed.
1950
needed_offsets = [index_memo for key, index_memo
1952
raw_records = self._access.get_raw_records(needed_offsets)
1954
for key, index_memo in records:
1955
data = raw_records.next()
1958
def _record_to_data(self, key, digest, lines, dense_lines=None):
1959
"""Convert key, digest, lines into a raw data block.
1961
:param key: The key of the record. Currently keys are always serialised
1962
using just the trailing component.
1963
:param dense_lines: The bytes of lines but in a denser form. For
1964
instance, if lines is a list of 1000 bytestrings each ending in \n,
1965
dense_lines may be a list with one line in it, containing all
1000 lines and their \n's. Using dense_lines if it is already
1967
known is a win because the string join to create bytes in this
1968
function spends less time resizing the final string.
1969
:return: (len, a StringIO instance with the raw data ready to read.)
1971
chunks = ["version %s %d %s\n" % (key[-1], len(lines), digest)]
1972
chunks.extend(dense_lines or lines)
1973
chunks.append("end %s\n" % key[-1])
1974
for chunk in chunks:
1975
if type(chunk) is not str:
1976
raise AssertionError(
1977
'data must be plain bytes was %s' % type(chunk))
1978
if lines and lines[-1][-1] != '\n':
1979
raise ValueError('corrupt lines value %r' % lines)
1980
compressed_bytes = tuned_gzip.chunks_to_gzip(chunks)
1981
return len(compressed_bytes), compressed_bytes
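# For reference, the uncompressed record assembled above has the following
# shape (made-up values); the whole block is then gzip-compressed before
# being returned:
#
#   version rev-1 3 <sha1 of the expanded text>\n
#   first line\n
#   second line\n
#   third line\n
#   end rev-1\n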
1983
def _split_header(self, line):
1986
raise KnitCorrupt(self,
1987
'unexpected number of elements in record header')
1991
"""See VersionedFiles.keys."""
1992
if 'evil' in debug.debug_flags:
1993
trace.mutter_callsite(2, "keys scales with size of history")
1994
sources = [self._index] + self._fallback_vfs
1996
for source in sources:
1997
result.update(source.keys())
2001
class _ContentMapGenerator(object):
2002
"""Generate texts or expose raw deltas for a set of texts."""
2004
def __init__(self, ordering='unordered'):
2005
self._ordering = ordering
2007
def _get_content(self, key):
2008
"""Get the content object for key."""
2009
# Note that _get_content is only called when the _ContentMapGenerator
2010
# has been constructed with just one key requested for reconstruction.
2011
if key in self.nonlocal_keys:
2012
record = self.get_record_stream().next()
2013
# Create a content object on the fly
2014
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
2015
return PlainKnitContent(lines, record.key)
2017
# local keys we can ask for directly
2018
return self._get_one_work(key)
2020
def get_record_stream(self):
2021
"""Get a record stream for the keys requested during __init__."""
2022
for record in self._work():
2026
"""Produce maps of text and KnitContents as dicts.
2028
:return: (text_map, content_map) where text_map contains the texts for
2029
the requested versions and content_map contains the KnitContents.
2031
# NB: By definition we never need to read remote sources unless texts
2032
# are requested from them: we don't delta across stores - and we
2033
# explicitly do not want to, to prevent data loss situations.
2034
if self.global_map is None:
2035
self.global_map = self.vf.get_parent_map(self.keys)
2036
nonlocal_keys = self.nonlocal_keys
2038
missing_keys = set(nonlocal_keys)
2039
# Read from remote versioned file instances and provide to our caller.
2040
for source in self.vf._fallback_vfs:
2041
if not missing_keys:
2043
# Loop over fallback repositories asking them for texts - ignore
2044
# any missing from a particular fallback.
2045
for record in source.get_record_stream(missing_keys,
2046
self._ordering, True):
2047
if record.storage_kind == 'absent':
2048
# Not in this particular stream; may be in one of the
2049
# other fallback vfs objects.
2051
missing_keys.remove(record.key)
2054
if self._raw_record_map is None:
2055
raise AssertionError('_raw_record_map should have been filled')
2057
for key in self.keys:
2058
if key in self.nonlocal_keys:
2060
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2063
def _get_one_work(self, requested_key):
2064
# Now, if we have calculated everything already, just return the
2066
if requested_key in self._contents_map:
2067
return self._contents_map[requested_key]
2068
# To simplify things, parse everything at once - code that wants one text
2069
# probably wants them all.
2070
# FUTURE: This function could be improved for the 'extract many' case
2071
# by tracking each component and only doing the copy when the number of
2072
# children that need to apply deltas to it is > 1 or it is part of the
2074
multiple_versions = len(self.keys) != 1
2075
if self._record_map is None:
2076
self._record_map = self.vf._raw_map_to_record_map(
2077
self._raw_record_map)
2078
record_map = self._record_map
2079
# raw_record_map is key:
2080
# Have read and parsed records at this point.
2081
for key in self.keys:
2082
if key in self.nonlocal_keys:
2087
while cursor is not None:
2089
record, record_details, digest, next = record_map[cursor]
2091
raise RevisionNotPresent(cursor, self)
2092
components.append((cursor, record, record_details, digest))
2094
if cursor in self._contents_map:
2095
# no need to plan further back
2096
components.append((cursor, None, None, None))
2100
for (component_id, record, record_details,
2101
digest) in reversed(components):
2102
if component_id in self._contents_map:
2103
content = self._contents_map[component_id]
2105
content, delta = self._factory.parse_record(key[-1],
2106
record, record_details, content,
2107
copy_base_content=multiple_versions)
2108
if multiple_versions:
2109
self._contents_map[component_id] = content
2111
# digest here is the digest from the last applied component.
2112
text = content.text()
2113
actual_sha = sha_strings(text)
2114
if actual_sha != digest:
2115
raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
2116
if multiple_versions:
2117
return self._contents_map[requested_key]
2121
def _wire_bytes(self):
2122
"""Get the bytes to put on the wire for 'key'.
2124
The first collection of bytes asked for returns the serialised
2125
raw_record_map and the additional details (key, parent) for key.
2126
Subsequent calls return just the additional details (key, parent).
2127
The wire storage_kind given for the first key is 'knit-delta-closure'.
For subsequent keys it is 'knit-delta-closure-ref'.
2130
:param key: A key from the content generator.
2131
:return: Bytes to put on the wire.
2134
# kind marker for dispatch on the far side,
2135
lines.append('knit-delta-closure')
2137
if self.vf._factory.annotated:
2138
lines.append('annotated')
2141
# then the list of keys
2142
lines.append('\t'.join(['\x00'.join(key) for key in self.keys
2143
if key not in self.nonlocal_keys]))
2144
# then the _raw_record_map in serialised form:
2146
# for each item in the map:
2148
# 1 line with parents if the key is to be yielded (None: for None, '' for ())
2149
# one line with method
2150
# one line with noeol
2151
# one line with next ('' for None)
2152
# one line with byte count of the record bytes
2154
for key, (record_bytes, (method, noeol), next) in \
2155
self._raw_record_map.iteritems():
2156
key_bytes = '\x00'.join(key)
2157
parents = self.global_map.get(key, None)
2159
parent_bytes = 'None:'
2161
parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
2162
method_bytes = method
2168
next_bytes = '\x00'.join(next)
2171
map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
2172
key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
2173
len(record_bytes), record_bytes))
2174
map_bytes = ''.join(map_byte_list)
2175
lines.append(map_bytes)
2176
bytes = '\n'.join(lines)
return bytes
2180
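# An illustrative sketch (hypothetical values) of the wire layout assembled
# by _ContentMapGenerator._wire_bytes above. The header lines are joined
# with newlines; map entries are concatenated directly, each one being key,
# parents, method, noeol flag, next, byte count and then exactly that many
# raw record bytes:
#
#   knit-delta-closure
#   annotated                      (only for annotated factories)
#   file-id\x00rev-1\tfile-id\x00rev-2
#   file-id\x00rev-1               (first map entry: key)
#   None:                          ('None:' for None, '' for ())
#   fulltext
#   <noeol flag>
#                                  (next: '' for None)
#   123
#   <123 raw record bytes><next entry follows immediately...>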
class _VFContentMapGenerator(_ContentMapGenerator):
2181
"""Content map generator reading from a VersionedFiles object."""
2183
def __init__(self, versioned_files, keys, nonlocal_keys=None,
2184
global_map=None, raw_record_map=None, ordering='unordered'):
2185
"""Create a _ContentMapGenerator.
2187
:param versioned_files: The versioned files that the texts are being
2189
:param keys: The keys to produce content maps for.
2190
:param nonlocal_keys: An iterable of keys (possibly intersecting keys)
2191
which are known to not be in this knit, but rather in one of the
2193
:param global_map: The result of get_parent_map(keys) (or a supermap).
2194
This is required if get_record_stream() is to be used.
2195
:param raw_record_map: An unparsed raw record map to use for answering
2198
_ContentMapGenerator.__init__(self, ordering=ordering)
2199
# The vf to source data from
2200
self.vf = versioned_files
2202
self.keys = list(keys)
2203
# Keys known to be in fallback vfs objects
2204
if nonlocal_keys is None:
2205
self.nonlocal_keys = set()
2207
self.nonlocal_keys = frozenset(nonlocal_keys)
2208
# Parents data for keys to be returned in get_record_stream
2209
self.global_map = global_map
2210
# The chunked lists for self.keys in text form
2212
# A cache of KnitContent objects used in extracting texts.
2213
self._contents_map = {}
2214
# All the knit records needed to assemble the requested keys as full
2216
self._record_map = None
2217
if raw_record_map is None:
2218
self._raw_record_map = self.vf._get_record_map_unparsed(keys,
2221
self._raw_record_map = raw_record_map
2222
# the factory for parsing records
2223
self._factory = self.vf._factory
2226
class _NetworkContentMapGenerator(_ContentMapGenerator):
2227
"""Content map generator sourced from a network stream."""
2229
def __init__(self, bytes, line_end):
2230
"""Construct a _NetworkContentMapGenerator from a bytes block."""
2232
self.global_map = {}
2233
self._raw_record_map = {}
2234
self._contents_map = {}
2235
self._record_map = None
2236
self.nonlocal_keys = []
2237
# Get access to record parsing facilities
2238
self.vf = KnitVersionedFiles(None, None)
2241
line_end = bytes.find('\n', start)
2242
line = bytes[start:line_end]
2243
start = line_end + 1
2244
if line == 'annotated':
2245
self._factory = KnitAnnotateFactory()
2247
self._factory = KnitPlainFactory()
2248
# list of keys to emit in get_record_stream
2249
line_end = bytes.find('\n', start)
2250
line = bytes[start:line_end]
2251
start = line_end + 1
2253
tuple(segment.split('\x00')) for segment in line.split('\t')
2255
# now a loop until the end. XXX: It would be nice if this was just a
2256
# bunch of the same records as get_record_stream(..., False) gives, but
2257
# there is a decent sized gap stopping that at the moment.
2261
line_end = bytes.find('\n', start)
2262
key = tuple(bytes[start:line_end].split('\x00'))
2263
start = line_end + 1
2264
# 1 line with parents (None: for None, '' for ())
2265
line_end = bytes.find('\n', start)
2266
line = bytes[start:line_end]
2271
[tuple(segment.split('\x00')) for segment in line.split('\t')
2273
self.global_map[key] = parents
2274
start = line_end + 1
2275
# one line with method
2276
line_end = bytes.find('\n', start)
2277
line = bytes[start:line_end]
2279
start = line_end + 1
2280
# one line with noeol
2281
line_end = bytes.find('\n', start)
2282
line = bytes[start:line_end]
2284
start = line_end + 1
2285
# one line with next ('' for None)
2286
line_end = bytes.find('\n', start)
2287
line = bytes[start:line_end]
2291
next = tuple(bytes[start:line_end].split('\x00'))
2292
start = line_end + 1
2293
# one line with byte count of the record bytes
2294
line_end = bytes.find('\n', start)
2295
line = bytes[start:line_end]
2297
start = line_end + 1
2299
record_bytes = bytes[start:start+count]
2300
start = start + count
2302
self._raw_record_map[key] = (record_bytes, (method, noeol), next)
2304
def get_record_stream(self):
2305
"""Get a record stream for for keys requested by the bytestream."""
2307
for key in self.keys:
2308
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2311
def _wire_bytes(self):
2315
class _KndxIndex(object):
2316
"""Manages knit index files
2318
The index is kept in memory and read on startup, to enable
2319
fast lookups of revision information. The cursor of the index
2320
file is always pointing to the end, making it easy to append
2323
_cache is a cache for fast mapping from version id to a Index
2326
_history is a cache for fast mapping from indexes to version ids.
2328
The index data format is dictionary compressed when it comes to
parent references; an index entry may only have parents with a
lower index number. As a result, the index is topologically sorted.
2332
Duplicate entries may be written to the index for a single version id
2333
if this is done then the latter one completely replaces the former:
2334
this allows updates to correct version and parent information.
2335
Note that the two entries may share the delta, and that successive
2336
annotations and references MUST point to the first entry.
2338
The index file on disc contains a header, followed by one line per knit
2339
record. The same revision can be present in an index file more than once.
2340
The first occurrence gets assigned a sequence number starting from 0.
2342
The format of a single line is
2343
REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
2344
REVISION_ID is a utf8-encoded revision id
2345
FLAGS is a comma separated list of flags about the record. Values include
2346
no-eol, line-delta, fulltext.
2347
BYTE_OFFSET is the ascii representation of the byte offset in the data file
that the compressed data starts at.
2349
LENGTH is the ascii representation of the length of the data file.
2350
PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
2352
PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
2353
revision id already in the knit that is a parent of REVISION_ID.
2354
The ' :' marker is the end of record marker.
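For example (made-up values), a record line for a delta whose first
parent is already at sequence number 3 and whose second parent is
written by id might look like:

rev-123 line-delta,no-eol 217 4173 3 .parent-rev-id :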
2357
When a write to the index file is interrupted, it will result in a line
that does not end in ' :'. If the ' :' is not present at the end of a line,
or at the end of the file, then the record that is missing it will be
ignored by the parser.
2362
When writing new records to the index file, the data is preceded by '\n'
2363
to ensure that records always start on new lines even if the last write was
2364
interrupted. As a result it's normal for the last line in the index to be
2365
missing a trailing newline. One can be added with no harmful effects.
2367
:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
2368
where prefix is e.g. the (fileid,) for .texts instances or () for
2369
constant-mapped things like .revisions, and the old state is
2370
tuple(history_vector, cache_dict). This is used to prevent having an
2371
ABI change with the C extension that reads .kndx files.
2374
HEADER = "# bzr knit index 8\n"
2376
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
2377
"""Create a _KndxIndex on transport using mapper."""
2378
self._transport = transport
2379
self._mapper = mapper
2380
self._get_scope = get_scope
2381
self._allow_writes = allow_writes
2382
self._is_locked = is_locked
2384
self.has_graph = True
2386
def add_records(self, records, random_id=False, missing_compression_parents=False):
2387
"""Add multiple records to the index.
2389
:param records: a list of tuples:
2390
(key, options, access_memo, parents).
2391
:param random_id: If True the ids being added were randomly generated
2392
and no check for existence will be performed.
2393
:param missing_compression_parents: If True the records being added are
2394
only compressed against texts already in the index (or inside
2395
records). If False the records all refer to unavailable texts (or
2396
texts inside records) as compression parents.
2398
if missing_compression_parents:
2399
# It might be nice to get the edge of the records. But keys isn't
2401
keys = sorted(record[0] for record in records)
2402
raise errors.RevisionNotPresent(keys, self)
2404
for record in records:
2407
path = self._mapper.map(key) + '.kndx'
2408
path_keys = paths.setdefault(path, (prefix, []))
2409
path_keys[1].append(record)
2410
for path in sorted(paths):
2411
prefix, path_keys = paths[path]
2412
self._load_prefixes([prefix])
2414
orig_history = self._kndx_cache[prefix][1][:]
2415
orig_cache = self._kndx_cache[prefix][0].copy()
2418
for key, options, (_, pos, size), parents in path_keys:
2420
# kndx indices cannot be parentless.
2422
line = "\n%s %s %s %s %s :" % (
2423
key[-1], ','.join(options), pos, size,
2424
self._dictionary_compress(parents))
2425
if type(line) is not str:
2426
raise AssertionError(
2427
'data must be utf8 was %s' % type(line))
2429
self._cache_key(key, options, pos, size, parents)
2430
if len(orig_history):
2431
self._transport.append_bytes(path, ''.join(lines))
2433
self._init_index(path, lines)
2435
# If any problems happen, restore the original values and re-raise
2436
self._kndx_cache[prefix] = (orig_cache, orig_history)
2439
def scan_unvalidated_index(self, graph_index):
2440
"""See _KnitGraphIndex.scan_unvalidated_index."""
2441
# Because kndx files do not support atomic insertion via separate index
2442
# files, they do not support this method.
2443
raise NotImplementedError(self.scan_unvalidated_index)
2445
def get_missing_compression_parents(self):
2446
"""See _KnitGraphIndex.get_missing_compression_parents."""
2447
# Because kndx files do not support atomic insertion via separate index
2448
# files, they do not support this method.
2449
raise NotImplementedError(self.get_missing_compression_parents)
2451
def _cache_key(self, key, options, pos, size, parent_keys):
2452
"""Cache a version record in the history array and index cache.
2454
This is inlined into _load_data for performance. KEEP IN SYNC.
2455
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
2459
version_id = key[-1]
2460
# last-element only for compatibility with the C load_data.
2461
parents = tuple(parent[-1] for parent in parent_keys)
2462
for parent in parent_keys:
2463
if parent[:-1] != prefix:
2464
raise ValueError("mismatched prefixes for %r, %r" % (
2466
cache, history = self._kndx_cache[prefix]
2467
# only want the _history index to reference the 1st index entry
2469
if version_id not in cache:
2470
index = len(history)
2471
history.append(version_id)
2473
index = cache[version_id][5]
2474
cache[version_id] = (version_id,
2481
def check_header(self, fp):
2482
line = fp.readline()
2484
# An empty file can actually be treated as though the file doesn't
2486
raise errors.NoSuchFile(self)
2487
if line != self.HEADER:
2488
raise KnitHeaderError(badline=line, filename=self)
2490
def _check_read(self):
2491
if not self._is_locked():
2492
raise errors.ObjectNotLocked(self)
2493
if self._get_scope() != self._scope:
2496
def _check_write_ok(self):
2497
"""Assert if not writes are permitted."""
2498
if not self._is_locked():
2499
raise errors.ObjectNotLocked(self)
2500
if self._get_scope() != self._scope:
2502
if self._mode != 'w':
2503
raise errors.ReadOnlyObjectDirtiedError(self)
2505
def get_build_details(self, keys):
2506
"""Get the method, index_memo and compression parent for keys.
2508
Ghosts are omitted from the result.
2510
:param keys: An iterable of keys.
2511
:return: A dict of key:(index_memo, compression_parent, parents,
2514
opaque structure to pass to read_records to extract the raw
2517
Content that this record is built upon, may be None
2519
Logical parents of this node
2521
extra information about the content which needs to be passed to
2522
Factory.parse_record
2524
parent_map = self.get_parent_map(keys)
2527
if key not in parent_map:
2529
method = self.get_method(key)
2530
parents = parent_map[key]
2531
if method == 'fulltext':
2532
compression_parent = None
2534
compression_parent = parents[0]
2535
noeol = 'no-eol' in self.get_options(key)
2536
index_memo = self.get_position(key)
2537
result[key] = (index_memo, compression_parent,
2538
parents, (method, noeol))
2541
def get_method(self, key):
2542
"""Return compression method of specified key."""
2543
options = self.get_options(key)
2544
if 'fulltext' in options:
2546
elif 'line-delta' in options:
2549
raise errors.KnitIndexUnknownMethod(self, options)
2551
def get_options(self, key):
2552
"""Return a list representing options.
2556
prefix, suffix = self._split_key(key)
2557
self._load_prefixes([prefix])
2559
return self._kndx_cache[prefix][0][suffix][1]
2561
raise RevisionNotPresent(key, self)
2563
def get_parent_map(self, keys):
2564
"""Get a map of the parents of keys.
2566
:param keys: The keys to look up parents for.
2567
:return: A mapping from keys to parents. Absent keys are absent from
2570
# Parse what we need to up front, this potentially trades off I/O
2571
# locality (.kndx and .knit in the same block group for the same file
2572
# id) for less checking in inner loops.
2573
prefixes = set(key[:-1] for key in keys)
2574
self._load_prefixes(prefixes)
2579
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
2583
result[key] = tuple(prefix + (suffix,) for
2584
suffix in suffix_parents)
2587
def get_position(self, key):
2588
"""Return details needed to access the version.
2590
:return: a tuple (key, data position, size) to hand to the access
2591
logic to get the record.
2593
prefix, suffix = self._split_key(key)
2594
self._load_prefixes([prefix])
2595
entry = self._kndx_cache[prefix][0][suffix]
2596
return key, entry[2], entry[3]
2598
has_key = _mod_index._has_key_from_parent_map
2600
def _init_index(self, path, extra_lines=[]):
2601
"""Initialize an index."""
2603
sio.write(self.HEADER)
2604
sio.writelines(extra_lines)
2606
self._transport.put_file_non_atomic(path, sio,
2607
create_parent_dir=True)
2608
# self._create_parent_dir)
2609
# mode=self._file_mode,
2610
# dir_mode=self._dir_mode)
2613
"""Get all the keys in the collection.
2615
The keys are not ordered.
2618
# Identify all key prefixes.
2619
# XXX: A bit hacky, needs polish.
2620
if type(self._mapper) is ConstantMapper:
2624
for quoted_relpath in self._transport.iter_files_recursive():
2625
path, ext = os.path.splitext(quoted_relpath)
2627
prefixes = [self._mapper.unmap(path) for path in relpaths]
2628
self._load_prefixes(prefixes)
2629
for prefix in prefixes:
2630
for suffix in self._kndx_cache[prefix][1]:
2631
result.add(prefix + (suffix,))
2634
def _load_prefixes(self, prefixes):
2635
"""Load the indices for prefixes."""
2637
for prefix in prefixes:
2638
if prefix not in self._kndx_cache:
2639
# the load_data interface writes to these variables.
2642
self._filename = prefix
2644
path = self._mapper.map(prefix) + '.kndx'
2645
fp = self._transport.get(path)
2647
# _load_data may raise NoSuchFile if the target knit is
2649
_load_data(self, fp)
2652
self._kndx_cache[prefix] = (self._cache, self._history)
2657
self._kndx_cache[prefix] = ({}, [])
2658
if type(self._mapper) is ConstantMapper:
2659
# preserve behaviour for revisions.kndx etc.
2660
self._init_index(path)
2665
missing_keys = _mod_index._missing_keys_from_parent_map
2667
def _partition_keys(self, keys):
2668
"""Turn keys into a dict of prefix:suffix_list."""
2671
prefix_keys = result.setdefault(key[:-1], [])
2672
prefix_keys.append(key[-1])
2675
def _dictionary_compress(self, keys):
2676
"""Dictionary compress keys.
2678
:param keys: The keys to generate references to.
2679
:return: A string representation of keys. keys which are present are
2680
dictionary compressed, and others are emitted as fulltext with a
2686
prefix = keys[0][:-1]
2687
cache = self._kndx_cache[prefix][0]
2689
if key[:-1] != prefix:
2690
# kndx indices cannot refer across partitioned storage.
2691
raise ValueError("mismatched prefixes for %r" % keys)
2692
if key[-1] in cache:
2693
# -- inlined lookup() --
2694
result_list.append(str(cache[key[-1]][5]))
2695
# -- end lookup () --
2697
result_list.append('.' + key[-1])
2698
return ' '.join(result_list)
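# An illustrative example (hypothetical values) of the method above: for
# keys [('file-id', 'rev-1'), ('file-id', 'rev-x')] where 'rev-1' is
# already cached at sequence number 3 and 'rev-x' is not present, the
# result would be the string "3 .rev-x".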
2700
def _reset_cache(self):
2701
# Possibly this should be a LRU cache. A dictionary from key_prefix to
2702
# (cache_dict, history_vector) for parsed kndx files.
2703
self._kndx_cache = {}
2704
self._scope = self._get_scope()
2705
allow_writes = self._allow_writes()
2711
def _sort_keys_by_io(self, keys, positions):
2712
"""Figure out an optimal order to read the records for the given keys.
2714
Sort keys, grouped by index and sorted by position.
2716
:param keys: A list of keys whose records we want to read. This will be
2718
:param positions: A dict, such as the one returned by
2719
_get_components_positions()
2722
def get_sort_key(key):
2723
index_memo = positions[key][1]
2724
# Group by prefix and position. index_memo[0] is the key, so it is
2725
# (file_id, revision_id) and we don't want to sort on revision_id,
2726
# index_memo[1] is the position, and index_memo[2] is the size,
2727
# which doesn't matter for the sort
2728
return index_memo[0][:-1], index_memo[1]
2729
return keys.sort(key=get_sort_key)
2731
_get_total_build_size = _get_total_build_size
2733
def _split_key(self, key):
2734
"""Split key into a prefix and suffix."""
2735
return key[:-1], key[-1]
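# Illustrative examples (hypothetical values) of the split above:
# ('file-id', 'rev-1') -> (('file-id',), 'rev-1'); for constant-mapped
# files such as revisions, ('rev-1',) -> ((), 'rev-1').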
2738
class _KeyRefs(object):
2741
# dict mapping 'key' to 'set of keys referring to that key'
2744
def add_references(self, key, refs):
2745
# Record the new references
2746
for referenced in refs:
2748
needed_by = self.refs[referenced]
2750
needed_by = self.refs[referenced] = set()
2752
# Discard references satisfied by the new key
2755
def get_unsatisfied_refs(self):
2756
return self.refs.iterkeys()
2758
def add_key(self, key):
2762
# No keys depended on this key. That's ok.
2765
def add_keys(self, keys):
2769
def get_referrers(self):
2771
for referrers in self.refs.itervalues():
2772
result.update(referrers)
2776
class _KnitGraphIndex(object):
2777
"""A KnitVersionedFiles index layered on GraphIndex."""
2779
def __init__(self, graph_index, is_locked, deltas=False, parents=True,
2780
add_callback=None, track_external_parent_refs=False):
2781
"""Construct a KnitGraphIndex on a graph_index.
2783
:param graph_index: An implementation of bzrlib.index.GraphIndex.
2784
:param is_locked: A callback to check whether the object should answer
2786
:param deltas: Allow delta-compressed records.
2787
:param parents: If True, record knits parents, if not do not record
2789
:param add_callback: If not None, allow additions to the index and call
2790
this callback with a list of added GraphIndex nodes:
2791
[(node, value, node_refs), ...]
2792
:param is_locked: A callback, returns True if the index is locked and
2794
:param track_external_parent_refs: If True, record all external parent
2795
references parents from added records. These can be retrieved
2796
later by calling get_missing_parents().
2798
self._add_callback = add_callback
2799
self._graph_index = graph_index
2800
self._deltas = deltas
2801
self._parents = parents
2802
if deltas and not parents:
2803
# XXX: TODO: Delta tree and parent graph should be conceptually
2805
raise KnitCorrupt(self, "Cannot do delta compression without "
2807
self.has_graph = parents
2808
self._is_locked = is_locked
2809
self._missing_compression_parents = set()
2810
if track_external_parent_refs:
2811
self._key_dependencies = _KeyRefs()
2813
self._key_dependencies = None
2816
return "%s(%r)" % (self.__class__.__name__, self._graph_index)
2818
def add_records(self, records, random_id=False,
2819
missing_compression_parents=False):
2820
"""Add multiple records to the index.
2822
This function does not insert data into the Immutable GraphIndex
2823
backing the KnitGraphIndex, instead it prepares data for insertion by
2824
the caller and checks that it is safe to insert then calls
2825
self._add_callback with the prepared GraphIndex nodes.
2827
:param records: a list of tuples:
2828
(key, options, access_memo, parents).
2829
:param random_id: If True the ids being added were randomly generated
2830
and no check for existence will be performed.
2831
:param missing_compression_parents: If True the records being added are
2832
only compressed against texts already in the index (or inside
2833
records). If False the records all refer to unavailable texts (or
2834
texts inside records) as compression parents.
2836
if not self._add_callback:
2837
raise errors.ReadOnlyError(self)
2838
# we hope there are no repositories with inconsistent parentage
2842
compression_parents = set()
2843
key_dependencies = self._key_dependencies
2844
for (key, options, access_memo, parents) in records:
2846
parents = tuple(parents)
2847
if key_dependencies is not None:
2848
key_dependencies.add_references(key, parents)
2849
index, pos, size = access_memo
2850
if 'no-eol' in options:
2854
value += "%d %d" % (pos, size)
2855
if not self._deltas:
2856
if 'line-delta' in options:
2857
raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
2860
if 'line-delta' in options:
2861
node_refs = (parents, (parents[0],))
2862
if missing_compression_parents:
2863
compression_parents.add(parents[0])
2865
node_refs = (parents, ())
2867
node_refs = (parents, )
2870
raise KnitCorrupt(self, "attempt to add node with parents "
2871
"in parentless index.")
2873
keys[key] = (value, node_refs)
2876
present_nodes = self._get_entries(keys)
2877
for (index, key, value, node_refs) in present_nodes:
2878
if (value[0] != keys[key][0][0] or
2879
node_refs[:1] != keys[key][1][:1]):
2880
raise KnitCorrupt(self, "inconsistent details in add_records"
2881
": %s %s" % ((value, node_refs), keys[key]))
2885
for key, (value, node_refs) in keys.iteritems():
2886
result.append((key, value, node_refs))
2888
for key, (value, node_refs) in keys.iteritems():
2889
result.append((key, value))
2890
self._add_callback(result)
2891
if missing_compression_parents:
2892
# This may appear to be incorrect (it does not check for
2893
# compression parents that are in the existing graph index),
2894
# but such records won't have been buffered, so this is
2895
# actually correct: every entry when
2896
# missing_compression_parents==True either has a missing parent, or
2897
# a parent that is one of the keys in records.
2898
compression_parents.difference_update(keys)
2899
self._missing_compression_parents.update(compression_parents)
2900
# Adding records may have satisfied missing compression parents.
2901
self._missing_compression_parents.difference_update(keys)
2903
def scan_unvalidated_index(self, graph_index):
2904
"""Inform this _KnitGraphIndex that there is an unvalidated index.
2906
This allows this _KnitGraphIndex to keep track of any missing
2907
compression parents we may want to have filled in to make those
2910
:param graph_index: A GraphIndex
2913
new_missing = graph_index.external_references(ref_list_num=1)
2914
new_missing.difference_update(self.get_parent_map(new_missing))
2915
self._missing_compression_parents.update(new_missing)
2916
if self._key_dependencies is not None:
2917
# Add parent refs from graph_index (and discard parent refs that
2918
# the graph_index has).
2919
for node in graph_index.iter_all_entries():
2920
self._key_dependencies.add_references(node[1], node[3][0])
2922
def get_missing_compression_parents(self):
2923
"""Return the keys of missing compression parents.
2925
Missing compression parents occur when a record stream was missing
2926
basis texts, or an index was scanned that had missing basis texts.
2928
return frozenset(self._missing_compression_parents)
2930
def get_missing_parents(self):
2931
"""Return the keys of missing parents."""
2932
# If updating this, you should also update
2933
# groupcompress._GCGraphIndex.get_missing_parents
2934
# We may have false positives, so filter those out.
2935
self._key_dependencies.add_keys(
2936
self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
2937
return frozenset(self._key_dependencies.get_unsatisfied_refs())
2939
def _check_read(self):
2940
"""raise if reads are not permitted."""
2941
if not self._is_locked():
2942
raise errors.ObjectNotLocked(self)
2944
def _check_write_ok(self):
2945
"""Assert if writes are not permitted."""
2946
if not self._is_locked():
2947
raise errors.ObjectNotLocked(self)
2949
def _compression_parent(self, an_entry):
2950
# return the key that an_entry is compressed against, or None
2951
# Grab the second parent list (as deltas implies parents currently)
2952
compression_parents = an_entry[3][1]
2953
if not compression_parents:
2955
if len(compression_parents) != 1:
2956
raise AssertionError(
2957
"Too many compression parents: %r" % compression_parents)
2958
return compression_parents[0]
2960
def get_build_details(self, keys):
2961
"""Get the method, index_memo and compression parent for version_ids.
2963
Ghosts are omitted from the result.
2965
:param keys: An iterable of keys.
2966
:return: A dict of key:
2967
(index_memo, compression_parent, parents, record_details).
2969
opaque structure to pass to read_records to extract the raw
2972
Content that this record is built upon, may be None
2974
Logical parents of this node
2976
extra information about the content which needs to be passed to
2977
Factory.parse_record
2981
entries = self._get_entries(keys, False)
2982
for entry in entries:
2984
if not self._parents:
2987
parents = entry[3][0]
2988
if not self._deltas:
2989
compression_parent_key = None
2991
compression_parent_key = self._compression_parent(entry)
2992
noeol = (entry[2][0] == 'N')
2993
if compression_parent_key:
2994
method = 'line-delta'
2997
result[key] = (self._node_to_position(entry),
2998
compression_parent_key, parents,
3002
def _get_entries(self, keys, check_present=False):
3003
"""Get the entries for keys.
3005
:param keys: An iterable of index key tuples.
3010
for node in self._graph_index.iter_entries(keys):
3012
found_keys.add(node[1])
3014
# adapt parentless index to the rest of the code.
3015
for node in self._graph_index.iter_entries(keys):
3016
yield node[0], node[1], node[2], ()
3017
found_keys.add(node[1])
3019
missing_keys = keys.difference(found_keys)
3021
raise RevisionNotPresent(missing_keys.pop(), self)
3023
def get_method(self, key):
3024
"""Return compression method of specified key."""
3025
return self._get_method(self._get_node(key))
3027
def _get_method(self, node):
3028
if not self._deltas:
3030
if self._compression_parent(node):
3035
def _get_node(self, key):
3037
return list(self._get_entries([key]))[0]
3039
raise RevisionNotPresent(key, self)
3041
def get_options(self, key):
3042
"""Return a list representing options.
3046
node = self._get_node(key)
3047
options = [self._get_method(node)]
3048
if node[2][0] == 'N':
3049
options.append('no-eol')
3052
def get_parent_map(self, keys):
3053
"""Get a map of the parents of keys.
3055
:param keys: The keys to look up parents for.
3056
:return: A mapping from keys to parents. Absent keys are absent from
3060
nodes = self._get_entries(keys)
3064
result[node[1]] = node[3][0]
3067
result[node[1]] = None
3070
def get_position(self, key):
3071
"""Return details needed to access the version.
3073
:return: a tuple (index, data position, size) to hand to the access
3074
logic to get the record.
3076
node = self._get_node(key)
3077
return self._node_to_position(node)
3079
has_key = _mod_index._has_key_from_parent_map
3082
"""Get all the keys in the collection.
3084
The keys are not ordered.
3087
return [node[1] for node in self._graph_index.iter_all_entries()]
3089
missing_keys = _mod_index._missing_keys_from_parent_map
3091
def _node_to_position(self, node):
3092
"""Convert an index value to position details."""
3093
bits = node[2][1:].split(' ')
3094
return node[0], int(bits[0]), int(bits[1])
3096
def _sort_keys_by_io(self, keys, positions):
3097
"""Figure out an optimal order to read the records for the given keys.
3099
Sort keys, grouped by index and sorted by position.
3101
:param keys: A list of keys whose records we want to read. This will be
3103
:param positions: A dict, such as the one returned by
3104
_get_components_positions()
3107
def get_index_memo(key):
3108
# index_memo is at offset [1]. It is made up of (GraphIndex,
3109
# position, size). GI is an object, which will be unique for each
3110
# pack file. This causes us to group by pack file, then sort by
3111
# position. Size doesn't matter, but it isn't worth breaking up the
3113
return positions[key][1]
3114
return keys.sort(key=get_index_memo)
3116
_get_total_build_size = _get_total_build_size
3119
class _KnitKeyAccess(object):
3120
"""Access to records in .knit files."""
3122
def __init__(self, transport, mapper):
3123
"""Create a _KnitKeyAccess with transport and mapper.
3125
:param transport: The transport the access object is rooted at.
3126
:param mapper: The mapper used to map keys to .knit files.
3128
self._transport = transport
3129
self._mapper = mapper
3131
def add_raw_records(self, key_sizes, raw_data):
3132
"""Add raw knit bytes to a storage area.
3134
The data is spooled to the container writer in one bytes-record per
3137
:param sizes: An iterable of tuples containing the key and size of each
3139
:param raw_data: A bytestring containing the data.
3140
:return: A list of memos to retrieve the record later. Each memo is an
3141
opaque index memo. For _KnitKeyAccess the memo is (key, pos,
3142
length), where the key is the record key.
3144
if type(raw_data) is not str:
3145
raise AssertionError(
3146
'data must be plain bytes was %s' % type(raw_data))
3149
# TODO: This can be tuned for writing to sftp and other servers where
3150
# append() is relatively expensive by grouping the writes to each key
3152
for key, size in key_sizes:
3153
path = self._mapper.map(key)
3155
base = self._transport.append_bytes(path + '.knit',
3156
raw_data[offset:offset+size])
3157
except errors.NoSuchFile:
3158
self._transport.mkdir(osutils.dirname(path))
3159
base = self._transport.append_bytes(path + '.knit',
3160
raw_data[offset:offset+size])
3164
result.append((key, base, size))
3168
"""Flush pending writes on this access object.
3170
For .knit files this is a no-op.
3174
def get_raw_records(self, memos_for_retrieval):
3175
"""Get the raw bytes for a records.
3177
:param memos_for_retrieval: An iterable containing the access memo for
3178
retrieving the bytes.
3179
:return: An iterator over the bytes of the records.
3181
# first pass, group into same-index request to minimise readv's issued.
3183
current_prefix = None
3184
for (key, offset, length) in memos_for_retrieval:
3185
if current_prefix == key[:-1]:
3186
current_list.append((offset, length))
3188
if current_prefix is not None:
3189
request_lists.append((current_prefix, current_list))
3190
current_prefix = key[:-1]
3191
current_list = [(offset, length)]
3192
# handle the last entry
3193
if current_prefix is not None:
3194
request_lists.append((current_prefix, current_list))
3195
for prefix, read_vector in request_lists:
3196
path = self._mapper.map(prefix) + '.knit'
3197
for pos, data in self._transport.readv(path, read_vector):
3201
class _DirectPackAccess(object):
3202
"""Access to data in one or more packs with less translation."""
3204
def __init__(self, index_to_packs, reload_func=None, flush_func=None):
3205
"""Create a _DirectPackAccess object.
3207
:param index_to_packs: A dict mapping index objects to the transport
3208
and file names for obtaining data.
3209
:param reload_func: A function to call if we determine that the pack
3210
files have moved and we need to reload our caches. See
3211
bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
3213
self._container_writer = None
3214
self._write_index = None
3215
self._indices = index_to_packs
3216
self._reload_func = reload_func
3217
self._flush_func = flush_func
3219
def add_raw_records(self, key_sizes, raw_data):
3220
"""Add raw knit bytes to a storage area.
3222
The data is spooled to the container writer in one bytes-record per
3225
:param sizes: An iterable of tuples containing the key and size of each
3227
:param raw_data: A bytestring containing the data.
3228
:return: A list of memos to retrieve the record later. Each memo is an
3229
opaque index memo. For _DirectPackAccess the memo is (index, pos,
3230
length), where the index field is the write_index object supplied
3231
to the PackAccess object.
3233
if type(raw_data) is not str:
3234
raise AssertionError(
3235
'data must be plain bytes was %s' % type(raw_data))
3238
for key, size in key_sizes:
3239
p_offset, p_length = self._container_writer.add_bytes_record(
3240
raw_data[offset:offset+size], [])
3242
result.append((self._write_index, p_offset, p_length))
3246
"""Flush pending writes on this access object.
3248
This will flush any buffered writes to a NewPack.
3250
if self._flush_func is not None:
3253
def get_raw_records(self, memos_for_retrieval):
3254
"""Get the raw bytes for a records.
3256
:param memos_for_retrieval: An iterable containing the (index, pos,
3257
length) memo for retrieving the bytes. The Pack access method
3258
looks up the pack to use for a given record in its index_to_pack
3260
:return: An iterator over the bytes of the records.
3262
# first pass, group into same-index requests
3264
current_index = None
3265
for (index, offset, length) in memos_for_retrieval:
3266
if current_index == index:
3267
current_list.append((offset, length))
3269
if current_index is not None:
3270
request_lists.append((current_index, current_list))
3271
current_index = index
3272
current_list = [(offset, length)]
3273
# handle the last entry
3274
if current_index is not None:
3275
request_lists.append((current_index, current_list))
3276
for index, offsets in request_lists:
3278
transport, path = self._indices[index]
3280
# A KeyError here indicates that someone has triggered an index
3281
# reload, and this index has gone missing, we need to start
3283
if self._reload_func is None:
3284
# If we don't have a _reload_func there is nothing that can
3287
raise errors.RetryWithNewPacks(index,
3288
reload_occurred=True,
3289
exc_info=sys.exc_info())
3291
reader = pack.make_readv_reader(transport, path, offsets)
3292
for names, read_func in reader.iter_records():
3293
yield read_func(None)
3294
except errors.NoSuchFile:
3295
# A NoSuchFile error indicates that a pack file has gone
3296
# missing on disk, we need to trigger a reload, and start over.
3297
if self._reload_func is None:
3299
raise errors.RetryWithNewPacks(transport.abspath(path),
3300
reload_occurred=False,
3301
exc_info=sys.exc_info())
3303
def set_writer(self, writer, index, transport_packname):
3304
"""Set a writer to use for adding data."""
3305
if index is not None:
3306
self._indices[index] = transport_packname
3307
self._container_writer = writer
3308
self._write_index = index
3310
def reload_or_raise(self, retry_exc):
3311
"""Try calling the reload function, or re-raise the original exception.
3313
This should be called after _DirectPackAccess raises a
3314
RetryWithNewPacks exception. This function will handle the common logic
3315
of determining when the error is fatal versus being temporary.
3316
It will also make sure that the original exception is raised, rather
3317
than the RetryWithNewPacks exception.
3319
If this function returns, then the calling function should retry
3320
whatever operation was being performed. Otherwise an exception will
3323
:param retry_exc: A RetryWithNewPacks exception.
3326
if self._reload_func is None:
3328
elif not self._reload_func():
3329
# The reload claimed that nothing changed
3330
if not retry_exc.reload_occurred:
3331
# If there wasn't an earlier reload, then we really were
3332
# expecting to find changes. We didn't find them, so this is a
3336
exc_class, exc_value, exc_traceback = retry_exc.exc_info
3337
raise exc_class, exc_value, exc_traceback
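# A minimal sketch (hypothetical 'access' and 'do_read' names) of the retry
# pattern this method supports, matching the usage seen elsewhere in this
# module:
#
#   while True:
#       try:
#           return do_read()
#       except errors.RetryWithNewPacks, e:
#           access.reload_or_raise(e)
#           # reload succeeded, so loop and retry the read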
3340
# Deprecated, use PatienceSequenceMatcher instead
3341
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher
3344
def annotate_knit(knit, revision_id):
3345
"""Annotate a knit with no cached annotations.
3347
This implementation is for knits with no cached annotations.
3348
It will work for knits with cached annotations, but this is not
recommended.
"""
3351
annotator = _KnitAnnotator(knit)
3352
return iter(annotator.annotate_flat(revision_id))
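# An illustrative (hypothetical) use of annotate_knit; each yielded item is
# expected to pair an origin key with a line of text:
#
#   for origin_key, line in annotate_knit(knit, revision_id):
#       print origin_key, line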
3355
class _KnitAnnotator(annotate.Annotator):
3356
"""Build up the annotations for a text."""
3358
def __init__(self, vf):
3359
annotate.Annotator.__init__(self, vf)
3361
# TODO: handle Nodes which cannot be extracted
3362
# self._ghosts = set()
3364
# Map from (key, parent_key) => matching_blocks, should be 'use once'
3365
self._matching_blocks = {}
3367
# KnitContent objects
3368
self._content_objects = {}
3369
# The number of children that depend on this fulltext content object
3370
self._num_compression_children = {}
3371
# Delta records that need their compression parent before they can be
3373
self._pending_deltas = {}
3374
# Fulltext records that are waiting for their parents' fulltexts before
3375
# they can be yielded for annotation
3376
self._pending_annotation = {}
3378
self._all_build_details = {}
3380
def _get_build_graph(self, key):
3381
"""Get the graphs for building texts and annotations.
3383
The data you need for creating a full text may be different than the
3384
data you need to annotate that text. (At a minimum, you need both
3385
parents to create an annotation, but only need 1 parent to generate the
fulltext.)
3388
:return: A list of (key, index_memo) records, suitable for
3389
passing to read_records_iter to start reading in the raw data from
3392
pending = set([key])
3395
self._num_needed_children[key] = 1
3397
# get all pending nodes
3398
this_iteration = pending
3399
build_details = self._vf._index.get_build_details(this_iteration)
3400
self._all_build_details.update(build_details)
3401
# new_nodes = self._vf._index._get_entries(this_iteration)
3403
for key, details in build_details.iteritems():
3404
(index_memo, compression_parent, parent_keys,
3405
record_details) = details
3406
self._parent_map[key] = parent_keys
3407
self._heads_provider = None
3408
records.append((key, index_memo))
3409
# Do we actually need to check _annotated_lines?
3410
pending.update([p for p in parent_keys
3411
if p not in self._all_build_details])
3413
for parent_key in parent_keys:
3414
if parent_key in self._num_needed_children:
3415
self._num_needed_children[parent_key] += 1
3417
self._num_needed_children[parent_key] = 1
3418
if compression_parent:
3419
if compression_parent in self._num_compression_children:
3420
self._num_compression_children[compression_parent] += 1
3422
self._num_compression_children[compression_parent] = 1
3424
missing_versions = this_iteration.difference(build_details.keys())
3425
if missing_versions:
3426
for key in missing_versions:
3427
if key in self._parent_map and key in self._text_cache:
3428
# We already have this text ready, we just need to
3429
# yield it later so we get it annotated
3431
parent_keys = self._parent_map[key]
3432
for parent_key in parent_keys:
3433
if parent_key in self._num_needed_children:
3434
self._num_needed_children[parent_key] += 1
3436
self._num_needed_children[parent_key] = 1
3437
pending.update([p for p in parent_keys
3438
if p not in self._all_build_details])
3440
raise errors.RevisionNotPresent(key, self._vf)
3441
# Generally we will want to read the records in reverse order, because
3442
# we find the parent nodes after the children
3444
return records, ann_keys
3446
def _get_needed_texts(self, key, pb=None):
3447
# if True or len(self._vf._fallback_vfs) > 0:
3448
if len(self._vf._fallback_vfs) > 0:
3449
# If we have fallbacks, go to the generic path
3450
for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
3455
records, ann_keys = self._get_build_graph(key)
3456
for idx, (sub_key, text, num_lines) in enumerate(
3457
self._extract_texts(records)):
3459
pb.update('annotating', idx, len(records))
3460
yield sub_key, text, num_lines
3461
for sub_key in ann_keys:
3462
text = self._text_cache[sub_key]
3463
num_lines = len(text) # bad assumption
3464
yield sub_key, text, num_lines
3466
except errors.RetryWithNewPacks, e:
3467
self._vf._access.reload_or_raise(e)
3468
# The cached build_details are no longer valid
3469
self._all_build_details.clear()
3471
def _cache_delta_blocks(self, key, compression_parent, delta, lines):
3472
parent_lines = self._text_cache[compression_parent]
3473
blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
3474
self._matching_blocks[(key, compression_parent)] = blocks
3476
def _expand_record(self, key, parent_keys, compression_parent, record,
3479
if compression_parent:
3480
if compression_parent not in self._content_objects:
3481
# Waiting for the parent
3482
self._pending_deltas.setdefault(compression_parent, []).append(
3483
(key, parent_keys, record, record_details))
3485
# We have the basis parent, so expand the delta
3486
num = self._num_compression_children[compression_parent]
3489
base_content = self._content_objects.pop(compression_parent)
3490
self._num_compression_children.pop(compression_parent)
3492
self._num_compression_children[compression_parent] = num
3493
base_content = self._content_objects[compression_parent]
3494
# It is tempting to want to copy_base_content=False for the last
3495
# child object. However, whenever noeol=False,
3496
# self._text_cache[parent_key] is content._lines. So mutating it
3497
# gives very bad results.
3498
# The alternative is to copy the lines into text cache, but then we
3499
# are copying anyway, so just do it here.
3500
content, delta = self._vf._factory.parse_record(
3501
key, record, record_details, base_content,
3502
copy_base_content=True)
3505
content, _ = self._vf._factory.parse_record(
3506
key, record, record_details, None)
3507
if self._num_compression_children.get(key, 0) > 0:
3508
self._content_objects[key] = content
3509
lines = content.text()
3510
self._text_cache[key] = lines
3511
if delta is not None:
3512
self._cache_delta_blocks(key, compression_parent, delta, lines)
3515
def _get_parent_annotations_and_matches(self, key, text, parent_key):
3516
"""Get the list of annotations for the parent, and the matching lines.
3518
:param text: The opaque value given by _get_needed_texts
3519
:param parent_key: The key for the parent text
3520
:return: (parent_annotations, matching_blocks)
3521
parent_annotations is a list as long as the number of lines in
3523
matching_blocks is a list of (parent_idx, text_idx, len) tuples
3524
indicating which lines match between the two texts
3526
block_key = (key, parent_key)
3527
if block_key in self._matching_blocks:
3528
blocks = self._matching_blocks.pop(block_key)
3529
parent_annotations = self._annotations_cache[parent_key]
3530
return parent_annotations, blocks
3531
return annotate.Annotator._get_parent_annotations_and_matches(self,
3532
key, text, parent_key)
3534

    def _process_pending(self, key):
        """The content for 'key' was just processed.

        Determine if there is any more pending work to be processed.
        """
        to_return = []
        if key in self._pending_deltas:
            compression_parent = key
            children = self._pending_deltas.pop(key)
            for child_key, parent_keys, record, record_details in children:
                lines = self._expand_record(child_key, parent_keys,
                                            compression_parent,
                                            record, record_details)
                if self._check_ready_for_annotations(child_key, parent_keys):
                    to_return.append(child_key)
        # Also check any children that are waiting for this parent to be
        # annotation ready
        if key in self._pending_annotation:
            children = self._pending_annotation.pop(key)
            to_return.extend([c for c, p_keys in children
                              if self._check_ready_for_annotations(c, p_keys)])
        return to_return

    def _check_ready_for_annotations(self, key, parent_keys):
        """Return True if this text is ready to be yielded.

        Otherwise, this will return False, and queue the text into
        self._pending_annotation
        """
        for parent_key in parent_keys:
            if parent_key not in self._annotations_cache:
                # still waiting on at least one parent text, so queue it up
                # Note that if there are multiple parents, we need to wait
                # for all of them before this text can be annotated
                self._pending_annotation.setdefault(parent_key,
                    []).append((key, parent_keys))
                return False
        return True
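
    # Illustrative note (not from the original source): self._pending_annotation
    # maps a parent key that is still being waited on to the children that need
    # it, e.g.:
    #
    #     {parent_key: [(child_key, child_parent_keys), ...]}
    #
    # A child is queued under the first parent found to be missing; when that
    # parent becomes available, _process_pending() re-runs this check and may
    # re-queue the child under another parent that is still outstanding.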

    def _extract_texts(self, records):
        """Extract the various texts needed based on records"""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        # Basic data flow:
        # 1) As 'records' are read, see if we can expand these records into
        #    Content objects (and thus lines)
        # 2) If a given line-delta is waiting on its compression parent, it
        #    gets queued up into self._pending_deltas, otherwise we expand
        #    it, and put it into self._text_cache and self._content_objects
        # 3) If we expanded the text, we will then check to see if all
        #    parents have also been processed. If so, this text gets yielded,
        #    else this record gets set aside into pending_annotation
        # 4) Further, if we expanded the text in (2), we will then check to
        #    see if there are any children in self._pending_deltas waiting to
        #    also be processed. If so, we go back to (2) for those
        # 5) Further again, if we yielded the text, we can then check if that
        #    'unlocks' any of the texts in pending_annotations, which should
        #    then get yielded as well
        # Note that both steps 4 and 5 are 'recursive' in that unlocking one
        # compression child could unlock yet another, and yielding a fulltext
        # will also 'unlock' the children that are waiting on that annotation.
        # (Though also, unlocking 1 parent's fulltext, does not unlock a child
        # if other parents are also waiting.)
        # We want to yield content before expanding child content objects, so
        # that we know when we can re-use the content lines, and the annotation
        # code can know when it can stop caching fulltexts, as well.
        # (A concrete walk-through of this flow follows this method.)
        for (key, record, digest) in self._vf._read_records_iter(records):
            # Children that are missing their compression parent are queued up
            # in self._pending_deltas by _expand_record
            details = self._all_build_details[key]
            (_, compression_parent, parent_keys, record_details) = details
            lines = self._expand_record(key, parent_keys, compression_parent,
                                        record, record_details)
            if lines is None:
                # Pending delta should be queued up
                continue
            # At this point, we may be able to yield this content, if all
            # parents are also finished
            yield_this_text = self._check_ready_for_annotations(key,
                                                                parent_keys)
            if yield_this_text:
                # All parents present
                yield key, lines, len(lines)
            to_process = self._process_pending(key)
            while to_process:
                this_process = to_process
                to_process = []
                for key in this_process:
                    lines = self._text_cache[key]
                    yield key, lines, len(lines)
                    to_process.extend(self._process_pending(key))
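
    # Illustrative walk-through (not from the original source) of the data flow
    # described in _extract_texts, assuming record A is a fulltext and record B
    # is a line-delta whose compression parent is A, but B happens to be read
    # first:
    #
    #     _expand_record(B, ...)  -> A not yet in self._content_objects, so B
    #                                is queued in self._pending_deltas[A] and
    #                                None is returned
    #     _expand_record(A, ...)  -> A is expanded, cached and (if its parents
    #                                are annotated) yielded
    #     _process_pending(A)     -> pops self._pending_deltas[A], expands B
    #                                against A, and returns [B] once B's
    #                                parents are all annotated, so B is then
    #                                yielded as well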


try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data