# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Knit versionedfile implementation.

A knit is a versioned file implementation that supports efficient append only
updates.

lifeless: the data file is made up of "delta records". each delta record has a delta header
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
the -expanded data- (ie, the delta applied to the parent). the delta also ends with an
end-marker; simply "end VERSION"

delta can be line or full contents.
... the 8's there are the index number of the annotation.
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
8 e.set('executable', 'yes')
8 if elt.get('executable') == 'yes':
8 ie.executable = True
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad

09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
09:33 < lifeless> right
09:33 < jrydberg> lifeless: the position and size is the range in the data file

so the index sequence is the dictionary compressed sequence number used
in the deltas to provide line annotation
"""
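
# Illustrative sketch only, not part of the original module: the layout
# described above means a decompressed data record can be read as a header
# line "version <version-id> <size-in-lines> <sha1-of-expanded-text>", then
# the (possibly annotated) content lines, then a trailing "end <version-id>"
# marker.  The helper name below is invented for demonstration; the real
# parsing lives in KnitVersionedFiles._parse_record_unchecked and the
# factory classes.
def _example_split_data_record(record_lines):
    """Split one decompressed knit data record into its parts."""
    header = record_lines[0].split()
    # header == ['version', version_id, size_in_lines, sha1_of_expanded_text]
    version_id, expected_sha1 = header[1], header[3]
    if record_lines[-1].rstrip('\n') != 'end %s' % version_id:
        raise ValueError('record for %s has no end marker' % version_id)
    content_lines = record_lines[1:-1]
    return version_id, content_lines, expected_sha1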
from cStringIO import StringIO
from itertools import izip

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib.errors import (
    RevisionAlreadyPresent,
from bzrlib.osutils import (
from bzrlib.versionedfile import (
    AbsentContentFactory,
    ChunkedContentFactory,

# TODO: Split out code specific to this format into an associated object.

# TODO: Can we put in some kind of value to check that the index and data
# files belong together?

# TODO: accommodate binaries, perhaps by storing a byte count

# TODO: function to check whole file

# TODO: atomically append data, then measure backwards from the cursor
# position after writing to work out where it was located. we may need to
# bypass python file buffering.

DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
_STREAM_MIN_BUFFER_SIZE = 5*1024*1024
130
class KnitAdapter(object):
131
"""Base class for knit record adaption."""
133
def __init__(self, basis_vf):
134
"""Create an adapter which accesses full texts from basis_vf.
136
:param basis_vf: A versioned file to access basis texts of deltas from.
137
May be None for adapters that do not need to access basis texts.
139
self._data = KnitVersionedFiles(None, None)
140
self._annotate_factory = KnitAnnotateFactory()
141
self._plain_factory = KnitPlainFactory()
142
self._basis_vf = basis_vf
145
class FTAnnotatedToUnannotated(KnitAdapter):
146
"""An adapter from FT annotated knits to unannotated ones."""
148
def get_bytes(self, factory):
149
annotated_compressed_bytes = factory._raw_record
151
self._data._parse_record_unchecked(annotated_compressed_bytes)
152
content = self._annotate_factory.parse_fulltext(contents, rec[1])
153
size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
157
class DeltaAnnotatedToUnannotated(KnitAdapter):
158
"""An adapter for deltas from annotated to unannotated."""
160
def get_bytes(self, factory):
161
annotated_compressed_bytes = factory._raw_record
163
self._data._parse_record_unchecked(annotated_compressed_bytes)
164
delta = self._annotate_factory.parse_line_delta(contents, rec[1],
166
contents = self._plain_factory.lower_line_delta(delta)
167
size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to full texts."""
174
def get_bytes(self, factory):
175
annotated_compressed_bytes = factory._raw_record
177
self._data._parse_record_unchecked(annotated_compressed_bytes)
178
content, delta = self._annotate_factory.parse_record(factory.key[-1],
179
contents, factory._build_details, None)
180
return ''.join(content.text())
class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter for deltas from annotated knits to full texts."""
186
def get_bytes(self, factory):
187
annotated_compressed_bytes = factory._raw_record
189
self._data._parse_record_unchecked(annotated_compressed_bytes)
190
delta = self._annotate_factory.parse_line_delta(contents, rec[1],
192
compression_parent = factory.parents[0]
193
basis_entry = self._basis_vf.get_record_stream(
194
[compression_parent], 'unordered', True).next()
195
if basis_entry.storage_kind == 'absent':
196
raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
197
basis_chunks = basis_entry.get_bytes_as('chunked')
198
basis_lines = osutils.chunks_to_lines(basis_chunks)
        # Manually apply the delta because we have one annotated content and
        # one plain.
201
basis_content = PlainKnitContent(basis_lines, compression_parent)
202
basis_content.apply_delta(delta, rec[1])
203
basis_content._should_strip_eol = factory._build_details[1]
204
return ''.join(basis_content.text())
class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to full texts."""
210
def get_bytes(self, factory):
211
compressed_bytes = factory._raw_record
213
self._data._parse_record_unchecked(compressed_bytes)
214
content, delta = self._plain_factory.parse_record(factory.key[-1],
215
contents, factory._build_details, None)
216
return ''.join(content.text())
class DeltaPlainToFullText(KnitAdapter):
    """An adapter for deltas from plain knits to full texts."""
222
def get_bytes(self, factory):
223
compressed_bytes = factory._raw_record
225
self._data._parse_record_unchecked(compressed_bytes)
226
delta = self._plain_factory.parse_line_delta(contents, rec[1])
227
compression_parent = factory.parents[0]
228
# XXX: string splitting overhead.
229
basis_entry = self._basis_vf.get_record_stream(
230
[compression_parent], 'unordered', True).next()
231
if basis_entry.storage_kind == 'absent':
232
raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
233
basis_chunks = basis_entry.get_bytes_as('chunked')
234
basis_lines = osutils.chunks_to_lines(basis_chunks)
235
basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
238
content, _ = self._plain_factory.parse_record(rec[1], contents,
239
factory._build_details, basis_content)
240
return ''.join(content.text())
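

# Illustrative sketch only, not part of the original module: these adapter
# classes are normally reached through the (source storage kind, target kind)
# adapter registry, the same lookup KnitContentFactory.get_bytes_as and
# insert_record_stream perform further down.  The helper name and the
# 'record' argument are assumptions for demonstration.
def _example_expand_record_to_fulltext(record, basis_vf=None):
    """Expand a knit record (a ContentFactory) to fulltext bytes.

    Fulltext records can be adapted with basis_vf=None; delta records need
    a versionedfile that can supply their compression parent.
    """
    from bzrlib.versionedfile import adapter_registry
    adapter_factory = adapter_registry.get((record.storage_kind, 'fulltext'))
    adapter = adapter_factory(basis_vf)
    return adapter.get_bytes(record)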
243
class KnitContentFactory(ContentFactory):
244
"""Content factory for streaming from knits.
246
:seealso ContentFactory:
249
def __init__(self, key, parents, build_details, sha1, raw_record,
250
annotated, knit=None, network_bytes=None):
251
"""Create a KnitContentFactory for key.
254
:param parents: The parents.
255
:param build_details: The build details as returned from
257
:param sha1: The sha1 expected from the full text of this object.
258
:param raw_record: The bytes of the knit data from disk.
259
:param annotated: True if the raw data is annotated.
260
:param network_bytes: None to calculate the network bytes on demand,
261
not-none if they are already known.
263
ContentFactory.__init__(self)
266
self.parents = parents
267
if build_details[0] == 'line-delta':
272
annotated_kind = 'annotated-'
275
self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
276
self._raw_record = raw_record
277
self._network_bytes = network_bytes
278
self._build_details = build_details
281
def _create_network_bytes(self):
282
"""Create a fully serialised network version for transmission."""
283
# storage_kind, key, parents, Noeol, raw_record
284
key_bytes = '\x00'.join(self.key)
285
if self.parents is None:
286
parent_bytes = 'None:'
288
parent_bytes = '\t'.join('\x00'.join(key) for key in self.parents)
289
if self._build_details[1]:
293
network_bytes = "%s\n%s\n%s\n%s%s" % (self.storage_kind, key_bytes,
294
parent_bytes, noeol, self._raw_record)
295
self._network_bytes = network_bytes
297
def get_bytes_as(self, storage_kind):
298
if storage_kind == self.storage_kind:
299
if self._network_bytes is None:
300
self._create_network_bytes()
301
return self._network_bytes
302
if ('-ft-' in self.storage_kind and
303
storage_kind in ('chunked', 'fulltext')):
304
adapter_key = (self.storage_kind, 'fulltext')
305
adapter_factory = adapter_registry.get(adapter_key)
306
adapter = adapter_factory(None)
307
bytes = adapter.get_bytes(self)
308
if storage_kind == 'chunked':
312
if self._knit is not None:
            # Not redundant with direct conversion above - that only handles
            # fulltext cases.
315
if storage_kind == 'chunked':
316
return self._knit.get_lines(self.key[0])
317
elif storage_kind == 'fulltext':
318
return self._knit.get_text(self.key[0])
319
raise errors.UnavailableRepresentation(self.key, storage_kind,
323
class LazyKnitContentFactory(ContentFactory):
324
"""A ContentFactory which can either generate full text or a wire form.
326
:seealso ContentFactory:
329
def __init__(self, key, parents, generator, first):
330
"""Create a LazyKnitContentFactory.
332
:param key: The key of the record.
333
:param parents: The parents of the record.
334
:param generator: A _ContentMapGenerator containing the record for this
336
:param first: Is this the first content object returned from generator?
337
if it is, its storage kind is knit-delta-closure, otherwise it is
338
knit-delta-closure-ref
341
self.parents = parents
343
self._generator = generator
344
self.storage_kind = "knit-delta-closure"
346
self.storage_kind = self.storage_kind + "-ref"
349
def get_bytes_as(self, storage_kind):
350
if storage_kind == self.storage_kind:
352
return self._generator._wire_bytes()
354
# all the keys etc are contained in the bytes returned in the
357
if storage_kind in ('chunked', 'fulltext'):
358
chunks = self._generator._get_one_work(self.key).text()
359
if storage_kind == 'chunked':
362
return ''.join(chunks)
363
raise errors.UnavailableRepresentation(self.key, storage_kind,
367
def knit_delta_closure_to_records(storage_kind, bytes, line_end):
    """Convert a network record to an iterator over stream records.
370
:param storage_kind: The storage kind of the record.
371
Must be 'knit-delta-closure'.
372
:param bytes: The bytes of the record on the network.
374
generator = _NetworkContentMapGenerator(bytes, line_end)
375
return generator.get_record_stream()
378
def knit_network_to_record(storage_kind, bytes, line_end):
379
"""Convert a network record to a record object.
381
:param storage_kind: The storage kind of the record.
382
:param bytes: The bytes of the record on the network.
385
line_end = bytes.find('\n', start)
386
key = tuple(bytes[start:line_end].split('\x00'))
388
line_end = bytes.find('\n', start)
389
parent_line = bytes[start:line_end]
390
if parent_line == 'None:':
394
[tuple(segment.split('\x00')) for segment in parent_line.split('\t')
397
noeol = bytes[start] == 'N'
398
if 'ft' in storage_kind:
401
method = 'line-delta'
402
build_details = (method, noeol)
404
raw_record = bytes[start:]
405
annotated = 'annotated' in storage_kind
406
return [KnitContentFactory(key, parents, build_details, None, raw_record,
407
annotated, network_bytes=bytes)]
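

# Illustrative sketch only, not part of the original module: the wire form
# produced by KnitContentFactory._create_network_bytes and consumed by
# knit_network_to_record is the storage kind, the NUL-joined key, the
# tab-separated NUL-joined parents (or 'None:'), a one-byte no-eol flag
# ('N' when the final newline is absent), and then the raw record bytes.
# The helper below just restates that layout; its name and arguments are
# invented for demonstration.
def _example_knit_wire_bytes(storage_kind, key, parents, noeol_flag, raw_record):
    key_bytes = '\x00'.join(key)
    if parents is None:
        parent_bytes = 'None:'
    else:
        parent_bytes = '\t'.join('\x00'.join(parent) for parent in parents)
    return '%s\n%s\n%s\n%s%s' % (
        storage_kind, key_bytes, parent_bytes, noeol_flag, raw_record)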
410
class KnitContent(object):
411
"""Content of a knit version to which deltas can be applied.
413
This is always stored in memory as a list of lines with \n at the end,
414
plus a flag saying if the final ending is really there or not, because that
415
corresponds to the on-disk knit representation.
419
self._should_strip_eol = False
421
def apply_delta(self, delta, new_version_id):
422
"""Apply delta to this object to become new_version_id."""
423
raise NotImplementedError(self.apply_delta)
425
def line_delta_iter(self, new_lines):
426
"""Generate line-based delta from this content to new_lines."""
427
new_texts = new_lines.text()
428
old_texts = self.text()
429
s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
430
for tag, i1, i2, j1, j2 in s.get_opcodes():
433
# ofrom, oto, length, data
434
yield i1, i2, j2 - j1, new_lines._lines[j1:j2]
436
def line_delta(self, new_lines):
437
return list(self.line_delta_iter(new_lines))
440
def get_line_delta_blocks(knit_delta, source, target):
441
"""Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
442
target_len = len(target)
445
for s_begin, s_end, t_len, new_text in knit_delta:
446
true_n = s_begin - s_pos
449
# knit deltas do not provide reliable info about whether the
450
# last line of a file matches, due to eol handling.
451
if source[s_pos + n -1] != target[t_pos + n -1]:
454
yield s_pos, t_pos, n
455
t_pos += t_len + true_n
457
n = target_len - t_pos
459
if source[s_pos + n -1] != target[t_pos + n -1]:
462
yield s_pos, t_pos, n
463
yield s_pos + (target_len - t_pos), target_len, 0
466
class AnnotatedKnitContent(KnitContent):
467
"""Annotated content."""
469
def __init__(self, lines):
470
KnitContent.__init__(self)
474
"""Return a list of (origin, text) for each content line."""
475
lines = self._lines[:]
476
if self._should_strip_eol:
477
origin, last_line = lines[-1]
478
lines[-1] = (origin, last_line.rstrip('\n'))
481
def apply_delta(self, delta, new_version_id):
482
"""Apply delta to this object to become new_version_id."""
485
for start, end, count, delta_lines in delta:
486
lines[offset+start:offset+end] = delta_lines
487
offset = offset + (start - end) + count
491
lines = [text for origin, text in self._lines]
492
except ValueError, e:
493
# most commonly (only?) caused by the internal form of the knit
494
# missing annotation information because of a bug - see thread
496
raise KnitCorrupt(self,
497
"line in annotated knit missing annotation information: %s"
499
if self._should_strip_eol:
500
lines[-1] = lines[-1].rstrip('\n')
504
return AnnotatedKnitContent(self._lines[:])
507
class PlainKnitContent(KnitContent):
508
"""Unannotated content.
510
When annotate[_iter] is called on this content, the same version is reported
511
for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent
515
def __init__(self, lines, version_id):
516
KnitContent.__init__(self)
518
self._version_id = version_id
521
"""Return a list of (origin, text) for each content line."""
522
return [(self._version_id, line) for line in self._lines]
524
def apply_delta(self, delta, new_version_id):
525
"""Apply delta to this object to become new_version_id."""
528
for start, end, count, delta_lines in delta:
529
lines[offset+start:offset+end] = delta_lines
530
offset = offset + (start - end) + count
531
self._version_id = new_version_id
534
return PlainKnitContent(self._lines[:], self._version_id)
538
if self._should_strip_eol:
540
lines[-1] = lines[-1].rstrip('\n')
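

# Illustrative sketch only, not part of the original module: a knit line
# delta is a sequence of (start, end, count, lines) hunks, applied exactly
# as the apply_delta methods above do - replace the old slice [start:end]
# (with offsets adjusted as hunks are consumed) by the replacement lines.
# The sample data is invented for demonstration:
#   _example_apply_line_delta(['a\n', 'b\n'], [(1, 2, 1, ['B\n'])])
#   returns ['a\n', 'B\n'].
def _example_apply_line_delta(lines, delta):
    lines = list(lines)
    offset = 0
    for start, end, count, delta_lines in delta:
        lines[offset + start:offset + end] = delta_lines
        offset = offset + (start - end) + count
    return lines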
544
class _KnitFactory(object):
545
"""Base class for common Factory functions."""
547
def parse_record(self, version_id, record, record_details,
548
base_content, copy_base_content=True):
549
"""Parse a record into a full content object.
551
:param version_id: The official version id for this content
552
:param record: The data returned by read_records_iter()
553
:param record_details: Details about the record returned by
555
:param base_content: If get_build_details returns a compression_parent,
556
you must return a base_content here, else use None
        :param copy_base_content: When building from the base_content, decide
            whether to copy it and return a new object, or modify it in place.
560
:return: (content, delta) A Content object and possibly a line-delta,
563
method, noeol = record_details
564
if method == 'line-delta':
565
if copy_base_content:
566
content = base_content.copy()
568
content = base_content
569
delta = self.parse_line_delta(record, version_id)
570
content.apply_delta(delta, version_id)
572
content = self.parse_fulltext(record, version_id)
574
content._should_strip_eol = noeol
575
return (content, delta)
578
class KnitAnnotateFactory(_KnitFactory):
579
"""Factory for creating annotated Content objects."""
583
def make(self, lines, version_id):
584
num_lines = len(lines)
585
return AnnotatedKnitContent(zip([version_id] * num_lines, lines))
587
def parse_fulltext(self, content, version_id):
588
"""Convert fulltext to internal representation
590
fulltext content is of the format
591
revid(utf8) plaintext\n
592
internal representation is of the format:
595
# TODO: jam 20070209 The tests expect this to be returned as tuples,
596
# but the code itself doesn't really depend on that.
597
# Figure out a way to not require the overhead of turning the
598
# list back into tuples.
599
lines = [tuple(line.split(' ', 1)) for line in content]
600
return AnnotatedKnitContent(lines)
602
def parse_line_delta_iter(self, lines):
603
return iter(self.parse_line_delta(lines))
605
def parse_line_delta(self, lines, version_id, plain=False):
606
"""Convert a line based delta into internal representation.
608
line delta is in the form of:
609
intstart intend intcount
611
revid(utf8) newline\n
612
internal representation is
613
(start, end, count, [1..count tuples (revid, newline)])
615
:param plain: If True, the lines are returned as a plain
616
list without annotations, not as a list of (origin, content) tuples, i.e.
617
(start, end, count, [1..count newline])
624
def cache_and_return(line):
625
origin, text = line.split(' ', 1)
626
return cache.setdefault(origin, origin), text
628
# walk through the lines parsing.
629
# Note that the plain test is explicitly pulled out of the
630
# loop to minimise any performance impact
633
start, end, count = [int(n) for n in header.split(',')]
634
contents = [next().split(' ', 1)[1] for i in xrange(count)]
635
result.append((start, end, count, contents))
638
start, end, count = [int(n) for n in header.split(',')]
639
contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
640
result.append((start, end, count, contents))
643
def get_fulltext_content(self, lines):
644
"""Extract just the content lines from a fulltext."""
645
return (line.split(' ', 1)[1] for line in lines)
647
def get_linedelta_content(self, lines):
648
"""Extract just the content from a line delta.
650
This doesn't return all of the extra information stored in a delta.
651
Only the actual content lines.
656
header = header.split(',')
657
count = int(header[2])
658
for i in xrange(count):
659
origin, text = next().split(' ', 1)
662
def lower_fulltext(self, content):
663
"""convert a fulltext content record into a serializable form.
665
see parse_fulltext which this inverts.
667
return ['%s %s' % (o, t) for o, t in content._lines]
669
def lower_line_delta(self, delta):
670
"""convert a delta into a serializable form.
672
See parse_line_delta which this inverts.
674
# TODO: jam 20070209 We only do the caching thing to make sure that
675
# the origin is a valid utf-8 line, eventually we could remove it
677
for start, end, c, lines in delta:
678
out.append('%d,%d,%d\n' % (start, end, c))
679
out.extend(origin + ' ' + text
680
for origin, text in lines)
683
def annotate(self, knit, key):
684
content = knit._get_content(key)
685
# adjust for the fact that serialised annotations are only key suffixes
687
if type(key) is tuple:
689
origins = content.annotate()
691
for origin, line in origins:
692
result.append((prefix + (origin,), line))
695
# XXX: This smells a bit. Why would key ever be a non-tuple here?
696
# Aren't keys defined to be tuples? -- spiv 20080618
697
return content.annotate()
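

# Illustrative sketch only, not part of the original module: the annotated
# serialised form stores each line as "<origin> <text>", which
# parse_fulltext above splits back into (origin, text) tuples and
# lower_fulltext re-joins.  The sample revision ids and text are invented.
def _example_annotated_round_trip():
    serialised = ['rev-1 hello\n', 'rev-2 world\n']
    # parse_fulltext: split on the first space only
    parsed = [tuple(line.split(' ', 1)) for line in serialised]
    # parsed == [('rev-1', 'hello\n'), ('rev-2', 'world\n')]
    # lower_fulltext: rebuild the "origin text" lines
    lowered = ['%s %s' % (origin, text) for origin, text in parsed]
    return lowered == serialised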
700
class KnitPlainFactory(_KnitFactory):
701
"""Factory for creating plain Content objects."""
705
def make(self, lines, version_id):
706
return PlainKnitContent(lines, version_id)
708
def parse_fulltext(self, content, version_id):
709
"""This parses an unannotated fulltext.
        Note that this is not a noop - the internal representation
        has (versionid, line) - it's just a constant versionid.
714
return self.make(content, version_id)
716
def parse_line_delta_iter(self, lines, version_id):
718
num_lines = len(lines)
719
while cur < num_lines:
722
start, end, c = [int(n) for n in header.split(',')]
723
yield start, end, c, lines[cur:cur+c]
726
def parse_line_delta(self, lines, version_id):
727
return list(self.parse_line_delta_iter(lines, version_id))
729
def get_fulltext_content(self, lines):
730
"""Extract just the content lines from a fulltext."""
733
def get_linedelta_content(self, lines):
734
"""Extract just the content from a line delta.
736
This doesn't return all of the extra information stored in a delta.
737
Only the actual content lines.
742
header = header.split(',')
743
count = int(header[2])
744
for i in xrange(count):
747
def lower_fulltext(self, content):
748
return content.text()
750
def lower_line_delta(self, delta):
752
for start, end, c, lines in delta:
753
out.append('%d,%d,%d\n' % (start, end, c))
757
def annotate(self, knit, key):
758
annotator = _KnitAnnotator(knit)
759
return annotator.annotate_flat(key)
763
def make_file_factory(annotated, mapper):
764
"""Create a factory for creating a file based KnitVersionedFiles.
    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.
769
:param annotated: knit annotations are wanted.
770
:param mapper: The mapper from keys to paths.
772
def factory(transport):
773
index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
774
access = _KnitKeyAccess(transport, mapper)
775
return KnitVersionedFiles(index, access, annotated=annotated)
779
def make_pack_factory(graph, delta, keylength):
780
"""Create a factory for creating a pack based VersionedFiles.
    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.
785
:param graph: Store a graph.
786
:param delta: Delta compress contents.
    :param keylength: How long keys should be.
789
def factory(transport):
790
parents = graph or delta
796
max_delta_chain = 200
799
graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
800
key_elements=keylength)
801
stream = transport.open_write_stream('newpack')
802
writer = pack.ContainerWriter(stream.write)
804
index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
805
deltas=delta, add_callback=graph_index.add_nodes)
806
access = _DirectPackAccess({})
807
access.set_writer(writer, graph_index, (transport, 'newpack'))
808
result = KnitVersionedFiles(index, access,
809
max_delta_chain=max_delta_chain)
810
result.stream = stream
811
result.writer = writer
816
def cleanup_pack_knit(versioned_files):
817
versioned_files.stream.close()
818
versioned_files.writer.end()
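

# Illustrative usage sketch only, not part of the original module: the
# factories above build just enough of a knit stack for interface tests.
# 'transport' is assumed to be a writable bzrlib transport.
def _example_pack_knit_usage(transport):
    factory = make_pack_factory(graph=True, delta=True, keylength=1)
    vf = factory(transport)
    vf.add_lines(('rev-1',), (), ['some text\n'])
    cleanup_pack_knit(vf)
    return vf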
821
def _get_total_build_size(self, keys, positions):
822
"""Determine the total bytes to build these keys.
824
(helper function because _KnitGraphIndex and _KndxIndex work the same, but
825
don't inherit from a common base.)
827
:param keys: Keys that we want to build
    :param positions: dict of {key: (info, index_memo, comp_parent)} (such
        as returned by _get_components_positions)
830
:return: Number of bytes to build those keys
832
all_build_index_memos = {}
836
for key in build_keys:
837
# This is mostly for the 'stacked' case
838
# Where we will be getting the data from a fallback
839
if key not in positions:
841
_, index_memo, compression_parent = positions[key]
842
all_build_index_memos[key] = index_memo
843
if compression_parent not in all_build_index_memos:
844
next_keys.add(compression_parent)
845
build_keys = next_keys
846
return sum([index_memo[2] for index_memo
847
in all_build_index_memos.itervalues()])
850
class KnitVersionedFiles(VersionedFiles):
851
"""Storage for many versioned files using knit compression.
853
Backend storage is managed by indices and data objects.
855
:ivar _index: A _KnitGraphIndex or similar that can describe the
856
parents, graph, compression and data location of entries in this
857
KnitVersionedFiles. Note that this is only the index for
858
*this* vfs; if there are fallbacks they must be queried separately.
861
def __init__(self, index, data_access, max_delta_chain=200,
862
annotated=False, reload_func=None):
863
"""Create a KnitVersionedFiles with index and data_access.
865
:param index: The index for the knit data.
866
:param data_access: The access object to store and retrieve knit
868
:param max_delta_chain: The maximum number of deltas to permit during
869
insertion. Set to 0 to prohibit the use of deltas.
870
:param annotated: Set to True to cause annotations to be calculated and
871
stored during insertion.
        :param reload_func: A function that can be called if we think we need
873
to reload the pack listing and try again. See
874
'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
877
self._access = data_access
878
self._max_delta_chain = max_delta_chain
880
self._factory = KnitAnnotateFactory()
882
self._factory = KnitPlainFactory()
883
self._fallback_vfs = []
884
self._reload_func = reload_func
887
return "%s(%r, %r)" % (
888
self.__class__.__name__,
892
def add_fallback_versioned_files(self, a_versioned_files):
893
"""Add a source of texts for texts not present in this knit.
895
:param a_versioned_files: A VersionedFiles object.
897
self._fallback_vfs.append(a_versioned_files)
899
def add_lines(self, key, parents, lines, parent_texts=None,
900
left_matching_blocks=None, nostore_sha=None, random_id=False,
902
"""See VersionedFiles.add_lines()."""
903
self._index._check_write_ok()
904
self._check_add(key, lines, random_id, check_content)
906
# The caller might pass None if there is no graph data, but kndx
907
# indexes can't directly store that, so we give them
908
# an empty tuple instead.
910
line_bytes = ''.join(lines)
911
return self._add(key, lines, parents,
912
parent_texts, left_matching_blocks, nostore_sha, random_id,
913
line_bytes=line_bytes)
915
def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
916
"""See VersionedFiles._add_text()."""
917
self._index._check_write_ok()
918
self._check_add(key, None, random_id, check_content=False)
919
if text.__class__ is not str:
920
raise errors.BzrBadParameterUnicode("text")
922
# The caller might pass None if there is no graph data, but kndx
923
# indexes can't directly store that, so we give them
924
# an empty tuple instead.
926
return self._add(key, None, parents,
927
None, None, nostore_sha, random_id,
930
def _add(self, key, lines, parents, parent_texts,
931
left_matching_blocks, nostore_sha, random_id,
933
"""Add a set of lines on top of version specified by parents.
935
Any versions not present will be converted into ghosts.
937
:param lines: A list of strings where each one is a single line (has a
938
single newline at the end of the string) This is now optional
            (callers can pass None). It is left in its location for backwards
            compatibility. It should satisfy ''.join(lines) == line_bytes.
941
:param line_bytes: A single string containing the content
943
We pass both lines and line_bytes because different routes bring the
944
values to this function. And for memory efficiency, we don't want to
945
have to split/join on-demand.
947
# first thing, if the content is something we don't need to store, find
949
digest = sha_string(line_bytes)
950
if nostore_sha == digest:
951
raise errors.ExistingContent
954
if parent_texts is None:
956
# Do a single query to ascertain parent presence; we only compress
957
# against parents in the same kvf.
958
present_parent_map = self._index.get_parent_map(parents)
959
for parent in parents:
960
if parent in present_parent_map:
961
present_parents.append(parent)
963
# Currently we can only compress against the left most present parent.
964
if (len(present_parents) == 0 or
965
present_parents[0] != parents[0]):
968
# To speed the extract of texts the delta chain is limited
969
# to a fixed number of deltas. This should minimize both
970
# I/O and the time spend applying deltas.
971
delta = self._check_should_delta(present_parents[0])
973
text_length = len(line_bytes)
976
# Note: line_bytes is not modified to add a newline, that is tracked
977
# via the no_eol flag. 'lines' *is* modified, because that is the
978
# general values needed by the Content code.
979
if line_bytes and line_bytes[-1] != '\n':
980
options.append('no-eol')
982
# Copy the existing list, or create a new one
984
lines = osutils.split_lines(line_bytes)
987
# Replace the last line with one that ends in a final newline
988
lines[-1] = lines[-1] + '\n'
990
lines = osutils.split_lines(line_bytes)
992
for element in key[:-1]:
993
if type(element) is not str:
994
raise TypeError("key contains non-strings: %r" % (key,))
996
key = key[:-1] + ('sha1:' + digest,)
997
elif type(key[-1]) is not str:
998
raise TypeError("key contains non-strings: %r" % (key,))
999
# Knit hunks are still last-element only
1000
version_id = key[-1]
1001
content = self._factory.make(lines, version_id)
1003
# Hint to the content object that its text() call should strip the
1005
content._should_strip_eol = True
1006
if delta or (self._factory.annotated and len(present_parents) > 0):
1007
# Merge annotations from parent texts if needed.
1008
delta_hunks = self._merge_annotations(content, present_parents,
1009
parent_texts, delta, self._factory.annotated,
1010
left_matching_blocks)
1013
options.append('line-delta')
1014
store_lines = self._factory.lower_line_delta(delta_hunks)
1015
size, bytes = self._record_to_data(key, digest,
1018
options.append('fulltext')
1019
# isinstance is slower and we have no hierarchy.
1020
if self._factory.__class__ is KnitPlainFactory:
1021
# Use the already joined bytes saving iteration time in
1023
dense_lines = [line_bytes]
1025
dense_lines.append('\n')
1026
size, bytes = self._record_to_data(key, digest,
1029
# get mixed annotation + content and feed it into the
1031
store_lines = self._factory.lower_fulltext(content)
1032
size, bytes = self._record_to_data(key, digest,
1035
access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
1036
self._index.add_records(
1037
((key, options, access_memo, parents),),
1038
random_id=random_id)
1039
return digest, text_length, content
1041
def annotate(self, key):
1042
"""See VersionedFiles.annotate."""
1043
return self._factory.annotate(self, key)
1045
def get_annotator(self):
1046
return _KnitAnnotator(self)
1048
def check(self, progress_bar=None):
1049
"""See VersionedFiles.check()."""
1050
# This doesn't actually test extraction of everything, but that will
1051
# impact 'bzr check' substantially, and needs to be integrated with
1052
# care. However, it does check for the obvious problem of a delta with
1054
keys = self._index.keys()
1055
parent_map = self.get_parent_map(keys)
1057
if self._index.get_method(key) != 'fulltext':
1058
compression_parent = parent_map[key][0]
1059
if compression_parent not in parent_map:
1060
raise errors.KnitCorrupt(self,
1061
"Missing basis parent %s for %s" % (
1062
compression_parent, key))
1063
for fallback_vfs in self._fallback_vfs:
1064
fallback_vfs.check()
1066
def _check_add(self, key, lines, random_id, check_content):
1067
"""check that version_id and lines are safe to add."""
1068
version_id = key[-1]
1069
if version_id is not None:
1070
if contains_whitespace(version_id):
1071
raise InvalidRevisionId(version_id, self)
1072
self.check_not_reserved_id(version_id)
1073
# TODO: If random_id==False and the key is already present, we should
1074
# probably check that the existing content is identical to what is
1075
# being inserted, and otherwise raise an exception. This would make
1076
# the bundle code simpler.
1078
self._check_lines_not_unicode(lines)
1079
self._check_lines_are_lines(lines)
1081
def _check_header(self, key, line):
1082
rec = self._split_header(line)
1083
self._check_header_version(rec, key[-1])
1086
def _check_header_version(self, rec, version_id):
1087
"""Checks the header version on original format knit records.
1089
These have the last component of the key embedded in the record.
1091
if rec[1] != version_id:
1092
raise KnitCorrupt(self,
1093
'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
1095
def _check_should_delta(self, parent):
1096
"""Iterate back through the parent listing, looking for a fulltext.
1098
This is used when we want to decide whether to add a delta or a new
1099
fulltext. It searches for _max_delta_chain parents. When it finds a
1100
fulltext parent, it sees if the total size of the deltas leading up to
1101
it is large enough to indicate that we want a new full text anyway.
1103
Return True if we should create a new delta, False if we should use a
1107
fulltext_size = None
1108
for count in xrange(self._max_delta_chain):
1110
# Note that this only looks in the index of this particular
1111
# KnitVersionedFiles, not in the fallbacks. This ensures that
1112
# we won't store a delta spanning physical repository
1114
build_details = self._index.get_build_details([parent])
1115
parent_details = build_details[parent]
1116
except (RevisionNotPresent, KeyError), e:
1117
# Some basis is not locally present: always fulltext
1119
index_memo, compression_parent, _, _ = parent_details
1120
_, _, size = index_memo
1121
if compression_parent is None:
1122
fulltext_size = size
1125
# We don't explicitly check for presence because this is in an
1126
# inner loop, and if it's missing it'll fail anyhow.
1127
parent = compression_parent
1129
# We couldn't find a fulltext, so we must create a new one
        # Simple heuristic - if the total I/O would be greater as a delta than
1132
# the originally installed fulltext, we create a new fulltext.
1133
return fulltext_size > delta_size
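
    # Illustrative worked example (sizes invented): if the chain behind the
    # candidate parent ends in a 1000-byte fulltext and the deltas leading
    # back to it occupy 200 + 150 + 120 = 470 bytes, then 1000 > 470 and a
    # delta is still worthwhile; once the accumulated delta bytes reach the
    # fulltext size, this returns False and a fresh fulltext is stored.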
1135
def _build_details_to_components(self, build_details):
1136
"""Convert a build_details tuple to a position tuple."""
1137
# record_details, access_memo, compression_parent
1138
return build_details[3], build_details[0], build_details[1]
1140
def _get_components_positions(self, keys, allow_missing=False):
1141
"""Produce a map of position data for the components of keys.
1143
This data is intended to be used for retrieving the knit records.
1145
A dict of key to (record_details, index_memo, next, parents) is
1147
method is the way referenced data should be applied.
1148
index_memo is the handle to pass to the data access to actually get the
1150
next is the build-parent of the version, or None for fulltexts.
1151
parents is the version_ids of the parents of this version
1153
:param allow_missing: If True do not raise an error on a missing component,
1157
pending_components = keys
1158
while pending_components:
1159
build_details = self._index.get_build_details(pending_components)
1160
current_components = set(pending_components)
1161
pending_components = set()
1162
for key, details in build_details.iteritems():
1163
(index_memo, compression_parent, parents,
1164
record_details) = details
1165
method = record_details[0]
1166
if compression_parent is not None:
1167
pending_components.add(compression_parent)
1168
component_data[key] = self._build_details_to_components(details)
1169
missing = current_components.difference(build_details)
1170
if missing and not allow_missing:
1171
raise errors.RevisionNotPresent(missing.pop(), self)
1172
return component_data
1174
def _get_content(self, key, parent_texts={}):
1175
"""Returns a content object that makes up the specified
1177
cached_version = parent_texts.get(key, None)
1178
if cached_version is not None:
1179
# Ensure the cache dict is valid.
1180
if not self.get_parent_map([key]):
1181
raise RevisionNotPresent(key, self)
1182
return cached_version
1183
generator = _VFContentMapGenerator(self, [key])
1184
return generator._get_content(key)
1186
def get_parent_map(self, keys):
1187
"""Get a map of the graph parents of keys.
1189
:param keys: The keys to look up parents for.
1190
:return: A mapping from keys to parents. Absent keys are absent from
1193
return self._get_parent_map_with_sources(keys)[0]
1195
def _get_parent_map_with_sources(self, keys):
1196
"""Get a map of the parents of keys.
1198
:param keys: The keys to look up parents for.
1199
:return: A tuple. The first element is a mapping from keys to parents.
1200
Absent keys are absent from the mapping. The second element is a
1201
list with the locations each key was found in. The first element
1202
is the in-this-knit parents, the second the first fallback source,
1206
sources = [self._index] + self._fallback_vfs
1209
for source in sources:
1212
new_result = source.get_parent_map(missing)
1213
source_results.append(new_result)
1214
result.update(new_result)
1215
missing.difference_update(set(new_result))
1216
return result, source_results
1218
def _get_record_map(self, keys, allow_missing=False):
1219
"""Produce a dictionary of knit records.
1221
:return: {key:(record, record_details, digest, next)}
                data returned from read_records (a KnitContent object)
1225
opaque information to pass to parse_record
1227
SHA1 digest of the full text after all steps are done
1229
build-parent of the version, i.e. the leftmost ancestor.
1230
Will be None if the record is not a delta.
1231
:param keys: The keys to build a map for
1232
:param allow_missing: If some records are missing, rather than
1233
error, just return the data that could be generated.
1235
raw_map = self._get_record_map_unparsed(keys,
1236
allow_missing=allow_missing)
1237
return self._raw_map_to_record_map(raw_map)
1239
def _raw_map_to_record_map(self, raw_map):
1240
"""Parse the contents of _get_record_map_unparsed.
1242
:return: see _get_record_map.
1246
data, record_details, next = raw_map[key]
1247
content, digest = self._parse_record(key[-1], data)
1248
result[key] = content, record_details, digest, next
1251
def _get_record_map_unparsed(self, keys, allow_missing=False):
1252
"""Get the raw data for reconstructing keys without parsing it.
1254
:return: A dict suitable for parsing via _raw_map_to_record_map.
1255
key-> raw_bytes, (method, noeol), compression_parent
1257
# This retries the whole request if anything fails. Potentially we
1258
# could be a bit more selective. We could track the keys whose records
1259
# we have successfully found, and then only request the new records
1260
# from there. However, _get_components_positions grabs the whole build
1261
# chain, which means we'll likely try to grab the same records again
1262
# anyway. Also, can the build chains change as part of a pack
1263
# operation? We wouldn't want to end up with a broken chain.
1266
position_map = self._get_components_positions(keys,
1267
allow_missing=allow_missing)
1268
# key = component_id, r = record_details, i_m = index_memo,
1270
records = [(key, i_m) for key, (r, i_m, n)
1271
in position_map.iteritems()]
1272
# Sort by the index memo, so that we request records from the
1273
# same pack file together, and in forward-sorted order
1274
records.sort(key=operator.itemgetter(1))
1276
for key, data in self._read_records_iter_unchecked(records):
1277
(record_details, index_memo, next) = position_map[key]
1278
raw_record_map[key] = data, record_details, next
1279
return raw_record_map
1280
except errors.RetryWithNewPacks, e:
1281
self._access.reload_or_raise(e)
1284
def _split_by_prefix(cls, keys):
1285
"""For the given keys, split them up based on their prefix.
1287
To keep memory pressure somewhat under control, split the
1288
requests back into per-file-id requests, otherwise "bzr co"
1289
extracts the full tree into memory before writing it to disk.
1290
This should be revisited if _get_content_maps() can ever cross
1293
The keys for a given file_id are kept in the same relative order.
1294
Ordering between file_ids is not, though prefix_order will return the
1295
order that the key was first seen.
1297
:param keys: An iterable of key tuples
1298
:return: (split_map, prefix_order)
1299
split_map A dictionary mapping prefix => keys
1300
prefix_order The order that we saw the various prefixes
1302
split_by_prefix = {}
1310
if prefix in split_by_prefix:
1311
split_by_prefix[prefix].append(key)
1313
split_by_prefix[prefix] = [key]
1314
prefix_order.append(prefix)
1315
return split_by_prefix, prefix_order
1317
def _group_keys_for_io(self, keys, non_local_keys, positions,
1318
_min_buffer_size=_STREAM_MIN_BUFFER_SIZE):
1319
"""For the given keys, group them into 'best-sized' requests.
1321
The idea is to avoid making 1 request per file, but to never try to
1322
unpack an entire 1.5GB source tree in a single pass. Also when
1323
possible, we should try to group requests to the same pack file
1326
:return: list of (keys, non_local) tuples that indicate what keys
1327
should be fetched next.
1329
# TODO: Ideally we would group on 2 factors. We want to extract texts
1330
# from the same pack file together, and we want to extract all
1331
# the texts for a given build-chain together. Ultimately it
1332
# probably needs a better global view.
1333
total_keys = len(keys)
1334
prefix_split_keys, prefix_order = self._split_by_prefix(keys)
1335
prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys)
1337
cur_non_local = set()
1341
for prefix in prefix_order:
1342
keys = prefix_split_keys[prefix]
1343
non_local = prefix_split_non_local_keys.get(prefix, [])
1345
this_size = self._index._get_total_build_size(keys, positions)
1346
cur_size += this_size
1347
cur_keys.extend(keys)
1348
cur_non_local.update(non_local)
1349
if cur_size > _min_buffer_size:
1350
result.append((cur_keys, cur_non_local))
1351
sizes.append(cur_size)
1353
cur_non_local = set()
1356
result.append((cur_keys, cur_non_local))
1357
sizes.append(cur_size)
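
    # Illustrative example (keys invented): given keys ('file-1', 'rev-a'),
    # ('file-2', 'rev-b'), ('file-1', 'rev-c'), _split_by_prefix returns one
    # list per file-id prefix, preserving per-file order ('rev-a' before
    # 'rev-c') and recording that 'file-1' was seen before 'file-2';
    # _group_keys_for_io then batches those per-prefix lists until a batch's
    # total build size passes _min_buffer_size.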
1360
def get_record_stream(self, keys, ordering, include_delta_closure):
1361
"""Get a stream of records for keys.
1363
:param keys: The keys to include.
1364
:param ordering: Either 'unordered' or 'topological'. A topologically
1365
sorted stream has compression parents strictly before their
1367
:param include_delta_closure: If True then the closure across any
1368
compression parents will be included (in the opaque data).
1369
:return: An iterator of ContentFactory objects, each of which is only
1370
valid until the iterator is advanced.
1372
# keys might be a generator
1376
if not self._index.has_graph:
1377
# Cannot sort when no graph has been stored.
1378
ordering = 'unordered'
1380
remaining_keys = keys
1383
keys = set(remaining_keys)
1384
for content_factory in self._get_remaining_record_stream(keys,
1385
ordering, include_delta_closure):
1386
remaining_keys.discard(content_factory.key)
1387
yield content_factory
1389
except errors.RetryWithNewPacks, e:
1390
self._access.reload_or_raise(e)
1392
def _get_remaining_record_stream(self, keys, ordering,
1393
include_delta_closure):
1394
"""This function is the 'retry' portion for get_record_stream."""
1395
if include_delta_closure:
1396
positions = self._get_components_positions(keys, allow_missing=True)
1398
build_details = self._index.get_build_details(keys)
1400
# (record_details, access_memo, compression_parent_key)
1401
positions = dict((key, self._build_details_to_components(details))
1402
for key, details in build_details.iteritems())
1403
absent_keys = keys.difference(set(positions))
1404
# There may be more absent keys : if we're missing the basis component
1405
# and are trying to include the delta closure.
1406
# XXX: We should not ever need to examine remote sources because we do
1407
# not permit deltas across versioned files boundaries.
1408
if include_delta_closure:
1409
needed_from_fallback = set()
1410
# Build up reconstructable_keys dict. key:True in this dict means
1411
# the key can be reconstructed.
1412
reconstructable_keys = {}
1416
chain = [key, positions[key][2]]
1418
needed_from_fallback.add(key)
1421
while chain[-1] is not None:
1422
if chain[-1] in reconstructable_keys:
1423
result = reconstructable_keys[chain[-1]]
1427
chain.append(positions[chain[-1]][2])
1429
# missing basis component
1430
needed_from_fallback.add(chain[-1])
1433
for chain_key in chain[:-1]:
1434
reconstructable_keys[chain_key] = result
1436
needed_from_fallback.add(key)
1437
# Double index lookups here : need a unified api ?
1438
global_map, parent_maps = self._get_parent_map_with_sources(keys)
1439
if ordering in ('topological', 'groupcompress'):
1440
if ordering == 'topological':
1441
# Global topological sort
1442
present_keys = tsort.topo_sort(global_map)
1444
present_keys = sort_groupcompress(global_map)
1445
# Now group by source:
1447
current_source = None
1448
for key in present_keys:
1449
for parent_map in parent_maps:
1450
if key in parent_map:
1451
key_source = parent_map
1453
if current_source is not key_source:
1454
source_keys.append((key_source, []))
1455
current_source = key_source
1456
source_keys[-1][1].append(key)
1458
if ordering != 'unordered':
1459
raise AssertionError('valid values for ordering are:'
1460
' "unordered", "groupcompress" or "topological" not: %r'
1462
# Just group by source; remote sources first.
1465
for parent_map in reversed(parent_maps):
1466
source_keys.append((parent_map, []))
1467
for key in parent_map:
1468
present_keys.append(key)
1469
source_keys[-1][1].append(key)
1470
# We have been requested to return these records in an order that
1471
# suits us. So we ask the index to give us an optimally sorted
1473
for source, sub_keys in source_keys:
1474
if source is parent_maps[0]:
1475
# Only sort the keys for this VF
1476
self._index._sort_keys_by_io(sub_keys, positions)
1477
absent_keys = keys - set(global_map)
1478
for key in absent_keys:
1479
yield AbsentContentFactory(key)
1480
# restrict our view to the keys we can answer.
1481
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
1482
# XXX: At that point we need to consider the impact of double reads by
1483
# utilising components multiple times.
1484
if include_delta_closure:
1485
# XXX: get_content_maps performs its own index queries; allow state
1487
non_local_keys = needed_from_fallback - absent_keys
1488
for keys, non_local_keys in self._group_keys_for_io(present_keys,
1491
generator = _VFContentMapGenerator(self, keys, non_local_keys,
1494
for record in generator.get_record_stream():
1497
for source, keys in source_keys:
1498
if source is parent_maps[0]:
1499
# this KnitVersionedFiles
1500
records = [(key, positions[key][1]) for key in keys]
1501
for key, raw_data, sha1 in self._read_records_iter_raw(records):
1502
(record_details, index_memo, _) = positions[key]
1503
yield KnitContentFactory(key, global_map[key],
1504
record_details, sha1, raw_data, self._factory.annotated, None)
1506
vf = self._fallback_vfs[parent_maps.index(source) - 1]
1507
for record in vf.get_record_stream(keys, ordering,
1508
include_delta_closure):
1511
def get_sha1s(self, keys):
1512
"""See VersionedFiles.get_sha1s()."""
1514
record_map = self._get_record_map(missing, allow_missing=True)
1516
for key, details in record_map.iteritems():
1517
if key not in missing:
1519
# record entry 2 is the 'digest'.
1520
result[key] = details[2]
1521
missing.difference_update(set(result))
1522
for source in self._fallback_vfs:
1525
new_result = source.get_sha1s(missing)
1526
result.update(new_result)
1527
missing.difference_update(set(new_result))
1530
def insert_record_stream(self, stream):
1531
"""Insert a record stream into this container.
1533
:param stream: A stream of records to insert.
1535
:seealso VersionedFiles.get_record_stream:
1537
def get_adapter(adapter_key):
1539
return adapters[adapter_key]
1541
adapter_factory = adapter_registry.get(adapter_key)
1542
adapter = adapter_factory(self)
1543
adapters[adapter_key] = adapter
1546
if self._factory.annotated:
1547
# self is annotated, we need annotated knits to use directly.
1548
annotated = "annotated-"
1551
# self is not annotated, but we can strip annotations cheaply.
1553
convertibles = set(["knit-annotated-ft-gz"])
1554
if self._max_delta_chain:
1555
delta_types.add("knit-annotated-delta-gz")
1556
convertibles.add("knit-annotated-delta-gz")
1557
# The set of types we can cheaply adapt without needing basis texts.
1558
native_types = set()
1559
if self._max_delta_chain:
1560
native_types.add("knit-%sdelta-gz" % annotated)
1561
delta_types.add("knit-%sdelta-gz" % annotated)
1562
native_types.add("knit-%sft-gz" % annotated)
1563
knit_types = native_types.union(convertibles)
1565
# Buffer all index entries that we can't add immediately because their
1566
# basis parent is missing. We don't buffer all because generating
1567
# annotations may require access to some of the new records. However we
1568
# can't generate annotations from new deltas until their basis parent
1569
# is present anyway, so we get away with not needing an index that
1570
# includes the new keys.
1572
# See <http://launchpad.net/bugs/300177> about ordering of compression
1573
# parents in the records - to be conservative, we insist that all
1574
# parents must be present to avoid expanding to a fulltext.
1576
# key = basis_parent, value = index entry to add
1577
buffered_index_entries = {}
1578
for record in stream:
1580
parents = record.parents
1581
if record.storage_kind in delta_types:
1582
# TODO: eventually the record itself should track
1583
# compression_parent
1584
compression_parent = parents[0]
1586
compression_parent = None
1587
# Raise an error when a record is missing.
1588
if record.storage_kind == 'absent':
1589
raise RevisionNotPresent([record.key], self)
1590
elif ((record.storage_kind in knit_types)
1591
and (compression_parent is None
1592
or not self._fallback_vfs
1593
or self._index.has_key(compression_parent)
1594
or not self.has_key(compression_parent))):
1595
# we can insert the knit record literally if either it has no
1596
# compression parent OR we already have its basis in this kvf
1597
# OR the basis is not present even in the fallbacks. In the
1598
# last case it will either turn up later in the stream and all
1599
# will be well, or it won't turn up at all and we'll raise an
1602
# TODO: self.has_key is somewhat redundant with
1603
# self._index.has_key; we really want something that directly
1604
# asks if it's only present in the fallbacks. -- mbp 20081119
1605
if record.storage_kind not in native_types:
1607
adapter_key = (record.storage_kind, "knit-delta-gz")
1608
adapter = get_adapter(adapter_key)
1610
adapter_key = (record.storage_kind, "knit-ft-gz")
1611
adapter = get_adapter(adapter_key)
1612
bytes = adapter.get_bytes(record)
1614
# It's a knit record, it has a _raw_record field (even if
1615
# it was reconstituted from a network stream).
1616
bytes = record._raw_record
1617
options = [record._build_details[0]]
1618
if record._build_details[1]:
1619
options.append('no-eol')
1620
# Just blat it across.
1621
# Note: This does end up adding data on duplicate keys. As
1622
# modern repositories use atomic insertions this should not
1623
# lead to excessive growth in the event of interrupted fetches.
                # 'knit' repositories may suffer excessive growth, but as a
                # deprecated format this is tolerable. It can be fixed if
                # needed by making the kndx index support raising on a
                # duplicate add with identical parents and options.
1628
access_memo = self._access.add_raw_records(
1629
[(record.key, len(bytes))], bytes)[0]
1630
index_entry = (record.key, options, access_memo, parents)
1631
if 'fulltext' not in options:
1632
# Not a fulltext, so we need to make sure the compression
1633
# parent will also be present.
1634
# Note that pack backed knits don't need to buffer here
1635
# because they buffer all writes to the transaction level,
1636
# but we don't expose that difference at the index level. If
1637
# the query here has sufficient cost to show up in
1638
# profiling we should do that.
1640
# They're required to be physically in this
1641
# KnitVersionedFiles, not in a fallback.
1642
if not self._index.has_key(compression_parent):
1643
pending = buffered_index_entries.setdefault(
1644
compression_parent, [])
1645
pending.append(index_entry)
1648
self._index.add_records([index_entry])
1649
elif record.storage_kind == 'chunked':
1650
self.add_lines(record.key, parents,
1651
osutils.chunks_to_lines(record.get_bytes_as('chunked')))
1653
# Not suitable for direct insertion as a
1654
# delta, either because it's not the right format, or this
1655
# KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
1656
# 0) or because it depends on a base only present in the
1658
self._access.flush()
1660
# Try getting a fulltext directly from the record.
1661
bytes = record.get_bytes_as('fulltext')
1662
except errors.UnavailableRepresentation:
1663
adapter_key = record.storage_kind, 'fulltext'
1664
adapter = get_adapter(adapter_key)
1665
bytes = adapter.get_bytes(record)
1666
lines = split_lines(bytes)
1668
self.add_lines(record.key, parents, lines)
1669
except errors.RevisionAlreadyPresent:
1671
# Add any records whose basis parent is now available.
1673
added_keys = [record.key]
1675
key = added_keys.pop(0)
1676
if key in buffered_index_entries:
1677
index_entries = buffered_index_entries[key]
1678
self._index.add_records(index_entries)
1680
[index_entry[0] for index_entry in index_entries])
1681
del buffered_index_entries[key]
1682
if buffered_index_entries:
1683
# There were index entries buffered at the end of the stream,
1684
# So these need to be added (if the index supports holding such
1685
# entries for later insertion)
1686
for key in buffered_index_entries:
1687
index_entries = buffered_index_entries[key]
1688
self._index.add_records(index_entries,
1689
missing_compression_parents=True)
1691
def get_missing_compression_parent_keys(self):
1692
"""Return an iterable of keys of missing compression parents.
1694
Check this after calling insert_record_stream to find out if there are
1695
any missing compression parents. If there are, the records that
1696
depend on them are not able to be inserted safely. For atomic
1697
KnitVersionedFiles built on packs, the transaction should be aborted or
1698
suspended - commit will fail at this point. Nonatomic knits will error
1699
earlier because they have no staging area to put pending entries into.
1701
return self._index.get_missing_compression_parents()
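
    # Illustrative usage sketch (names invented): a fetch streaming records
    # into a pack-backed knit would typically run
    #     target_vf.insert_record_stream(stream)
    #     missing = target_vf.get_missing_compression_parent_keys()
    # and abort or suspend the write group if 'missing' is non-empty, rather
    # than committing with unsatisfied compression parents.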
1703
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1704
"""Iterate over the lines in the versioned files from keys.
1706
This may return lines from other keys. Each item the returned
1707
iterator yields is a tuple of a line and a text version that that line
1708
is present in (not introduced in).
1710
Ordering of results is in whatever order is most suitable for the
1711
underlying storage format.
1713
If a progress bar is supplied, it may be used to indicate progress.
1714
The caller is responsible for cleaning up progress bars (because this
1718
* Lines are normalised by the underlying store: they will all have \\n
1720
* Lines are returned in arbitrary order.
1721
* If a requested key did not change any lines (or didn't have any
1722
lines), it may not be mentioned at all in the result.
1724
:param pb: Progress bar supplied by caller.
1725
:return: An iterator over (line, key).
1728
pb = progress.DummyProgress()
1734
# we don't care about inclusions, the caller cares.
1735
# but we need to setup a list of records to visit.
1736
# we need key, position, length
1738
build_details = self._index.get_build_details(keys)
1739
for key, details in build_details.iteritems():
1741
key_records.append((key, details[0]))
1742
records_iter = enumerate(self._read_records_iter(key_records))
1743
for (key_idx, (key, data, sha_value)) in records_iter:
1744
pb.update('Walking content', key_idx, total)
1745
compression_parent = build_details[key][1]
1746
if compression_parent is None:
1748
line_iterator = self._factory.get_fulltext_content(data)
1751
line_iterator = self._factory.get_linedelta_content(data)
1752
# Now that we are yielding the data for this key, remove it
1755
# XXX: It might be more efficient to yield (key,
1756
# line_iterator) in the future. However for now, this is a
1757
# simpler change to integrate into the rest of the
1758
# codebase. RBC 20071110
1759
for line in line_iterator:
1762
except errors.RetryWithNewPacks, e:
1763
self._access.reload_or_raise(e)
1764
# If there are still keys we've not yet found, we look in the fallback
1765
# vfs, and hope to find them there. Note that if the keys are found
1766
# but had no changes or no content, the fallback may not return
1768
if keys and not self._fallback_vfs:
1769
# XXX: strictly the second parameter is meant to be the file id
1770
# but it's not easily accessible here.
1771
raise RevisionNotPresent(keys, repr(self))
1772
for source in self._fallback_vfs:
1776
for line, key in source.iter_lines_added_or_present_in_keys(keys):
1777
source_keys.add(key)
1779
keys.difference_update(source_keys)
1780
pb.update('Walking content', total, total)
1782
def _make_line_delta(self, delta_seq, new_content):
1783
"""Generate a line delta from delta_seq and new_content."""
1785
for op in delta_seq.get_opcodes():
1786
if op[0] == 'equal':
1788
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
1791
def _merge_annotations(self, content, parents, parent_texts={},
1792
delta=None, annotated=None,
1793
left_matching_blocks=None):
1794
"""Merge annotations for content and generate deltas.
1796
This is done by comparing the annotations based on changes to the text
1797
and generating a delta on the resulting full texts. If annotations are
1798
not being created then a simple delta is created.
1800
if left_matching_blocks is not None:
1801
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1805
for parent_key in parents:
1806
merge_content = self._get_content(parent_key, parent_texts)
1807
if (parent_key == parents[0] and delta_seq is not None):
1810
seq = patiencediff.PatienceSequenceMatcher(
1811
None, merge_content.text(), content.text())
1812
for i, j, n in seq.get_matching_blocks():
1815
# this copies (origin, text) pairs across to the new
1816
# content for any line that matches the last-checked
1818
content._lines[j:j+n] = merge_content._lines[i:i+n]
1819
# XXX: Robert says the following block is a workaround for a
1820
# now-fixed bug and it can probably be deleted. -- mbp 20080618
1821
if content._lines and content._lines[-1][1][-1] != '\n':
1822
# The copied annotation was from a line without a trailing EOL,
1823
# reinstate one for the content object, to ensure correct
1825
line = content._lines[-1][1] + '\n'
1826
content._lines[-1] = (content._lines[-1][0], line)
1828
if delta_seq is None:
1829
reference_content = self._get_content(parents[0], parent_texts)
1830
new_texts = content.text()
1831
old_texts = reference_content.text()
1832
delta_seq = patiencediff.PatienceSequenceMatcher(
1833
None, old_texts, new_texts)
1834
return self._make_line_delta(delta_seq, content)
1836
def _parse_record(self, version_id, data):
1837
"""Parse an original format knit record.
1839
These have the last element of the key only present in the stored data.
1841
rec, record_contents = self._parse_record_unchecked(data)
1842
self._check_header_version(rec, version_id)
1843
return record_contents, rec[3]
1845
def _parse_record_header(self, key, raw_data):
1846
"""Parse a record header for consistency.
1848
:return: the header and the decompressor stream.
1849
as (stream, header_record)
1851
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
1854
rec = self._check_header(key, df.readline())
1855
except Exception, e:
1856
raise KnitCorrupt(self,
1857
"While reading {%s} got %s(%s)"
1858
% (key, e.__class__.__name__, str(e)))
1861
def _parse_record_unchecked(self, data):
1863
# 4168 calls in 2880 217 internal
1864
# 4168 calls to _parse_record_header in 2121
1865
# 4168 calls to readlines in 330
1866
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
1870
raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
1871
(data, e.__class__.__name__, str(e)))
1872
header = record_contents.pop(0)
1873
rec = self._split_header(header)
1874
last_line = record_contents.pop()
1875
if len(record_contents) != int(rec[2]):
1876
raise KnitCorrupt(self,
1877
'incorrect number of lines %s != %s'
1878
' for version {%s} %s'
1879
% (len(record_contents), int(rec[2]),
1880
rec[1], record_contents))
1881
if last_line != 'end %s\n' % rec[1]:
1882
raise KnitCorrupt(self,
1883
'unexpected version end line %r, wanted %r'
1884
% (last_line, rec[1]))
1886
return rec, record_contents
1888
def _read_records_iter(self, records):
1889
"""Read text records from data file and yield result.
1891
The result will be returned in whatever is the fastest to read.
1892
Not by the order requested. Also, multiple requests for the same
1893
record will only yield 1 response.
1894
:param records: A list of (key, access_memo) entries
1895
:return: Yields (key, contents, digest) in the order
1896
read, not the order requested
1901
# XXX: This smells wrong, IO may not be getting ordered right.
1902
needed_records = sorted(set(records), key=operator.itemgetter(1))
1903
        if not needed_records:
            return
1906
# The transport optimizes the fetching as well
1907
# (ie, reads continuous ranges.)
1908
raw_data = self._access.get_raw_records(
1909
[index_memo for key, index_memo in needed_records])
1911
for (key, index_memo), data in \
1912
izip(iter(needed_records), raw_data):
1913
content, digest = self._parse_record(key[-1], data)
1914
yield key, content, digest
1916
def _read_records_iter_raw(self, records):
1917
"""Read text records from data file and yield raw data.
1919
This unpacks enough of the text record to validate the id is
1920
as expected but thats all.
1922
Each item the iterator yields is (key, bytes,
1923
expected_sha1_of_full_text).
1925
for key, data in self._read_records_iter_unchecked(records):
1926
# validate the header (note that we can only use the suffix in
1927
# current knit records).
1928
df, rec = self._parse_record_header(key, data)
1930
yield key, data, rec[3]
1932
def _read_records_iter_unchecked(self, records):
1933
"""Read text records from data file and yield raw data.
1935
No validation is done.
1937
Yields tuples of (key, data).
1939
# setup an iterator of the external records:
1940
# uses readv so nice and fast we hope.
1942
# grab the disk data needed.
1943
needed_offsets = [index_memo for key, index_memo
1945
raw_records = self._access.get_raw_records(needed_offsets)
1947
for key, index_memo in records:
1948
data = raw_records.next()
1951
def _record_to_data(self, key, digest, lines, dense_lines=None):
1952
"""Convert key, digest, lines into a raw data block.
1954
:param key: The key of the record. Currently keys are always serialised
1955
using just the trailing component.
1956
:param dense_lines: The bytes of lines but in a denser form. For
1957
instance, if lines is a list of 1000 bytestrings each ending in \n,
1958
dense_lines may be a list with one line in it, containing all the
1959
1000's lines and their \n's. Using dense_lines if it is already
1960
known is a win because the string join to create bytes in this
1961
function spends less time resizing the final string.
1962
:return: (len, a StringIO instance with the raw data ready to read.)
1964
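        # For illustration (hypothetical record, not from the original
        # source), the uncompressed block built below for a two-line text
        # would look like:
        #
        #   version rev-1 2 <sha1-of-the-full-text>
        #   first line
        #   second line
        #   end rev-1
        #
        # and is gzip-compressed before being written to the .knit data file.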
chunks = ["version %s %d %s\n" % (key[-1], len(lines), digest)]
1965
chunks.extend(dense_lines or lines)
1966
chunks.append("end %s\n" % key[-1])
1967
for chunk in chunks:
1968
if type(chunk) is not str:
1969
raise AssertionError(
1970
'data must be plain bytes was %s' % type(chunk))
1971
if lines and lines[-1][-1] != '\n':
1972
raise ValueError('corrupt lines value %r' % lines)
1973
compressed_bytes = tuned_gzip.chunks_to_gzip(chunks)
1974
return len(compressed_bytes), compressed_bytes
1976
    def _split_header(self, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self,
                              'unexpected number of elements in record header')
        return rec
1984
"""See VersionedFiles.keys."""
1985
if 'evil' in debug.debug_flags:
1986
trace.mutter_callsite(2, "keys scales with size of history")
1987
sources = [self._index] + self._fallback_vfs
1989
for source in sources:
1990
result.update(source.keys())
1994
class _ContentMapGenerator(object):
1995
"""Generate texts or expose raw deltas for a set of texts."""
1997
def __init__(self, ordering='unordered'):
1998
self._ordering = ordering
2000
def _get_content(self, key):
2001
"""Get the content object for key."""
2002
# Note that _get_content is only called when the _ContentMapGenerator
2003
# has been constructed with just one key requested for reconstruction.
2004
if key in self.nonlocal_keys:
2005
record = self.get_record_stream().next()
2006
# Create a content object on the fly
2007
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
2008
return PlainKnitContent(lines, record.key)
2010
# local keys we can ask for directly
2011
return self._get_one_work(key)
2013
def get_record_stream(self):
2014
"""Get a record stream for the keys requested during __init__."""
2015
for record in self._work():
2019
"""Produce maps of text and KnitContents as dicts.
2021
:return: (text_map, content_map) where text_map contains the texts for
2022
the requested versions and content_map contains the KnitContents.
2024
# NB: By definition we never need to read remote sources unless texts
2025
# are requested from them: we don't delta across stores - and we
2026
        # explicitly do not want to, to prevent data loss situations.
2027
if self.global_map is None:
2028
self.global_map = self.vf.get_parent_map(self.keys)
2029
nonlocal_keys = self.nonlocal_keys
2031
missing_keys = set(nonlocal_keys)
2032
# Read from remote versioned file instances and provide to our caller.
2033
for source in self.vf._fallback_vfs:
2034
if not missing_keys:
2036
# Loop over fallback repositories asking them for texts - ignore
2037
# any missing from a particular fallback.
2038
for record in source.get_record_stream(missing_keys,
2039
self._ordering, True):
2040
if record.storage_kind == 'absent':
2041
                    # Not in this particular stream, may be in one of the
2042
# other fallback vfs objects.
2044
missing_keys.remove(record.key)
2047
if self._raw_record_map is None:
2048
raise AssertionError('_raw_record_map should have been filled')
2050
for key in self.keys:
2051
if key in self.nonlocal_keys:
2053
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2056
def _get_one_work(self, requested_key):
2057
# Now, if we have calculated everything already, just return the
2059
if requested_key in self._contents_map:
2060
return self._contents_map[requested_key]
2061
# To simplify things, parse everything at once - code that wants one text
2062
# probably wants them all.
2063
# FUTURE: This function could be improved for the 'extract many' case
2064
# by tracking each component and only doing the copy when the number of
2065
# children than need to apply delta's to it is > 1 or it is part of the
2067
multiple_versions = len(self.keys) != 1
2068
if self._record_map is None:
2069
self._record_map = self.vf._raw_map_to_record_map(
2070
self._raw_record_map)
2071
record_map = self._record_map
2072
# raw_record_map is key:
2073
# Have read and parsed records at this point.
2074
for key in self.keys:
2075
if key in self.nonlocal_keys:
2080
while cursor is not None:
2082
record, record_details, digest, next = record_map[cursor]
2084
raise RevisionNotPresent(cursor, self)
2085
components.append((cursor, record, record_details, digest))
2087
if cursor in self._contents_map:
2088
# no need to plan further back
2089
components.append((cursor, None, None, None))
2093
for (component_id, record, record_details,
2094
digest) in reversed(components):
2095
if component_id in self._contents_map:
2096
content = self._contents_map[component_id]
2098
content, delta = self._factory.parse_record(key[-1],
2099
record, record_details, content,
2100
copy_base_content=multiple_versions)
2101
if multiple_versions:
2102
self._contents_map[component_id] = content
2104
# digest here is the digest from the last applied component.
2105
text = content.text()
2106
actual_sha = sha_strings(text)
2107
if actual_sha != digest:
2108
raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
2109
if multiple_versions:
2110
return self._contents_map[requested_key]
2114
def _wire_bytes(self):
2115
"""Get the bytes to put on the wire for 'key'.
2117
The first collection of bytes asked for returns the serialised
2118
raw_record_map and the additional details (key, parent) for key.
2119
Subsequent calls return just the additional details (key, parent).
2120
        The wire storage_kind given for the first key is 'knit-delta-closure'.
        For subsequent keys it is 'knit-delta-closure-ref'.

        :param key: A key from the content generator.
        :return: Bytes to put on the wire.
        """
        lines = []
        # kind marker for dispatch on the far side,
2128
lines.append('knit-delta-closure')
2130
if self.vf._factory.annotated:
2131
lines.append('annotated')
2134
# then the list of keys
2135
lines.append('\t'.join(['\x00'.join(key) for key in self.keys
2136
if key not in self.nonlocal_keys]))
2137
        # then the _raw_record_map in serialised form:
        map_byte_list = []
        # for each item in the map:
        # 1 line with key
        # 1 line with parents if the key is to be yielded (None: for None, '' for ())
        # one line with method
        # one line with noeol
        # one line with next ('' for None)
        # one line with byte count of the record bytes
        # the record bytes
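        # For illustration only (hypothetical values, not from the original
        # source), a single serialised map entry produced by the loop below
        # would look like:
        #
        #   file-id\x00rev-2
        #   file-id\x00rev-1
        #   line-delta
        #   False
        #   file-id\x00rev-1
        #   123
        #   <123 bytes of gzipped record data>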
for key, (record_bytes, (method, noeol), next) in \
2148
self._raw_record_map.iteritems():
2149
key_bytes = '\x00'.join(key)
2150
parents = self.global_map.get(key, None)
2152
parent_bytes = 'None:'
2154
parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
2155
method_bytes = method
2161
next_bytes = '\x00'.join(next)
2164
map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
2165
key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
2166
len(record_bytes), record_bytes))
2167
map_bytes = ''.join(map_byte_list)
2168
lines.append(map_bytes)
2169
bytes = '\n'.join(lines)
2173
class _VFContentMapGenerator(_ContentMapGenerator):
2174
"""Content map generator reading from a VersionedFiles object."""
2176
def __init__(self, versioned_files, keys, nonlocal_keys=None,
2177
global_map=None, raw_record_map=None, ordering='unordered'):
2178
"""Create a _ContentMapGenerator.
2180
:param versioned_files: The versioned files that the texts are being
2182
:param keys: The keys to produce content maps for.
2183
:param nonlocal_keys: An iterable of keys(possibly intersecting keys)
2184
which are known to not be in this knit, but rather in one of the
2186
:param global_map: The result of get_parent_map(keys) (or a supermap).
2187
This is required if get_record_stream() is to be used.
2188
:param raw_record_map: A unparsed raw record map to use for answering
2191
_ContentMapGenerator.__init__(self, ordering=ordering)
2192
# The vf to source data from
2193
self.vf = versioned_files
2195
self.keys = list(keys)
2196
# Keys known to be in fallback vfs objects
2197
if nonlocal_keys is None:
2198
self.nonlocal_keys = set()
2200
self.nonlocal_keys = frozenset(nonlocal_keys)
2201
# Parents data for keys to be returned in get_record_stream
2202
self.global_map = global_map
2203
# The chunked lists for self.keys in text form
2205
# A cache of KnitContent objects used in extracting texts.
2206
self._contents_map = {}
2207
# All the knit records needed to assemble the requested keys as full
2209
self._record_map = None
2210
if raw_record_map is None:
2211
self._raw_record_map = self.vf._get_record_map_unparsed(keys,
2214
self._raw_record_map = raw_record_map
2215
# the factory for parsing records
2216
self._factory = self.vf._factory
2219
class _NetworkContentMapGenerator(_ContentMapGenerator):
2220
"""Content map generator sourced from a network stream."""
2222
def __init__(self, bytes, line_end):
2223
"""Construct a _NetworkContentMapGenerator from a bytes block."""
2225
self.global_map = {}
2226
self._raw_record_map = {}
2227
self._contents_map = {}
2228
self._record_map = None
2229
self.nonlocal_keys = []
2230
# Get access to record parsing facilities
2231
self.vf = KnitVersionedFiles(None, None)
2234
line_end = bytes.find('\n', start)
2235
line = bytes[start:line_end]
2236
start = line_end + 1
2237
if line == 'annotated':
2238
self._factory = KnitAnnotateFactory()
2240
self._factory = KnitPlainFactory()
2241
# list of keys to emit in get_record_stream
2242
line_end = bytes.find('\n', start)
2243
line = bytes[start:line_end]
2244
start = line_end + 1
2246
tuple(segment.split('\x00')) for segment in line.split('\t')
2248
# now a loop until the end. XXX: It would be nice if this was just a
2249
# bunch of the same records as get_record_stream(..., False) gives, but
2250
# there is a decent sized gap stopping that at the moment.
2254
line_end = bytes.find('\n', start)
2255
key = tuple(bytes[start:line_end].split('\x00'))
2256
start = line_end + 1
2257
# 1 line with parents (None: for None, '' for ())
2258
line_end = bytes.find('\n', start)
2259
line = bytes[start:line_end]
2264
[tuple(segment.split('\x00')) for segment in line.split('\t')
2266
self.global_map[key] = parents
2267
start = line_end + 1
2268
# one line with method
2269
line_end = bytes.find('\n', start)
2270
line = bytes[start:line_end]
2272
start = line_end + 1
2273
# one line with noeol
2274
line_end = bytes.find('\n', start)
2275
line = bytes[start:line_end]
2277
start = line_end + 1
2278
# one line with next ('' for None)
2279
line_end = bytes.find('\n', start)
2280
line = bytes[start:line_end]
2284
next = tuple(bytes[start:line_end].split('\x00'))
2285
start = line_end + 1
2286
# one line with byte count of the record bytes
2287
line_end = bytes.find('\n', start)
2288
line = bytes[start:line_end]
2290
start = line_end + 1
2292
record_bytes = bytes[start:start+count]
2293
start = start + count
2295
self._raw_record_map[key] = (record_bytes, (method, noeol), next)
2297
def get_record_stream(self):
2298
"""Get a record stream for for keys requested by the bytestream."""
2300
for key in self.keys:
2301
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2304
def _wire_bytes(self):
2308
class _KndxIndex(object):
2309
"""Manages knit index files
2311
The index is kept in memory and read on startup, to enable
2312
fast lookups of revision information. The cursor of the index
2313
file is always pointing to the end, making it easy to append
2316
_cache is a cache for fast mapping from version id to a Index
2319
_history is a cache for fast mapping from indexes to version ids.
2321
The index data format is dictionary compressed when it comes to
2322
parent references; a index entry may only have parents that with a
2323
lover index number. As a result, the index is topological sorted.
2325
Duplicate entries may be written to the index for a single version id
2326
if this is done then the latter one completely replaces the former:
2327
this allows updates to correct version and parent information.
2328
Note that the two entries may share the delta, and that successive
2329
annotations and references MUST point to the first entry.
2331
The index file on disc contains a header, followed by one line per knit
2332
record. The same revision can be present in an index file more than once.
2333
The first occurrence gets assigned a sequence number starting from 0.
2335
The format of a single line is
2336
REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
2337
REVISION_ID is a utf8-encoded revision id
2338
FLAGS is a comma separated list of flags about the record. Values include
2339
no-eol, line-delta, fulltext.
2340
BYTE_OFFSET is the ascii representation of the byte offset in the data file
2341
that the the compressed data starts at.
2342
LENGTH is the ascii representation of the length of the data file.
2343
PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
2345
PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
2346
revision id already in the knit that is a parent of REVISION_ID.
2347
The ' :' marker is the end of record marker.
2350
when a write is interrupted to the index file, it will result in a line
2351
that does not end in ' :'. If the ' :' is not present at the end of a line,
2352
or at the end of the file, then the record that is missing it will be
2353
ignored by the parser.
2355
When writing new records to the index file, the data is preceded by '\n'
2356
to ensure that records always start on new lines even if the last write was
2357
interrupted. As a result its normal for the last line in the index to be
2358
missing a trailing newline. One can be added with no harmful effects.
2360
:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
2361
where prefix is e.g. the (fileid,) for .texts instances or () for
2362
constant-mapped things like .revisions, and the old state is
2363
tuple(history_vector, cache_dict). This is used to prevent having an
2364
ABI change with the C extension that reads .kndx files.
2367
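    # Illustrative sketch (not part of the original module): parsing one index
    # line of the format described above.  The revision ids, offsets and sizes
    # are hypothetical.
    #
    #   line = "rev-2 line-delta 1234 567 .ghost-rev 0 :"
    #   rec = line.split()
    #   version_id = rec[0]
    #   flags = rec[1].split(',')            # e.g. ['line-delta']
    #   byte_offset, length = int(rec[2]), int(rec[3])
    #   parents = rec[4:-1]                  # ['.ghost-rev', '0']
    #   # '.ghost-rev' is a literal revision id; '0' is the sequence number of
    #   # an earlier line in this index (dictionary compression).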
HEADER = "# bzr knit index 8\n"
2369
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
2370
"""Create a _KndxIndex on transport using mapper."""
2371
self._transport = transport
2372
self._mapper = mapper
2373
self._get_scope = get_scope
2374
self._allow_writes = allow_writes
2375
self._is_locked = is_locked
2377
self.has_graph = True
2379
def add_records(self, records, random_id=False, missing_compression_parents=False):
2380
"""Add multiple records to the index.
2382
:param records: a list of tuples:
2383
(key, options, access_memo, parents).
2384
:param random_id: If True the ids being added were randomly generated
2385
and no check for existence will be performed.
2386
:param missing_compression_parents: If True the records being added are
2387
only compressed against texts already in the index (or inside
2388
records). If False the records all refer to unavailable texts (or
2389
texts inside records) as compression parents.
2391
if missing_compression_parents:
2392
# It might be nice to get the edge of the records. But keys isn't
2394
keys = sorted(record[0] for record in records)
2395
raise errors.RevisionNotPresent(keys, self)
2397
for record in records:
2400
path = self._mapper.map(key) + '.kndx'
2401
path_keys = paths.setdefault(path, (prefix, []))
2402
path_keys[1].append(record)
2403
for path in sorted(paths):
2404
prefix, path_keys = paths[path]
2405
self._load_prefixes([prefix])
2407
orig_history = self._kndx_cache[prefix][1][:]
2408
orig_cache = self._kndx_cache[prefix][0].copy()
2411
for key, options, (_, pos, size), parents in path_keys:
2413
# kndx indices cannot be parentless.
2415
line = "\n%s %s %s %s %s :" % (
2416
key[-1], ','.join(options), pos, size,
2417
self._dictionary_compress(parents))
2418
if type(line) is not str:
2419
raise AssertionError(
2420
'data must be utf8 was %s' % type(line))
2422
self._cache_key(key, options, pos, size, parents)
2423
if len(orig_history):
2424
self._transport.append_bytes(path, ''.join(lines))
2426
self._init_index(path, lines)
2428
# If any problems happen, restore the original values and re-raise
2429
self._kndx_cache[prefix] = (orig_cache, orig_history)
2432
def scan_unvalidated_index(self, graph_index):
2433
"""See _KnitGraphIndex.scan_unvalidated_index."""
2434
# Because kndx files do not support atomic insertion via separate index
2435
# files, they do not support this method.
2436
raise NotImplementedError(self.scan_unvalidated_index)
2438
def get_missing_compression_parents(self):
2439
"""See _KnitGraphIndex.get_missing_compression_parents."""
2440
# Because kndx files do not support atomic insertion via separate index
2441
# files, they do not support this method.
2442
raise NotImplementedError(self.get_missing_compression_parents)
2444
def _cache_key(self, key, options, pos, size, parent_keys):
2445
"""Cache a version record in the history array and index cache.
2447
This is inlined into _load_data for performance. KEEP IN SYNC.
2448
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
2452
version_id = key[-1]
2453
# last-element only for compatibilty with the C load_data.
2454
parents = tuple(parent[-1] for parent in parent_keys)
2455
for parent in parent_keys:
2456
if parent[:-1] != prefix:
2457
raise ValueError("mismatched prefixes for %r, %r" % (
2459
cache, history = self._kndx_cache[prefix]
2460
# only want the _history index to reference the 1st index entry
2462
if version_id not in cache:
2463
index = len(history)
2464
history.append(version_id)
2466
index = cache[version_id][5]
2467
cache[version_id] = (version_id,
2474
def check_header(self, fp):
2475
line = fp.readline()
2477
# An empty file can actually be treated as though the file doesn't
2479
raise errors.NoSuchFile(self)
2480
if line != self.HEADER:
2481
raise KnitHeaderError(badline=line, filename=self)
2483
def _check_read(self):
2484
if not self._is_locked():
2485
raise errors.ObjectNotLocked(self)
2486
if self._get_scope() != self._scope:
2489
def _check_write_ok(self):
2490
"""Assert if not writes are permitted."""
2491
if not self._is_locked():
2492
raise errors.ObjectNotLocked(self)
2493
if self._get_scope() != self._scope:
2495
if self._mode != 'w':
2496
raise errors.ReadOnlyObjectDirtiedError(self)
2498
def get_build_details(self, keys):
2499
"""Get the method, index_memo and compression parent for keys.
2501
Ghosts are omitted from the result.
2503
:param keys: An iterable of keys.
2504
:return: A dict of key:(index_memo, compression_parent, parents,
2507
opaque structure to pass to read_records to extract the raw
2510
Content that this record is built upon, may be None
2512
Logical parents of this node
2514
extra information about the content which needs to be passed to
2515
Factory.parse_record
2517
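        # The returned mapping looks like this (hypothetical keys, offsets and
        # sizes; sketch only):
        #
        #   {('rev-2',): ((('rev-2',), 1234, 567),   # index_memo
        #                 ('rev-1',),                # compression_parent
        #                 (('rev-1',),),             # parents
        #                 ('line-delta', False))}    # (method, noeol)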
parent_map = self.get_parent_map(keys)
2520
if key not in parent_map:
2522
method = self.get_method(key)
2523
parents = parent_map[key]
2524
if method == 'fulltext':
2525
compression_parent = None
2527
compression_parent = parents[0]
2528
noeol = 'no-eol' in self.get_options(key)
2529
index_memo = self.get_position(key)
2530
result[key] = (index_memo, compression_parent,
2531
parents, (method, noeol))
2534
def get_method(self, key):
2535
"""Return compression method of specified key."""
2536
options = self.get_options(key)
2537
if 'fulltext' in options:
2539
elif 'line-delta' in options:
2542
raise errors.KnitIndexUnknownMethod(self, options)
2544
def get_options(self, key):
2545
"""Return a list representing options.
2549
prefix, suffix = self._split_key(key)
2550
self._load_prefixes([prefix])
2552
return self._kndx_cache[prefix][0][suffix][1]
2554
raise RevisionNotPresent(key, self)
2556
def get_parent_map(self, keys):
2557
"""Get a map of the parents of keys.
2559
:param keys: The keys to look up parents for.
2560
:return: A mapping from keys to parents. Absent keys are absent from
2563
# Parse what we need to up front, this potentially trades off I/O
2564
# locality (.kndx and .knit in the same block group for the same file
2565
# id) for less checking in inner loops.
2566
prefixes = set(key[:-1] for key in keys)
2567
self._load_prefixes(prefixes)
2572
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
2576
result[key] = tuple(prefix + (suffix,) for
2577
suffix in suffix_parents)
2580
def get_position(self, key):
2581
"""Return details needed to access the version.
2583
:return: a tuple (key, data position, size) to hand to the access
2584
logic to get the record.
2586
prefix, suffix = self._split_key(key)
2587
self._load_prefixes([prefix])
2588
entry = self._kndx_cache[prefix][0][suffix]
2589
return key, entry[2], entry[3]
2591
has_key = _mod_index._has_key_from_parent_map
2593
def _init_index(self, path, extra_lines=[]):
2594
"""Initialize an index."""
2596
sio.write(self.HEADER)
2597
sio.writelines(extra_lines)
2599
self._transport.put_file_non_atomic(path, sio,
2600
create_parent_dir=True)
2601
# self._create_parent_dir)
2602
# mode=self._file_mode,
2603
# dir_mode=self._dir_mode)
2606
"""Get all the keys in the collection.
2608
The keys are not ordered.
2611
# Identify all key prefixes.
2612
# XXX: A bit hacky, needs polish.
2613
if type(self._mapper) is ConstantMapper:
2617
for quoted_relpath in self._transport.iter_files_recursive():
2618
path, ext = os.path.splitext(quoted_relpath)
2620
prefixes = [self._mapper.unmap(path) for path in relpaths]
2621
self._load_prefixes(prefixes)
2622
for prefix in prefixes:
2623
for suffix in self._kndx_cache[prefix][1]:
2624
result.add(prefix + (suffix,))
2627
def _load_prefixes(self, prefixes):
2628
"""Load the indices for prefixes."""
2630
for prefix in prefixes:
2631
if prefix not in self._kndx_cache:
2632
# the load_data interface writes to these variables.
2635
self._filename = prefix
2637
path = self._mapper.map(prefix) + '.kndx'
2638
fp = self._transport.get(path)
2640
# _load_data may raise NoSuchFile if the target knit is
2642
_load_data(self, fp)
2645
self._kndx_cache[prefix] = (self._cache, self._history)
2650
self._kndx_cache[prefix] = ({}, [])
2651
if type(self._mapper) is ConstantMapper:
2652
# preserve behaviour for revisions.kndx etc.
2653
self._init_index(path)
2658
missing_keys = _mod_index._missing_keys_from_parent_map
2660
def _partition_keys(self, keys):
2661
"""Turn keys into a dict of prefix:suffix_list."""
2664
prefix_keys = result.setdefault(key[:-1], [])
2665
prefix_keys.append(key[-1])
2668
def _dictionary_compress(self, keys):
2669
"""Dictionary compress keys.
2671
:param keys: The keys to generate references to.
2672
:return: A string representation of keys. keys which are present are
2673
dictionary compressed, and others are emitted as fulltext with a
2679
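        # For example (hypothetical values): if 'rev-1' is already cached at
        # sequence number 5 and 'ghost-rev' is not present, then for
        # keys [('rev-1',), ('ghost-rev',)] the result is "5 .ghost-rev".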
prefix = keys[0][:-1]
2680
cache = self._kndx_cache[prefix][0]
2682
if key[:-1] != prefix:
2683
# kndx indices cannot refer across partitioned storage.
2684
raise ValueError("mismatched prefixes for %r" % keys)
2685
if key[-1] in cache:
2686
# -- inlined lookup() --
2687
result_list.append(str(cache[key[-1]][5]))
2688
# -- end lookup () --
2690
result_list.append('.' + key[-1])
2691
return ' '.join(result_list)
2693
def _reset_cache(self):
2694
# Possibly this should be a LRU cache. A dictionary from key_prefix to
2695
# (cache_dict, history_vector) for parsed kndx files.
2696
self._kndx_cache = {}
2697
self._scope = self._get_scope()
2698
allow_writes = self._allow_writes()
2704
def _sort_keys_by_io(self, keys, positions):
2705
"""Figure out an optimal order to read the records for the given keys.
2707
Sort keys, grouped by index and sorted by position.
2709
:param keys: A list of keys whose records we want to read. This will be
2711
:param positions: A dict, such as the one returned by
2712
_get_components_positions()
2715
def get_sort_key(key):
2716
index_memo = positions[key][1]
2717
# Group by prefix and position. index_memo[0] is the key, so it is
2718
# (file_id, revision_id) and we don't want to sort on revision_id,
2719
# index_memo[1] is the position, and index_memo[2] is the size,
2720
# which doesn't matter for the sort
2721
return index_memo[0][:-1], index_memo[1]
2722
return keys.sort(key=get_sort_key)
2724
_get_total_build_size = _get_total_build_size
2726
def _split_key(self, key):
2727
"""Split key into a prefix and suffix."""
2728
return key[:-1], key[-1]
2731
class _KeyRefs(object):
2734
# dict mapping 'key' to 'set of keys referring to that key'
2737
def add_references(self, key, refs):
2738
# Record the new references
2739
for referenced in refs:
2741
needed_by = self.refs[referenced]
2743
needed_by = self.refs[referenced] = set()
2745
# Discard references satisfied by the new key
2748
def get_unsatisfied_refs(self):
2749
return self.refs.iterkeys()
2751
def add_key(self, key):
2755
# No keys depended on this key. That's ok.
2758
def add_keys(self, keys):
2762
def get_referrers(self):
2764
for referrers in self.refs.itervalues():
2765
result.update(referrers)
2769
class _KnitGraphIndex(object):
2770
"""A KnitVersionedFiles index layered on GraphIndex."""
2772
def __init__(self, graph_index, is_locked, deltas=False, parents=True,
2773
add_callback=None, track_external_parent_refs=False):
2774
"""Construct a KnitGraphIndex on a graph_index.
2776
:param graph_index: An implementation of bzrlib.index.GraphIndex.
2777
:param is_locked: A callback to check whether the object should answer
2779
:param deltas: Allow delta-compressed records.
2780
:param parents: If True, record knits parents, if not do not record
2782
:param add_callback: If not None, allow additions to the index and call
2783
this callback with a list of added GraphIndex nodes:
2784
[(node, value, node_refs), ...]
2785
:param is_locked: A callback, returns True if the index is locked and
2787
:param track_external_parent_refs: If True, record all external parent
2788
references parents from added records. These can be retrieved
2789
later by calling get_missing_parents().
2791
self._add_callback = add_callback
2792
self._graph_index = graph_index
2793
self._deltas = deltas
2794
self._parents = parents
2795
if deltas and not parents:
2796
# XXX: TODO: Delta tree and parent graph should be conceptually
2798
raise KnitCorrupt(self, "Cannot do delta compression without "
2800
self.has_graph = parents
2801
self._is_locked = is_locked
2802
self._missing_compression_parents = set()
2803
if track_external_parent_refs:
2804
self._key_dependencies = _KeyRefs()
2806
self._key_dependencies = None
2809
return "%s(%r)" % (self.__class__.__name__, self._graph_index)
2811
def add_records(self, records, random_id=False,
2812
missing_compression_parents=False):
2813
"""Add multiple records to the index.
2815
This function does not insert data into the Immutable GraphIndex
2816
backing the KnitGraphIndex, instead it prepares data for insertion by
2817
the caller and checks that it is safe to insert then calls
2818
self._add_callback with the prepared GraphIndex nodes.
2820
:param records: a list of tuples:
2821
(key, options, access_memo, parents).
2822
:param random_id: If True the ids being added were randomly generated
2823
and no check for existence will be performed.
2824
:param missing_compression_parents: If True the records being added are
2825
only compressed against texts already in the index (or inside
2826
records). If False the records all refer to unavailable texts (or
2827
texts inside records) as compression parents.
2829
if not self._add_callback:
2830
raise errors.ReadOnlyError(self)
2831
# we hope there are no repositories with inconsistent parentage
2835
compression_parents = set()
2836
key_dependencies = self._key_dependencies
2837
for (key, options, access_memo, parents) in records:
2839
parents = tuple(parents)
2840
if key_dependencies is not None:
2841
key_dependencies.add_references(key, parents)
2842
index, pos, size = access_memo
2843
if 'no-eol' in options:
2847
value += "%d %d" % (pos, size)
2848
if not self._deltas:
2849
if 'line-delta' in options:
2850
raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
2853
if 'line-delta' in options:
2854
node_refs = (parents, (parents[0],))
2855
if missing_compression_parents:
2856
compression_parents.add(parents[0])
2858
node_refs = (parents, ())
2860
node_refs = (parents, )
2863
raise KnitCorrupt(self, "attempt to add node with parents "
2864
"in parentless index.")
2866
keys[key] = (value, node_refs)
2869
present_nodes = self._get_entries(keys)
2870
for (index, key, value, node_refs) in present_nodes:
2871
if (value[0] != keys[key][0][0] or
2872
node_refs[:1] != keys[key][1][:1]):
2873
raise KnitCorrupt(self, "inconsistent details in add_records"
2874
": %s %s" % ((value, node_refs), keys[key]))
2878
for key, (value, node_refs) in keys.iteritems():
2879
result.append((key, value, node_refs))
2881
for key, (value, node_refs) in keys.iteritems():
2882
result.append((key, value))
2883
self._add_callback(result)
2884
if missing_compression_parents:
2885
# This may appear to be incorrect (it does not check for
2886
# compression parents that are in the existing graph index),
2887
# but such records won't have been buffered, so this is
2888
# actually correct: every entry when
2889
# missing_compression_parents==True either has a missing parent, or
2890
# a parent that is one of the keys in records.
2891
compression_parents.difference_update(keys)
2892
self._missing_compression_parents.update(compression_parents)
2893
# Adding records may have satisfied missing compression parents.
2894
self._missing_compression_parents.difference_update(keys)
2896
def scan_unvalidated_index(self, graph_index):
2897
"""Inform this _KnitGraphIndex that there is an unvalidated index.
2899
This allows this _KnitGraphIndex to keep track of any missing
2900
compression parents we may want to have filled in to make those
2903
:param graph_index: A GraphIndex
2906
new_missing = graph_index.external_references(ref_list_num=1)
2907
new_missing.difference_update(self.get_parent_map(new_missing))
2908
self._missing_compression_parents.update(new_missing)
2909
if self._key_dependencies is not None:
2910
# Add parent refs from graph_index (and discard parent refs that
2911
# the graph_index has).
2912
for node in graph_index.iter_all_entries():
2913
self._key_dependencies.add_references(node[1], node[3][0])
2915
def get_missing_compression_parents(self):
2916
"""Return the keys of missing compression parents.
2918
Missing compression parents occur when a record stream was missing
2919
basis texts, or a index was scanned that had missing basis texts.
2921
return frozenset(self._missing_compression_parents)
2923
def get_missing_parents(self):
2924
"""Return the keys of missing parents."""
2925
# If updating this, you should also update
2926
# groupcompress._GCGraphIndex.get_missing_parents
2927
# We may have false positives, so filter those out.
2928
self._key_dependencies.add_keys(
2929
self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
2930
return frozenset(self._key_dependencies.get_unsatisfied_refs())
2932
def _check_read(self):
2933
"""raise if reads are not permitted."""
2934
if not self._is_locked():
2935
raise errors.ObjectNotLocked(self)
2937
def _check_write_ok(self):
2938
"""Assert if writes are not permitted."""
2939
if not self._is_locked():
2940
raise errors.ObjectNotLocked(self)
2942
def _compression_parent(self, an_entry):
2943
# return the key that an_entry is compressed against, or None
2944
# Grab the second parent list (as deltas implies parents currently)
2945
compression_parents = an_entry[3][1]
2946
if not compression_parents:
2948
if len(compression_parents) != 1:
2949
raise AssertionError(
2950
"Too many compression parents: %r" % compression_parents)
2951
return compression_parents[0]
2953
def get_build_details(self, keys):
2954
"""Get the method, index_memo and compression parent for version_ids.
2956
Ghosts are omitted from the result.
2958
:param keys: An iterable of keys.
2959
:return: A dict of key:
2960
(index_memo, compression_parent, parents, record_details).
2962
opaque structure to pass to read_records to extract the raw
2965
Content that this record is built upon, may be None
2967
Logical parents of this node
2969
extra information about the content which needs to be passed to
2970
Factory.parse_record
2974
entries = self._get_entries(keys, False)
2975
for entry in entries:
2977
if not self._parents:
2980
parents = entry[3][0]
2981
if not self._deltas:
2982
compression_parent_key = None
2984
compression_parent_key = self._compression_parent(entry)
2985
noeol = (entry[2][0] == 'N')
2986
if compression_parent_key:
2987
method = 'line-delta'
2990
result[key] = (self._node_to_position(entry),
2991
compression_parent_key, parents,
2995
def _get_entries(self, keys, check_present=False):
2996
"""Get the entries for keys.
2998
:param keys: An iterable of index key tuples.
3003
for node in self._graph_index.iter_entries(keys):
3005
found_keys.add(node[1])
3007
# adapt parentless index to the rest of the code.
3008
for node in self._graph_index.iter_entries(keys):
3009
yield node[0], node[1], node[2], ()
3010
found_keys.add(node[1])
3012
missing_keys = keys.difference(found_keys)
3014
raise RevisionNotPresent(missing_keys.pop(), self)
3016
def get_method(self, key):
3017
"""Return compression method of specified key."""
3018
return self._get_method(self._get_node(key))
3020
def _get_method(self, node):
3021
if not self._deltas:
3023
if self._compression_parent(node):
3028
def _get_node(self, key):
3030
return list(self._get_entries([key]))[0]
3032
raise RevisionNotPresent(key, self)
3034
def get_options(self, key):
3035
"""Return a list representing options.
3039
node = self._get_node(key)
3040
options = [self._get_method(node)]
3041
if node[2][0] == 'N':
3042
options.append('no-eol')
3045
def get_parent_map(self, keys):
3046
"""Get a map of the parents of keys.
3048
:param keys: The keys to look up parents for.
3049
:return: A mapping from keys to parents. Absent keys are absent from
3053
nodes = self._get_entries(keys)
3057
result[node[1]] = node[3][0]
3060
result[node[1]] = None
3063
def get_position(self, key):
3064
"""Return details needed to access the version.
3066
:return: a tuple (index, data position, size) to hand to the access
3067
logic to get the record.
3069
node = self._get_node(key)
3070
return self._node_to_position(node)
3072
has_key = _mod_index._has_key_from_parent_map
3075
"""Get all the keys in the collection.
3077
The keys are not ordered.
3080
return [node[1] for node in self._graph_index.iter_all_entries()]
3082
missing_keys = _mod_index._missing_keys_from_parent_map
3084
def _node_to_position(self, node):
3085
"""Convert an index value to position details."""
3086
bits = node[2][1:].split(' ')
3087
return node[0], int(bits[0]), int(bits[1])
3089
def _sort_keys_by_io(self, keys, positions):
3090
"""Figure out an optimal order to read the records for the given keys.
3092
Sort keys, grouped by index and sorted by position.
3094
:param keys: A list of keys whose records we want to read. This will be
3096
:param positions: A dict, such as the one returned by
3097
_get_components_positions()
3100
def get_index_memo(key):
3101
# index_memo is at offset [1]. It is made up of (GraphIndex,
3102
# position, size). GI is an object, which will be unique for each
3103
# pack file. This causes us to group by pack file, then sort by
3104
# position. Size doesn't matter, but it isn't worth breaking up the
3106
return positions[key][1]
3107
return keys.sort(key=get_index_memo)
3109
_get_total_build_size = _get_total_build_size
3112
class _KnitKeyAccess(object):
3113
"""Access to records in .knit files."""
3115
def __init__(self, transport, mapper):
3116
"""Create a _KnitKeyAccess with transport and mapper.
3118
:param transport: The transport the access object is rooted at.
3119
:param mapper: The mapper used to map keys to .knit files.
3121
self._transport = transport
3122
self._mapper = mapper
3124
def add_raw_records(self, key_sizes, raw_data):
3125
"""Add raw knit bytes to a storage area.
3127
The data is spooled to the container writer in one bytes-record per
3130
:param sizes: An iterable of tuples containing the key and size of each
3132
:param raw_data: A bytestring containing the data.
3133
:return: A list of memos to retrieve the record later. Each memo is an
3134
opaque index memo. For _KnitKeyAccess the memo is (key, pos,
3135
length), where the key is the record key.
3137
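        # For example (hypothetical keys and sizes, sketch only): adding two
        # records of 10 and 20 bytes for ('fid', 'r1') and ('fid', 'r2'),
        # which the mapper sends to the same 'fid.knit' file, returns memos
        # like [(('fid', 'r1'), 0, 10), (('fid', 'r2'), 10, 20)]; each memo
        # can later be handed back to get_raw_records().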
if type(raw_data) is not str:
3138
raise AssertionError(
3139
'data must be plain bytes was %s' % type(raw_data))
3142
# TODO: This can be tuned for writing to sftp and other servers where
3143
# append() is relatively expensive by grouping the writes to each key
3145
for key, size in key_sizes:
3146
path = self._mapper.map(key)
3148
base = self._transport.append_bytes(path + '.knit',
3149
raw_data[offset:offset+size])
3150
except errors.NoSuchFile:
3151
self._transport.mkdir(osutils.dirname(path))
3152
base = self._transport.append_bytes(path + '.knit',
3153
raw_data[offset:offset+size])
3157
result.append((key, base, size))
3161
"""Flush pending writes on this access object.
3163
For .knit files this is a no-op.
3167
def get_raw_records(self, memos_for_retrieval):
3168
"""Get the raw bytes for a records.
3170
:param memos_for_retrieval: An iterable containing the access memo for
3171
retrieving the bytes.
3172
:return: An iterator over the bytes of the records.
3174
# first pass, group into same-index request to minimise readv's issued.
3176
current_prefix = None
3177
for (key, offset, length) in memos_for_retrieval:
3178
if current_prefix == key[:-1]:
3179
current_list.append((offset, length))
3181
if current_prefix is not None:
3182
request_lists.append((current_prefix, current_list))
3183
current_prefix = key[:-1]
3184
current_list = [(offset, length)]
3185
# handle the last entry
3186
if current_prefix is not None:
3187
request_lists.append((current_prefix, current_list))
3188
for prefix, read_vector in request_lists:
3189
path = self._mapper.map(prefix) + '.knit'
3190
for pos, data in self._transport.readv(path, read_vector):
3194
class _DirectPackAccess(object):
3195
"""Access to data in one or more packs with less translation."""
3197
def __init__(self, index_to_packs, reload_func=None, flush_func=None):
3198
"""Create a _DirectPackAccess object.
3200
:param index_to_packs: A dict mapping index objects to the transport
3201
and file names for obtaining data.
3202
:param reload_func: A function to call if we determine that the pack
3203
files have moved and we need to reload our caches. See
3204
bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
3206
self._container_writer = None
3207
self._write_index = None
3208
self._indices = index_to_packs
3209
self._reload_func = reload_func
3210
self._flush_func = flush_func
3212
def add_raw_records(self, key_sizes, raw_data):
3213
"""Add raw knit bytes to a storage area.
3215
The data is spooled to the container writer in one bytes-record per
3218
:param sizes: An iterable of tuples containing the key and size of each
3220
:param raw_data: A bytestring containing the data.
3221
:return: A list of memos to retrieve the record later. Each memo is an
3222
opaque index memo. For _DirectPackAccess the memo is (index, pos,
3223
length), where the index field is the write_index object supplied
3224
to the PackAccess object.
3226
if type(raw_data) is not str:
3227
raise AssertionError(
3228
'data must be plain bytes was %s' % type(raw_data))
3231
for key, size in key_sizes:
3232
p_offset, p_length = self._container_writer.add_bytes_record(
3233
raw_data[offset:offset+size], [])
3235
result.append((self._write_index, p_offset, p_length))
3239
"""Flush pending writes on this access object.
3241
This will flush any buffered writes to a NewPack.
3243
if self._flush_func is not None:
3246
def get_raw_records(self, memos_for_retrieval):
3247
"""Get the raw bytes for a records.
3249
:param memos_for_retrieval: An iterable containing the (index, pos,
3250
length) memo for retrieving the bytes. The Pack access method
3251
looks up the pack to use for a given record in its index_to_pack
3253
:return: An iterator over the bytes of the records.
3255
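        # For example (hypothetical memos, sketch only): given memos
        # [(idx_a, 0, 10), (idx_b, 5, 20), (idx_a, 10, 30)] the grouping pass
        # below produces [(idx_a, [(0, 10)]), (idx_b, [(5, 20)]),
        # (idx_a, [(10, 30)])] -- only *consecutive* memos that share an index
        # are merged into one readv request.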
# first pass, group into same-index requests
3257
current_index = None
3258
for (index, offset, length) in memos_for_retrieval:
3259
if current_index == index:
3260
current_list.append((offset, length))
3262
if current_index is not None:
3263
request_lists.append((current_index, current_list))
3264
current_index = index
3265
current_list = [(offset, length)]
3266
# handle the last entry
3267
if current_index is not None:
3268
request_lists.append((current_index, current_list))
3269
for index, offsets in request_lists:
3271
transport, path = self._indices[index]
3273
# A KeyError here indicates that someone has triggered an index
3274
# reload, and this index has gone missing, we need to start
3276
if self._reload_func is None:
3277
# If we don't have a _reload_func there is nothing that can
3280
raise errors.RetryWithNewPacks(index,
3281
reload_occurred=True,
3282
exc_info=sys.exc_info())
3284
reader = pack.make_readv_reader(transport, path, offsets)
3285
for names, read_func in reader.iter_records():
3286
yield read_func(None)
3287
except errors.NoSuchFile:
3288
# A NoSuchFile error indicates that a pack file has gone
3289
# missing on disk, we need to trigger a reload, and start over.
3290
if self._reload_func is None:
3292
raise errors.RetryWithNewPacks(transport.abspath(path),
3293
reload_occurred=False,
3294
exc_info=sys.exc_info())
3296
def set_writer(self, writer, index, transport_packname):
3297
"""Set a writer to use for adding data."""
3298
if index is not None:
3299
self._indices[index] = transport_packname
3300
self._container_writer = writer
3301
self._write_index = index
3303
def reload_or_raise(self, retry_exc):
3304
"""Try calling the reload function, or re-raise the original exception.
3306
This should be called after _DirectPackAccess raises a
3307
RetryWithNewPacks exception. This function will handle the common logic
3308
of determining when the error is fatal versus being temporary.
3309
It will also make sure that the original exception is raised, rather
3310
than the RetryWithNewPacks exception.
3312
If this function returns, then the calling function should retry
3313
whatever operation was being performed. Otherwise an exception will
3316
:param retry_exc: A RetryWithNewPacks exception.
3319
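        # Typical caller pattern (a sketch based on how this module uses it
        # elsewhere, e.g. in iter_lines_added_or_present_in_keys):
        #
        #   while True:
        #       try:
        #           ...  # work against the pack access object
        #           break
        #       except errors.RetryWithNewPacks, e:
        #           access.reload_or_raise(e)   # if it returns, retry the work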
if self._reload_func is None:
3321
elif not self._reload_func():
3322
# The reload claimed that nothing changed
3323
if not retry_exc.reload_occurred:
3324
# If there wasn't an earlier reload, then we really were
3325
# expecting to find changes. We didn't find them, so this is a
3329
exc_class, exc_value, exc_traceback = retry_exc.exc_info
3330
raise exc_class, exc_value, exc_traceback
3333
# Deprecated, use PatienceSequenceMatcher instead
3334
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher
3337
def annotate_knit(knit, revision_id):
3338
"""Annotate a knit with no cached annotations.
3340
This implementation is for knits with no cached annotations.
3341
It will work for knits with cached annotations, but this is not
3344
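    # Example usage (hypothetical revision id; sketch only -- the exact shape
    # of each yielded item follows annotate.Annotator.annotate_flat):
    #
    #   for origin_key, line in annotate_knit(knit, 'rev-2'):
    #       ...  # origin_key identifies the revision that introduced 'line'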
annotator = _KnitAnnotator(knit)
3345
return iter(annotator.annotate_flat(revision_id))
3348
class _KnitAnnotator(annotate.Annotator):
3349
"""Build up the annotations for a text."""
3351
def __init__(self, vf):
3352
annotate.Annotator.__init__(self, vf)
3354
# TODO: handle Nodes which cannot be extracted
3355
# self._ghosts = set()
3357
# Map from (key, parent_key) => matching_blocks, should be 'use once'
3358
self._matching_blocks = {}
3360
# KnitContent objects
3361
self._content_objects = {}
3362
# The number of children that depend on this fulltext content object
3363
self._num_compression_children = {}
3364
# Delta records that need their compression parent before they can be
3366
self._pending_deltas = {}
3367
# Fulltext records that are waiting for their parents fulltexts before
3368
# they can be yielded for annotation
3369
self._pending_annotation = {}
3371
self._all_build_details = {}
3373
def _get_build_graph(self, key):
3374
"""Get the graphs for building texts and annotations.
3376
The data you need for creating a full text may be different than the
3377
data you need to annotate that text. (At a minimum, you need both
3378
parents to create an annotation, but only need 1 parent to generate the
3381
:return: A list of (key, index_memo) records, suitable for
3382
passing to read_records_iter to start reading in the raw data from
3385
pending = set([key])
3388
self._num_needed_children[key] = 1
3390
# get all pending nodes
3391
this_iteration = pending
3392
build_details = self._vf._index.get_build_details(this_iteration)
3393
self._all_build_details.update(build_details)
3394
# new_nodes = self._vf._index._get_entries(this_iteration)
3396
for key, details in build_details.iteritems():
3397
(index_memo, compression_parent, parent_keys,
3398
record_details) = details
3399
self._parent_map[key] = parent_keys
3400
self._heads_provider = None
3401
records.append((key, index_memo))
3402
# Do we actually need to check _annotated_lines?
3403
pending.update([p for p in parent_keys
3404
if p not in self._all_build_details])
3406
for parent_key in parent_keys:
3407
if parent_key in self._num_needed_children:
3408
self._num_needed_children[parent_key] += 1
3410
self._num_needed_children[parent_key] = 1
3411
if compression_parent:
3412
if compression_parent in self._num_compression_children:
3413
self._num_compression_children[compression_parent] += 1
3415
self._num_compression_children[compression_parent] = 1
3417
missing_versions = this_iteration.difference(build_details.keys())
3418
if missing_versions:
3419
for key in missing_versions:
3420
if key in self._parent_map and key in self._text_cache:
3421
# We already have this text ready, we just need to
3422
# yield it later so we get it annotated
3424
parent_keys = self._parent_map[key]
3425
for parent_key in parent_keys:
3426
if parent_key in self._num_needed_children:
3427
self._num_needed_children[parent_key] += 1
3429
self._num_needed_children[parent_key] = 1
3430
pending.update([p for p in parent_keys
3431
if p not in self._all_build_details])
3433
raise errors.RevisionNotPresent(key, self._vf)
3434
# Generally we will want to read the records in reverse order, because
3435
# we find the parent nodes after the children
3437
return records, ann_keys
3439
def _get_needed_texts(self, key, pb=None):
3440
# if True or len(self._vf._fallback_vfs) > 0:
3441
if len(self._vf._fallback_vfs) > 0:
3442
# If we have fallbacks, go to the generic path
3443
for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
3448
records, ann_keys = self._get_build_graph(key)
3449
for idx, (sub_key, text, num_lines) in enumerate(
3450
self._extract_texts(records)):
3452
pb.update('annotating', idx, len(records))
3453
yield sub_key, text, num_lines
3454
for sub_key in ann_keys:
3455
text = self._text_cache[sub_key]
3456
num_lines = len(text) # bad assumption
3457
yield sub_key, text, num_lines
3459
except errors.RetryWithNewPacks, e:
3460
self._vf._access.reload_or_raise(e)
3461
# The cached build_details are no longer valid
3462
self._all_build_details.clear()
3464
def _cache_delta_blocks(self, key, compression_parent, delta, lines):
3465
parent_lines = self._text_cache[compression_parent]
3466
blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
3467
self._matching_blocks[(key, compression_parent)] = blocks
3469
def _expand_record(self, key, parent_keys, compression_parent, record,
3472
if compression_parent:
3473
if compression_parent not in self._content_objects:
3474
# Waiting for the parent
3475
self._pending_deltas.setdefault(compression_parent, []).append(
3476
(key, parent_keys, record, record_details))
3478
# We have the basis parent, so expand the delta
3479
num = self._num_compression_children[compression_parent]
3482
base_content = self._content_objects.pop(compression_parent)
3483
self._num_compression_children.pop(compression_parent)
3485
self._num_compression_children[compression_parent] = num
3486
base_content = self._content_objects[compression_parent]
3487
# It is tempting to want to copy_base_content=False for the last
3488
# child object. However, whenever noeol=False,
3489
# self._text_cache[parent_key] is content._lines. So mutating it
3490
# gives very bad results.
3491
# The alternative is to copy the lines into text cache, but then we
3492
# are copying anyway, so just do it here.
3493
content, delta = self._vf._factory.parse_record(
3494
key, record, record_details, base_content,
3495
copy_base_content=True)
3498
content, _ = self._vf._factory.parse_record(
3499
key, record, record_details, None)
3500
if self._num_compression_children.get(key, 0) > 0:
3501
self._content_objects[key] = content
3502
lines = content.text()
3503
self._text_cache[key] = lines
3504
if delta is not None:
3505
self._cache_delta_blocks(key, compression_parent, delta, lines)
3508
def _get_parent_annotations_and_matches(self, key, text, parent_key):
3509
"""Get the list of annotations for the parent, and the matching lines.
3511
:param text: The opaque value given by _get_needed_texts
3512
:param parent_key: The key for the parent text
3513
:return: (parent_annotations, matching_blocks)
3514
parent_annotations is a list as long as the number of lines in
3516
matching_blocks is a list of (parent_idx, text_idx, len) tuples
3517
indicating which lines match between the two texts
3519
block_key = (key, parent_key)
3520
if block_key in self._matching_blocks:
3521
blocks = self._matching_blocks.pop(block_key)
3522
parent_annotations = self._annotations_cache[parent_key]
3523
return parent_annotations, blocks
3524
return annotate.Annotator._get_parent_annotations_and_matches(self,
3525
key, text, parent_key)
3527
def _process_pending(self, key):
3528
"""The content for 'key' was just processed.
3530
Determine if there is any more pending work to be processed.
3533
if key in self._pending_deltas:
3534
compression_parent = key
3535
children = self._pending_deltas.pop(key)
3536
for child_key, parent_keys, record, record_details in children:
3537
lines = self._expand_record(child_key, parent_keys,
3539
record, record_details)
3540
if self._check_ready_for_annotations(child_key, parent_keys):
3541
to_return.append(child_key)
3542
# Also check any children that are waiting for this parent to be
3544
if key in self._pending_annotation:
3545
children = self._pending_annotation.pop(key)
3546
to_return.extend([c for c, p_keys in children
3547
if self._check_ready_for_annotations(c, p_keys)])
3550
def _check_ready_for_annotations(self, key, parent_keys):
3551
"""return true if this text is ready to be yielded.
3553
Otherwise, this will return False, and queue the text into
3554
self._pending_annotation
3556
for parent_key in parent_keys:
3557
if parent_key not in self._annotations_cache:
3558
# still waiting on at least one parent text, so queue it up
3559
# Note that if there are multiple parents, we need to wait
3561
self._pending_annotation.setdefault(parent_key,
3562
[]).append((key, parent_keys))
3566
def _extract_texts(self, records):
3567
"""Extract the various texts needed based on records"""
3568
# We iterate in the order read, rather than a strict order requested
3569
# However, process what we can, and put off to the side things that
3570
# still need parents, cleaning them up when those parents are
3573
# 1) As 'records' are read, see if we can expand these records into
3574
# Content objects (and thus lines)
3575
# 2) If a given line-delta is waiting on its compression parent, it
3576
# gets queued up into self._pending_deltas, otherwise we expand
3577
# it, and put it into self._text_cache and self._content_objects
3578
# 3) If we expanded the text, we will then check to see if all
3579
# parents have also been processed. If so, this text gets yielded,
3580
# else this record gets set aside into pending_annotation
3581
# 4) Further, if we expanded the text in (2), we will then check to
3582
# see if there are any children in self._pending_deltas waiting to
3583
# also be processed. If so, we go back to (2) for those
3584
# 5) Further again, if we yielded the text, we can then check if that
3585
# 'unlocks' any of the texts in pending_annotations, which should
3586
# then get yielded as well
3587
# Note that both steps 4 and 5 are 'recursive' in that unlocking one
3588
# compression child could unlock yet another, and yielding a fulltext
3589
# will also 'unlock' the children that are waiting on that annotation.
3590
# (Though also, unlocking 1 parent's fulltext, does not unlock a child
3591
# if other parents are also waiting.)
3592
# We want to yield content before expanding child content objects, so
3593
# that we know when we can re-use the content lines, and the annotation
3594
# code can know when it can stop caching fulltexts, as well.
3596
# Children that are missing their compression parent
3598
for (key, record, digest) in self._vf._read_records_iter(records):
3600
details = self._all_build_details[key]
3601
(_, compression_parent, parent_keys, record_details) = details
3602
lines = self._expand_record(key, parent_keys, compression_parent,
3603
record, record_details)
3605
# Pending delta should be queued up
3607
# At this point, we may be able to yield this content, if all
3608
# parents are also finished
3609
yield_this_text = self._check_ready_for_annotations(key,
3612
# All parents present
3613
yield key, lines, len(lines)
3614
to_process = self._process_pending(key)
3616
this_process = to_process
3618
for key in this_process:
3619
lines = self._text_cache[key]
3620
yield key, lines, len(lines)
3621
to_process.extend(self._process_pending(key))
3624
from bzrlib._knit_load_data_c import _load_data_c as _load_data
3626
from bzrlib._knit_load_data_py import _load_data_py as _load_data