# Copyright (C) 2005, 2006 by Canonical Ltd
# Written by Martin Pool.
# Modified by Johan Rydberg <jrydberg@gnu.org>
# Modified by Robert Collins <robert.collins@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Knit versionedfile implementation.

A knit is a versioned file implementation that supports efficient append only
updates.

Knit file layout:
lifeless: the data file is made up of "delta records".  each delta record has a delta header
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
the -expanded data- (ie, the delta applied to the parent).  the delta also ends with an
end-marker; simply "end VERSION"

delta can be line or full contents.
... the 8's there are the index number of the annotation.
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
8         e.set('executable', 'yes')
8         if elt.get('executable') == 'yes':
8             ie.executable = True
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad

09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
09:33 < lifeless> right
09:33 < jrydberg> lifeless: the position and size is the range in the data file

so the index sequence is the dictionary compressed sequence number used
in the deltas to provide line annotation
"""
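# Illustrative sketch (hypothetical revision ids) of the dictionary
# compression described above: an index entry is the tuple
# (version id, options, position, size, parents), and any parent that is
# already present in the index is referenced by its sequence number rather
# than by name:
#
#   seq 0: ('rev-1', ['fulltext'],     0, 120, [])
#   seq 1: ('rev-2', ['line-delta'], 120,  73, [0])   # parent 'rev-1' as '0'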

# 10:16 < lifeless> make partial index writes safe
# 10:16 < lifeless> implement 'knit.check()' like weave.check()
# 10:17 < lifeless> record known ghosts so we can detect when they are filled in rather than the current 'reweave'
#
# move sha1 out of the content so that join is faster at verifying parents
# record content length ?

from cStringIO import StringIO
import difflib
from itertools import izip, chain

import bzrlib
import bzrlib.errors as errors
from bzrlib.errors import FileExists, NoSuchFile, KnitError, \
        InvalidRevisionId, KnitCorrupt, KnitHeaderError, \
        RevisionNotPresent, RevisionAlreadyPresent
from bzrlib.tuned_gzip import *
from bzrlib.trace import mutter
from bzrlib.osutils import contains_whitespace, contains_linebreaks, \
     sha_strings
from bzrlib.versionedfile import VersionedFile, InterVersionedFile
from bzrlib.tsort import topo_sort
import bzrlib.ui
import bzrlib.weave

# TODO: Split out code specific to this format into an associated object.

# TODO: Can we put in some kind of value to check that the index and data
# files belong together?

# TODO: accommodate binaries, perhaps by storing a byte count

# TODO: function to check whole file

# TODO: atomically append data, then measure backwards from the cursor
# position after writing to work out where it was located.  we may need to
# bypass python file buffering.

DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
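# A knit named 'foo' is therefore stored on disk as the pair of files
# 'foo.knit' (the data file) and 'foo.kndx' (the index file).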


class KnitContent(object):
    """Content of a knit version to which deltas can be applied."""

    def __init__(self, lines):
        self._lines = lines

    def annotate_iter(self):
        """Yield tuples of (origin, text) for each content line."""
        for origin, text in self._lines:
            yield origin, text

    def annotate(self):
        """Return a list of (origin, text) tuples."""
        return list(self.annotate_iter())

    def line_delta_iter(self, new_lines):
        """Generate line-based delta from this content to new_lines."""
        new_texts = [text for origin, text in new_lines._lines]
        old_texts = [text for origin, text in self._lines]
        s = SequenceMatcher(None, old_texts, new_texts)
        for op in s.get_opcodes():
            if op[0] == 'equal':
                continue
            # ofrom, oto, length, data
            yield (op[1], op[2], op[4]-op[3], new_lines._lines[op[3]:op[4]])

    def line_delta(self, new_lines):
        return list(self.line_delta_iter(new_lines))

    def text(self):
        return [text for origin, text in self._lines]
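    # A minimal sketch of what line_delta() produces (hypothetical ids):
    # each hunk is (start, end, count, new_lines), meaning "replace old
    # lines [start:end] with these count new annotated lines":
    #
    #   old = KnitContent([('rev-1', 'a\n'), ('rev-1', 'b\n')])
    #   new = KnitContent([('rev-1', 'a\n'), ('rev-2', 'c\n')])
    #   old.line_delta(new) == [(1, 2, 1, [('rev-2', 'c\n')])]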


class _KnitFactory(object):
    """Base factory for creating content objects."""

    def make(self, lines, version):
        num_lines = len(lines)
        return KnitContent(zip([version] * num_lines, lines))


class KnitAnnotateFactory(_KnitFactory):
    """Factory for creating annotated Content objects."""

    annotated = True

    def parse_fulltext(self, content, version):
        """Convert fulltext to internal representation.

        fulltext content is of the format
        revid(utf8) plaintext\n
        internal representation is of the format:
        (revid, plaintext)
        """
        lines = []
        for line in content:
            origin, text = line.split(' ', 1)
            lines.append((origin.decode('utf-8'), text))
        return KnitContent(lines)

    def parse_line_delta_iter(self, lines):
        # version is not used by the annotated parser, so pass None.
        for result_item in self.parse_line_delta(lines, version=None):
            yield result_item
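    # Sketch of the annotated fulltext wire form parsed above (hypothetical
    # revision ids): one "<origin-revid> <text>" pair per stored line, e.g.
    #
    #   rev-1 hello\n
    #   rev-2 world\n
    #
    # which parse_fulltext() reads back as
    # [('rev-1', 'hello\n'), ('rev-2', 'world\n')]; lower_fulltext() inverts it.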

    def parse_line_delta(self, lines, version):
        """Convert a line based delta into internal representation.

        line delta is in the form of:
        intstart,intend,intcount
        1..count lines:
        revid(utf8) newline\n
        internal representation is
        (start, end, count, [1..count tuples (revid, newline)])
        """
        result = []
        lines = iter(lines)
        next = lines.next
        # walk through the lines parsing.
        for header in lines:
            start, end, count = [int(n) for n in header.split(',')]
            contents = []
            for i in xrange(count):
                origin, text = next().split(' ', 1)
                contents.append((origin.decode('utf-8'), text))
            result.append((start, end, count, contents))
        return result

    def lower_fulltext(self, content):
        """convert a fulltext content record into a serializable form.

        see parse_fulltext which this inverts.
        """
        return ['%s %s' % (o.encode('utf-8'), t) for o, t in content._lines]

    def lower_line_delta(self, delta):
        """convert a delta into a serializable form.

        See parse_line_delta which this inverts.
        """
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            for origin, text in lines:
                out.append('%s %s' % (origin.encode('utf-8'), text))
        return out
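    # e.g. (hypothetical ids) a single hunk replacing old lines 1..2 with one
    # new line serializes as:
    #
    #   1,2,1\n
    #   rev-2 world\n
    #
    # and parse_line_delta() turns it back into (1, 2, 1, [('rev-2', 'world\n')]).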


class KnitPlainFactory(_KnitFactory):
    """Factory for creating plain Content objects."""

    annotated = False

    def parse_fulltext(self, content, version):
        """This parses an unannotated fulltext.

        Note that this is not a noop - the internal representation
        has (versionid, line) - it's just a constant versionid.
        """
        return self.make(content, version)

    def parse_line_delta_iter(self, lines, version):
        while lines:
            header = lines.pop(0)
            start, end, c = [int(n) for n in header.split(',')]
            yield start, end, c, zip([version] * c, lines[:c])
            lines = lines[c:]

    def parse_line_delta(self, lines, version):
        return list(self.parse_line_delta_iter(lines, version))

    def lower_fulltext(self, content):
        return content.text()

    def lower_line_delta(self, delta):
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend([text for origin, text in lines])
        return out


def make_empty_knit(transport, relpath):
    """Construct an empty knit at the specified location."""
    k = KnitVersionedFile(relpath, transport, access_mode='w',
                          factory=KnitPlainFactory(), create=True)
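# Hedged usage sketch (the LocalTransport path below is an assumption for
# illustration only):
#
#   from bzrlib.transport.local import LocalTransport
#   t = LocalTransport('/tmp/store')
#   make_empty_knit(t, 'example')   # creates example.knit and example.kndx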


class KnitVersionedFile(VersionedFile):
    """Weave-like structure with faster random access.

    A knit stores a number of texts and a summary of the relationships
    between them.  Texts are identified by a string version-id.  Texts
    are normally stored and retrieved as a series of lines, but can
    also be passed as single strings.

    Lines are stored with the trailing newline (if any) included, to
    avoid special cases for files with no final newline.  Lines are
    composed of 8-bit characters, not unicode.  The combination of
    these approaches should mean any 'binary' file can be safely
    stored and retrieved.
    """

    def __init__(self, relpath, transport, file_mode=None, access_mode=None, factory=None,
                 basis_knit=None, delta=True, create=False):
        """Construct a knit at location specified by relpath.

        :param create: If not True, only open an existing knit.
        """
        if access_mode is None:
            access_mode = 'w'
        super(KnitVersionedFile, self).__init__(access_mode)
        assert access_mode in ('r', 'w'), "invalid mode specified %r" % access_mode
        assert not basis_knit or isinstance(basis_knit, KnitVersionedFile), \
            type(basis_knit)

        self.transport = transport
        self.filename = relpath
        self.basis_knit = basis_knit
        self.factory = factory or KnitAnnotateFactory()
        self.writable = (access_mode == 'w')
        self.delta = delta

        self._index = _KnitIndex(transport, relpath + INDEX_SUFFIX,
            access_mode, create=create, file_mode=file_mode)
        self._data = _KnitData(transport, relpath + DATA_SUFFIX,
            access_mode, create=create and not len(self), file_mode=file_mode)
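    # Sketch of round-tripping a text through a knit (assuming a transport
    # object `t` as in the make_empty_knit() example above):
    #
    #   knit = KnitVersionedFile('example', t, access_mode='w', create=True)
    #   knit.add_lines('rev-1', [], ['hello\n'])
    #   knit.get_lines('rev-1')   # -> ['hello\n']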

    def _add_delta(self, version_id, parents, delta_parent, sha1, noeol, delta):
        """See VersionedFile._add_delta()."""
        self._check_add(version_id, []) # should we check the lines ?
        self._check_versions_present(parents)
        present_parents = []
        ghosts = []
        for parent in parents:
            if not self.has_version(parent):
                ghosts.append(parent)
            else:
                present_parents.append(parent)

        if delta_parent is None:
            # reconstitute as full text.
            assert len(delta) == 1 or len(delta) == 0
            if len(delta):
                assert delta[0][0] == 0
                assert delta[0][1] == 0, delta[0][1]
            return super(KnitVersionedFile, self)._add_delta(version_id,
                                                             parents,
                                                             delta_parent,
                                                             sha1,
                                                             noeol,
                                                             delta)

        options = []
        if noeol:
            options.append('no-eol')

        if delta_parent is not None:
            # determine the current delta chain length.
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas.  This should minimize both
            # I/O and the time spend applying deltas.
            count = 0
            delta_parents = [delta_parent]
            while count < 25:  # walk a bounded distance up the chain (cap assumed)
                parent = delta_parents[0]
                method = self._index.get_method(parent)
                if method == 'fulltext':
                    break
                delta_parents = self._index.get_parents(parent)
                count = count + 1
            if method == 'line-delta':
                # did not find a fulltext in the delta limit.
                # just do a normal insertion.
                return super(KnitVersionedFile, self)._add_delta(version_id,
                                                                 parents,
                                                                 delta_parent,
                                                                 sha1,
                                                                 noeol,
                                                                 delta)

        options.append('line-delta')
        store_lines = self.factory.lower_line_delta(delta)

        where, size = self._data.add_record(version_id, sha1, store_lines)
        self._index.add_version(version_id, options, where, size, parents)

    def _add_raw_records(self, records, data):
        """Add all the records 'records' with data pre-joined in 'data'.

        :param records: A list of tuples(version_id, options, parents, size).
        :param data: The data for the records. When it is written, the records
                     are adjusted to have pos pointing into data by the sum of
                     the preceding records sizes.
        """
        # write all the data
        pos = self._data.add_raw_record(data)
        index_entries = []
        for (version_id, options, parents, size) in records:
            index_entries.append((version_id, options, pos, size, parents))
            pos += size
        self._index.add_versions(index_entries)
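    # e.g. two records of sizes 10 and 20 appended when the data file ends at
    # byte P are indexed at positions P and P+10 respectively.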

    def clear_cache(self):
        """Clear the data cache only."""
        self._data.clear_cache()

    def copy_to(self, name, transport):
        """See VersionedFile.copy_to()."""
        # copy the current index to a temp index to avoid racing with local
        # writes
        transport.put(name + INDEX_SUFFIX + '.tmp', self.transport.get(self._index._filename))
        # copy the data file
        transport.put(name + DATA_SUFFIX, self._data._open_file())
        # rename the copied index into place
        transport.rename(name + INDEX_SUFFIX + '.tmp', name + INDEX_SUFFIX)

    def create_empty(self, name, transport, mode=None):
        return KnitVersionedFile(name, transport, factory=self.factory, delta=self.delta, create=True)

    def _fix_parents(self, version, new_parents):
        """Fix the parents list for version.

        This is done by appending a new version to the index
        with identical data except for the parents list.
        The parents list must be a superset of the current
        list.
        """
        current_values = self._index._cache[version]
        assert set(current_values[4]).difference(set(new_parents)) == set()
        self._index.add_version(version,
                                current_values[1],
                                current_values[2],
                                current_values[3],
                                new_parents)

    def get_delta(self, version_id):
        """Get a delta for constructing version from some other version."""
        if not self.has_version(version_id):
            raise RevisionNotPresent(version_id, self.filename)

        parents = self.get_parents(version_id)
        if len(parents):
            parent = parents[0]
        else:
            parent = None
        data_pos, data_size = self._index.get_position(version_id)
        data, sha1 = self._data.read_records(((version_id, data_pos, data_size),))[version_id]
        version_idx = self._index.lookup(version_id)
        noeol = 'no-eol' in self._index.get_options(version_id)
        if 'fulltext' == self._index.get_method(version_id):
            new_content = self.factory.parse_fulltext(data, version_idx)
            if parent is not None:
                reference_content = self._get_content(parent)
                old_texts = reference_content.text()
            else:
                old_texts = []
            new_texts = new_content.text()
            delta_seq = SequenceMatcher(None, old_texts, new_texts)
            return parent, sha1, noeol, self._make_line_delta(delta_seq, new_content)
        else:
            delta = self.factory.parse_line_delta(data, version_idx)
            return parent, sha1, noeol, delta

    def get_graph_with_ghosts(self):
        """See VersionedFile.get_graph_with_ghosts()."""
        graph_items = self._index.get_graph()
        return dict(graph_items)

    def get_sha1(self, version_id):
        """See VersionedFile.get_sha1()."""
        components = self._get_components(version_id)
        return components[-1][-1][-1]
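    # _get_components() returns [(version_id, method, (content, digest)), ...],
    # so the expression above picks the digest of the last component applied,
    # i.e. the sha1 of the fully reconstructed text.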

    @staticmethod
    def get_suffixes():
        """See VersionedFile.get_suffixes()."""
        return [DATA_SUFFIX, INDEX_SUFFIX]

    def has_ghost(self, version_id):
        """True if there is a ghost reference in the file to version_id."""
        # maybe we have it
        if self.has_version(version_id):
            return False
        # optimisable if needed by memoising the _ghosts set.
        items = self._index.get_graph()
        for node, parents in items:
            for parent in parents:
                if parent not in self._index._cache:
                    if parent == version_id:
                        return True
        return False

    def versions(self):
        """See VersionedFile.versions."""
        return self._index.get_versions()

    def has_version(self, version_id):
        """See VersionedFile.has_version."""
        return self._index.has_version(version_id)

    __contains__ = has_version

    def _merge_annotations(self, content, parents, parent_texts={},
                           delta=None, annotated=None):
        """Merge annotations for content.  This is done by comparing
        the annotations based on changes to the text.
        """
        if annotated:
            delta_seq = None
            for parent_id in parents:
                merge_content = self._get_content(parent_id, parent_texts)
                seq = SequenceMatcher(None, merge_content.text(), content.text())
                if delta_seq is None:
                    # setup a delta seq to reuse.
                    delta_seq = seq
                for i, j, n in seq.get_matching_blocks():
                    if n == 0:
                        continue
                    # this appears to copy (origin, text) pairs across to the new
                    # content for any line that matches the last-checked parent.
                    # FIXME: save the sequence control data for delta compression
                    # against the most relevant parent rather than rediffing.
                    content._lines[j:j+n] = merge_content._lines[i:i+n]
        if delta:
            if not annotated:
                reference_content = self._get_content(parents[0], parent_texts)
                new_texts = content.text()
                old_texts = reference_content.text()
                delta_seq = SequenceMatcher(None, old_texts, new_texts)
            return self._make_line_delta(delta_seq, content)

    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks
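    # e.g. a hunk (1, 2, 1, [('rev-2', 'c\n')]) says: replace old lines [1:2]
    # with the single new annotated line ('rev-2', 'c\n').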

    def _get_components(self, version_id):
        """Return a list of (version_id, method, data) tuples that
        makes up version specified by version_id of the knit.

        The components should be applied in the order of the returned
        list.

        The basis knit will be used to the largest extent possible
        since it is assumed that accesses to it are faster.
        """
        #profile notes:
        # 4168 calls in 14912, 2289 internal
        # 4168 in 9711 to read_records
        # 52554 in 1250 to get_parents
        # 170166 in 865 to list.append

        # needed_revisions holds a list of (method, version_id) of
        # versions that need to be fetched to construct the final
        # version of the file.
        #
        # basis_revisions is a list of versions that need to be
        # fetched but exist in the basis knit.

        basis = self.basis_knit
        needed_versions = []
        basis_versions = []
        cursor = version_id

        while 1:
            picked_knit = self
            if basis and basis._index.has_version(cursor):
                picked_knit = basis
                basis_versions.append(cursor)
            method = picked_knit._index.get_method(cursor)
            needed_versions.append((method, cursor))
            if method == 'fulltext':
                break
            cursor = picked_knit.get_parents(cursor)[0]

        components = {}
        if basis_versions:
            records = []
            for comp_id in basis_versions:
                data_pos, data_size = basis._index.get_position(comp_id)
                records.append((comp_id, data_pos, data_size))
            components.update(basis._data.read_records(records))

        records = []
        for comp_id in [vid for method, vid in needed_versions
                        if vid not in basis_versions]:
            data_pos, data_size = self._index.get_position(comp_id)
            records.append((comp_id, data_pos, data_size))
        components.update(self._data.read_records(records))

        # get_data_records returns a mapping with the version id as
        # index and the value as data.  The order the components need
        # to be applied is held by needed_versions (reversed).
        out = []
        for method, comp_id in reversed(needed_versions):
            out.append((comp_id, method, components[comp_id]))
        return out

    def _get_content(self, version_id, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        if not self.has_version(version_id):
            raise RevisionNotPresent(version_id, self.filename)

        cached_version = parent_texts.get(version_id, None)
        if cached_version is not None:
            return cached_version

        if self.basis_knit and version_id in self.basis_knit:
            return self.basis_knit._get_content(version_id)

        content = None
        components = self._get_components(version_id)
        for component_id, method, (data, digest) in components:
            version_idx = self._index.lookup(component_id)
            if method == 'fulltext':
                assert content is None
                content = self.factory.parse_fulltext(data, version_idx)
            elif method == 'line-delta':
                delta = self.factory.parse_line_delta(data, version_idx)
                content._lines = self._apply_delta(content._lines, delta)

        if 'no-eol' in self._index.get_options(version_id):
            line = content._lines[-1][1].rstrip('\n')
            content._lines[-1] = (content._lines[-1][0], line)

        # digest here is the digest from the last applied component.
        if sha_strings(content.text()) != digest:
            raise KnitCorrupt(self.filename, 'sha-1 does not match %s' % version_id)

        return content

    def _check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        version_ids = set(version_ids)
        for r in list(version_ids):
            if self._index.has_version(r):
                version_ids.remove(r)
        if version_ids:
            raise RevisionNotPresent(list(version_ids)[0], self.filename)

    def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts):
        """See VersionedFile.add_lines_with_ghosts()."""
        self._check_add(version_id, lines)
        return self._add(version_id, lines[:], parents, self.delta, parent_texts)

    def _add_lines(self, version_id, parents, lines, parent_texts):
        """See VersionedFile.add_lines."""
        self._check_add(version_id, lines)
        self._check_versions_present(parents)
        return self._add(version_id, lines[:], parents, self.delta, parent_texts)

    def _check_add(self, version_id, lines):
        """check that version_id and lines are safe to add."""
        assert self.writable, "knit is not opened for write"
        ### FIXME escape. RBC 20060228
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id)
        if self.has_version(version_id):
            raise RevisionAlreadyPresent(version_id, self.filename)
        self._check_lines_not_unicode(lines)
        self._check_lines_are_lines(lines)

    def _add(self, version_id, lines, parents, delta, parent_texts):
        """Add a set of lines on top of version specified by parents.

        If delta is true, compress the text as a line-delta against
        the first present parent.

        Any versions not present will be converted into ghosts.
        """
        #  461    0   6546.0390     43.9100   bzrlib.knit:489(_add)
        # +400    0    889.4890    418.9790   +bzrlib.knit:192(lower_fulltext)
        # +461    0   1364.8070    108.8030   +bzrlib.knit:996(add_record)
        # +461    0    193.3940     41.5720   +bzrlib.knit:898(add_version)
        # +461    0    134.0590     18.3810   +bzrlib.osutils:361(sha_strings)
        # +461    0     36.3420     15.4540   +bzrlib.knit:146(make)
        # +1383   0      8.0370      8.0370   +<len>
        # +61     0     13.5770      7.9190   +bzrlib.knit:199(lower_line_delta)
        # +61     0    963.3470      7.8740   +bzrlib.knit:427(_get_content)
        # +61     0    973.9950      5.2950   +bzrlib.knit:136(line_delta)
        # +61     0   1918.1800      5.2640   +bzrlib.knit:359(_merge_annotations)

        present_parents = []
        ghosts = []
        if parent_texts is None:
            parent_texts = {}
        for parent in parents:
            if not self.has_version(parent):
                ghosts.append(parent)
            else:
                present_parents.append(parent)

        if delta and not len(present_parents):
            delta = False

        digest = sha_strings(lines)
        options = []
        if lines[-1][-1] != '\n':
            options.append('no-eol')
            lines[-1] = lines[-1] + '\n'

        if len(present_parents) and delta:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas.  This should minimize both
            # I/O and the time spend applying deltas.
            count = 0
            delta_parents = present_parents
            while count < 25:  # walk a bounded distance up the chain (cap assumed)
                parent = delta_parents[0]
                method = self._index.get_method(parent)
                if method == 'fulltext':
                    break
                delta_parents = self._index.get_parents(parent)
                count = count + 1
            if method == 'line-delta':
                # no fulltext within the limit; store a fulltext instead.
                delta = False

        lines = self.factory.make(lines, version_id)
        if delta or (self.factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(lines, present_parents, parent_texts,
                                                  delta, self.factory.annotated)

        if delta:
            options.append('line-delta')
            store_lines = self.factory.lower_line_delta(delta_hunks)
        else:
            options.append('fulltext')
            store_lines = self.factory.lower_fulltext(lines)

        where, size = self._data.add_record(version_id, digest, store_lines)
        self._index.add_version(version_id, options, where, size, parents)

    def check(self, progress_bar=None):
        """See VersionedFile.check()."""

    def _clone_text(self, new_version_id, old_version_id, parents):
        """See VersionedFile.clone_text()."""
        # FIXME RBC 20060228 make fast by only inserting an index with null delta.
        self.add_lines(new_version_id, parents, self.get_lines(old_version_id))

    def get_lines(self, version_id):
        """See VersionedFile.get_lines()."""
        return self._get_content(version_id).text()

    def iter_lines_added_or_present_in_versions(self, version_ids=None):
        """See VersionedFile.iter_lines_added_or_present_in_versions()."""
        if version_ids is None:
            version_ids = self.versions()
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need version_id, position, length
        version_id_records = []
        requested_versions = list(version_ids)
        # filter for available versions
        for version_id in requested_versions:
            if not self.has_version(version_id):
                raise RevisionNotPresent(version_id, self.filename)
        # get an in-component-order queue:
        version_ids = []
        for version_id in self.versions():
            if version_id in requested_versions:
                version_ids.append(version_id)
                data_pos, length = self._index.get_position(version_id)
                version_id_records.append((version_id, data_pos, length))

        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            count = 0
            total = len(version_id_records)
            pb.update('Walking content.', count, total)
            for version_id, data, sha_value in \
                self._data.read_records_iter(version_id_records):
                pb.update('Walking content.', count, total)
                count = count + 1
                method = self._index.get_method(version_id)
                version_idx = self._index.lookup(version_id)
                assert method in ('fulltext', 'line-delta')
                if method == 'fulltext':
                    content = self.factory.parse_fulltext(data, version_idx)
                    for line in content.text():
                        yield line
                else:
                    delta = self.factory.parse_line_delta(data, version_idx)
                    for start, end, count, lines in delta:
                        for origin, line in lines:
                            yield line
            pb.update('Walking content.', total, total)
            pb.finished()
        except:
            pb.update('Walking content.', total, total)
            pb.finished()
            raise

    def num_versions(self):
        """See VersionedFile.num_versions()."""
        return self._index.num_versions()

    __len__ = num_versions

    def annotate_iter(self, version_id):
        """See VersionedFile.annotate_iter."""
        content = self._get_content(version_id)
        for origin, text in content.annotate_iter():
            yield origin, text

    def get_parents(self, version_id):
        """See VersionedFile.get_parents."""
        # perf notes:
        # 52554 calls in 1264 872 internal down from 3674
        try:
            return self._index.get_parents(version_id)
        except KeyError:
            raise RevisionNotPresent(version_id, self.filename)

    def get_parents_with_ghosts(self, version_id):
        """See VersionedFile.get_parents."""
        try:
            return self._index.get_parents_with_ghosts(version_id)
        except KeyError:
            raise RevisionNotPresent(version_id, self.filename)

    def get_ancestry(self, versions):
        """See VersionedFile.get_ancestry."""
        if isinstance(versions, basestring):
            versions = [versions]
        if not versions:
            return []
        self._check_versions_present(versions)
        return self._index.get_ancestry(versions)

    def get_ancestry_with_ghosts(self, versions):
        """See VersionedFile.get_ancestry_with_ghosts."""
        if isinstance(versions, basestring):
            versions = [versions]
        if not versions:
            return []
        self._check_versions_present(versions)
        return self._index.get_ancestry_with_ghosts(versions)

    #@deprecated_method(zero_eight)
    def walk(self, version_ids):
        """See VersionedFile.walk."""
        # We take the short path here, and extract all relevant texts
        # and put them in a weave and let that do all the work.  Far
        # from optimal, but is much simpler.
        # FIXME RB 20060228 this really is inefficient!
        from bzrlib.weave import Weave

        w = Weave(self.filename)
        ancestry = self.get_ancestry(version_ids)
        sorted_graph = topo_sort(self._index.get_graph())
        version_list = [vid for vid in sorted_graph if vid in ancestry]

        for version_id in version_list:
            lines = self.get_lines(version_id)
            w.add_lines(version_id, self.get_parents(version_id), lines)

        for lineno, insert_id, dset, line in w.walk(version_ids):
            yield lineno, insert_id, dset, line

    def plan_merge(self, ver_a, ver_b):
        """See VersionedFile.plan_merge."""
        ancestors_b = set(self.get_ancestry(ver_b))
        def status_a(revision, text):
            if revision in ancestors_b:
                return 'killed-b', text
            else:
                return 'new-a', text

        ancestors_a = set(self.get_ancestry(ver_a))
        def status_b(revision, text):
            if revision in ancestors_a:
                return 'killed-a', text
            else:
                return 'new-b', text

        annotated_a = self.annotate(ver_a)
        annotated_b = self.annotate(ver_b)
        plain_a = [t for (a, t) in annotated_a]
        plain_b = [t for (a, t) in annotated_b]
        blocks = SequenceMatcher(None, plain_a, plain_b).get_matching_blocks()
        a_cur = 0
        b_cur = 0
        for ai, bi, l in blocks:
            # process all mismatched sections
            # (last mismatched section is handled because blocks always
            # includes a 0-length last block)
            for revision, text in annotated_a[a_cur:ai]:
                yield status_a(revision, text)
            for revision, text in annotated_b[b_cur:bi]:
                yield status_b(revision, text)

            # and now the matched section
            a_cur = ai + l
            b_cur = bi + l
            for text_a, text_b in zip(plain_a[ai:a_cur], plain_b[bi:b_cur]):
                assert text_a == text_b
                yield "unchanged", text_a


class _KnitComponentFile(object):
    """One of the files used to implement a knit database."""

    def __init__(self, transport, filename, mode, file_mode=None):
        self._transport = transport
        self._filename = filename
        self._mode = mode
        self._file_mode = file_mode

    def write_header(self):
        if self._transport.append(self._filename, StringIO(self.HEADER),
            mode=self._file_mode):
            raise KnitCorrupt(self._filename, 'misaligned after writing header')

    def check_header(self, fp):
        line = fp.readline()
        if line != self.HEADER:
            raise KnitHeaderError(badline=line)

    def commit(self):
        """Commit is a nop."""

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._filename)


class _KnitIndex(_KnitComponentFile):
    """Manages knit index file.

    The index is kept in memory and read on startup, to enable
    fast lookups of revision information.  The cursor of the index
    file is always pointing to the end, making it easy to append
    entries.

    _cache is a cache for fast mapping from version id to a Index
    object.

    _history is a cache for fast mapping from indexes to version ids.

    The index data format is dictionary compressed when it comes to
    parent references; an index entry may only have parents with a
    lower index number.  As a result, the index is topologically sorted.

    Duplicate entries may be written to the index for a single version id
    if this is done then the latter one completely replaces the former:
    this allows updates to correct version and parent information.
    Note that the two entries may share the delta, and that successive
    annotations and references MUST point to the first entry.

    The index file on disc contains a header, followed by one line per knit
    record.  The same revision can be present in an index file more than once.
    The first occurrence gets assigned a sequence number starting from 0.

    The format of a single line is
    REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
    REVISION_ID is a utf8-encoded revision id
    FLAGS is a comma separated list of flags about the record. Values include
        no-eol, line-delta, fulltext.
    BYTE_OFFSET is the ascii representation of the byte offset in the data file
        that the compressed data starts at.
    LENGTH is the ascii representation of the length of the record's data in
        the data file.
    PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
        REVISION_ID.
    PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
        revision id already in the knit that is a parent of REVISION_ID.
    The ' :' marker is the end of record marker.

    partial writes:
    When a write is interrupted to the index file, it will result in a line that
    does not end in ' :'. If the ' :' is not present at the end of a line, or at
    the end of the file, then the record that is missing it will be ignored by
    the parser.

    When writing new records to the index file, the data is preceded by '\n'
    to ensure that records always start on new lines even if the last write was
    interrupted. As a result it's normal for the last line in the index to be
    missing a trailing newline. One can be added with no harmful effects.
    """
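    # Example (hypothetical ids): a line-delta record 73 bytes long starting
    # at byte 120 of the data file, whose first parent is the entry at
    # sequence number 0 and whose second parent is a ghost:
    #
    #   rev-2 line-delta 120 73 0 .ghost-rev :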

    HEADER = "# bzr knit index 8\n"

    # speed of knit parsing went from 280 ms to 280 ms with slots addition.
    # __slots__ = ['_cache', '_history', '_transport', '_filename']

    def _cache_version(self, version_id, options, pos, size, parents):
        """Cache a version record in the history array and index cache.

        This is inlined into __init__ for performance. KEEP IN SYNC.
        (It saves 60ms, 25% of the __init__ overhead on local 4000 record
        indices).
        """
        # only want the _history index to reference the 1st index entry
        # for version_id
        if version_id not in self._cache:
            index = len(self._history)
            self._history.append(version_id)
        else:
            index = self._cache[version_id][5]
        self._cache[version_id] = (version_id,
                                   options,
                                   pos,
                                   size,
                                   parents,
                                   index)

    def __init__(self, transport, filename, mode, create=False, file_mode=None):
        _KnitComponentFile.__init__(self, transport, filename, mode, file_mode)
        self._cache = {}
        # position in _history is the 'official' index for a revision
        # but the values may have come from a newer entry.
        # so - wc -l of a knit index is != the number of unique names
        # in it.
        self._history = []
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            count = 0
            total = 1
            try:
                pb.update('read knit index', count, total)
                fp = self._transport.get(self._filename)
                self.check_header(fp)
                # readlines reads the whole file at once:
                # bad for transports like http, good for local disk
                # we save 60 ms doing this one change (
                # from calling readline each time to calling
                # readlines once).
                # probably what we want for nice behaviour on
                # http is an incremental readlines that yields, or
                # a check for local vs non local indexes,
                for l in fp.readlines():
                    rec = l.split()
                    if len(rec) < 5 or rec[-1] != ':':
                        # corrupt line.
                        # FIXME: in the future we should determine if its a
                        # short write - and ignore it
                        # or a different failure, and raise. RBC 20060407
                        continue
                    count += 1
                    total += 1
                    #pb.update('read knit index', count, total)
                    # See self._parse_parents
                    parents = []
                    for value in rec[4:-1]:
                        if value[0] == '.':
                            # uncompressed reference
                            parents.append(value[1:])
                        else:
                            # this is 15/4000ms faster than isinstance,
                            # this function is called thousands of times a
                            # second so small variations add up.
                            assert value.__class__ is str
                            parents.append(self._history[int(value)])
                    # end self._parse_parents
                    # self._cache_version(rec[0],
                    #                     rec[1].split(','),
                    #                     int(rec[2]),
                    #                     int(rec[3]),
                    #                     parents)
                    # --- self._cache_version
                    # only want the _history index to reference the 1st
                    # index entry for version_id
                    version_id = rec[0]
                    if version_id not in self._cache:
                        index = len(self._history)
                        self._history.append(version_id)
                    else:
                        index = self._cache[version_id][5]
                    self._cache[version_id] = (version_id,
                                               rec[1].split(','),
                                               int(rec[2]),
                                               int(rec[3]),
                                               parents,
                                               index)
                    # --- self._cache_version
            except NoSuchFile, e:
                if mode != 'w' or not create:
                    raise
                self.write_header()
        finally:
            pb.update('read knit index', total, total)
            pb.finished()

    def _parse_parents(self, compressed_parents):
        """convert a list of string parent values into version ids.

        ints are looked up in the index.
        .FOO values are ghosts and converted into FOO.

        NOTE: the function is retained here for clarity, and for possible
              use in partial index reads. However bulk processing now has
              it inlined in __init__ for inner-loop optimisation.
        """
        result = []
        for value in compressed_parents:
            if value[0] == '.':
                # uncompressed reference
                result.append(value[1:])
            else:
                # this is 15/4000ms faster than isinstance,
                # this function is called thousands of times a
                # second so small variations add up.
                assert value.__class__ is str
                result.append(self._history[int(value)])
        return result

    def get_graph(self):
        graph = []
        for version_id, index in self._cache.iteritems():
            graph.append((version_id, index[4]))
        return graph

    def get_ancestry(self, versions):
        """See VersionedFile.get_ancestry."""
        # get a graph of all the mentioned versions:
        graph = {}
        pending = set(versions)
        while len(pending):
            version = pending.pop()
            parents = self._cache[version][4]
            # got the parents ok
            # trim ghosts
            parents = [parent for parent in parents if parent in self._cache]
            for parent in parents:
                # if not completed and not a ghost
                if parent not in graph:
                    pending.add(parent)
            graph[version] = parents
        return topo_sort(graph.items())

    def get_ancestry_with_ghosts(self, versions):
        """See VersionedFile.get_ancestry_with_ghosts."""
        # get a graph of all the mentioned versions:
        graph = {}
        pending = set(versions)
        while len(pending):
            version = pending.pop()
            try:
                parents = self._cache[version][4]
            except KeyError:
                # ghost, fake it
                graph[version] = []
            else:
                # got the parents ok
                for parent in parents:
                    if parent not in graph:
                        pending.add(parent)
                graph[version] = parents
        return topo_sort(graph.items())

    def num_versions(self):
        return len(self._history)

    __len__ = num_versions

    def get_versions(self):
        return self._history

    def idx_to_name(self, idx):
        return self._history[idx]

    def lookup(self, version_id):
        assert version_id in self._cache
        return self._cache[version_id][5]

    def _version_list_to_index(self, versions):
        result_list = []
        for version in versions:
            if version in self._cache:
                # -- inlined lookup() --
                result_list.append(str(self._cache[version][5]))
                # -- end lookup() --
            else:
                result_list.append('.' + version.encode('utf-8'))
        return ' '.join(result_list)
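    # e.g. for a parent already indexed at sequence 3 plus a ghost parent,
    # _version_list_to_index(['known-rev', 'ghost-rev']) -> '3 .ghost-rev'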

    def add_version(self, version_id, options, pos, size, parents):
        """Add a version record to the index."""
        self.add_versions(((version_id, options, pos, size, parents),))

    def add_versions(self, versions):
        """Add multiple versions to the index.

        :param versions: a list of tuples:
                         (version_id, options, pos, size, parents).
        """
        lines = []
        for version_id, options, pos, size, parents in versions:
            line = "\n%s %s %s %s %s :" % (version_id.encode('utf-8'),
                                           ','.join(options),
                                           pos,
                                           size,
                                           self._version_list_to_index(parents))
            assert isinstance(line, str), \
                'content must be utf-8 encoded: %r' % (line,)
            lines.append(line)
        self._transport.append(self._filename, StringIO(''.join(lines)))
        # cache after writing, so that a failed write leads to missing cache
        # entries not extra ones. XXX TODO: RBC 20060502 in the event of a
        # failure, reload the index or flush it or some such, to prevent
        # writing records that did complete twice.
        for version_id, options, pos, size, parents in versions:
            self._cache_version(version_id, options, pos, size, parents)

    def has_version(self, version_id):
        """True if the version is in the index."""
        return self._cache.has_key(version_id)

    def get_position(self, version_id):
        """Return data position and size of specified version."""
        return (self._cache[version_id][2],
                self._cache[version_id][3])

    def get_method(self, version_id):
        """Return compression method of specified version."""
        options = self._cache[version_id][1]
        if 'fulltext' in options:
            return 'fulltext'
        else:
            assert 'line-delta' in options
            return 'line-delta'

    def get_options(self, version_id):
        return self._cache[version_id][1]

    def get_parents(self, version_id):
        """Return parents of specified version ignoring ghosts."""
        return [parent for parent in self._cache[version_id][4]
                if parent in self._cache]

    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        return self._cache[version_id][4]

    def check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        version_ids = set(version_ids)
        for version_id in list(version_ids):
            if version_id in self._cache:
                version_ids.remove(version_id)
        if version_ids:
            raise RevisionNotPresent(list(version_ids)[0], self._filename)


class _KnitData(_KnitComponentFile):
    """Contents of the knit data file."""

    HEADER = "# bzr knit data 8\n"

    def __init__(self, transport, filename, mode, create=False, file_mode=None):
        _KnitComponentFile.__init__(self, transport, filename, mode, file_mode)
        self._file = None
        self._checked = False
        if create:
            self._transport.put(self._filename, StringIO(''), mode=file_mode)
        self._records = {}

    def clear_cache(self):
        """Clear the record cache."""
        self._records = {}

    def _open_file(self):
        if self._file is None:
            try:
                self._file = self._transport.get(self._filename)
            except:
                pass
        return self._file

    def _record_to_data(self, version_id, digest, lines):
        """Convert version_id, digest, lines into a raw data block.

        :return: (len, a StringIO instance with the raw data ready to read.)
        """
        sio = StringIO()
        data_file = GzipFile(None, mode='wb', fileobj=sio)
        data_file.writelines(chain(
            ["version %s %d %s\n" % (version_id.encode('utf-8'),
                                     len(lines),
                                     digest)],
            lines,
            ["end %s\n" % version_id.encode('utf-8')]))
        data_file.close()
        length = sio.tell()
        sio.seek(0)
        return length, sio

    def add_raw_record(self, raw_data):
        """Append a prepared record to the data file.

        :return: the offset in the data file raw_data was written.
        """
        assert isinstance(raw_data, str), 'data must be plain bytes'
        return self._transport.append(self._filename, StringIO(raw_data))

    def add_record(self, version_id, digest, lines):
        """Write new text record to disk.  Returns the position in the
        file where it was written."""
        size, sio = self._record_to_data(version_id, digest, lines)
        # cache
        self._records[version_id] = (digest, lines)
        # write to disk
        start_pos = self._transport.append(self._filename, sio)
        return start_pos, size

    def _parse_record_header(self, version_id, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream.
                 as (stream, header_record)
        """
        df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
        rec = df.readline().split()
        if len(rec) != 4:
            raise KnitCorrupt(self._filename, 'unexpected number of elements in record header')
        if rec[1].decode('utf-8') != version_id:
            raise KnitCorrupt(self._filename,
                              'unexpected version, wanted %r, got %r' % (
                                version_id, rec[1]))
        return df, rec

    def _parse_record(self, version_id, data):
        # profiling notes:
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df, rec = self._parse_record_header(version_id, data)
        record_contents = df.readlines()
        l = record_contents.pop()
        assert len(record_contents) == int(rec[2])
        if l.decode('utf-8') != 'end %s\n' % version_id:
            raise KnitCorrupt(self._filename, 'unexpected version end line %r, wanted %r'
                              % (l, version_id))
        df.close()
        return record_contents, rec[3]

    def read_records_iter_raw(self, records):
        """Read text records from data file and yield raw data.

        This unpacks enough of the text record to validate the id is
        as expected but that's all.

        It will actively recompress currently cached records on the
        basis that that is cheaper than I/O activity.
        """
        needed_records = []
        for version_id, pos, size in records:
            if version_id not in self._records:
                needed_records.append((version_id, pos, size))

        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(needed_records):
            # grab the disk data needed.
            raw_records = self._transport.readv(self._filename,
                [(pos, size) for version_id, pos, size in needed_records])

        for version_id, pos, size in records:
            if version_id in self._records:
                # compress a new version
                size, sio = self._record_to_data(version_id,
                                                 self._records[version_id][0],
                                                 self._records[version_id][1])
                yield version_id, sio.getvalue()
            else:
                pos, data = raw_records.next()
                # validate the header
                df, rec = self._parse_record_header(version_id, data)
                df.close()
                yield version_id, data

    def read_records_iter(self, records):
        """Read text records from data file and yield result.

        Each passed record is a tuple of (version_id, pos, len) and
        will be read in the given order.  Yields (version_id,
        contents, digest).
        """
        # profiling notes:
        # 60890 calls for 4168 extractions in 5045, 683 internal.
        # 4168 calls to readv in 1411
        # 4168 calls to parse_record in 2880

        needed_records = []
        for version_id, pos, size in records:
            if version_id not in self._records:
                needed_records.append((version_id, pos, size))

        if len(needed_records):
            # We take it that the transport optimizes the fetching as well
            # as possible (ie, reads contiguous ranges.)
            response = self._transport.readv(self._filename,
                [(pos, size) for version_id, pos, size in needed_records])

            for (record_id, pos, size), (pos, data) in izip(iter(needed_records), response):
                content, digest = self._parse_record(record_id, data)
                self._records[record_id] = (digest, content)

        for version_id, pos, size in records:
            yield version_id, list(self._records[version_id][1]), self._records[version_id][0]

    def read_records(self, records):
        """Read records into a dictionary."""
        components = {}
        for record_id, content, digest in self.read_records_iter(records):
            components[record_id] = (content, digest)
        return components


class InterKnit(InterVersionedFile):
    """Optimised code paths for knit to knit operations."""

    _matching_file_from_factory = KnitVersionedFile
    _matching_file_to_factory = KnitVersionedFile

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with knits."""
        try:
            return (isinstance(source, KnitVersionedFile) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False

    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        assert isinstance(self.source, KnitVersionedFile)
        assert isinstance(self.target, KnitVersionedFile)

        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)
            if None in version_ids:
                version_ids.remove(None)

            self.source_ancestry = set(self.source.get_ancestry(version_ids))
            this_versions = set(self.target._index.get_versions())
            needed_versions = self.source_ancestry - this_versions
            cross_check_versions = self.source_ancestry.intersection(this_versions)
            mismatched_versions = set()
            for version in cross_check_versions:
                # scan to include needed parents.
                n1 = set(self.target.get_parents_with_ghosts(version))
                n2 = set(self.source.get_parents_with_ghosts(version))
                if n1 != n2:
                    # FIXME TEST this check for cycles being introduced works
                    # the logic is we have a cycle if in our graph we are an
                    # ancestor of any of the n2 revisions.
                    for parent in n2:
                        if parent in n1:
                            # safe
                            continue
                        parent_ancestors = self.source.get_ancestry(parent)
                        if version in parent_ancestors:
                            raise errors.GraphCycleError([parent, version])
                    # ensure this parent will be available later.
                    new_parents = n2.difference(n1)
                    needed_versions.update(new_parents.difference(this_versions))
                    mismatched_versions.add(version)

            if not needed_versions and not mismatched_versions:
                return 0
            full_list = topo_sort(self.source.get_graph())

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # plan the join:
            copy_queue = []
            copy_queue_records = []
            copy_set = set()
            for version_id in version_list:
                options = self.source._index.get_options(version_id)
                parents = self.source._index.get_parents_with_ghosts(version_id)
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must:
                    # * already have it or
                    # * have it scheduled already
                    # otherwise we don't care
                    assert (self.target.has_version(parent) or
                            parent in copy_set or
                            not self.source.has_version(parent))
                data_pos, data_size = self.source._index.get_position(version_id)
                copy_queue_records.append((version_id, data_pos, data_size))
                copy_queue.append((version_id, options, parents))
                copy_set.add(version_id)

            # data suck the join:
            count = 0
            total = len(version_list)
            raw_datum = []
            raw_records = []
            for (version_id, raw_data), \
                (version_id2, options, parents) in \
                izip(self.source._data.read_records_iter_raw(copy_queue_records),
                     copy_queue):
                assert version_id == version_id2, 'logic error, inconsistent results'
                count = count + 1
                pb.update("Joining knit", count, total)
                raw_records.append((version_id, options, parents, len(raw_data)))
                raw_datum.append(raw_data)
            self.target._add_raw_records(raw_records, ''.join(raw_datum))

            for version in mismatched_versions:
                # FIXME RBC 20060309 is this needed?
                n1 = set(self.target.get_parents_with_ghosts(version))
                n2 = set(self.source.get_parents_with_ghosts(version))
                # write a combined record to our history preserving the current
                # parents as first in the list
                new_parents = self.target.get_parents_with_ghosts(version) + list(n2.difference(n1))
                self.target.fix_parents(version, new_parents)
            return count
        finally:
            pb.finished()


InterVersionedFile.register_optimiser(InterKnit)


class WeaveToKnit(InterVersionedFile):
    """Optimised code paths for weave to knit operations."""

    _matching_file_from_factory = bzrlib.weave.WeaveFile
    _matching_file_to_factory = KnitVersionedFile

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with weaves to knits."""
        try:
            return (isinstance(source, bzrlib.weave.Weave) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False

    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        assert isinstance(self.source, bzrlib.weave.Weave)
        assert isinstance(self.target, KnitVersionedFile)

        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)

            self.source_ancestry = set(self.source.get_ancestry(version_ids))
            this_versions = set(self.target._index.get_versions())
            needed_versions = self.source_ancestry - this_versions
            cross_check_versions = self.source_ancestry.intersection(this_versions)
            mismatched_versions = set()
            for version in cross_check_versions:
                # scan to include needed parents.
                n1 = set(self.target.get_parents_with_ghosts(version))
                n2 = set(self.source.get_parents(version))
                # if all of n2's parents are in n1, then its fine.
                if n2.difference(n1):
                    # FIXME TEST this check for cycles being introduced works
                    # the logic is we have a cycle if in our graph we are an
                    # ancestor of any of the n2 revisions.
                    for parent in n2:
                        if parent in n1:
                            # safe
                            continue
                        parent_ancestors = self.source.get_ancestry(parent)
                        if version in parent_ancestors:
                            raise errors.GraphCycleError([parent, version])
                    # ensure this parent will be available later.
                    new_parents = n2.difference(n1)
                    needed_versions.update(new_parents.difference(this_versions))
                    mismatched_versions.add(version)

            if not needed_versions and not mismatched_versions:
                return 0
            full_list = topo_sort(self.source.get_graph())

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # do the join:
            count = 0
            total = len(version_list)
            for version_id in version_list:
                pb.update("Converting to knit", count, total)
                parents = self.source.get_parents(version_id)
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must already have it
                    assert (self.target.has_version(parent))
                self.target.add_lines(
                    version_id, parents, self.source.get_lines(version_id))
                count = count + 1

            for version in mismatched_versions:
                # FIXME RBC 20060309 is this needed?
                n1 = set(self.target.get_parents_with_ghosts(version))
                n2 = set(self.source.get_parents(version))
                # write a combined record to our history preserving the current
                # parents as first in the list
                new_parents = self.target.get_parents_with_ghosts(version) + list(n2.difference(n1))
                self.target.fix_parents(version, new_parents)
            return count
        finally:
            pb.finished()


InterVersionedFile.register_optimiser(WeaveToKnit)


class SequenceMatcher(difflib.SequenceMatcher):
    """Knit tuned sequence matcher.

    This is based on profiling of difflib which indicated some improvements
    for our usage pattern.
    """

    def find_longest_match(self, alo, ahi, blo, bhi):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi].

        If isjunk is not defined:

        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
            alo <= i <= i+k <= ahi
            blo <= j <= j+k <= bhi
        and for all (i',j',k') meeting those conditions,
            k >= k'
            i <= i'
            and if i == i', j <= j'

        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.

        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        (0, 4, 5)

        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block.  Then that block is extended as
        far as possible by matching (only) junk elements on both sides.  So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.

        Here's the same example as before, but considering blanks to be
        junk.  That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly.  Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:

        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        (1, 0, 4)

        If no blocks match, return (alo, blo, 0).

        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        (0, 0, 0)
        """
        # CAUTION:  stripping common prefix or suffix would be incorrect.
        # E.g.,
        #    ab
        #    acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle.  That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.

        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
        besti, bestj, bestsize = alo, blo, 0
        # find longest junk-free match
        # during an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j]
        j2len = {}
        for i in xrange(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get
            newj2len = {}

            # changing b2j.get(a[i], nothing) to a try:KeyError pair produced the
            # following improvement
            #     704  0   4650.5320   2620.7410   bzrlib.knit:1336(find_longest_match)
            # +326674  0   1655.1210   1655.1210   +<method 'get' of 'dict' objects>
            #  +76519  0    374.6700    374.6700   +<method 'has_key' of 'dict' objects>
            # to
            #     704  0   3733.2820   2209.6520   bzrlib.knit:1336(find_longest_match)
            # +211400  0   1147.3520   1147.3520   +<method 'get' of 'dict' objects>
            #  +76519  0    376.2780    376.2780   +<method 'has_key' of 'dict' objects>
            try:
                js = b2j[a[i]]
            except KeyError:
                pass
            else:
                for j in js:
                    # a[i] matches b[j]
                    if j >= blo:
                        if j >= bhi:
                            break
                        k = newj2len[j] = 1 + j2lenget(-1 + j, 0)
                        if k > bestsize:
                            besti, bestj, bestsize = 1 + i-k, 1 + j-k, k
            j2len = newj2len

        # Extend the best by non-junk elements on each end.  In particular,
        # "popular" non-junk elements aren't in b2j, which greatly speeds
        # the inner loop above, but also means "the best" match so far
        # doesn't contain any junk *or* popular non-junk elements.
        while besti > alo and bestj > blo and \
              not isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              not isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1

        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too.  Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it.  In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1

        return besti, bestj, bestsize
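# Hedged usage sketch: the tuned matcher is a drop-in replacement for
# difflib.SequenceMatcher on sequences of lines, e.g.:
#
#   s = SequenceMatcher(None, ['a\n', 'b\n'], ['a\n', 'c\n'])
#   s.get_opcodes()   # -> [('equal', 0, 1, 0, 1), ('replace', 1, 2, 1, 2)]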