        delta_size = 0
        fulltext_size = None
        delta_parents = first_parents
        for count in xrange(self._max_delta_chain):
            parent = delta_parents[0]
            method = self._index.get_method(parent)
            pos, size = self._index.get_position(parent)
            if method == 'fulltext':
                fulltext_size = size
                break
            try:
                # Note that this only looks in the index of this particular
                # KnitVersionedFiles, not in the fallbacks. This ensures that
                # we won't store a delta spanning physical repository
                # boundaries.
                build_details = self._index.get_build_details([parent])
                parent_details = build_details[parent]
            except (RevisionNotPresent, KeyError), e:
                # Some basis is not locally present: always fulltext
                return False
            index_memo, compression_parent, _, _ = parent_details
            _, _, size = index_memo
            if compression_parent is None:
                fulltext_size = size
                break
            delta_size += size
            delta_parents = self._index.get_parents(parent)
            # We don't explicitly check for presence because this is in an
            # inner loop, and if it's missing it'll fail anyhow.
            parent = compression_parent
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False
        # Simple heuristic - if the total I/O would be greater as a delta than
        # the originally installed fulltext, we create a new fulltext.
        return fulltext_size > delta_size
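        # Illustrative note (not from the original source): with a chain of
        # three deltas of 4, 3 and 5 KB behind a 10 KB stored fulltext,
        # delta_size is 12 KB and fulltext_size is 10 KB, so this returns True
        # and the caller writes a fresh fulltext instead of extending the chain.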
    def _add_delta(self, version_id, parents, delta_parent, sha1, noeol, delta):
        """See VersionedFile._add_delta()."""
        self._check_add(version_id, [])  # should we check the lines ?
        self._check_versions_present(parents)
        present_parents = []
        ghosts = []
        for parent in parents:
            if not self.has_version(parent):
                ghosts.append(parent)
            else:
                present_parents.append(parent)

        if delta_parent is None:
            # reconstitute as full text.
            assert len(delta) == 1 or len(delta) == 0
            if len(delta):
                assert delta[0][0] == 0
                assert delta[0][1] == 0, delta[0][1]
            return super(KnitVersionedFile, self)._add_delta(version_id,
                parents, delta_parent, sha1, noeol, delta)

        digest = sha1
        options = []
        if noeol:
            options.append('no-eol')

        if delta_parent is not None:
            # Determine the current delta chain length.
            # To speed the extraction of texts the delta chain is limited
            # to a fixed number of deltas. This should minimize both
            # I/O and the time spent applying deltas.
            # The window was changed to a maximum of 200 deltas, and a check
            # was added that the total compressed size of the deltas is
            # smaller than the compressed size of the fulltext.
            if not self._check_should_delta([delta_parent]):
                # We don't want a delta here, just do a normal insertion.
                return super(KnitVersionedFile, self)._add_delta(version_id,
                    parents, delta_parent, sha1, noeol, delta)

        options.append('line-delta')
        store_lines = self.factory.lower_line_delta(delta)

        where, size = self._data.add_record(version_id, digest, store_lines)
        self._index.add_version(version_id, options, where, size, parents)
    def _add_raw_records(self, records, data):
        """Add all the records 'records' with data pre-joined in 'data'.

        :param records: A list of tuples(version_id, options, parents, size).
        :param data: The data for the records. When it is written, the records
                     are adjusted to have pos pointing into data by the sum of
                     the preceding records sizes.
        """
        pos = self._data.add_raw_record(data)
        offset = 0
        index_entries = []
        for (version_id, options, parents, size) in records:
            index_entries.append((version_id, options, pos+offset,
                                  size, parents))
            if self._data._do_cache:
                self._data._cache[version_id] = data[offset:offset+size]
            offset += size
        self._index.add_versions(index_entries)

    def enable_cache(self):
        """Start caching data for this knit"""
        self._data.enable_cache()

    def clear_cache(self):
        """Clear the data cache only."""
        self._data.clear_cache()

    def copy_to(self, name, transport):
        """See VersionedFile.copy_to()."""
        # copy the current index to a temp index to avoid racing with local
        # writes
        transport.put_file_non_atomic(name + INDEX_SUFFIX + '.tmp',
            self.transport.get(self._index._filename))
        f = self._data._open_file()
        try:
            transport.put_file(name + DATA_SUFFIX, f)
        finally:
            f.close()
        # move the copied index into place
        transport.move(name + INDEX_SUFFIX + '.tmp', name + INDEX_SUFFIX)

    def create_empty(self, name, transport, mode=None):
        return KnitVersionedFile(name, transport, factory=self.factory,
                                 delta=self.delta, create=True)
    def _fix_parents(self, version_id, new_parents):
        """Fix the parents list for version.

        This is done by appending a new version to the index
        with identical data except for the parents list.
        The new parents list must be a superset of the current
        list.
        """
        current_values = self._index._cache[version_id]
        assert set(current_values[4]).difference(set(new_parents)) == set()
        self._index.add_version(version_id,
                                current_values[1],
                                current_values[2],
                                current_values[3],
                                new_parents)

    def get_delta(self, version_id):
        """Get a delta for constructing version from some other version."""
        version_id = osutils.safe_revision_id(version_id)
        self.check_not_reserved_id(version_id)
        if not self.has_version(version_id):
            raise RevisionNotPresent(version_id, self.filename)

        parents = self.get_parents(version_id)
        if len(parents):
            parent = parents[0]
        else:
            parent = None
        data_pos, data_size = self._index.get_position(version_id)
        data, sha1 = self._data.read_records(((version_id, data_pos, data_size),))[version_id]
        noeol = 'no-eol' in self._index.get_options(version_id)
        if 'fulltext' == self._index.get_method(version_id):
            new_content = self.factory.parse_fulltext(data, version_id)
            if parent is not None:
                reference_content = self._get_content(parent)
                old_texts = reference_content.text()
            else:
                old_texts = []
            new_texts = new_content.text()
            delta_seq = KnitSequenceMatcher(None, old_texts, new_texts)
            return parent, sha1, noeol, self._make_line_delta(delta_seq, new_content)
        else:
            delta = self.factory.parse_line_delta(data, version_id)
            return parent, sha1, noeol, delta

    def get_graph_with_ghosts(self):
        """See VersionedFile.get_graph_with_ghosts()."""
        graph_items = self._index.get_graph()
        return dict(graph_items)
    def get_sha1(self, version_id):
        """See VersionedFile.get_sha1()."""
        version_id = osutils.safe_revision_id(version_id)
        record_map = self._get_record_map([version_id])
        method, content, digest, next = record_map[version_id]
        return digest

    @staticmethod
    def get_suffixes():
        """See VersionedFile.get_suffixes()."""
        return [DATA_SUFFIX, INDEX_SUFFIX]

    def has_ghost(self, version_id):
        """True if there is a ghost reference in the file to version_id."""
        version_id = osutils.safe_revision_id(version_id)
        if self.has_version(version_id):
            return False
        # optimisable if needed by memoising the _ghosts set.
        items = self._index.get_graph()
        for node, parents in items:
            for parent in parents:
                if parent not in self._index._cache:
                    if parent == version_id:
                        return True
        return False

    def versions(self):
        """See VersionedFile.versions."""
        return self._index.get_versions()

    def has_version(self, version_id):
        """See VersionedFile.has_version."""
        version_id = osutils.safe_revision_id(version_id)
        return self._index.has_version(version_id)

    __contains__ = has_version
    def _merge_annotations(self, content, parents, parent_texts={},
                           delta=None, annotated=None):
        """Merge annotations for content.  This is done by comparing
        the annotations based on changes to the text.
        """
        if annotated:
            delta_seq = None
            for parent_id in parents:
                merge_content = self._get_content(parent_id, parent_texts)
                seq = patiencediff.PatienceSequenceMatcher(
                    None, merge_content.text(), content.text())
                if delta_seq is None:
                    # setup a delta seq to reuse.
                    delta_seq = seq
                for i, j, n in seq.get_matching_blocks():
                    # this appears to copy (origin, text) pairs across to the new
                    # content for any line that matches the last-checked parent.
                    # FIXME: save the sequence control data for delta compression
                    # against the most relevant parent rather than rediffing.
                    content._lines[j:j+n] = merge_content._lines[i:i+n]
        if delta:
            if not annotated:
                reference_content = self._get_content(parents[0], parent_texts)
                new_texts = content.text()
                old_texts = reference_content.text()
                delta_seq = patiencediff.PatienceSequenceMatcher(
                    None, old_texts, new_texts)
            return self._make_line_delta(delta_seq, content)

    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks
    def _get_components_positions(self, version_ids):
        """Produce a map of position data for the components of versions.

        A dict of version_id to (method, data_pos, data_size, next) is
        returned.
        method is the way referenced data should be applied.
        data_pos is the position of the data in the knit.
        data_size is the size of the data in the knit.
        """
        component_data = {}
        for version_id in version_ids:
            cursor = version_id
            while cursor is not None and cursor not in component_data:
                method = self._index.get_method(cursor)
                if method == 'fulltext':
                    next = None
                else:
                    next = self.get_parents(cursor)[0]
                data_pos, data_size = self._index.get_position(cursor)
                component_data[cursor] = (method, data_pos, data_size, next)
                cursor = next
        return component_data

    def _build_details_to_components(self, build_details):
        """Convert a build_details tuple to a position tuple."""
        # record_details, access_memo, compression_parent
        return build_details[3], build_details[0], build_details[1]

    def _get_components_positions(self, keys, allow_missing=False):
        """Produce a map of position data for the components of keys.

        This data is intended to be used for retrieving the knit records.

        A dict of key to (record_details, index_memo, next, parents) is
        returned.
        index_memo is the handle to pass to the data access to actually get the
            data
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version

        :param allow_missing: If True do not raise an error on a missing component,
            just ignore it.
        """
        component_data = {}
        pending_components = keys
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[key] = self._build_details_to_components(details)
            missing = current_components.difference(build_details)
            if missing and not allow_missing:
                raise errors.RevisionNotPresent(missing.pop(), self)
        return component_data
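        # Illustrative note (not from the original source): a key stored as a
        # line-delta against ('file-id', 'rev-1') ends up mapped to roughly
        #     key -> (record_details, index_memo, ('file-id', 'rev-1'))
        # while a key stored as a fulltext maps to (record_details, index_memo,
        # None), so callers can walk compression parents until they hit None.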
    def _get_content(self, version_id, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        if not self.has_version(version_id):
            raise RevisionNotPresent(version_id, self.filename)

        cached_version = parent_texts.get(version_id, None)
        if cached_version is not None:
            return cached_version

        text_map, contents_map = self._get_content_maps([version_id])
        return contents_map[version_id]

    def _get_content(self, key, parent_texts={}):
        cached_version = parent_texts.get(key, None)
        if cached_version is not None:
            # Ensure the cache dict is valid.
            if not self.get_parent_map([key]):
                raise RevisionNotPresent(key, self)
            return cached_version

    def _check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        self._index.check_versions_present(version_ids)

    def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts):
        """See VersionedFile.add_lines_with_ghosts()."""
        self._check_add(version_id, lines)
        return self._add(version_id, lines[:], parents, self.delta, parent_texts)
    def _add_lines(self, version_id, parents, lines, parent_texts):
        """See VersionedFile.add_lines."""
        self._check_add(version_id, lines)
        self._check_versions_present(parents)
        return self._add(version_id, lines[:], parents, self.delta, parent_texts)

    def _check_add(self, version_id, lines):
        """check that version_id and lines are safe to add."""
        assert self.writable, "knit is not opened for write"
        ### FIXME escape. RBC 20060228
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id, self.filename)
        self.check_not_reserved_id(version_id)
        if self.has_version(version_id):
            raise RevisionAlreadyPresent(version_id, self.filename)
        self._check_lines_not_unicode(lines)
        self._check_lines_are_lines(lines)

    def _add(self, version_id, lines, parents, delta, parent_texts):
        """Add a set of lines on top of version specified by parents.

        If delta is true, compress the text as a line-delta against
        the first parent.

        Any versions not present will be converted into ghosts.
        """
        #  461    0   6546.0390     43.9100   bzrlib.knit:489(_add)
        # +400    0    889.4890    418.9790   +bzrlib.knit:192(lower_fulltext)
        # +461    0   1364.8070    108.8030   +bzrlib.knit:996(add_record)
        # +461    0    193.3940     41.5720   +bzrlib.knit:898(add_version)
        # +461    0    134.0590     18.3810   +bzrlib.osutils:361(sha_strings)
        # +461    0     36.3420     15.4540   +bzrlib.knit:146(make)
        # +1383   0      8.0370      8.0370   +<len>
        # +61     0     13.5770      7.9190   +bzrlib.knit:199(lower_line_delta)
        # +61     0    963.3470      7.8740   +bzrlib.knit:427(_get_content)
        # +61     0    973.9950      5.2950   +bzrlib.knit:136(line_delta)
        # +61     0   1918.1800      5.2640   +bzrlib.knit:359(_merge_annotations)

        present_parents = []
        ghosts = []
        if parent_texts is None:
            parent_texts = {}
        for parent in parents:
            if not self.has_version(parent):
                ghosts.append(parent)
            else:
                present_parents.append(parent)

        if delta and not len(present_parents):
            delta = False

        digest = sha_strings(lines)
        options = []
        if lines[-1][-1] != '\n':
            options.append('no-eol')
            lines[-1] = lines[-1] + '\n'

        if len(present_parents) and delta:
            # To speed the extraction of texts the delta chain is limited
            # to a fixed number of deltas. This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents)

        assert isinstance(version_id, str)
        lines = self.factory.make(lines, version_id)
        if delta or (self.factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(lines, present_parents,
                parent_texts, delta, self.factory.annotated)

        if delta:
            options.append('line-delta')
            store_lines = self.factory.lower_line_delta(delta_hunks)
        else:
            options.append('fulltext')
            store_lines = self.factory.lower_fulltext(lines)

        where, size = self._data.add_record(version_id, digest, store_lines)
        self._index.add_version(version_id, options, where, size, parents)
    def check(self, progress_bar=None):
        """See VersionedFile.check()."""

    def _clone_text(self, new_version_id, old_version_id, parents):
        """See VersionedFile.clone_text()."""
        # FIXME RBC 20060228 make fast by only inserting an index with null
        self.add_lines(new_version_id, parents, self.get_lines(old_version_id))

    def get_lines(self, version_id):
        """See VersionedFile.get_lines()."""
        return self.get_line_list([version_id])[0]

    def _get_record_map(self, version_ids):
        generator = _VFContentMapGenerator(self, [key])
        return generator._get_content(key)
    def get_parent_map(self, keys):
        """Get a map of the graph parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results
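        # Illustrative note (not from the original source): with one fallback
        # versioned file, asking for {k1, k2} where k1 lives in this knit and
        # k2 only in the fallback would return something like
        #     ({k1: (p1,), k2: (p2,)}, [{k1: (p1,)}, {k2: (p2,)}])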
    def _get_record_map(self, keys, allow_missing=False):
        """Produce a dictionary of knit records.

        The keys are version_ids, the values are tuples of (method, content,
        digest, next).
        method is the way the content should be applied.
        content is a KnitContent object.
        digest is the SHA1 digest of this version id after all steps are done
        next is the build-parent of the version, i.e. the leftmost ancestor.
        If the method is fulltext, next will be None.
        """
        position_map = self._get_components_positions(version_ids)
        # c = component_id, m = method, p = position, s = size, n = next
        records = [(c, p, s) for c, (m, p, s, n) in position_map.iteritems()]
        record_map = {}
        for component_id, content, digest in \
                self._data.read_records_iter(records):
            method, position, size, next = position_map[component_id]
            record_map[component_id] = method, content, digest, next
        return record_map
    def get_text(self, version_id):
        """See VersionedFile.get_text"""
        return self.get_texts([version_id])[0]

    def get_texts(self, version_ids):
        return [''.join(l) for l in self.get_line_list(version_ids)]

    def get_line_list(self, version_ids):
        """Return the texts of listed versions as a list of strings."""
        version_ids = [osutils.safe_revision_id(v) for v in version_ids]
        for version_id in version_ids:
            self.check_not_reserved_id(version_id)
        text_map, content_map = self._get_content_maps(version_ids)
        return [text_map[v] for v in version_ids]

    def _get_content_maps(self, version_ids):
        """Produce maps of text and KnitContents

        :return: {key:(record, record_details, digest, next)}
            data returned from read_records (a KnitContent object)
            opaque information to pass to parse_record
            SHA1 digest of the full text after all steps are done
            build-parent of the version, i.e. the leftmost ancestor.
            Will be None if the record is not a delta.
        :param keys: The keys to build a map for
        :param allow_missing: If some records are missing, rather than
            error, just return the data that could be generated.
        """
        raw_map = self._get_record_map_unparsed(keys,
            allow_missing=allow_missing)
        return self._raw_map_to_record_map(raw_map)
    def _raw_map_to_record_map(self, raw_map):
        """Parse the contents of _get_record_map_unparsed.

        :return: see _get_record_map.
        """
        result = {}
        for key in raw_map:
            data, record_details, next = raw_map[key]
            content, digest = self._parse_record(key[-1], data)
            result[key] = content, record_details, digest, next
        return result

    def _get_record_map_unparsed(self, keys, allow_missing=False):
        """Get the raw data for reconstructing keys without parsing it.

        :return: A dict suitable for parsing via _raw_map_to_record_map.
            key-> raw_bytes, (method, noeol), compression_parent
        """
        # This retries the whole request if anything fails. Potentially we
        # could be a bit more selective. We could track the keys whose records
        # we have successfully found, and then only request the new records
        # from there. However, _get_components_positions grabs the whole build
        # chain, which means we'll likely try to grab the same records again
        # anyway. Also, can the build chains change as part of a pack
        # operation? We wouldn't want to end up with a broken chain.
        while True:
            try:
                position_map = self._get_components_positions(keys,
                    allow_missing=allow_missing)
                # key = component_id, r = record_details, i_m = index_memo,
                # n = next
                records = [(key, i_m) for key, (r, i_m, n)
                           in position_map.iteritems()]
                # Sort by the index memo, so that we request records from the
                # same pack file together, and in forward-sorted order
                records.sort(key=operator.itemgetter(1))
                raw_record_map = {}
                for key, data in self._read_records_iter_unchecked(records):
                    (record_details, index_memo, next) = position_map[key]
                    raw_record_map[key] = data, record_details, next
                return raw_record_map
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)
    @classmethod
    def _split_by_prefix(cls, keys):
        """For the given keys, split them up based on their prefix.

        To keep memory pressure somewhat under control, split the
        requests back into per-file-id requests, otherwise "bzr co"
        extracts the full tree into memory before writing it to disk.
        This should be revisited if _get_content_maps() can ever cross
        file-id boundaries.

        The keys for a given file_id are kept in the same relative order.
        Ordering between file_ids is not, though prefix_order will return the
        order that the key was first seen.

        :param keys: An iterable of key tuples
        :return: (split_map, prefix_order)
            split_map       A dictionary mapping prefix => keys
            prefix_order    The order that we saw the various prefixes
        """
        split_by_prefix = {}
        prefix_order = []
        for key in keys:
            if len(key) == 1:
                prefix = ''
            else:
                prefix = key[0]

            if prefix in split_by_prefix:
                split_by_prefix[prefix].append(key)
            else:
                split_by_prefix[prefix] = [key]
                prefix_order.append(prefix)
        return split_by_prefix, prefix_order
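        # Illustrative note (not from the original source): for keys
        #     [('f1', 'a'), ('f2', 'b'), ('f1', 'c')]
        # the split map is {'f1': [('f1', 'a'), ('f1', 'c')],
        #                   'f2': [('f2', 'b')]}
        # and prefix_order is ['f1', 'f2'], keeping per-file key order intact.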
    def _group_keys_for_io(self, keys, non_local_keys, positions,
                           _min_buffer_size=_STREAM_MIN_BUFFER_SIZE):
        """For the given keys, group them into 'best-sized' requests.

        The idea is to avoid making 1 request per file, but to never try to
        unpack an entire 1.5GB source tree in a single pass. Also when
        possible, we should try to group requests to the same pack file
        together.

        :return: list of (keys, non_local) tuples that indicate what keys
            should be fetched next.
        """
        # TODO: Ideally we would group on 2 factors. We want to extract texts
        #       from the same pack file together, and we want to extract all
        #       the texts for a given build-chain together. Ultimately it
        #       probably needs a better global view.
        total_keys = len(keys)
        prefix_split_keys, prefix_order = self._split_by_prefix(keys)
        prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys)

        result = []
        sizes = []
        cur_keys = []
        cur_non_local = set()
        cur_size = 0
        for prefix in prefix_order:
            keys = prefix_split_keys[prefix]
            non_local = prefix_split_non_local_keys.get(prefix, [])

            this_size = self._index._get_total_build_size(keys, positions)
            cur_size += this_size
            cur_keys.extend(keys)
            cur_non_local.update(non_local)
            if cur_size > _min_buffer_size:
                result.append((cur_keys, cur_non_local))
                sizes.append(cur_size)
                cur_keys = []
                cur_non_local = set()
                cur_size = 0
        if cur_keys:
            result.append((cur_keys, cur_non_local))
            sizes.append(cur_size)
        return result
    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # keys might be a generator
        keys = set(keys)
        if not self._index.has_graph:
            # Cannot sort when no graph has been stored.
            ordering = 'unordered'

        remaining_keys = keys
        while True:
            try:
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                        ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
                return
            except errors.RetryWithNewPacks, e:
                self._access.reload_or_raise(e)
def _get_remaining_record_stream(self, keys, ordering,
1353
include_delta_closure):
1354
"""This function is the 'retry' portion for get_record_stream."""
1355
if include_delta_closure:
1356
positions = self._get_components_positions(keys, allow_missing=True)
1358
build_details = self._index.get_build_details(keys)
1360
# (record_details, access_memo, compression_parent_key)
1361
positions = dict((key, self._build_details_to_components(details))
1362
for key, details in build_details.iteritems())
1363
absent_keys = keys.difference(set(positions))
1364
# There may be more absent keys : if we're missing the basis component
1365
# and are trying to include the delta closure.
1366
# XXX: We should not ever need to examine remote sources because we do
1367
# not permit deltas across versioned files boundaries.
1368
if include_delta_closure:
1369
needed_from_fallback = set()
1370
# Build up reconstructable_keys dict. key:True in this dict means
1371
# the key can be reconstructed.
1372
reconstructable_keys = {}
1376
chain = [key, positions[key][2]]
1378
needed_from_fallback.add(key)
1381
while chain[-1] is not None:
1382
if chain[-1] in reconstructable_keys:
1383
result = reconstructable_keys[chain[-1]]
1387
chain.append(positions[chain[-1]][2])
1389
# missing basis component
1390
needed_from_fallback.add(chain[-1])
1393
for chain_key in chain[:-1]:
1394
reconstructable_keys[chain_key] = result
1396
needed_from_fallback.add(key)
1397
# Double index lookups here : need a unified api ?
1398
global_map, parent_maps = self._get_parent_map_with_sources(keys)
1399
if ordering in ('topological', 'groupcompress'):
1400
if ordering == 'topological':
1401
# Global topological sort
1402
present_keys = tsort.topo_sort(global_map)
1404
present_keys = sort_groupcompress(global_map)
1405
# Now group by source:
1407
current_source = None
1408
for key in present_keys:
1409
for parent_map in parent_maps:
1410
if key in parent_map:
1411
key_source = parent_map
1413
if current_source is not key_source:
1414
source_keys.append((key_source, []))
1415
current_source = key_source
1416
source_keys[-1][1].append(key)
1418
if ordering != 'unordered':
1419
raise AssertionError('valid values for ordering are:'
1420
' "unordered", "groupcompress" or "topological" not: %r'
1422
# Just group by source; remote sources first.
1425
for parent_map in reversed(parent_maps):
1426
source_keys.append((parent_map, []))
1427
for key in parent_map:
1428
present_keys.append(key)
1429
source_keys[-1][1].append(key)
1430
# We have been requested to return these records in an order that
1431
# suits us. So we ask the index to give us an optimally sorted
1433
for source, sub_keys in source_keys:
1434
if source is parent_maps[0]:
1435
# Only sort the keys for this VF
1436
self._index._sort_keys_by_io(sub_keys, positions)
1437
absent_keys = keys - set(global_map)
1438
for key in absent_keys:
1439
yield AbsentContentFactory(key)
1440
# restrict our view to the keys we can answer.
1441
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
1442
# XXX: At that point we need to consider the impact of double reads by
1443
# utilising components multiple times.
1444
if include_delta_closure:
1445
# XXX: get_content_maps performs its own index queries; allow state
1447
non_local_keys = needed_from_fallback - absent_keys
1448
for keys, non_local_keys in self._group_keys_for_io(present_keys,
1451
generator = _VFContentMapGenerator(self, keys, non_local_keys,
1453
for record in generator.get_record_stream():
1456
for source, keys in source_keys:
1457
if source is parent_maps[0]:
1458
# this KnitVersionedFiles
1459
records = [(key, positions[key][1]) for key in keys]
1460
for key, raw_data, sha1 in self._read_records_iter_raw(records):
1461
(record_details, index_memo, _) = positions[key]
1462
yield KnitContentFactory(key, global_map[key],
1463
record_details, sha1, raw_data, self._factory.annotated, None)
1465
vf = self._fallback_vfs[parent_maps.index(source) - 1]
1466
for record in vf.get_record_stream(keys, ordering,
1467
include_delta_closure):
    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        missing = set(keys)
        record_map = self._get_record_map(missing, allow_missing=True)
        result = {}
        for key, details in record_map.iteritems():
            if key not in missing:
                continue
            # record entry 2 is the 'digest'.
            result[key] = details[2]
        missing.difference_update(set(result))
        for source in self._fallback_vfs:
            if not missing:
                break
            new_result = source.get_sha1s(missing)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result
def insert_record_stream(self, stream):
1490
"""Insert a record stream into this container.
1492
:param stream: A stream of records to insert.
1494
:seealso VersionedFiles.get_record_stream:
1496
def get_adapter(adapter_key):
1498
return adapters[adapter_key]
1500
adapter_factory = adapter_registry.get(adapter_key)
1501
adapter = adapter_factory(self)
1502
adapters[adapter_key] = adapter
1505
if self._factory.annotated:
1506
# self is annotated, we need annotated knits to use directly.
1507
annotated = "annotated-"
1510
# self is not annotated, but we can strip annotations cheaply.
1512
convertibles = set(["knit-annotated-ft-gz"])
1513
if self._max_delta_chain:
1514
delta_types.add("knit-annotated-delta-gz")
1515
convertibles.add("knit-annotated-delta-gz")
1516
# The set of types we can cheaply adapt without needing basis texts.
1517
native_types = set()
1518
if self._max_delta_chain:
1519
native_types.add("knit-%sdelta-gz" % annotated)
1520
delta_types.add("knit-%sdelta-gz" % annotated)
1521
native_types.add("knit-%sft-gz" % annotated)
1522
knit_types = native_types.union(convertibles)
1524
# Buffer all index entries that we can't add immediately because their
1525
# basis parent is missing. We don't buffer all because generating
1526
# annotations may require access to some of the new records. However we
1527
# can't generate annotations from new deltas until their basis parent
1528
# is present anyway, so we get away with not needing an index that
1529
# includes the new keys.
1531
# See <http://launchpad.net/bugs/300177> about ordering of compression
1532
# parents in the records - to be conservative, we insist that all
1533
# parents must be present to avoid expanding to a fulltext.
1535
# key = basis_parent, value = index entry to add
1536
buffered_index_entries = {}
1537
for record in stream:
1539
parents = record.parents
1540
if record.storage_kind in delta_types:
1541
# TODO: eventually the record itself should track
1542
# compression_parent
1543
compression_parent = parents[0]
1545
compression_parent = None
1546
# Raise an error when a record is missing.
1547
if record.storage_kind == 'absent':
1548
raise RevisionNotPresent([record.key], self)
1549
elif ((record.storage_kind in knit_types)
1550
and (compression_parent is None
1551
or not self._fallback_vfs
1552
or self._index.has_key(compression_parent)
1553
or not self.has_key(compression_parent))):
1554
# we can insert the knit record literally if either it has no
1555
# compression parent OR we already have its basis in this kvf
1556
# OR the basis is not present even in the fallbacks. In the
1557
# last case it will either turn up later in the stream and all
1558
# will be well, or it won't turn up at all and we'll raise an
1561
# TODO: self.has_key is somewhat redundant with
1562
# self._index.has_key; we really want something that directly
1563
# asks if it's only present in the fallbacks. -- mbp 20081119
1564
if record.storage_kind not in native_types:
1566
adapter_key = (record.storage_kind, "knit-delta-gz")
1567
adapter = get_adapter(adapter_key)
1569
adapter_key = (record.storage_kind, "knit-ft-gz")
1570
adapter = get_adapter(adapter_key)
1571
bytes = adapter.get_bytes(record)
1573
# It's a knit record, it has a _raw_record field (even if
1574
# it was reconstituted from a network stream).
1575
bytes = record._raw_record
1576
options = [record._build_details[0]]
1577
if record._build_details[1]:
1578
options.append('no-eol')
1579
# Just blat it across.
1580
# Note: This does end up adding data on duplicate keys. As
1581
# modern repositories use atomic insertions this should not
1582
# lead to excessive growth in the event of interrupted fetches.
1583
# 'knit' repositories may suffer excessive growth, but as a
1584
# deprecated format this is tolerable. It can be fixed if
1585
# needed by in the kndx index support raising on a duplicate
1586
# add with identical parents and options.
1587
access_memo = self._access.add_raw_records(
1588
[(record.key, len(bytes))], bytes)[0]
1589
index_entry = (record.key, options, access_memo, parents)
1590
if 'fulltext' not in options:
1591
# Not a fulltext, so we need to make sure the compression
1592
# parent will also be present.
1593
# Note that pack backed knits don't need to buffer here
1594
# because they buffer all writes to the transaction level,
1595
# but we don't expose that difference at the index level. If
1596
# the query here has sufficient cost to show up in
1597
# profiling we should do that.
1599
# They're required to be physically in this
1600
# KnitVersionedFiles, not in a fallback.
1601
if not self._index.has_key(compression_parent):
1602
pending = buffered_index_entries.setdefault(
1603
compression_parent, [])
1604
pending.append(index_entry)
1607
self._index.add_records([index_entry])
1608
elif record.storage_kind == 'chunked':
1609
self.add_lines(record.key, parents,
1610
osutils.chunks_to_lines(record.get_bytes_as('chunked')))
1612
# Not suitable for direct insertion as a
1613
# delta, either because it's not the right format, or this
1614
# KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
1615
# 0) or because it depends on a base only present in the
1617
self._access.flush()
1619
# Try getting a fulltext directly from the record.
1620
bytes = record.get_bytes_as('fulltext')
1621
except errors.UnavailableRepresentation:
1622
adapter_key = record.storage_kind, 'fulltext'
1623
adapter = get_adapter(adapter_key)
1624
bytes = adapter.get_bytes(record)
1625
lines = split_lines(bytes)
1627
self.add_lines(record.key, parents, lines)
1628
except errors.RevisionAlreadyPresent:
1630
# Add any records whose basis parent is now available.
1632
added_keys = [record.key]
1634
key = added_keys.pop(0)
1635
if key in buffered_index_entries:
1636
index_entries = buffered_index_entries[key]
1637
self._index.add_records(index_entries)
1639
[index_entry[0] for index_entry in index_entries])
1640
del buffered_index_entries[key]
1641
if buffered_index_entries:
1642
# There were index entries buffered at the end of the stream,
1643
# So these need to be added (if the index supports holding such
1644
# entries for later insertion)
1645
for key in buffered_index_entries:
1646
index_entries = buffered_index_entries[key]
1647
self._index.add_records(index_entries,
1648
missing_compression_parents=True)
    def get_missing_compression_parent_keys(self):
        """Return an iterable of keys of missing compression parents.

        Check this after calling insert_record_stream to find out if there are
        any missing compression parents. If there are, the records that
        depend on them are not able to be inserted safely. For atomic
        KnitVersionedFiles built on packs, the transaction should be aborted or
        suspended - commit will fail at this point. Nonatomic knits will error
        earlier because they have no staging area to put pending entries into.
        """
        return self._index.get_missing_compression_parents()
1662
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1663
"""Iterate over the lines in the versioned files from keys.
1665
This may return lines from other keys. Each item the returned
1666
iterator yields is a tuple of a line and a text version that that line
1667
is present in (not introduced in).
1669
Ordering of results is in whatever order is most suitable for the
1670
underlying storage format.
1672
If a progress bar is supplied, it may be used to indicate progress.
1673
The caller is responsible for cleaning up progress bars (because this
1677
* Lines are normalised by the underlying store: they will all have \\n
1679
* Lines are returned in arbitrary order.
1680
* If a requested key did not change any lines (or didn't have any
1681
lines), it may not be mentioned at all in the result.
1683
:param pb: Progress bar supplied by caller.
1684
:return: An iterator over (line, key).
1687
pb = progress.DummyProgress()
1693
# we don't care about inclusions, the caller cares.
1694
# but we need to setup a list of records to visit.
1695
# we need key, position, length
1697
build_details = self._index.get_build_details(keys)
1698
for key, details in build_details.iteritems():
1700
key_records.append((key, details[0]))
1701
records_iter = enumerate(self._read_records_iter(key_records))
1702
for (key_idx, (key, data, sha_value)) in records_iter:
1703
pb.update('Walking content', key_idx, total)
1704
compression_parent = build_details[key][1]
1705
if compression_parent is None:
1707
line_iterator = self._factory.get_fulltext_content(data)
1710
line_iterator = self._factory.get_linedelta_content(data)
1711
# Now that we are yielding the data for this key, remove it
1714
# XXX: It might be more efficient to yield (key,
1715
# line_iterator) in the future. However for now, this is a
1716
# simpler change to integrate into the rest of the
1717
# codebase. RBC 20071110
1718
for line in line_iterator:
1721
except errors.RetryWithNewPacks, e:
1722
self._access.reload_or_raise(e)
1723
# If there are still keys we've not yet found, we look in the fallback
1724
# vfs, and hope to find them there. Note that if the keys are found
1725
# but had no changes or no content, the fallback may not return
1727
if keys and not self._fallback_vfs:
1728
# XXX: strictly the second parameter is meant to be the file id
1729
# but it's not easily accessible here.
1730
raise RevisionNotPresent(keys, repr(self))
1731
for source in self._fallback_vfs:
1735
for line, key in source.iter_lines_added_or_present_in_keys(keys):
1736
source_keys.add(key)
1738
keys.difference_update(source_keys)
1739
pb.update('Walking content', total, total)
1741
def _make_line_delta(self, delta_seq, new_content):
1742
"""Generate a line delta from delta_seq and new_content."""
1744
for op in delta_seq.get_opcodes():
1745
if op[0] == 'equal':
1747
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
1750
def _merge_annotations(self, content, parents, parent_texts={},
1751
delta=None, annotated=None,
1752
left_matching_blocks=None):
1753
"""Merge annotations for content and generate deltas.
1755
This is done by comparing the annotations based on changes to the text
1756
and generating a delta on the resulting full texts. If annotations are
1757
not being created then a simple delta is created.
1759
if left_matching_blocks is not None:
1760
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1764
for parent_key in parents:
1765
merge_content = self._get_content(parent_key, parent_texts)
1766
if (parent_key == parents[0] and delta_seq is not None):
1769
seq = patiencediff.PatienceSequenceMatcher(
1770
None, merge_content.text(), content.text())
1771
for i, j, n in seq.get_matching_blocks():
1774
# this copies (origin, text) pairs across to the new
1775
# content for any line that matches the last-checked
1777
content._lines[j:j+n] = merge_content._lines[i:i+n]
1778
# XXX: Robert says the following block is a workaround for a
1779
# now-fixed bug and it can probably be deleted. -- mbp 20080618
1780
if content._lines and content._lines[-1][1][-1] != '\n':
1781
# The copied annotation was from a line without a trailing EOL,
1782
# reinstate one for the content object, to ensure correct
1784
line = content._lines[-1][1] + '\n'
1785
content._lines[-1] = (content._lines[-1][0], line)
1787
if delta_seq is None:
1788
reference_content = self._get_content(parents[0], parent_texts)
1789
new_texts = content.text()
1790
old_texts = reference_content.text()
1791
delta_seq = patiencediff.PatienceSequenceMatcher(
1792
None, old_texts, new_texts)
return self._make_line_delta(delta_seq, content)
    def _parse_record(self, version_id, data):
        """Parse an original format knit record.

        These have the last element of the key only present in the stored data.
        """
        rec, record_contents = self._parse_record_unchecked(data)
        self._check_header_version(rec, version_id)
        return record_contents, rec[3]

    def _parse_record_header(self, key, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream.
            as (stream, header_record)
        """
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            rec = self._check_header(key, df.readline())
        except Exception, e:
            raise KnitCorrupt(self,
                "While reading {%s} got %s(%s)"
                % (key, e.__class__.__name__, str(e)))
        return df, rec

    def _parse_record_unchecked(self, data):
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self,
                'incorrect number of lines %s != %s'
                ' for version {%s} %s'
                % (len(record_contents), int(rec[2]),
                   rec[1], record_contents))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self,
                'unexpected version end line %r, wanted %r'
                % (last_line, rec[1]))
        return rec, record_contents

def _read_records_iter(self, records):
1848
"""Read text records from data file and yield result.
1850
The result will be returned in whatever is the fastest to read.
1851
Not by the order requested. Also, multiple requests for the same
1852
record will only yield 1 response.
1853
:param records: A list of (key, access_memo) entries
1854
:return: Yields (key, contents, digest) in the order
1855
read, not the order requested
1860
# XXX: This smells wrong, IO may not be getting ordered right.
1861
needed_records = sorted(set(records), key=operator.itemgetter(1))
1862
if not needed_records:
1865
# The transport optimizes the fetching as well
1866
# (ie, reads continuous ranges.)
1867
raw_data = self._access.get_raw_records(
1868
[index_memo for key, index_memo in needed_records])
1870
for (key, index_memo), data in \
1871
izip(iter(needed_records), raw_data):
1872
content, digest = self._parse_record(key[-1], data)
1873
yield key, content, digest
1875
def _read_records_iter_raw(self, records):
1876
"""Read text records from data file and yield raw data.
1878
This unpacks enough of the text record to validate the id is
1879
as expected but thats all.
1881
Each item the iterator yields is (key, bytes,
1882
expected_sha1_of_full_text).
1884
for key, data in self._read_records_iter_unchecked(records):
1885
# validate the header (note that we can only use the suffix in
1886
# current knit records).
1887
df, rec = self._parse_record_header(key, data)
1889
yield key, data, rec[3]
1891
def _read_records_iter_unchecked(self, records):
1892
"""Read text records from data file and yield raw data.
1894
No validation is done.
1896
Yields tuples of (key, data).
1898
# setup an iterator of the external records:
1899
# uses readv so nice and fast we hope.
1901
# grab the disk data needed.
1902
needed_offsets = [index_memo for key, index_memo
1904
raw_records = self._access.get_raw_records(needed_offsets)
1906
for key, index_memo in records:
1907
data = raw_records.next()
    def _record_to_data(self, key, digest, lines, dense_lines=None):
        """Convert key, digest, lines into a raw data block.

        :param key: The key of the record. Currently keys are always serialised
            using just the trailing component.
        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in \n,
            dense_lines may be a list with one line in it, containing all the
            1000's lines and their \n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len(compressed_bytes), compressed_bytes)
        """
        # Note: using a string copy here increases memory pressure with e.g.
        # ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
        # when doing the initial commit of a mozilla tree. RBC 20070921
        bytes = ''.join(chain(
            ["version %s %d %s\n" % (key[-1],
                                     len(lines),
                                     digest)],
            dense_lines or lines,
            ["end %s\n" % key[-1]]))
        if type(bytes) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(bytes))
        if lines and lines[-1][-1] != '\n':
            raise ValueError('corrupt lines value %r' % lines)
        compressed_bytes = tuned_gzip.bytes_to_gzip(bytes)
        return len(compressed_bytes), compressed_bytes
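        # Illustrative note (not from the original source): the uncompressed
        # block assembled above has the shape
        #     version <key[-1]> <number-of-lines> <sha1>\n
        #     <text lines>
        #     end <key[-1]>\n
        # and is gzip-compressed as a whole before being handed to the access
        # layer, which is why the returned length is the compressed length.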
    def _split_header(self, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self,
                'unexpected number of elements in record header')
        return rec

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result

class _ContentMapGenerator(object):
1959
"""Generate texts or expose raw deltas for a set of texts."""
1961
def _get_content(self, key):
1962
"""Get the content object for key."""
1963
# Note that _get_content is only called when the _ContentMapGenerator
1964
# has been constructed with just one key requested for reconstruction.
1965
if key in self.nonlocal_keys:
1966
record = self.get_record_stream().next()
1967
# Create a content object on the fly
1968
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
1969
return PlainKnitContent(lines, record.key)
1971
# local keys we can ask for directly
1972
return self._get_one_work(key)
1974
def get_record_stream(self):
1975
"""Get a record stream for the keys requested during __init__."""
1976
for record in self._work():
1980
"""Produce maps of text and KnitContents as dicts.
818
1982
:return: (text_map, content_map) where text_map contains the texts for
819
the requested versions and content_map contains the KnitContents.
820
Both dicts take version_ids as their keys.
1983
the requested versions and content_map contains the KnitContents.
822
for version_id in version_ids:
823
if not self.has_version(version_id):
824
raise RevisionNotPresent(version_id, self.filename)
825
record_map = self._get_record_map(version_ids)
830
for version_id in version_ids:
1985
# NB: By definition we never need to read remote sources unless texts
1986
# are requested from them: we don't delta across stores - and we
1987
# explicitly do not want to to prevent data loss situations.
1988
if self.global_map is None:
1989
self.global_map = self.vf.get_parent_map(self.keys)
1990
nonlocal_keys = self.nonlocal_keys
1992
missing_keys = set(nonlocal_keys)
1993
# Read from remote versioned file instances and provide to our caller.
1994
for source in self.vf._fallback_vfs:
1995
if not missing_keys:
1997
# Loop over fallback repositories asking them for texts - ignore
1998
# any missing from a particular fallback.
1999
for record in source.get_record_stream(missing_keys,
2001
if record.storage_kind == 'absent':
2002
# Not in thie particular stream, may be in one of the
2003
# other fallback vfs objects.
2005
missing_keys.remove(record.key)
2008
self._raw_record_map = self.vf._get_record_map_unparsed(self.keys,
2011
for key in self.keys:
2012
if key in self.nonlocal_keys:
2014
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2017
def _get_one_work(self, requested_key):
2018
# Now, if we have calculated everything already, just return the
2020
if requested_key in self._contents_map:
2021
return self._contents_map[requested_key]
2022
# To simplify things, parse everything at once - code that wants one text
2023
# probably wants them all.
2024
# FUTURE: This function could be improved for the 'extract many' case
2025
# by tracking each component and only doing the copy when the number of
2026
# children than need to apply delta's to it is > 1 or it is part of the
2028
multiple_versions = len(self.keys) != 1
2029
if self._record_map is None:
2030
self._record_map = self.vf._raw_map_to_record_map(
2031
self._raw_record_map)
2032
record_map = self._record_map
2033
# raw_record_map is key:
2034
# Have read and parsed records at this point.
2035
for key in self.keys:
2036
if key in self.nonlocal_keys:
833
2041
while cursor is not None:
834
method, data, digest, next = record_map[cursor]
835
components.append((cursor, method, data, digest))
836
if cursor in content_map:
2043
record, record_details, digest, next = record_map[cursor]
2045
raise RevisionNotPresent(cursor, self)
2046
components.append((cursor, record, record_details, digest))
2048
if cursor in self._contents_map:
2049
# no need to plan further back
2050
components.append((cursor, None, None, None))
841
for component_id, method, data, digest in reversed(components):
842
if component_id in content_map:
843
content = content_map[component_id]
2054
for (component_id, record, record_details,
2055
digest) in reversed(components):
2056
if component_id in self._contents_map:
2057
content = self._contents_map[component_id]
845
if method == 'fulltext':
846
assert content is None
847
content = self.factory.parse_fulltext(data, version_id)
848
elif method == 'line-delta':
849
delta = self.factory.parse_line_delta(data, version_id)
850
content = content.copy()
851
content._lines = self._apply_delta(content._lines,
853
content_map[component_id] = content
855
if 'no-eol' in self._index.get_options(version_id):
856
content = content.copy()
857
line = content._lines[-1][1].rstrip('\n')
858
content._lines[-1] = (content._lines[-1][0], line)
859
final_content[version_id] = content
2059
content, delta = self._factory.parse_record(key[-1],
2060
record, record_details, content,
2061
copy_base_content=multiple_versions)
2062
if multiple_versions:
2063
self._contents_map[component_id] = content
861
2065
# digest here is the digest from the last applied component.
862
2066
text = content.text()
863
if sha_strings(text) != digest:
864
raise KnitCorrupt(self.filename,
865
'sha-1 does not match %s' % version_id)
867
text_map[version_id] = text
868
return text_map, final_content
870
def iter_lines_added_or_present_in_versions(self, version_ids=None,
872
"""See VersionedFile.iter_lines_added_or_present_in_versions()."""
873
if version_ids is None:
874
version_ids = self.versions()
876
version_ids = [osutils.safe_revision_id(v) for v in version_ids]
878
pb = progress.DummyProgress()
879
# we don't care about inclusions, the caller cares.
880
# but we need to setup a list of records to visit.
881
# we need version_id, position, length
882
version_id_records = []
883
requested_versions = set(version_ids)
884
# filter for available versions
885
for version_id in requested_versions:
886
if not self.has_version(version_id):
887
raise RevisionNotPresent(version_id, self.filename)
888
# get a in-component-order queue:
889
for version_id in self.versions():
890
if version_id in requested_versions:
891
data_pos, length = self._index.get_position(version_id)
892
version_id_records.append((version_id, data_pos, length))
894
total = len(version_id_records)
895
for version_idx, (version_id, data, sha_value) in \
896
enumerate(self._data.read_records_iter(version_id_records)):
897
pb.update('Walking content.', version_idx, total)
898
method = self._index.get_method(version_id)
900
assert method in ('fulltext', 'line-delta')
901
if method == 'fulltext':
902
line_iterator = self.factory.get_fulltext_content(data)
904
line_iterator = self.factory.get_linedelta_content(data)
905
for line in line_iterator:
908
pb.update('Walking content.', total, total)
    def num_versions(self):
        """See VersionedFile.num_versions()."""
        return self._index.num_versions()

    __len__ = num_versions
916
def annotate_iter(self, version_id):
917
"""See VersionedFile.annotate_iter."""
918
version_id = osutils.safe_revision_id(version_id)
919
content = self._get_content(version_id)
920
for origin, text in content.annotate_iter():
923
def get_parents(self, version_id):
924
"""See VersionedFile.get_parents."""
927
# 52554 calls in 1264 872 internal down from 3674
928
version_id = osutils.safe_revision_id(version_id)
930
return self._index.get_parents(version_id)
932
raise RevisionNotPresent(version_id, self.filename)
934
def get_parents_with_ghosts(self, version_id):
935
"""See VersionedFile.get_parents."""
936
version_id = osutils.safe_revision_id(version_id)
938
return self._index.get_parents_with_ghosts(version_id)
940
raise RevisionNotPresent(version_id, self.filename)
942
def get_ancestry(self, versions, topo_sorted=True):
943
"""See VersionedFile.get_ancestry."""
944
if isinstance(versions, basestring):
945
versions = [versions]
948
versions = [osutils.safe_revision_id(v) for v in versions]
949
return self._index.get_ancestry(versions, topo_sorted)
951
def get_ancestry_with_ghosts(self, versions):
952
"""See VersionedFile.get_ancestry_with_ghosts."""
953
if isinstance(versions, basestring):
954
versions = [versions]
957
versions = [osutils.safe_revision_id(v) for v in versions]
958
return self._index.get_ancestry_with_ghosts(versions)
960
#@deprecated_method(zero_eight)
961
def walk(self, version_ids):
962
"""See VersionedFile.walk."""
963
# We take the short path here, and extract all relevant texts
964
# and put them in a weave and let that do all the work. Far
965
# from optimal, but is much simpler.
966
# FIXME RB 20060228 this really is inefficient!
967
from bzrlib.weave import Weave
969
w = Weave(self.filename)
970
ancestry = set(self.get_ancestry(version_ids, topo_sorted=False))
971
sorted_graph = topo_sort(self._index.get_graph())
972
version_list = [vid for vid in sorted_graph if vid in ancestry]
974
for version_id in version_list:
975
lines = self.get_lines(version_id)
976
w.add_lines(version_id, self.get_parents(version_id), lines)
978
for lineno, insert_id, dset, line in w.walk(version_ids):
979
yield lineno, insert_id, dset, line
981
def plan_merge(self, ver_a, ver_b):
982
"""See VersionedFile.plan_merge."""
983
ver_a = osutils.safe_revision_id(ver_a)
984
ver_b = osutils.safe_revision_id(ver_b)
985
ancestors_b = set(self.get_ancestry(ver_b, topo_sorted=False))
986
def status_a(revision, text):
987
if revision in ancestors_b:
988
return 'killed-b', text
992
ancestors_a = set(self.get_ancestry(ver_a, topo_sorted=False))
993
def status_b(revision, text):
994
if revision in ancestors_a:
995
return 'killed-a', text
999
annotated_a = self.annotate(ver_a)
1000
annotated_b = self.annotate(ver_b)
1001
plain_a = [t for (a, t) in annotated_a]
1002
plain_b = [t for (a, t) in annotated_b]
1003
blocks = KnitSequenceMatcher(None, plain_a, plain_b).get_matching_blocks()
1006
for ai, bi, l in blocks:
1007
# process all mismatched sections
1008
# (last mismatched section is handled because blocks always
1009
# includes a 0-length last block)
1010
for revision, text in annotated_a[a_cur:ai]:
1011
yield status_a(revision, text)
1012
for revision, text in annotated_b[b_cur:bi]:
1013
yield status_b(revision, text)
1015
# and now the matched section
1018
for text_a, text_b in zip(plain_a[ai:a_cur], plain_b[bi:b_cur]):
1019
assert text_a == text_b
1020
yield "unchanged", text_a
class _KnitComponentFile(object):
    """One of the files used to implement a knit database"""

    def __init__(self, transport, filename, mode, file_mode=None,
                 create_parent_dir=False, dir_mode=None):
        self._transport = transport
        self._filename = filename
        self._mode = mode
        self._file_mode = file_mode
        self._dir_mode = dir_mode
        self._create_parent_dir = create_parent_dir
        self._need_to_create = False

    def _full_path(self):
        """Return the full path to this file."""
        return self._transport.base + self._filename

    def check_header(self, fp):
        line = fp.readline()
        if line == '':
            # An empty file can actually be treated as though the file doesn't
            # exist yet.
            raise errors.NoSuchFile(self._full_path())
        if line != self.HEADER:
            raise KnitHeaderError(badline=line,
                filename=self._transport.abspath(self._filename))

    def commit(self):
        """Commit is a nop."""

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._filename)


class _KnitIndex(_KnitComponentFile):
    """Manages knit index file.

    The index is already kept in memory and read on startup, to enable
2067
actual_sha = sha_strings(text)
2068
if actual_sha != digest:
2069
raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
2070
if multiple_versions:
2071
return self._contents_map[requested_key]
2075
def _wire_bytes(self):
2076
"""Get the bytes to put on the wire for 'key'.
2078
The first collection of bytes asked for returns the serialised
2079
raw_record_map and the additional details (key, parent) for key.
2080
Subsequent calls return just the additional details (key, parent).
2081
The wire storage_kind given for the first key is 'knit-delta-closure'.
2082
For subsequent keys it is 'knit-delta-closure-ref'.
2084
:param key: A key from the content generator.
2085
:return: Bytes to put on the wire.
2088
# kind marker for dispatch on the far side,
2089
lines.append('knit-delta-closure')
2091
if self.vf._factory.annotated:
2092
lines.append('annotated')
2095
# then the list of keys
2096
lines.append('\t'.join(['\x00'.join(key) for key in self.keys
2097
if key not in self.nonlocal_keys]))
2098
# then the _raw_record_map in serialised form:
2100
# for each item in the map:
2102
# 1 line with parents if the key is to be yielded (None: for None, '' for ())
2103
# one line with method
2104
# one line with noeol
2105
# one line with next ('' for None)
2106
# one line with byte count of the record bytes
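# Illustrative sketch, not part of the original source: each serialised map
# entry is therefore seven newline-separated fields followed immediately by
# the raw record bytes, roughly
#   <key>\n<parents or 'None:'>\n<method>\n<noeol flag>\n<next or ''>\n<byte count>\n<record bytes>
# with key components joined by '\x00' and multiple parents separated by
# tabs, as built below.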
2108
for key, (record_bytes, (method, noeol), next) in \
2109
self._raw_record_map.iteritems():
2110
key_bytes = '\x00'.join(key)
2111
parents = self.global_map.get(key, None)
2113
parent_bytes = 'None:'
2115
parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
2116
method_bytes = method
2122
next_bytes = '\x00'.join(next)
2125
map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
2126
key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
2127
len(record_bytes), record_bytes))
2128
map_bytes = ''.join(map_byte_list)
2129
lines.append(map_bytes)
2130
bytes = '\n'.join(lines)
2134
class _VFContentMapGenerator(_ContentMapGenerator):
2135
"""Content map generator reading from a VersionedFiles object."""
2137
def __init__(self, versioned_files, keys, nonlocal_keys=None,
2138
global_map=None, raw_record_map=None):
2139
"""Create a _ContentMapGenerator.
2141
:param versioned_files: The versioned files that the texts are being
2143
:param keys: The keys to produce content maps for.
2144
:param nonlocal_keys: An iterable of keys (possibly intersecting keys)
2145
which are known to not be in this knit, but rather in one of the
2147
:param global_map: The result of get_parent_map(keys) (or a supermap).
2148
This is required if get_record_stream() is to be used.
2149
:param raw_record_map: An unparsed raw record map to use for answering
2152
# The vf to source data from
2153
self.vf = versioned_files
2155
self.keys = list(keys)
2156
# Keys known to be in fallback vfs objects
2157
if nonlocal_keys is None:
2158
self.nonlocal_keys = set()
2160
self.nonlocal_keys = frozenset(nonlocal_keys)
2161
# Parents data for keys to be returned in get_record_stream
2162
self.global_map = global_map
2163
# The chunked lists for self.keys in text form
2165
# A cache of KnitContent objects used in extracting texts.
2166
self._contents_map = {}
2167
# All the knit records needed to assemble the requested keys as full
2169
self._record_map = None
2170
if raw_record_map is None:
2171
self._raw_record_map = self.vf._get_record_map_unparsed(keys,
2174
self._raw_record_map = raw_record_map
2175
# the factory for parsing records
2176
self._factory = self.vf._factory
2179
class _NetworkContentMapGenerator(_ContentMapGenerator):
2180
"""Content map generator sourced from a network stream."""
2182
def __init__(self, bytes, line_end):
2183
"""Construct a _NetworkContentMapGenerator from a bytes block."""
2185
self.global_map = {}
2186
self._raw_record_map = {}
2187
self._contents_map = {}
2188
self._record_map = None
2189
self.nonlocal_keys = []
2190
# Get access to record parsing facilities
2191
self.vf = KnitVersionedFiles(None, None)
2194
line_end = bytes.find('\n', start)
2195
line = bytes[start:line_end]
2196
start = line_end + 1
2197
if line == 'annotated':
2198
self._factory = KnitAnnotateFactory()
2200
self._factory = KnitPlainFactory()
2201
# list of keys to emit in get_record_stream
2202
line_end = bytes.find('\n', start)
2203
line = bytes[start:line_end]
2204
start = line_end + 1
2206
tuple(segment.split('\x00')) for segment in line.split('\t')
2208
# now a loop until the end. XXX: It would be nice if this was just a
2209
# bunch of the same records as get_record_stream(..., False) gives, but
2210
# there is a decent sized gap stopping that at the moment.
2214
line_end = bytes.find('\n', start)
2215
key = tuple(bytes[start:line_end].split('\x00'))
2216
start = line_end + 1
2217
# 1 line with parents (None: for None, '' for ())
2218
line_end = bytes.find('\n', start)
2219
line = bytes[start:line_end]
2224
[tuple(segment.split('\x00')) for segment in line.split('\t')
2226
self.global_map[key] = parents
2227
start = line_end + 1
2228
# one line with method
2229
line_end = bytes.find('\n', start)
2230
line = bytes[start:line_end]
2232
start = line_end + 1
2233
# one line with noeol
2234
line_end = bytes.find('\n', start)
2235
line = bytes[start:line_end]
2237
start = line_end + 1
2238
# one line with next ('' for None)
2239
line_end = bytes.find('\n', start)
2240
line = bytes[start:line_end]
2244
next = tuple(bytes[start:line_end].split('\x00'))
2245
start = line_end + 1
2246
# one line with byte count of the record bytes
2247
line_end = bytes.find('\n', start)
2248
line = bytes[start:line_end]
2250
start = line_end + 1
2252
record_bytes = bytes[start:start+count]
2253
start = start + count
2255
self._raw_record_map[key] = (record_bytes, (method, noeol), next)
2257
def get_record_stream(self):
2258
"""Get a record stream for for keys requested by the bytestream."""
2260
for key in self.keys:
2261
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2264
def _wire_bytes(self):
2268
class _KndxIndex(object):
2269
"""Manages knit index files
2271
The index is kept in memory and read on startup, to enable
1061
2272
fast lookups of revision information. The cursor of the index
1062
2273
file is always pointing to the end, making it easy to append
1105
2316
to ensure that records always start on new lines even if the last write was
1106
2317
interrupted. As a result it's normal for the last line in the index to be
1107
2318
missing a trailing newline. One can be added with no harmful effects.
2320
:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
2321
where prefix is e.g. the (fileid,) for .texts instances or () for
2322
constant-mapped things like .revisions, and the old state is
2323
tuple(history_vector, cache_dict). This is used to prevent having an
2324
ABI change with the C extension that reads .kndx files.
1110
2327
HEADER = "# bzr knit index 8\n"
1112
# speed of knit parsing went from 280 ms to 280 ms with slots addition.
1113
# __slots__ = ['_cache', '_history', '_transport', '_filename']
1115
def _cache_version(self, version_id, options, pos, size, parents):
2329
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
2330
"""Create a _KndxIndex on transport using mapper."""
2331
self._transport = transport
2332
self._mapper = mapper
2333
self._get_scope = get_scope
2334
self._allow_writes = allow_writes
2335
self._is_locked = is_locked
2337
self.has_graph = True
2339
def add_records(self, records, random_id=False, missing_compression_parents=False):
2340
"""Add multiple records to the index.
2342
:param records: a list of tuples:
2343
(key, options, access_memo, parents).
2344
:param random_id: If True the ids being added were randomly generated
2345
and no check for existence will be performed.
2346
:param missing_compression_parents: If True the records being added are
2347
only compressed against texts already in the index (or inside
2348
records). If False the records all refer to unavailable texts (or
2349
texts inside records) as compression parents.
2351
if missing_compression_parents:
2352
# It might be nice to get the edge of the records. But keys isn't
2354
keys = sorted(record[0] for record in records)
2355
raise errors.RevisionNotPresent(keys, self)
2357
for record in records:
2360
path = self._mapper.map(key) + '.kndx'
2361
path_keys = paths.setdefault(path, (prefix, []))
2362
path_keys[1].append(record)
2363
for path in sorted(paths):
2364
prefix, path_keys = paths[path]
2365
self._load_prefixes([prefix])
2367
orig_history = self._kndx_cache[prefix][1][:]
2368
orig_cache = self._kndx_cache[prefix][0].copy()
2371
for key, options, (_, pos, size), parents in path_keys:
2373
# kndx indices cannot be parentless.
2375
line = "\n%s %s %s %s %s :" % (
2376
key[-1], ','.join(options), pos, size,
2377
self._dictionary_compress(parents))
2378
if type(line) != str:
2379
raise AssertionError(
2380
'data must be utf8 was %s' % type(line))
2382
self._cache_key(key, options, pos, size, parents)
2383
if len(orig_history):
2384
self._transport.append_bytes(path, ''.join(lines))
2386
self._init_index(path, lines)
2388
# If any problems happen, restore the original values and re-raise
2389
self._kndx_cache[prefix] = (orig_cache, orig_history)
2392
def scan_unvalidated_index(self, graph_index):
2393
"""See _KnitGraphIndex.scan_unvalidated_index."""
2394
# Because kndx files do not support atomic insertion via separate index
2395
# files, they do not support this method.
2396
raise NotImplementedError(self.scan_unvalidated_index)
2398
def get_missing_compression_parents(self):
2399
"""See _KnitGraphIndex.get_missing_compression_parents."""
2400
# Because kndx files do not support atomic insertion via separate index
2401
# files, they do not support this method.
2402
raise NotImplementedError(self.get_missing_compression_parents)
2404
def _cache_key(self, key, options, pos, size, parent_keys):
1116
2405
"""Cache a version record in the history array and index cache.
1118
2407
This is inlined into _load_data for performance. KEEP IN SYNC.
1119
2408
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
2412
version_id = key[-1]
2413
# last-element only for compatibility with the C load_data.
2414
parents = tuple(parent[-1] for parent in parent_keys)
2415
for parent in parent_keys:
2416
if parent[:-1] != prefix:
2417
raise ValueError("mismatched prefixes for %r, %r" % (
2419
cache, history = self._kndx_cache[prefix]
1122
2420
# only want the _history index to reference the 1st index entry
1123
2421
# for version_id
1124
if version_id not in self._cache:
1125
index = len(self._history)
1126
self._history.append(version_id)
2422
if version_id not in cache:
2423
index = len(history)
2424
history.append(version_id)
1128
index = self._cache[version_id][5]
1129
self._cache[version_id] = (version_id,
2426
index = cache[version_id][5]
2427
cache[version_id] = (version_id,
1136
def __init__(self, transport, filename, mode, create=False, file_mode=None,
1137
create_parent_dir=False, delay_create=False, dir_mode=None):
1138
_KnitComponentFile.__init__(self, transport, filename, mode,
1139
file_mode=file_mode,
1140
create_parent_dir=create_parent_dir,
1143
# position in _history is the 'official' index for a revision
1144
# but the values may have come from a newer entry.
1145
# so - wc -l of a knit index is != the number of unique names
2434
def check_header(self, fp):
2435
line = fp.readline()
2437
# An empty file can actually be treated as though the file doesn't
2439
raise errors.NoSuchFile(self)
2440
if line != self.HEADER:
2441
raise KnitHeaderError(badline=line, filename=self)
2443
def _check_read(self):
2444
if not self._is_locked():
2445
raise errors.ObjectNotLocked(self)
2446
if self._get_scope() != self._scope:
2449
def _check_write_ok(self):
2450
"""Assert if not writes are permitted."""
2451
if not self._is_locked():
2452
raise errors.ObjectNotLocked(self)
2453
if self._get_scope() != self._scope:
2455
if self._mode != 'w':
2456
raise errors.ReadOnlyObjectDirtiedError(self)
2458
def get_build_details(self, keys):
2459
"""Get the method, index_memo and compression parent for keys.
2461
Ghosts are omitted from the result.
2463
:param keys: An iterable of keys.
2464
:return: A dict of key:(index_memo, compression_parent, parents,
2467
opaque structure to pass to read_records to extract the raw
2470
Content that this record is built upon, may be None
2472
Logical parents of this node
2474
extra information about the content which needs to be passed to
2475
Factory.parse_record
2477
parent_map = self.get_parent_map(keys)
2480
if key not in parent_map:
2482
method = self.get_method(key)
2483
parents = parent_map[key]
2484
if method == 'fulltext':
2485
compression_parent = None
2487
compression_parent = parents[0]
2488
noeol = 'no-eol' in self.get_options(key)
2489
index_memo = self.get_position(key)
2490
result[key] = (index_memo, compression_parent,
2491
parents, (method, noeol))
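# Illustrative sketch, not part of the original source: a resulting entry
# for a delta record is shaped roughly like
#   key: ((key, pos, size), compression_parent_key, (parent_key, ...),
#         ('line-delta', noeol))
# while a fulltext entry carries a compression parent of None and the
# method 'fulltext'.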
2494
def get_method(self, key):
2495
"""Return compression method of specified key."""
2496
options = self.get_options(key)
2497
if 'fulltext' in options:
2499
elif 'line-delta' in options:
2502
raise errors.KnitIndexUnknownMethod(self, options)
2504
def get_options(self, key):
2505
"""Return a list representing options.
2509
prefix, suffix = self._split_key(key)
2510
self._load_prefixes([prefix])
1149
fp = self._transport.get(self._filename)
1151
# _load_data may raise NoSuchFile if the target knit is
1157
if mode != 'w' or not create:
1160
self._need_to_create = True
1162
self._transport.put_bytes_non_atomic(
1163
self._filename, self.HEADER, mode=self._file_mode)
1165
def _load_data(self, fp):
1167
history = self._history
1169
self.check_header(fp)
1170
# readlines reads the whole file at once:
1171
# bad for transports like http, good for local disk
1172
# we save 60 ms doing this one change (
1173
# from calling readline each time to calling
1175
# probably what we want for nice behaviour on
1176
# http is an incremental readlines that yields, or
1177
# a check for local vs non local indexes,
1178
history_top = len(history) - 1
1179
for line in fp.readlines():
1181
if len(rec) < 5 or rec[-1] != ':':
1183
# FIXME: in the future we should determine if it's a
1184
# short write - and ignore it
1185
# or a different failure, and raise. RBC 20060407
1190
for value in rec[4:-1]:
1192
# uncompressed reference
1193
parent_id = value[1:]
1195
parent_id = history[int(value)]
1196
parents.append(parent_id)
1197
except (IndexError, ValueError), e:
1198
# The parent could not be decoded to get its parent row. This
1199
# at a minimum will cause this row to have wrong parents, or
1200
# even to apply a delta to the wrong base and decode
1201
# incorrectly. It's therefore not usable, and because we have
1202
# encountered a situation where a new knit index had this
1203
# corrupt we can't assume that no other rows referring to the
1204
# index of this record actually mean the subsequent uncorrupt
1206
raise errors.KnitCorrupt(self._filename,
1207
"line %r: %s" % (rec, e))
1209
version_id, options, pos, size = rec[:4]
1210
version_id = version_id
1212
# See self._cache_version
1213
# only want the _history index to reference the 1st
1214
# index entry for version_id
1215
if version_id not in cache:
1218
history.append(version_id)
1220
index = cache[version_id][5]
1221
cache[version_id] = (version_id,
1227
# end self._cache_version
1229
def get_graph(self):
1230
return [(vid, idx[4]) for vid, idx in self._cache.iteritems()]
1232
def get_ancestry(self, versions, topo_sorted=True):
1233
"""See VersionedFile.get_ancestry."""
1234
# get a graph of all the mentioned versions:
1236
pending = set(versions)
1239
version = pending.pop()
1242
parents = [p for p in cache[version][4] if p in cache]
1244
raise RevisionNotPresent(version, self._filename)
1245
# if not completed and not a ghost
1246
pending.update([p for p in parents if p not in graph])
1247
graph[version] = parents
1250
return topo_sort(graph.items())
1252
def get_ancestry_with_ghosts(self, versions):
1253
"""See VersionedFile.get_ancestry_with_ghosts."""
1254
# get a graph of all the mentioned versions:
1255
self.check_versions_present(versions)
1258
pending = set(versions)
1260
version = pending.pop()
1262
parents = cache[version][4]
1268
pending.update([p for p in parents if p not in graph])
1269
graph[version] = parents
1270
return topo_sort(graph.items())
1272
def num_versions(self):
1273
return len(self._history)
1275
__len__ = num_versions
1277
def get_versions(self):
1278
return self._history
1280
def idx_to_name(self, idx):
1281
return self._history[idx]
1283
def lookup(self, version_id):
1284
assert version_id in self._cache
1285
return self._cache[version_id][5]
1287
def _version_list_to_index(self, versions):
2512
return self._kndx_cache[prefix][0][suffix][1]
2514
raise RevisionNotPresent(key, self)
2516
def get_parent_map(self, keys):
2517
"""Get a map of the parents of keys.
2519
:param keys: The keys to look up parents for.
2520
:return: A mapping from keys to parents. Absent keys are absent from
2523
# Parse what we need up front; this potentially trades off I/O
2524
# locality (.kndx and .knit in the same block group for the same file
2525
# id) for less checking in inner loops.
2526
prefixes = set(key[:-1] for key in keys)
2527
self._load_prefixes(prefixes)
2532
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
2536
result[key] = tuple(prefix + (suffix,) for
2537
suffix in suffix_parents)
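# Illustrative sketch, not part of the original source: for a key such as
# ('file-id', 'rev-2') the prefix is ('file-id',); the cached suffix parents
# ('rev-1',) are re-expanded here to full keys, giving
# {('file-id', 'rev-2'): (('file-id', 'rev-1'),)}.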
2540
def get_position(self, key):
2541
"""Return details needed to access the version.
2543
:return: a tuple (key, data position, size) to hand to the access
2544
logic to get the record.
2546
prefix, suffix = self._split_key(key)
2547
self._load_prefixes([prefix])
2548
entry = self._kndx_cache[prefix][0][suffix]
2549
return key, entry[2], entry[3]
2551
has_key = _mod_index._has_key_from_parent_map
2553
def _init_index(self, path, extra_lines=[]):
2554
"""Initialize an index."""
2556
sio.write(self.HEADER)
2557
sio.writelines(extra_lines)
2559
self._transport.put_file_non_atomic(path, sio,
2560
create_parent_dir=True)
2561
# self._create_parent_dir)
2562
# mode=self._file_mode,
2563
# dir_mode=self._dir_mode)
2566
"""Get all the keys in the collection.
2568
The keys are not ordered.
2571
# Identify all key prefixes.
2572
# XXX: A bit hacky, needs polish.
2573
if type(self._mapper) == ConstantMapper:
2577
for quoted_relpath in self._transport.iter_files_recursive():
2578
path, ext = os.path.splitext(quoted_relpath)
2580
prefixes = [self._mapper.unmap(path) for path in relpaths]
2581
self._load_prefixes(prefixes)
2582
for prefix in prefixes:
2583
for suffix in self._kndx_cache[prefix][1]:
2584
result.add(prefix + (suffix,))
2587
def _load_prefixes(self, prefixes):
2588
"""Load the indices for prefixes."""
2590
for prefix in prefixes:
2591
if prefix not in self._kndx_cache:
2592
# the load_data interface writes to these variables.
2595
self._filename = prefix
2597
path = self._mapper.map(prefix) + '.kndx'
2598
fp = self._transport.get(path)
2600
# _load_data may raise NoSuchFile if the target knit is
2602
_load_data(self, fp)
2605
self._kndx_cache[prefix] = (self._cache, self._history)
2610
self._kndx_cache[prefix] = ({}, [])
2611
if type(self._mapper) == ConstantMapper:
2612
# preserve behaviour for revisions.kndx etc.
2613
self._init_index(path)
2618
missing_keys = _mod_index._missing_keys_from_parent_map
2620
def _partition_keys(self, keys):
2621
"""Turn keys into a dict of prefix:suffix_list."""
2624
prefix_keys = result.setdefault(key[:-1], [])
2625
prefix_keys.append(key[-1])
2628
def _dictionary_compress(self, keys):
2629
"""Dictionary compress keys.
2631
:param keys: The keys to generate references to.
2632
:return: A string representation of keys. Keys which are present are
2633
dictionary compressed, and others are emitted as fulltext with a
1288
2638
result_list = []
1290
for version in versions:
1291
if version in cache:
2639
prefix = keys[0][:-1]
2640
cache = self._kndx_cache[prefix][0]
2642
if key[:-1] != prefix:
2643
# kndx indices cannot refer across partitioned storage.
2644
raise ValueError("mismatched prefixes for %r" % keys)
2645
if key[-1] in cache:
1292
2646
# -- inlined lookup() --
1293
result_list.append(str(cache[version][5]))
2647
result_list.append(str(cache[key[-1]][5]))
1294
2648
# -- end lookup () --
1296
result_list.append('.' + version)
2650
result_list.append('.' + key[-1])
1297
2651
return ' '.join(result_list)
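# Illustrative sketch, not part of the original source: assuming the prefix
# cache already holds 'rev-a' at history position 3, compressing
# [('rev-a',), ('rev-x',)] yields '3 .rev-x': cached keys are written as
# their integer history index, unknown keys verbatim with a leading '.'.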
1299
def add_version(self, version_id, options, pos, size, parents):
1300
"""Add a version record to the index."""
1301
self.add_versions(((version_id, options, pos, size, parents),))
1303
def add_versions(self, versions):
1304
"""Add multiple versions to the index.
1306
:param versions: a list of tuples:
1307
(version_id, options, pos, size, parents).
2653
def _reset_cache(self):
2654
# Possibly this should be an LRU cache. A dictionary from key_prefix to
2655
# (cache_dict, history_vector) for parsed kndx files.
2656
self._kndx_cache = {}
2657
self._scope = self._get_scope()
2658
allow_writes = self._allow_writes()
2664
def _sort_keys_by_io(self, keys, positions):
2665
"""Figure out an optimal order to read the records for the given keys.
2667
Sort keys, grouped by index and sorted by position.
2669
:param keys: A list of keys whose records we want to read. This will be
2671
:param positions: A dict, such as the one returned by
2672
_get_components_positions()
1310
orig_history = self._history[:]
1311
orig_cache = self._cache.copy()
1314
for version_id, options, pos, size, parents in versions:
1315
line = "\n%s %s %s %s %s :" % (version_id,
1319
self._version_list_to_index(parents))
1320
assert isinstance(line, str), \
1321
'content must be utf-8 encoded: %r' % (line,)
1323
self._cache_version(version_id, options, pos, size, parents)
1324
if not self._need_to_create:
1325
self._transport.append_bytes(self._filename, ''.join(lines))
1328
sio.write(self.HEADER)
1329
sio.writelines(lines)
1331
self._transport.put_file_non_atomic(self._filename, sio,
1332
create_parent_dir=self._create_parent_dir,
1333
mode=self._file_mode,
1334
dir_mode=self._dir_mode)
1335
self._need_to_create = False
1337
# If any problems happen, restore the original values and re-raise
1338
self._history = orig_history
1339
self._cache = orig_cache
1342
def has_version(self, version_id):
1343
"""True if the version is in the index."""
1344
return version_id in self._cache
1346
def get_position(self, version_id):
1347
"""Return data position and size of specified version."""
1348
entry = self._cache[version_id]
1349
return entry[2], entry[3]
1351
def get_method(self, version_id):
1352
"""Return compression method of specified version."""
1353
options = self._cache[version_id][1]
1354
if 'fulltext' in options:
1357
if 'line-delta' not in options:
1358
raise errors.KnitIndexUnknownMethod(self._full_path(), options)
1361
def get_options(self, version_id):
1362
return self._cache[version_id][1]
1364
def get_parents(self, version_id):
1365
"""Return parents of specified version ignoring ghosts."""
1366
return [parent for parent in self._cache[version_id][4]
1367
if parent in self._cache]
1369
def get_parents_with_ghosts(self, version_id):
1370
"""Return parents of specified version with ghosts."""
1371
return self._cache[version_id][4]
1373
def check_versions_present(self, version_ids):
1374
"""Check that all specified versions are present."""
1376
for version_id in version_ids:
1377
if version_id not in cache:
1378
raise RevisionNotPresent(version_id, self._filename)
1381
class _KnitData(_KnitComponentFile):
1382
"""Contents of the knit data file"""
1384
def __init__(self, transport, filename, mode, create=False, file_mode=None,
1385
create_parent_dir=False, delay_create=False,
1387
_KnitComponentFile.__init__(self, transport, filename, mode,
1388
file_mode=file_mode,
1389
create_parent_dir=create_parent_dir,
1391
self._checked = False
1392
# TODO: jam 20060713 conceptually, this could spill to disk
1393
# if the cached size gets larger than a certain amount
1394
# but it complicates the model a bit, so for now just use
1395
# a simple dictionary
1397
self._do_cache = False
1400
self._need_to_create = create
1402
self._transport.put_bytes_non_atomic(self._filename, '',
1403
mode=self._file_mode)
1405
def enable_cache(self):
1406
"""Enable caching of reads."""
1407
self._do_cache = True
1409
def clear_cache(self):
1410
"""Clear the record cache."""
1411
self._do_cache = False
1414
def _open_file(self):
1416
return self._transport.get(self._filename)
2675
def get_sort_key(key):
2676
index_memo = positions[key][1]
2677
# Group by prefix and position. index_memo[0] is the key, so it is
2678
# (file_id, revision_id) and we don't want to sort on revision_id,
2679
# index_memo[1] is the position, and index_memo[2] is the size,
2680
# which doesn't matter for the sort
2681
return index_memo[0][:-1], index_memo[1]
2682
return keys.sort(key=get_sort_key)
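# Illustrative sketch, not part of the original source: because the sort key
# is (prefix, position), keys such as ('f1', 'r9') at offset 0 and
# ('f1', 'r2') at offset 200 end up grouped per .knit file and ordered by
# offset, so the subsequent readv() calls stay sequential. Note the sort is
# in place; list.sort() returns None.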
2684
_get_total_build_size = _get_total_build_size
2686
def _split_key(self, key):
2687
"""Split key into a prefix and suffix."""
2688
return key[:-1], key[-1]
2691
class _KeyRefs(object):
2694
# dict mapping 'key' to 'set of keys referring to that key'
2697
def add_references(self, key, refs):
2698
# Record the new references
2699
for referenced in refs:
2701
needed_by = self.refs[referenced]
2703
needed_by = self.refs[referenced] = set()
2705
# Discard references satisfied by the new key
2708
def get_unsatisfied_refs(self):
2709
return self.refs.iterkeys()
2711
def add_key(self, key):
2715
# No keys depended on this key. That's ok.
1421
def _record_to_data(self, version_id, digest, lines):
1422
"""Convert version_id, digest, lines into a raw data block.
1424
:return: (len, a StringIO instance with the raw data ready to read.)
1427
data_file = GzipFile(None, mode='wb', fileobj=sio)
1429
assert isinstance(version_id, str)
1430
data_file.writelines(chain(
1431
["version %s %d %s\n" % (version_id,
1435
["end %s\n" % version_id]))
1442
def add_raw_record(self, raw_data):
1443
"""Append a prepared record to the data file.
1445
:return: the offset in the data file raw_data was written.
1447
assert isinstance(raw_data, str), 'data must be plain bytes'
1448
if not self._need_to_create:
1449
return self._transport.append_bytes(self._filename, raw_data)
1451
self._transport.put_bytes_non_atomic(self._filename, raw_data,
1452
create_parent_dir=self._create_parent_dir,
1453
mode=self._file_mode,
1454
dir_mode=self._dir_mode)
1455
self._need_to_create = False
1458
def add_record(self, version_id, digest, lines):
1459
"""Write new text record to disk. Returns the position in the
1460
file where it was written."""
1461
size, sio = self._record_to_data(version_id, digest, lines)
1463
if not self._need_to_create:
1464
start_pos = self._transport.append_file(self._filename, sio)
1466
self._transport.put_file_non_atomic(self._filename, sio,
1467
create_parent_dir=self._create_parent_dir,
1468
mode=self._file_mode,
1469
dir_mode=self._dir_mode)
1470
self._need_to_create = False
1473
self._cache[version_id] = sio.getvalue()
1474
return start_pos, size
1476
def _parse_record_header(self, version_id, raw_data):
1477
"""Parse a record header for consistency.
1479
:return: the header and the decompressor stream.
1480
as (stream, header_record)
1482
df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
1484
rec = self._check_header(version_id, df.readline())
1485
except Exception, e:
1486
raise KnitCorrupt(self._filename,
1487
"While reading {%s} got %s(%s)"
1488
% (version_id, e.__class__.__name__, str(e)))
1491
def _check_header(self, version_id, line):
1494
raise KnitCorrupt(self._filename,
1495
'unexpected number of elements in record header')
1496
if rec[1] != version_id:
1497
raise KnitCorrupt(self._filename,
1498
'unexpected version, wanted %r, got %r'
1499
% (version_id, rec[1]))
1502
def _parse_record(self, version_id, data):
1504
# 4168 calls in 2880 217 internal
1505
# 4168 calls to _parse_record_header in 2121
1506
# 4168 calls to readlines in 330
1507
df = GzipFile(mode='rb', fileobj=StringIO(data))
1510
record_contents = df.readlines()
1511
except Exception, e:
1512
raise KnitCorrupt(self._filename,
1513
"While reading {%s} got %s(%s)"
1514
% (version_id, e.__class__.__name__, str(e)))
1515
header = record_contents.pop(0)
1516
rec = self._check_header(version_id, header)
1518
last_line = record_contents.pop()
1519
if len(record_contents) != int(rec[2]):
1520
raise KnitCorrupt(self._filename,
1521
'incorrect number of lines %s != %s'
1523
% (len(record_contents), int(rec[2]),
1525
if last_line != 'end %s\n' % rec[1]:
1526
raise KnitCorrupt(self._filename,
1527
'unexpected version end line %r, wanted %r'
1528
% (last_line, version_id))
1530
return record_contents, rec[3]
1532
def read_records_iter_raw(self, records):
1533
"""Read text records from data file and yield raw data.
1535
This unpacks enough of the text record to validate the id is
1536
as expected, but that's all.
1538
# setup an iterator of the external records:
1539
# uses readv so nice and fast we hope.
1541
# grab the disk data needed.
1543
# Don't check _cache if it is empty
1544
needed_offsets = [(pos, size) for version_id, pos, size
1546
if version_id not in self._cache]
1548
needed_offsets = [(pos, size) for version_id, pos, size
1551
raw_records = self._transport.readv(self._filename, needed_offsets)
1553
for version_id, pos, size in records:
1554
if version_id in self._cache:
1555
# This data has already been validated
1556
data = self._cache[version_id]
1558
pos, data = raw_records.next()
1560
self._cache[version_id] = data
1562
# validate the header
1563
df, rec = self._parse_record_header(version_id, data)
1565
yield version_id, data
1567
def read_records_iter(self, records):
1568
"""Read text records from data file and yield result.
1570
The result will be returned in whatever order is fastest to read,
1571
not necessarily the order requested. Also, multiple requests for the same
1572
record will only yield 1 response.
1573
:param records: A list of (version_id, pos, len) entries
1574
:return: Yields (version_id, contents, digest) in the order
1575
read, not the order requested
1581
# Skip records we have already seen
1582
yielded_records = set()
1583
needed_records = set()
1584
for record in records:
1585
if record[0] in self._cache:
1586
if record[0] in yielded_records:
1588
yielded_records.add(record[0])
1589
data = self._cache[record[0]]
1590
content, digest = self._parse_record(record[0], data)
1591
yield (record[0], content, digest)
2718
def add_keys(self, keys):
2722
def get_referrers(self):
2724
for referrers in self.refs.itervalues():
2725
result.update(referrers)
2729
class _KnitGraphIndex(object):
2730
"""A KnitVersionedFiles index layered on GraphIndex."""
2732
def __init__(self, graph_index, is_locked, deltas=False, parents=True,
2733
add_callback=None, track_external_parent_refs=False):
2734
"""Construct a KnitGraphIndex on a graph_index.
2736
:param graph_index: An implementation of bzrlib.index.GraphIndex.
2737
:param is_locked: A callback to check whether the object should answer
2739
:param deltas: Allow delta-compressed records.
2740
:param parents: If True, record knits parents, if not do not record
2742
:param add_callback: If not None, allow additions to the index and call
2743
this callback with a list of added GraphIndex nodes:
2744
[(node, value, node_refs), ...]
2745
:param is_locked: A callback, returns True if the index is locked and
2747
:param track_external_parent_refs: If True, record all external parent
2748
references parents from added records. These can be retrieved
2749
later by calling get_missing_parents().
2751
self._add_callback = add_callback
2752
self._graph_index = graph_index
2753
self._deltas = deltas
2754
self._parents = parents
2755
if deltas and not parents:
2756
# XXX: TODO: Delta tree and parent graph should be conceptually
2758
raise KnitCorrupt(self, "Cannot do delta compression without "
2760
self.has_graph = parents
2761
self._is_locked = is_locked
2762
self._missing_compression_parents = set()
2763
if track_external_parent_refs:
2764
self._key_dependencies = _KeyRefs()
2766
self._key_dependencies = None
2769
return "%s(%r)" % (self.__class__.__name__, self._graph_index)
2771
def add_records(self, records, random_id=False,
2772
missing_compression_parents=False):
2773
"""Add multiple records to the index.
2775
This function does not insert data into the Immutable GraphIndex
2776
backing the KnitGraphIndex; instead it prepares data for insertion by
2777
the caller, checks that it is safe to insert, and then calls
2778
self._add_callback with the prepared GraphIndex nodes.
2780
:param records: a list of tuples:
2781
(key, options, access_memo, parents).
2782
:param random_id: If True the ids being added were randomly generated
2783
and no check for existence will be performed.
2784
:param missing_compression_parents: If True the records being added are
2785
only compressed against texts already in the index (or inside
2786
records). If False the records all refer to unavailable texts (or
2787
texts inside records) as compression parents.
2789
if not self._add_callback:
2790
raise errors.ReadOnlyError(self)
2791
# we hope there are no repositories with inconsistent parentage
2795
compression_parents = set()
2796
key_dependencies = self._key_dependencies
2797
for (key, options, access_memo, parents) in records:
2799
parents = tuple(parents)
2800
if key_dependencies is not None:
2801
key_dependencies.add_references(key, parents)
2802
index, pos, size = access_memo
2803
if 'no-eol' in options:
2807
value += "%d %d" % (pos, size)
2808
if not self._deltas:
2809
if 'line-delta' in options:
2810
raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
2813
if 'line-delta' in options:
2814
node_refs = (parents, (parents[0],))
2815
if missing_compression_parents:
2816
compression_parents.add(parents[0])
2818
node_refs = (parents, ())
1593
needed_records.add(record)
1594
needed_records = sorted(needed_records, key=operator.itemgetter(1))
1596
needed_records = sorted(set(records), key=operator.itemgetter(1))
1598
if not needed_records:
1601
# The transport optimizes the fetching as well
1602
# (ie, reads continuous ranges.)
1603
readv_response = self._transport.readv(self._filename,
1604
[(pos, size) for version_id, pos, size in needed_records])
1606
for (version_id, pos, size), (pos, data) in \
1607
izip(iter(needed_records), readv_response):
1608
content, digest = self._parse_record(version_id, data)
1610
self._cache[version_id] = data
1611
yield version_id, content, digest
1613
def read_records(self, records):
1614
"""Read records into a dictionary."""
1616
for record_id, content, digest in \
1617
self.read_records_iter(records):
1618
components[record_id] = (content, digest)
1622
class InterKnit(InterVersionedFile):
1623
"""Optimised code paths for knit to knit operations."""
1625
_matching_file_from_factory = KnitVersionedFile
1626
_matching_file_to_factory = KnitVersionedFile
1629
def is_compatible(source, target):
1630
"""Be compatible with knits. """
1632
return (isinstance(source, KnitVersionedFile) and
1633
isinstance(target, KnitVersionedFile))
1634
except AttributeError:
1637
def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
1638
"""See InterVersionedFile.join."""
1639
assert isinstance(self.source, KnitVersionedFile)
1640
assert isinstance(self.target, KnitVersionedFile)
1642
version_ids = self._get_source_version_ids(version_ids, ignore_missing)
1647
pb = ui.ui_factory.nested_progress_bar()
1649
version_ids = list(version_ids)
1650
if None in version_ids:
1651
version_ids.remove(None)
1653
self.source_ancestry = set(self.source.get_ancestry(version_ids))
1654
this_versions = set(self.target._index.get_versions())
1655
needed_versions = self.source_ancestry - this_versions
1656
cross_check_versions = self.source_ancestry.intersection(this_versions)
1657
mismatched_versions = set()
1658
for version in cross_check_versions:
1659
# scan to include needed parents.
1660
n1 = set(self.target.get_parents_with_ghosts(version))
1661
n2 = set(self.source.get_parents_with_ghosts(version))
1663
# FIXME TEST this check for cycles being introduced works
1664
# the logic is we have a cycle if in our graph we are an
1665
# ancestor of any of the n2 revisions.
1671
parent_ancestors = self.source.get_ancestry(parent)
1672
if version in parent_ancestors:
1673
raise errors.GraphCycleError([parent, version])
1674
# ensure this parent will be available later.
1675
new_parents = n2.difference(n1)
1676
needed_versions.update(new_parents.difference(this_versions))
1677
mismatched_versions.add(version)
1679
if not needed_versions and not mismatched_versions:
1681
full_list = topo_sort(self.source.get_graph())
1683
version_list = [i for i in full_list if (not self.target.has_version(i)
1684
and i in needed_versions)]
1688
copy_queue_records = []
1690
for version_id in version_list:
1691
options = self.source._index.get_options(version_id)
1692
parents = self.source._index.get_parents_with_ghosts(version_id)
1693
# check that it will be a consistent copy:
1694
for parent in parents:
1695
# if source has the parent, we must :
1696
# * already have it or
1697
# * have it scheduled already
1698
# otherwise we don't care
1699
assert (self.target.has_version(parent) or
1700
parent in copy_set or
1701
not self.source.has_version(parent))
1702
data_pos, data_size = self.source._index.get_position(version_id)
1703
copy_queue_records.append((version_id, data_pos, data_size))
1704
copy_queue.append((version_id, options, parents))
1705
copy_set.add(version_id)
1707
# data suck the join:
1709
total = len(version_list)
1712
for (version_id, raw_data), \
1713
(version_id2, options, parents) in \
1714
izip(self.source._data.read_records_iter_raw(copy_queue_records),
1716
assert version_id == version_id2, 'logic error, inconsistent results'
1718
pb.update("Joining knit", count, total)
1719
raw_records.append((version_id, options, parents, len(raw_data)))
1720
raw_datum.append(raw_data)
1721
self.target._add_raw_records(raw_records, ''.join(raw_datum))
1723
for version in mismatched_versions:
1724
# FIXME RBC 20060309 is this needed?
1725
n1 = set(self.target.get_parents_with_ghosts(version))
1726
n2 = set(self.source.get_parents_with_ghosts(version))
1727
# write a combined record to our history preserving the current
1728
# parents as first in the list
1729
new_parents = self.target.get_parents_with_ghosts(version) + list(n2.difference(n1))
1730
self.target.fix_parents(version, new_parents)
1736
InterVersionedFile.register_optimiser(InterKnit)
1739
class WeaveToKnit(InterVersionedFile):
1740
"""Optimised code paths for weave to knit operations."""
1742
_matching_file_from_factory = bzrlib.weave.WeaveFile
1743
_matching_file_to_factory = KnitVersionedFile
1746
def is_compatible(source, target):
1747
"""Be compatible with weaves to knits."""
1749
return (isinstance(source, bzrlib.weave.Weave) and
1750
isinstance(target, KnitVersionedFile))
1751
except AttributeError:
1754
def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
1755
"""See InterVersionedFile.join."""
1756
assert isinstance(self.source, bzrlib.weave.Weave)
1757
assert isinstance(self.target, KnitVersionedFile)
1759
version_ids = self._get_source_version_ids(version_ids, ignore_missing)
1764
pb = ui.ui_factory.nested_progress_bar()
1766
version_ids = list(version_ids)
1768
self.source_ancestry = set(self.source.get_ancestry(version_ids))
1769
this_versions = set(self.target._index.get_versions())
1770
needed_versions = self.source_ancestry - this_versions
1771
cross_check_versions = self.source_ancestry.intersection(this_versions)
1772
mismatched_versions = set()
1773
for version in cross_check_versions:
1774
# scan to include needed parents.
1775
n1 = set(self.target.get_parents_with_ghosts(version))
1776
n2 = set(self.source.get_parents(version))
1777
# if all of n2's parents are in n1, then its fine.
1778
if n2.difference(n1):
1779
# FIXME TEST this check for cycles being introduced works
1780
# the logic is we have a cycle if in our graph we are an
1781
# ancestor of any of the n2 revisions.
1787
parent_ancestors = self.source.get_ancestry(parent)
1788
if version in parent_ancestors:
1789
raise errors.GraphCycleError([parent, version])
1790
# ensure this parent will be available later.
1791
new_parents = n2.difference(n1)
1792
needed_versions.update(new_parents.difference(this_versions))
1793
mismatched_versions.add(version)
1795
if not needed_versions and not mismatched_versions:
1797
full_list = topo_sort(self.source.get_graph())
1799
version_list = [i for i in full_list if (not self.target.has_version(i)
1800
and i in needed_versions)]
1804
total = len(version_list)
1805
for version_id in version_list:
1806
pb.update("Converting to knit", count, total)
1807
parents = self.source.get_parents(version_id)
1808
# check that it will be a consistent copy:
1809
for parent in parents:
1810
# if source has the parent, we must already have it
1811
assert (self.target.has_version(parent))
1812
self.target.add_lines(
1813
version_id, parents, self.source.get_lines(version_id))
1816
for version in mismatched_versions:
1817
# FIXME RBC 20060309 is this needed?
1818
n1 = set(self.target.get_parents_with_ghosts(version))
1819
n2 = set(self.source.get_parents(version))
1820
# write a combined record to our history preserving the current
1821
# parents as first in the list
1822
new_parents = self.target.get_parents_with_ghosts(version) + list(n2.difference(n1))
1823
self.target.fix_parents(version, new_parents)
1829
InterVersionedFile.register_optimiser(WeaveToKnit)
1832
class KnitSequenceMatcher(difflib.SequenceMatcher):
1833
"""Knit tuned sequence matcher.
1835
This is based on profiling of difflib which indicated some improvements
1836
for our usage pattern.
1839
def find_longest_match(self, alo, ahi, blo, bhi):
1840
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
1842
If isjunk is not defined:
1844
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
1845
alo <= i <= i+k <= ahi
1846
blo <= j <= j+k <= bhi
1847
and for all (i',j',k') meeting those conditions,
1850
and if i == i', j <= j'
1852
In other words, of all maximal matching blocks, return one that
1853
starts earliest in a, and of all those maximal matching blocks that
1854
start earliest in a, return the one that starts earliest in b.
1856
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
1857
>>> s.find_longest_match(0, 5, 0, 9)
1860
If isjunk is defined, first the longest matching block is
1861
determined as above, but with the additional restriction that no
1862
junk element appears in the block. Then that block is extended as
1863
far as possible by matching (only) junk elements on both sides. So
1864
the resulting block never matches on junk except as identical junk
1865
happens to be adjacent to an "interesting" match.
1867
Here's the same example as before, but considering blanks to be
1868
junk. That prevents " abcd" from matching the " abcd" at the tail
1869
end of the second sequence directly. Instead only the "abcd" can
1870
match, and matches the leftmost "abcd" in the second sequence:
1872
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
1873
>>> s.find_longest_match(0, 5, 0, 9)
1876
If no blocks match, return (alo, blo, 0).
1878
>>> s = SequenceMatcher(None, "ab", "c")
1879
>>> s.find_longest_match(0, 2, 0, 1)
1883
# CAUTION: stripping common prefix or suffix would be incorrect.
1887
# Longest matching block is "ab", but if common prefix is
1888
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
1889
# strip, so ends up claiming that ab is changed to acab by
1890
# inserting "ca" in the middle. That's minimal but unintuitive:
1891
# "it's obvious" that someone inserted "ac" at the front.
1892
# Windiff ends up at the same place as diff, but by pairing up
1893
# the unique 'b's and then matching the first two 'a's.
1895
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
1896
besti, bestj, bestsize = alo, blo, 0
1897
# find longest junk-free match
1898
# during an iteration of the loop, j2len[j] = length of longest
1899
# junk-free match ending with a[i-1] and b[j]
1903
for i in xrange(alo, ahi):
1904
# look at all instances of a[i] in b; note that because
1905
# b2j has no junk keys, the loop is skipped if a[i] is junk
1906
j2lenget = j2len.get
2820
node_refs = (parents, )
2823
raise KnitCorrupt(self, "attempt to add node with parents "
2824
"in parentless index.")
2826
keys[key] = (value, node_refs)
2829
present_nodes = self._get_entries(keys)
2830
for (index, key, value, node_refs) in present_nodes:
2831
if (value[0] != keys[key][0][0] or
2832
node_refs[:1] != keys[key][1][:1]):
2833
raise KnitCorrupt(self, "inconsistent details in add_records"
2834
": %s %s" % ((value, node_refs), keys[key]))
2838
for key, (value, node_refs) in keys.iteritems():
2839
result.append((key, value, node_refs))
2841
for key, (value, node_refs) in keys.iteritems():
2842
result.append((key, value))
2843
self._add_callback(result)
2844
if missing_compression_parents:
2845
# This may appear to be incorrect (it does not check for
2846
# compression parents that are in the existing graph index),
2847
# but such records won't have been buffered, so this is
2848
# actually correct: every entry when
2849
# missing_compression_parents==True either has a missing parent, or
2850
# a parent that is one of the keys in records.
2851
compression_parents.difference_update(keys)
2852
self._missing_compression_parents.update(compression_parents)
2853
# Adding records may have satisfied missing compression parents.
2854
self._missing_compression_parents.difference_update(keys)
2856
def scan_unvalidated_index(self, graph_index):
2857
"""Inform this _KnitGraphIndex that there is an unvalidated index.
2859
This allows this _KnitGraphIndex to keep track of any missing
2860
compression parents we may want to have filled in to make those
2863
:param graph_index: A GraphIndex
2866
new_missing = graph_index.external_references(ref_list_num=1)
2867
new_missing.difference_update(self.get_parent_map(new_missing))
2868
self._missing_compression_parents.update(new_missing)
2869
if self._key_dependencies is not None:
2870
# Add parent refs from graph_index (and discard parent refs that
2871
# the graph_index has).
2872
for node in graph_index.iter_all_entries():
2873
self._key_dependencies.add_references(node[1], node[3][0])
2875
def get_missing_compression_parents(self):
2876
"""Return the keys of missing compression parents.
2878
Missing compression parents occur when a record stream was missing
2879
basis texts, or an index was scanned that had missing basis texts.
2881
return frozenset(self._missing_compression_parents)
2883
def get_missing_parents(self):
2884
"""Return the keys of missing parents."""
2885
# If updating this, you should also update
2886
# groupcompress._GCGraphIndex.get_missing_parents
2887
# We may have false positives, so filter those out.
2888
self._key_dependencies.add_keys(
2889
self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
2890
return frozenset(self._key_dependencies.get_unsatisfied_refs())
2892
def _check_read(self):
2893
"""raise if reads are not permitted."""
2894
if not self._is_locked():
2895
raise errors.ObjectNotLocked(self)
2897
def _check_write_ok(self):
2898
"""Assert if writes are not permitted."""
2899
if not self._is_locked():
2900
raise errors.ObjectNotLocked(self)
2902
def _compression_parent(self, an_entry):
2903
# return the key that an_entry is compressed against, or None
2904
# Grab the second parent list (as deltas implies parents currently)
2905
compression_parents = an_entry[3][1]
2906
if not compression_parents:
2908
if len(compression_parents) != 1:
2909
raise AssertionError(
2910
"Too many compression parents: %r" % compression_parents)
2911
return compression_parents[0]
2913
def get_build_details(self, keys):
2914
"""Get the method, index_memo and compression parent for version_ids.
2916
Ghosts are omitted from the result.
2918
:param keys: An iterable of keys.
2919
:return: A dict of key:
2920
(index_memo, compression_parent, parents, record_details).
2922
opaque structure to pass to read_records to extract the raw
2925
Content that this record is built upon, may be None
2927
Logical parents of this node
2929
extra information about the content which needs to be passed to
2930
Factory.parse_record
2934
entries = self._get_entries(keys, False)
2935
for entry in entries:
2937
if not self._parents:
2940
parents = entry[3][0]
2941
if not self._deltas:
2942
compression_parent_key = None
2944
compression_parent_key = self._compression_parent(entry)
2945
noeol = (entry[2][0] == 'N')
2946
if compression_parent_key:
2947
method = 'line-delta'
2950
result[key] = (self._node_to_position(entry),
2951
compression_parent_key, parents,
2955
def _get_entries(self, keys, check_present=False):
2956
"""Get the entries for keys.
2958
:param keys: An iterable of index key tuples.
2963
for node in self._graph_index.iter_entries(keys):
2965
found_keys.add(node[1])
2967
# adapt parentless index to the rest of the code.
2968
for node in self._graph_index.iter_entries(keys):
2969
yield node[0], node[1], node[2], ()
2970
found_keys.add(node[1])
2972
missing_keys = keys.difference(found_keys)
2974
raise RevisionNotPresent(missing_keys.pop(), self)
2976
def get_method(self, key):
2977
"""Return compression method of specified key."""
2978
return self._get_method(self._get_node(key))
2980
def _get_method(self, node):
2981
if not self._deltas:
2983
if self._compression_parent(node):
2988
def _get_node(self, key):
2990
return list(self._get_entries([key]))[0]
2992
raise RevisionNotPresent(key, self)
2994
def get_options(self, key):
2995
"""Return a list representing options.
2999
node = self._get_node(key)
3000
options = [self._get_method(node)]
3001
if node[2][0] == 'N':
3002
options.append('no-eol')
3005
def get_parent_map(self, keys):
3006
"""Get a map of the parents of keys.
3008
:param keys: The keys to look up parents for.
3009
:return: A mapping from keys to parents. Absent keys are absent from
3013
nodes = self._get_entries(keys)
3017
result[node[1]] = node[3][0]
3020
result[node[1]] = None
3023
def get_position(self, key):
3024
"""Return details needed to access the version.
3026
:return: a tuple (index, data position, size) to hand to the access
3027
logic to get the record.
3029
node = self._get_node(key)
3030
return self._node_to_position(node)
3032
has_key = _mod_index._has_key_from_parent_map
3035
"""Get all the keys in the collection.
3037
The keys are not ordered.
3040
return [node[1] for node in self._graph_index.iter_all_entries()]
3042
missing_keys = _mod_index._missing_keys_from_parent_map
3044
def _node_to_position(self, node):
3045
"""Convert an index value to position details."""
3046
bits = node[2][1:].split(' ')
3047
return node[0], int(bits[0]), int(bits[1])
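# Illustrative sketch, not part of the original source: a graph-index value
# of the form '<flag><pos> <size>', e.g. 'N1024 342' for a no-eol record,
# maps to the access memo (graph_index, 1024, 342).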
3049
def _sort_keys_by_io(self, keys, positions):
3050
"""Figure out an optimal order to read the records for the given keys.
3052
Sort keys, grouped by index and sorted by position.
3054
:param keys: A list of keys whose records we want to read. This will be
3056
:param positions: A dict, such as the one returned by
3057
_get_components_positions()
3060
def get_index_memo(key):
3061
# index_memo is at offset [1]. It is made up of (GraphIndex,
3062
# position, size). GI is an object, which will be unique for each
3063
# pack file. This causes us to group by pack file, then sort by
3064
# position. Size doesn't matter, but it isn't worth breaking up the
3066
return positions[key][1]
3067
return keys.sort(key=get_index_memo)
3069
_get_total_build_size = _get_total_build_size
3072
class _KnitKeyAccess(object):
3073
"""Access to records in .knit files."""
3075
def __init__(self, transport, mapper):
3076
"""Create a _KnitKeyAccess with transport and mapper.
3078
:param transport: The transport the access object is rooted at.
3079
:param mapper: The mapper used to map keys to .knit files.
3081
self._transport = transport
3082
self._mapper = mapper
3084
def add_raw_records(self, key_sizes, raw_data):
3085
"""Add raw knit bytes to a storage area.
3087
The data is spooled to the container writer in one bytes-record per
3090
:param key_sizes: An iterable of tuples containing the key and size of each
3092
:param raw_data: A bytestring containing the data.
3093
:return: A list of memos to retrieve the record later. Each memo is an
3094
opaque index memo. For _KnitKeyAccess the memo is (key, pos,
3095
length), where the key is the record key.
3097
if type(raw_data) != str:
3098
raise AssertionError(
3099
'data must be plain bytes was %s' % type(raw_data))
3102
# TODO: This can be tuned for writing to sftp and other servers where
3103
# append() is relatively expensive by grouping the writes to each key
3105
for key, size in key_sizes:
3106
path = self._mapper.map(key)
3108
base = self._transport.append_bytes(path + '.knit',
3109
raw_data[offset:offset+size])
3110
except errors.NoSuchFile:
3111
self._transport.mkdir(osutils.dirname(path))
3112
base = self._transport.append_bytes(path + '.knit',
3113
raw_data[offset:offset+size])
3117
result.append((key, base, size))
3121
"""Flush pending writes on this access object.
3123
For .knit files this is a no-op.
3127
def get_raw_records(self, memos_for_retrieval):
3128
"""Get the raw bytes for a records.
3130
:param memos_for_retrieval: An iterable containing the access memo for
3131
retrieving the bytes.
3132
:return: An iterator over the bytes of the records.
3134
# first pass, group into same-index requests to minimise readv's issued.
3136
current_prefix = None
3137
for (key, offset, length) in memos_for_retrieval:
3138
if current_prefix == key[:-1]:
3139
current_list.append((offset, length))
3141
if current_prefix is not None:
3142
request_lists.append((current_prefix, current_list))
3143
current_prefix = key[:-1]
3144
current_list = [(offset, length)]
3145
# handle the last entry
3146
if current_prefix is not None:
3147
request_lists.append((current_prefix, current_list))
3148
for prefix, read_vector in request_lists:
3149
path = self._mapper.map(prefix) + '.knit'
3150
for pos, data in self._transport.readv(path, read_vector):
3154
class _DirectPackAccess(object):
3155
"""Access to data in one or more packs with less translation."""
3157
def __init__(self, index_to_packs, reload_func=None, flush_func=None):
3158
"""Create a _DirectPackAccess object.
3160
:param index_to_packs: A dict mapping index objects to the transport
3161
and file names for obtaining data.
3162
:param reload_func: A function to call if we determine that the pack
3163
files have moved and we need to reload our caches. See
3164
bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
3166
self._container_writer = None
3167
self._write_index = None
3168
self._indices = index_to_packs
3169
self._reload_func = reload_func
3170
self._flush_func = flush_func
3172
def add_raw_records(self, key_sizes, raw_data):
3173
"""Add raw knit bytes to a storage area.
3175
The data is spooled to the container writer in one bytes-record per
3178
:param key_sizes: An iterable of tuples containing the key and size of each
3180
:param raw_data: A bytestring containing the data.
3181
:return: A list of memos to retrieve the record later. Each memo is an
3182
opaque index memo. For _DirectPackAccess the memo is (index, pos,
3183
length), where the index field is the write_index object supplied
3184
to the PackAccess object.
3186
if type(raw_data) != str:
3187
raise AssertionError(
3188
'data must be plain bytes was %s' % type(raw_data))
3191
for key, size in key_sizes:
3192
p_offset, p_length = self._container_writer.add_bytes_record(
3193
raw_data[offset:offset+size], [])
3195
result.append((self._write_index, p_offset, p_length))
3199
"""Flush pending writes on this access object.
3201
This will flush any buffered writes to a NewPack.
3203
if self._flush_func is not None:
1909
# changing b2j.get(a[i], nothing) to a try:KeyError pair produced the
1910
# following improvement
1911
# 704 0 4650.5320 2620.7410 bzrlib.knit:1336(find_longest_match)
1912
# +326674 0 1655.1210 1655.1210 +<method 'get' of 'dict' objects>
1913
# +76519 0 374.6700 374.6700 +<method 'has_key' of 'dict' objects>
1915
# 704 0 3733.2820 2209.6520 bzrlib.knit:1336(find_longest_match)
1916
# +211400 0 1147.3520 1147.3520 +<method 'get' of 'dict' objects>
1917
# +76519 0 376.2780 376.2780 +<method 'has_key' of 'dict' objects>
3206
def get_raw_records(self, memos_for_retrieval):
3207
"""Get the raw bytes for a records.
3209
:param memos_for_retrieval: An iterable containing the (index, pos,
3210
length) memo for retrieving the bytes. The Pack access method
3211
looks up the pack to use for a given record in its index_to_pack
3213
:return: An iterator over the bytes of the records.
3215
# first pass, group into same-index requests
3217
current_index = None
3218
for (index, offset, length) in memos_for_retrieval:
3219
if current_index == index:
3220
current_list.append((offset, length))
3222
if current_index is not None:
3223
request_lists.append((current_index, current_list))
3224
current_index = index
3225
current_list = [(offset, length)]
3226
# handle the last entry
3227
if current_index is not None:
3228
request_lists.append((current_index, current_list))
3229
for index, offsets in request_lists:
3231
transport, path = self._indices[index]
1921
3232
except KeyError:
                # A KeyError here indicates that someone has triggered an index
                # reload, and this index has gone missing, we need to start
                # over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that can
                    # be done here, so we fail.
                    raise
                raise errors.RetryWithNewPacks(index,
                                               reload_occurred=True,
                                               exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                                               reload_occurred=False,
                                               exc_info=sys.exc_info())

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error.
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
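
# The retry protocol described by reload_or_raise() above is driven by the
# caller: attempt the read, catch RetryWithNewPacks, ask the access object
# whether reloading helped, and loop if it did. A hedged sketch of that
# calling pattern; do_read is a placeholder for any operation that touches
# pack data through this access object.
def _retrying_read(access, do_read):
    while True:
        try:
            return do_read()
        except errors.RetryWithNewPacks, retry_exc:
            # reload_or_raise() either re-raises the original error or
            # returns, in which case the packs were reloaded and the read
            # should simply be attempted again.
            access.reload_or_raise(retry_exc)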

# Deprecated, use PatienceSequenceMatcher instead
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher

def annotate_knit(knit, revision_id):
    """Annotate a knit with no cached annotations.

    This implementation is for knits with no cached annotations.
    It will work for knits with cached annotations, but this is not
    recommended.
    """
    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate(revision_id))

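# annotate_knit() drives a _KnitAnnotator (defined below) and returns an
# iterator of one (origin, line) pair per line of the requested text, where
# origin is the key of the revision that introduced that line. A hedged
# usage sketch with placeholder objects:
#
#   for origin, line in annotate_knit(knit_vf, ('file-id', 'rev-id')):
#       print '%s: %s' % (origin, line),
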
class _KnitAnnotator(object):
    """Build up the annotations for a text."""

    def __init__(self, knit):
        self._knit = knit

        # Content objects, which differ from fulltexts because of how final
        # newlines are treated by knits. The content objects here will always
        # have a final newline.
        self._fulltext_contents = {}

        # Annotated lines of specific revisions
        self._annotated_lines = {}

        # Track the raw data for nodes that we could not process yet.
        # This maps the revision_id of the base to a list of children that will
        # be annotated from it.
        self._pending_children = {}

        # Nodes which cannot be extracted
        self._ghosts = set()

        # Track how many children this node has, so we know if we need to keep
        # it
        self._annotate_children = {}
        self._compression_children = {}

        self._all_build_details = {}
        # The children => parent revision_id graph
        self._revision_id_graph = {}

        self._heads_provider = None

        self._nodes_to_keep_annotations = set()
        self._generations_until_keep = 100

    def set_generations_until_keep(self, value):
        """Set the number of generations before caching a node.

        Setting this to -1 will cache every merge node, setting this higher
        will cache fewer nodes.
        """
        self._generations_until_keep = value

    def _add_fulltext_content(self, revision_id, content_obj):
        self._fulltext_contents[revision_id] = content_obj
        # TODO: jam 20080305 It might be good to check the sha1digest here
        return content_obj.text()

    def _check_parents(self, child, nodes_to_annotate):
        """Check if all parents have been processed.

        :param child: A tuple of (rev_id, parents, raw_content)
        :param nodes_to_annotate: If child is ready, add it to
            nodes_to_annotate, otherwise put it back in self._pending_children
        """
        for parent_id in child[1]:
            if (parent_id not in self._annotated_lines):
                # This parent is missing, so the child is not ready yet
                self._pending_children.setdefault(parent_id,
                                                  []).append(child)
                break
        else:
            # This one is ready to be processed
            nodes_to_annotate.append(child)

    def _add_annotation(self, revision_id, fulltext, parent_ids,
                        left_matching_blocks=None):
        """Add an annotation entry.

        All parents should already have been annotated.
        :return: A list of children that now have their parents satisfied.
        """
        a = self._annotated_lines
        annotated_parent_lines = [a[p] for p in parent_ids]
        annotated_lines = list(annotate.reannotate(annotated_parent_lines,
            fulltext, revision_id, left_matching_blocks,
            heads_provider=self._get_heads_provider()))
        self._annotated_lines[revision_id] = annotated_lines
        for p in parent_ids:
            ann_children = self._annotate_children[p]
            ann_children.remove(revision_id)
            if (not ann_children
                and p not in self._nodes_to_keep_annotations):
                del self._annotated_lines[p]
                del self._all_build_details[p]
                if p in self._fulltext_contents:
                    del self._fulltext_contents[p]
        # Now that we've added this one, see if there are any pending
        # deltas to be done, certainly this parent is finished
        nodes_to_annotate = []
        for child in self._pending_children.pop(revision_id, []):
            self._check_parents(child, nodes_to_annotate)
        return nodes_to_annotate

    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different than the
        data you need to annotate that text. (At a minimum, you need both
        parents to create an annotation, but only need 1 parent to generate the
        fulltext.)

        :return: A list of (key, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        if key in self._annotated_lines:
            # Nothing to do
            return []
        pending = set([key])
        records = []
        generation = 0
        kept_generation = 0
        while pending:
            # get all pending nodes
            generation += 1
            this_iteration = pending
            build_details = self._knit._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._knit._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                self._revision_id_graph[key] = parents
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update(p for p in parents
                               if p not in self._all_build_details)
                if compression_parent:
                    self._compression_children.setdefault(compression_parent,
                        []).append(key)
                if parents:
                    for parent in parents:
                        self._annotate_children.setdefault(parent,
                            set()).add(key)
                    num_gens = generation - kept_generation
                    if ((num_gens >= self._generations_until_keep)
                        and len(parents) > 1):
                        kept_generation = generation
                        self._nodes_to_keep_annotations.add(key)

            missing_versions = this_iteration.difference(build_details.keys())
            self._ghosts.update(missing_versions)
            for missing_version in missing_versions:
                # add a key, no parents
                self._revision_id_graph[missing_version] = ()
                pending.discard(missing_version) # don't look for it
        if self._ghosts.intersection(self._compression_children):
            raise AssertionError(
                "We cannot have nodes which have a ghost compression parent:\n"
                "ghosts: %r\n"
                "compression children: %r"
                % (self._ghosts, self._compression_children))
        # Cleanout anything that depends on a ghost so that we don't wait for
        # the ghost to show up
        for node in self._ghosts:
            if node in self._annotate_children:
                # We won't be building this node
                del self._annotate_children[node]
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records

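    # For reference, each value in the build_details map used below unpacks as
    #
    #   (index_memo, compression_parent, parents, record_details)
    #
    # and the list returned by _get_build_graph() is reversed so that
    # compression parents are read (and their fulltext content cached) before
    # the children that delta against them are processed.
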
    def _annotate_records(self, records):
        """Build the annotations for the listed records."""
        # We iterate in the order read, rather than a strict order requested.
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        for (rev_id, record,
             digest) in self._knit._read_records_iter(records):
            if rev_id in self._annotated_lines:
                continue
            parent_ids = self._revision_id_graph[rev_id]
            parent_ids = [p for p in parent_ids if p not in self._ghosts]
            details = self._all_build_details[rev_id]
            (index_memo, compression_parent, parents,
             record_details) = details
            nodes_to_annotate = []
            # TODO: Remove the punning between compression parents, and
            #       parent_ids, we should be able to do this without assuming
            #       that the compression parent is also a parent_id
            if len(parent_ids) == 0:
                # There are no parents for this node, so just add it
                # TODO: This probably needs to be decoupled
                fulltext_content, delta = self._knit._factory.parse_record(
                    rev_id, record, record_details, None)
                fulltext = self._add_fulltext_content(rev_id, fulltext_content)
                nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
                    parent_ids, left_matching_blocks=None))
            else:
                child = (rev_id, parent_ids, record)
                # Check if all the parents are present
                self._check_parents(child, nodes_to_annotate)
            while nodes_to_annotate:
                # Should we use a queue here instead of a stack?
                (rev_id, parent_ids, record) = nodes_to_annotate.pop()
                (index_memo, compression_parent, parents,
                 record_details) = self._all_build_details[rev_id]
                blocks = None
                if compression_parent is not None:
                    comp_children = self._compression_children[compression_parent]
                    if rev_id not in comp_children:
                        raise AssertionError("%r not in compression children %r"
                            % (rev_id, comp_children))
                    # If there is only 1 child, it is safe to reuse this
                    # content
                    reuse_content = (len(comp_children) == 1
                        and compression_parent not in
                        self._nodes_to_keep_annotations)
                    if reuse_content:
                        # Remove it from the cache since it will be changing
                        parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
                        # Make sure to copy the fulltext since it might be
                        # modified
                        parent_fulltext = list(parent_fulltext_content.text())
                    else:
                        parent_fulltext_content = self._fulltext_contents[compression_parent]
                        parent_fulltext = parent_fulltext_content.text()
                    comp_children.remove(rev_id)
                    fulltext_content, delta = self._knit._factory.parse_record(
                        rev_id, record, record_details,
                        parent_fulltext_content,
                        copy_base_content=(not reuse_content))
                    fulltext = self._add_fulltext_content(rev_id,
                        fulltext_content)
                    if compression_parent == parent_ids[0]:
                        # the compression_parent is the left parent, so we can
                        # reuse the delta
                        blocks = KnitContent.get_line_delta_blocks(delta,
                            parent_fulltext, fulltext)
                else:
                    fulltext_content = self._knit._factory.parse_fulltext(
                        record, rev_id)
                    fulltext = self._add_fulltext_content(rev_id,
                        fulltext_content)
                nodes_to_annotate.extend(
                    self._add_annotation(rev_id, fulltext, parent_ids,
                        left_matching_blocks=blocks))

    def _get_heads_provider(self):
        """Create a heads provider for resolving ancestry issues."""
        if self._heads_provider is not None:
            return self._heads_provider
        parent_provider = _mod_graph.DictParentsProvider(
            self._revision_id_graph)
        graph_obj = _mod_graph.Graph(parent_provider)
        head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
        self._heads_provider = head_cache
        return head_cache

    def annotate(self, key):
        """Return the annotated fulltext at the given key.

        :param key: The key to annotate.
        """
        if len(self._knit._fallback_vfs) > 0:
            # stacked knits can't use the fast path at present.
            return self._simple_annotate(key)
        while True:
            try:
                records = self._get_build_graph(key)
                if key in self._ghosts:
                    raise errors.RevisionNotPresent(key, self._knit)
                self._annotate_records(records)
                return self._annotated_lines[key]
            except errors.RetryWithNewPacks, e:
                self._knit._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()

    def _simple_annotate(self, key):
        """Return annotated fulltext, rediffing from the full texts.

        This is slow but makes no assumptions about the repository
        being able to produce line deltas.
        """
        # TODO: this code generates a parent map of present ancestors; it
        # could be split out into a separate method, and probably should use
        # iter_ancestry instead. -- mbp and robertc 20080704
        graph = _mod_graph.Graph(self._knit)
        head_cache = _mod_graph.FrozenHeadsCache(graph)
        search = graph._make_breadth_first_searcher([key])
        keys = set()
        while True:
            try:
                present, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(present)
        parent_map = self._knit.get_parent_map(keys)
        parent_cache = {}
        reannotate = annotate.reannotate
        for record in self._knit.get_record_stream(keys, 'topological', True):
            key = record.key
            fulltext = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
            parents = parent_map[key]
            if parents is not None:
                parent_lines = [parent_cache[parent] for parent in parent_map[key]]
            else:
                parent_lines = []
            parent_cache[key] = list(
                reannotate(parent_lines, fulltext, key, None, head_cache))
        try:
            return parent_cache[key]
        except KeyError:
            raise errors.RevisionNotPresent(key, self._knit)


try:
    from bzrlib._knit_load_data_c import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data
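
# The two imports above prefer the compiled extension and silently fall back
# to the pure-Python implementation when it has not been built. A minimal,
# runnable illustration of the same guard, using stdlib modules purely as a
# stand-in for the _knit_load_data pair:
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO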