        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            # we don't need to commit this, because the caller already
            # determined that an existing revision of this file is
            # appropriate. If it's not being considered for committing then
            # it and all its parents to the root must be unaltered, so
            # there is no change against the basis.
            if ie.revision == self._new_revision_id:
                raise AssertionError("Impossible situation, a skipped "
                    "inventory entry (%r) claims to be modified in this "
                    "commit (%r)." % (ie, self._new_revision_id))
            return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)
        store = False
        # now we check to see if we need to write a new record to the
        # file graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                # We can't trust a check of the file length because of content
                # filtering.
                if (# if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            else:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
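
    # Illustrative sketch (editor's addition, not original bzrlib code): a
    # hypothetical caller walks the tree's entries and feeds each through
    # record_entry_contents, collecting the returned deltas. The names
    # `builder`, `work_tree` and `summaries` are assumptions for illustration.
    #
    #   for path, ie in entries:
    #       delta, version_recorded, fs_hash = builder.record_entry_contents(
    #           ie, parent_invs, path, work_tree, summaries[path])
    #       if version_recorded:
    #           any_entries_changed = True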
    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            is to be applied against.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and if not do a delta
            # against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
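
    # Illustrative sketch (editor's addition, not original bzrlib code):
    # record_iter_changes is a generator, so a caller must drain it to make
    # the recording happen; the yielded fs hashes can feed a stat cache.
    # The names `builder` and `work_tree` are hypothetical.
    #
    #   for file_id, relpath, fs_hash in builder.record_iter_changes(
    #           work_tree, basis_revision_id, iter_changes):
    #       if fs_hash is not None:
    #           work_tree._observed_sha1(file_id, relpath, fs_hash)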
    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]

class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
            is known to have and are in the repository already.
        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise errors.RootMissing()
        return self._add_inventory_checked(revision_id, inv, parents)
    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serializer.write_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)
    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
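
    # Illustrative sketch (editor's addition, not original bzrlib code): an
    # inventory delta is a list of (old_path, new_path, file_id, new_entry)
    # tuples, where old_path is None for an add, and new_path and new_entry
    # are None for a delete. For example:
    #
    #   delta = [
    #       (None, 'doc/readme.txt', 'readme-id', new_ie),   # add
    #       ('a.txt', 'b.txt', 'a-id', moved_ie),            # rename
    #       ('old.txt', None, 'old-id', None),               # delete
    #   ]
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       basis_revision_id, delta, new_revision_id, parents)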
    def _inventory_add_lines(self, revision_id, parents, lines,
                             check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        result = self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]
        self.inventories._access.flush()
        return result
    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
            up in the inventory store.
        :param config: If None no digital signature will be created.
            If supplied its signature_needed method will be used
            to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))
    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.
        """

    def break_lock(self):
        """Break a lock if one is present from another instance."""
        self.control_files.break_lock()

    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()
    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()
    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
               or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object, current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break
    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))
    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
    @staticmethod
    def create(controldir):
        """Construct the current default format repository in controldir."""
        return RepositoryFormat.get_default_format().initialize(controldir)
    def __init__(self, _format, controldir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param controldir: The ControlDir of the repository.
        :param control_files: Control files to use for locking, etc.
        """
        # In the future we will have a single api for all stores for
        # getting file texts, inventories and revisions, then
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    def add_signature_text(self, revision_id, signature):
        """Store a signature text for a revision.

        :param revision_id: Revision id of the revision
        :param signature: Signature text.
        """
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))
    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()
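
    # Illustrative sketch (editor's addition, not original bzrlib code): the
    # result maps text keys to booleans, e.g.
    #
    #   {('file-id-1', 'rev-1'): True,    # rev-1's inventory refers to it
    #    ('file-id-2', 'rev-2'): False}   # referenced, but not by rev-2
    #
    # so a caller can filter for keys referenced by their own revision:
    #
    #   own_refs = [k for k, v in repo.find_text_key_references().items() if v]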
    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result
    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid
    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
            line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result
    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.
        """

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller which serves to
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
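
    # Illustrative sketch (editor's addition, not original bzrlib code):
    # callers pass an opaque identifier per (file_id, revision_id) and get
    # it back with the content, in arbitrary order:
    #
    #   wanted = [('file-a-id', 'rev-1', 'slot-0'),
    #             ('file-b-id', 'rev-2', 'slot-1')]
    #   out = {}
    #   for identifier, chunks in repo.iter_files_bytes(wanted):
    #       out[identifier] = ''.join(chunks)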
    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()
    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for revision_id in to_query:
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index
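
    # Illustrative sketch (editor's addition, not original bzrlib code): the
    # batching above walks revision_order in slices of batch_size so that at
    # most ~10 inventories are held via the LRU cache at once. E.g. with
    # 10,005 revisions and batch_size = 10, batch_count is
    # 10005 / 10 + 1 = 1001 (Python 2 integer division), and the final
    # slice is simply short or empty.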
    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision ids.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions). knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'. file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed.
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("Fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)
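
    # Illustrative sketch (editor's addition, not original bzrlib code): a
    # fetch-like consumer can dispatch on the knit-kind, relying on file
    # texts being yielded before inventories, signatures and revisions:
    #
    #   for knit_kind, file_id, versions in repo.item_keys_introduced_by(revs):
    #       if knit_kind == 'file':
    #           copy_file_texts(file_id, versions)   # hypothetical helper
    #       elif knit_kind == 'inventory':
    #           copy_inventories(versions)           # hypothetical helper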
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)
    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
def _deserialise_inventory(self, revision_id, xml):
2445
"""Transform the xml into an inventory object.
2447
:param revision_id: The expected revision id of the inventory.
2448
:param xml: A serialised inventory.
2450
result = self._serializer.read_inventory_from_string(xml, revision_id,
2451
entry_cache=self._inventory_entry_cache,
2452
return_from_cache=self._safe_to_return_from_cache)
2453
if result.revision_id != revision_id:
2454
raise AssertionError('revision id mismatch %s != %s' % (
2455
result.revision_id, revision_id))
2458
def get_serializer_format(self):
2459
return self._serializer.format_num
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text
    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.
        """

        try:
            revision_id.decode('ascii')
        except UnicodeDecodeError:
            raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)

# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')

def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples. The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
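
# Illustrative usage sketch (editor's addition, not original bzrlib code):
# install_revisions expects the repository to be write-locked by the caller;
# it manages its own write group, aborting on any error and committing
# otherwise. A hypothetical caller might look like:
#
#   repo.lock_write()
#   try:
#       install_revisions(repo, [(rev, tree, None) for rev, tree in pairs])
#   finally:
#       repo.unlock()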

def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)

class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout."""

# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )
# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
    ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )
format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
    'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
format_registry.register_lazy(
    'Bazaar development format 8\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
            content is copied.
        """
        ui.ui_factory.warn_experimental_format_fetch(self)
        from bzrlib.fetch import RepoFetcher
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        find_ghosts=find_ghosts)
    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()
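
    # Illustrative sketch (editor's addition, not original bzrlib code): the
    # loop above expands a breadth-first frontier in batches and prunes it at
    # revisions the target already has. On a plain parent-map graph the same
    # walk looks like:
    #
    #   frontier = set(start_revs)
    #   missing = set()
    #   while frontier:
    #       frontier = frontier - target_revs    # stop at common revisions
    #       missing.update(frontier)
    #       next_frontier = set()
    #       for rev in frontier:
    #           next_frontier.update(source_parent_map.get(rev, ()))
    #       frontier = next_frontier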
    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        if source._serializer != target._serializer:
            # The two repositories use different serializers.
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)
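
# Usage sketch (added, not original code): optimisers registered at the
# bottom of this file via InterRepository.register_optimiser() are consulted
# by InterRepository.get(), which returns the first one whose is_compatible()
# accepts the pair, falling back to the generic implementation.  Assuming two
# already-open Repository objects `source` and `target`:
def _example_fetch_with_optimiser(source, target):
    """Fetch everything from source into target via the best optimiser."""
    inter = InterRepository.get(source, target)
    inter.fetch(revision_id=None, find_ghosts=False)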


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplemented):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('Copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids, so we are assured
            # that all referenced revisions are present as they are installed
            # in topological order; the tip revision was validated by
            # get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
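
# Worked example (added): the set algebra used by the search_missing
# implementations above, with `is_present` standing in for the repository's
# _eliminate_revisions_not_present() check.  Only candidates the target
# might have need the expensive presence check:
def _example_required_revisions(source_ids, target_ids, is_present):
    """Return the revisions a push would need to send (toy sketch)."""
    source_ids_set = set(source_ids)
    possibly_present = set(target_ids).intersection(source_ids_set)
    actually_present = set(r for r in possibly_present if is_present(r))
    return source_ids_set.difference(actually_present)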


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids, so we are assured
            # that all referenced revisions are present as they are installed
            # in topological order; the tip revision was validated by
            # get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target.  IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True
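
    # Note (added): IDS_never/IDS_always above are ordinary members of the
    # bzrlib debug flag set, so this path can be forced on or off for
    # experiments, e.g.:
    #
    #     from bzrlib import debug
    #     debug.debug_flags.add('IDS_always')     # force this slow path
    #     debug.debug_flags.discard('IDS_always')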

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
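
    # Added illustration: the sort-and-take-first above simply picks the
    # shortest delta.  The same selection over a hypothetical mapping of
    # basis_id -> delta list:
    @staticmethod
    def _example_shortest_delta(deltas_by_basis):
        """Return (basis_id, delta) for the shortest delta (sketch only)."""
        ranked = sorted((len(delta), basis_id, delta)
                        for basis_id, delta in deltas_by_basis.items())
        _, basis_id, delta = ranked[0]
        return basis_id, delta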

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured.  (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert.  We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        if self._converting_to_rich_root and len(revision_ids) > 100:
            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
                                                            revision_ids)
        else:
            a_graph = None
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache,
                                             a_graph=a_graph)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))
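
    # Added sketch: each batch above is bracketed by start_write_group() and
    # commit_write_group(), aborting on any error - the standard discipline
    # for inserting data into a repository.  Assuming `repo` is already
    # write-locked:
    @staticmethod
    def _example_write_group(repo, insert_callable):
        """Run insert_callable() inside a write group on repo (sketch only)."""
        repo.start_write_group()
        try:
            insert_callable()
        except:
            repo.abort_write_group()
            raise
        else:
            return repo.commit_write_group()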

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        ui.ui_factory.warn_experimental_format_fetch(self)
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving it the current pace.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        pb = ui.ui_factory.nested_progress_bar()
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        pb.update('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        pb.update('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            pb.update('Copying content')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        pb.update('Deleting old repository content')
        self.repo_dir.transport.delete_tree('repository.backup')
        ui.ui_factory.note('repository converted')
        pb.finished()


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
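
# Worked example (added): with the predefined-entity map plus numeric
# character references, _unescape_xml behaves like this (Python 2 bytes):
#
#     >>> _unescape_xml('a &amp; b &lt;c&gt;')
#     'a & b <c>'
#     >>> _unescape_xml('&#169;')  # numeric reference, encoded as UTF-8
#     '\xc2\xa9'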


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None, ancestors=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references, ancestors=ancestors)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        local_progress = None
        if progress_bar is None:
            local_progress = ui.ui_factory.nested_progress_bar()
            progress_bar = local_progress
        try:
            return self._check_file_version_parents(texts, progress_bar)
        finally:
            if local_progress:
                local_progress.finished()

    def _check_file_version_parents(self, texts, progress_bar):
        """See check_file_version_parents."""
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
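
# Worked example (added): _strip_NULL_ghosts on a toy graph.  'ghost' never
# appears as a key, so it is dropped from parent tuples, and the null
# revision entry is removed outright:
#
#     graph = {'r1': ('null:',), 'r2': ('r1', 'ghost'), 'null:': ()}
#     _strip_NULL_ghosts(graph)
#     # -> {'r1': (), 'r2': ('r1',)}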


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
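
    # Added sketch of the caller's side of the resume protocol above: when
    # insert_stream() returns resume tokens, the write group was suspended
    # and the caller must fetch the missing keys from the source and insert
    # a second stream.  `source` is assumed to be a matching StreamSource:
    @staticmethod
    def _example_insert_with_resume(sink, source, src_format, search):
        """Drive insert_stream() to completion (illustrative sketch only)."""
        stream = source.get_stream(search)
        tokens, missing_keys = sink.insert_stream(stream, src_format, [])
        if missing_keys:
            stream = source.get_stream_for_missing_keys(missing_keys)
            tokens, missing_keys = sink.insert_stream(stream, src_format,
                                                      tokens)
        return tokens, missing_keys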

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack.  This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport).  This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                # conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        if missing_keys:
            # suspend the write group and tell the caller what is missing.
            # We know we can suspend or else we would not have entered this
            # code path. (All repositories that can handle missing keys can
            # handle suspending a write group).
            write_group_tokens = self.target_repo.suspend_write_group()
            return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format
        self._record_counter = RecordCounter()

    def delta_on_metadata(self):
        """Return True if delta's are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)
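
    # Added note: get_stream() yields (substream_type, record_stream) pairs
    # in a fixed order - file texts, then root texts and inventories, then
    # signatures and revisions - which is exactly the shape that
    # StreamSink._locked_insert_stream() dispatches on:
    #
    #     for substream_type, substream in source.get_stream(search):
    #         ...  # 'texts', 'inventories', 'revisions', 'signatures', ...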

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink.  (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records.  The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream
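
    # Added illustration: missing keys arrive prefixed with their substream
    # kind and are regrouped per kind before streaming, e.g.:
    #
    #     ('texts', file_id, revision_id) -> keys['texts'].add((file_id, revision_id))
    #     ('inventories', revision_id)    -> keys['inventories'].add((revision_id,))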

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # identical format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventorydeltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted.  That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
                                       delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against.  Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index