        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
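
    # Illustrative sketch (not in the original source): how a caller drives
    # the delete-recording protocol enabled by will_record_deletes(). The
    # method name and the 'deleted' iterable are hypothetical; only
    # will_record_deletes() and record_delete() are real API here.
    def _example_record_deletes(self, deleted):
        self.will_record_deletes()
        for path, file_id in deleted:
            # Every delete against the basis must be recorded, or the
            # accumulated basis delta is invalid.
            self.record_delete(path, file_id)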

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the
                # basis). To match this we always issue a delta, because the
                # revision of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r).", (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has too.
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                # We can't trust a check of the file length because of content
                # filtering...
                if (# if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this entry at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
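
    # Illustrative sketch (not in the original source): how the three-tuple
    # returned by record_entry_contents() is typically consumed. The method
    # name and all local variable names are hypothetical.
    def _example_consume_record_entry(self, ie, parent_invs, path, tree,
        content_summary):
        delta, version_recorded, fs_hash = self.record_entry_contents(
            ie, parent_invs, path, tree, content_summary)
        if version_recorded:
            # A new version of the entry was stored for this commit.
            pass
        if fs_hash is not None:
            # Stat-cache fingerprint: (sha1, stat_value) for the path.
            sha1, stat_value = fs_hash
        return delta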

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            is made against.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        changes = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        merged_ids = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            # in basis
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    # Wants a path hint?
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and if not do a delta
            # against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
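
    # Illustrative sketch (not in the original source): driving the
    # record_iter_changes generator from a committer. The method name is
    # hypothetical; iter_changes items with a current kind of None must be
    # filtered out before this point, as the docstring above requires.
    def _example_drive_record_iter_changes(self, tree, basis_revision_id):
        basis_tree = self.repository.revision_tree(basis_revision_id)
        iter_changes = tree.iter_changes(basis_tree)
        for file_id, relpath, fs_hash in self.record_iter_changes(
                tree, basis_revision_id, iter_changes):
            # Feed observed sha1s back into the tree's stat cache.
            tree._observed_sha1(file_id, relpath, fs_hash)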

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
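
    # Illustrative note (not in the original source): the key layout used by
    # _add_text_to_weave, shown in isolation. 'file_id' and 'heads' are
    # hypothetical inputs; text keys pair a file id with a revision id.
    def _example_text_key_shape(self, file_id, heads):
        key = (file_id, self._new_revision_id)
        parent_keys = tuple((file_id, head) for head in heads)
        return key, parent_keys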


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


class RepositoryWriteLockResult(LogicalLockResult):
    """The result of write locking a repository."""


class Repository(_RelockDebugMixin, controldir.ControlComponent):
    """Repository holding history for one or more branches."""

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise errors.RootMissing()
        return self._add_inventory_checked(revision_id, inv, parents)
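
    # Illustrative sketch (not in the original source): add_inventory must be
    # called inside a write group; this shows the minimal locking dance. The
    # method name, revision_id and inv are hypothetical.
    def _example_add_inventory(self, revision_id, inv):
        self.lock_write()
        try:
            self.start_write_group()
            try:
                validator = self.add_inventory(revision_id, inv, [])
            except:
                self.abort_write_group()
                raise
            else:
                self.commit_write_group()
        finally:
            self.unlock()
        return validator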

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serializer.write_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
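
    # Illustrative sketch (not in the original source): the shape of the
    # delta consumed by add_inventory_by_delta. Each item is (old_path,
    # new_path, file_id, new_entry); None for old_path means an add, None
    # for new_path and the entry means a delete. All names and paths here
    # are hypothetical.
    def _example_inventory_delta(self, basis_revision_id, new_revision_id,
        new_entry):
        delta = [
            (None, 'doc/new-file.txt', 'new-file-id', new_entry),   # add
            ('old-name.txt', None, 'old-file-id', None),            # delete
            ]
        return self.add_inventory_by_delta(
            basis_revision_id, delta, new_revision_id, [basis_revision_id])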

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        result = self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]
        self.inventories._access.flush()
        return result

    @needs_write_lock
    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. if None, it will be looked
                    up in the inventory store
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.
        """

    def break_lock(self):
        """Break a lock if one is present from another instance."""
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
            or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object,
                                current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break
def _check_record(self, kind, record, checker, last_object, item_data):
1271
"""Check a single text from this repository."""
1272
if kind == 'inventories':
1273
rev_id = record.key[0]
1274
inv = self._deserialise_inventory(rev_id,
1275
record.get_bytes_as('fulltext'))
1276
if last_object is not None:
1277
delta = inv._make_delta(last_object)
1278
for old_path, path, file_id, ie in delta:
1281
ie.check(checker, rev_id, inv)
1283
for path, ie in inv.iter_entries():
1284
ie.check(checker, rev_id, inv)
1285
if self._format.fast_deltas:
1287
elif kind == 'chk_bytes':
1288
# No code written to check chk_bytes for this repo format.
1289
checker._report_items.append(
1290
'unsupported key type chk_bytes for %s' % (record.key,))
1291
elif kind == 'texts':
1292
self._check_text(record, checker, item_data)
1294
checker._report_items.append(
1295
'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))

    @staticmethod
    def create(controldir):
        """Construct the current default format repository in controldir."""
        return RepositoryFormat.get_default_format().initialize(controldir)

    def __init__(self, _format, controldir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param controldir: The ControlDir of the repository.
        :param control_files: Control files to use for locking, etc.
        """
        # In the future we will have a single api for all stores for
        # getting file texts, inventories and revisions, then
        # this construct will accept instances of those things.

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        """Store a signature text for a revision.

        :param revision_id: Revision id of the revision
        :param signature: Signature text.
        """
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()
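
    # Illustrative sketch (not in the original source): partitioning the
    # mapping returned by find_text_key_references into keys that are
    # referenced by the inventory of their own revision and keys that are
    # not. The method name is hypothetical.
    def _example_split_text_key_references(self):
        refs = self.find_text_key_references()
        referenced = set(key for key, is_referenced in refs.items()
                         if is_referenced)
        return referenced, set(refs) - referenced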

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result
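
    # Illustrative sketch (not in the original source): the try/except
    # KeyError caching idiom used in the loop above, in isolation. The
    # method name and 'values' are hypothetical; _unescape_xml is the real
    # module-level helper.
    def _example_unescape_with_cache(self, values):
        cache = {}
        result = []
        for value in values:
            try:
                unescaped = cache[value]
            except KeyError:
                unescaped = _unescape_xml(value)
                cache[value] = unescaped
            result.append(unescaped)
        return result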

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.
        """

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller as part of desired_files. It should
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
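
    # Illustrative sketch (not in the original source): driving
    # iter_files_bytes with caller-chosen identifiers. 'wanted' is a
    # hypothetical list of (file_id, revision_id) pairs; the list index is
    # used as the opaque identifier.
    def _example_iter_files_bytes(self, wanted):
        desired_files = [
            (file_id, revision_id, index)
            for index, (file_id, revision_id) in enumerate(wanted)]
        texts = {}
        for identifier, bytes_iterator in self.iter_files_bytes(desired_files):
            texts[identifier] = ''.join(bytes_iterator)
        return texts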

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for revision_id in to_query:
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions). knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'. file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("Fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)
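
    # Illustrative sketch (not in the original source): collecting streamed
    # inventories by revision id. With no ordering argument the iterator
    # preserves the order of revision_ids, buffering as needed. The method
    # name is hypothetical.
    def _example_collect_inventories(self, revision_ids):
        by_revision = {}
        for inv in self.iter_inventories(revision_ids):
            by_revision[inv.revision_id] = inv
        return by_revision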

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
def _deserialise_inventory(self, revision_id, xml):
2448
"""Transform the xml into an inventory object.
2450
:param revision_id: The expected revision id of the inventory.
2451
:param xml: A serialised inventory.
2453
result = self._serializer.read_inventory_from_string(xml, revision_id,
2454
entry_cache=self._inventory_entry_cache,
2455
return_from_cache=self._safe_to_return_from_cache)
2456
if result.revision_id != revision_id:
2457
raise AssertionError('revision id mismatch %s != %s' % (
2458
result.revision_id, revision_id))
2461
def get_serializer_format(self):
2462
return self._serializer.format_num
2465
def _get_inventory_xml(self, revision_id):
2466
"""Get serialized inventory as a string."""
2467
texts = self._iter_inventory_xmls([revision_id], 'unordered')
2469
text, revision_id = texts.next()
2470
except StopIteration:
2471
raise errors.HistoryMissing(self, 'inventory', revision_id)
945
raise NotImplementedError(self.iter_files_bytes)
2474
947
def get_rev_id_for_revno(self, revno, known_pair):
2475
948
"""Return the revision id of a revno, given a later (revno, revid)
2808
1252
except UnicodeDecodeError:
2809
1253
raise errors.NonAsciiRevisionId(method, self)
2811
def revision_graph_can_have_wrong_parents(self):
2812
"""Is it possible for this repository to have a revision graph with
2815
If True, then this repository must also implement
2816
_find_inconsistent_revision_parents so that check and reconcile can
2817
check for inconsistencies before proceeding with other checks that may
2818
depend on the revision index being consistent.
2820
raise NotImplementedError(self.revision_graph_can_have_wrong_parents)
2823
# remove these delegates a while after bzr 0.15
2824
def __make_delegated(name, from_module):
2825
def _deprecated_repository_forwarder():
2826
symbol_versioning.warn('%s moved to %s in bzr 0.15'
2827
% (name, from_module),
2831
return pyutils.get_named_object(from_module, name)
2832
except AttributeError:
2833
raise AttributeError('module %s has no name %s'
2834
% (sys.modules[from_module], name))
2835
globals()[name] = _deprecated_repository_forwarder
2838
'AllInOneRepository',
2839
'WeaveMetaDirRepository',
2840
'PreSplitOutRepositoryFormat',
2841
'RepositoryFormat4',
2842
'RepositoryFormat5',
2843
'RepositoryFormat6',
2844
'RepositoryFormat7',
2846
__make_delegated(_name, 'bzrlib.repofmt.weaverepo')
2850
'RepositoryFormatKnit',
2851
'RepositoryFormatKnit1',
2853
__make_delegated(_name, 'bzrlib.repofmt.knitrepo')
2856
def install_revision(repository, rev, revision_tree):
2857
"""Install all revision data into a repository."""
2858
install_revisions(repository, [(rev, revision_tree, None)])
2861
def install_revisions(repository, iterable, num_revisions=None, pb=None):
2862
"""Install all revision data into a repository.
2864
Accepts an iterable of revision, tree, signature tuples. The signature
2867
repository.start_write_group()
2869
inventory_cache = lru_cache.LRUCache(10)
2870
for n, (revision, revision_tree, signature) in enumerate(iterable):
2871
_install_revision(repository, revision, revision_tree, signature,
2874
pb.update('Transferring revisions', n + 1, num_revisions)
2876
repository.abort_write_group()
2879
repository.commit_write_group()
2882
def _install_revision(repository, rev, revision_tree, signature,
2884
"""Install all revision data into a repository."""
2885
present_parents = []
2887
for p_id in rev.parent_ids:
2888
if repository.has_revision(p_id):
2889
present_parents.append(p_id)
2890
parent_trees[p_id] = repository.revision_tree(p_id)
2892
parent_trees[p_id] = repository.revision_tree(
2893
_mod_revision.NULL_REVISION)
2895
inv = revision_tree.inventory
2896
entries = inv.iter_entries()
2897
# backwards compatibility hack: skip the root id.
2898
if not repository.supports_rich_root():
2899
path, root = entries.next()
2900
if root.revision != rev.revision_id:
2901
raise errors.IncompatibleRevision(repr(repository))
2903
for path, ie in entries:
2904
text_keys[(ie.file_id, ie.revision)] = ie
2905
text_parent_map = repository.texts.get_parent_map(text_keys)
2906
missing_texts = set(text_keys) - set(text_parent_map)
2907
# Add the texts that are not already present
2908
for text_key in missing_texts:
2909
ie = text_keys[text_key]
2911
# FIXME: TODO: The following loop overlaps/duplicates that done by
2912
# commit to determine parents. There is a latent/real bug here where
2913
# the parents inserted are not those commit would do - in particular
2914
# they are not filtered by heads(). RBC, AB
2915
for revision, tree in parent_trees.iteritems():
2916
if ie.file_id not in tree:
2918
parent_id = tree.inventory[ie.file_id].revision
2919
if parent_id in text_parents:
2921
text_parents.append((ie.file_id, parent_id))
2922
lines = revision_tree.get_file(ie.file_id).readlines()
2923
repository.texts.add_lines(text_key, text_parents, lines)
2925
# install the inventory
2926
if repository._format._commit_inv_deltas and len(rev.parent_ids):
2927
# Cache this inventory
2928
inventory_cache[rev.revision_id] = inv
2930
basis_inv = inventory_cache[rev.parent_ids[0]]
2932
repository.add_inventory(rev.revision_id, inv, present_parents)
2934
delta = inv._make_delta(basis_inv)
2935
repository.add_inventory_by_delta(rev.parent_ids[0], delta,
2936
rev.revision_id, present_parents)
2938
repository.add_inventory(rev.revision_id, inv, present_parents)
2939
except errors.RevisionAlreadyPresent:
2941
if signature is not None:
2942
repository.add_signature_text(rev.revision_id, signature)
2943
repository.add_revision(rev.revision_id, rev, inv)
2946
1256
class MetaDirRepository(Repository):
2947
1257
"""Repositories in the new meta-dir layout.
3312
1587
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
3313
1588
format_registry.register_lazy(
3314
1589
'Bazaar pack repository format 1 (needs bzr 0.92)\n',
3315
'bzrlib.repofmt.pack_repo',
1590
'bzrlib.repofmt.knitpack_repo',
3316
1591
'RepositoryFormatKnitPack1',
3318
1593
format_registry.register_lazy(
3319
1594
'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
3320
'bzrlib.repofmt.pack_repo',
1595
'bzrlib.repofmt.knitpack_repo',
3321
1596
'RepositoryFormatKnitPack3',
3323
1598
format_registry.register_lazy(
3324
1599
'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
3325
'bzrlib.repofmt.pack_repo',
1600
'bzrlib.repofmt.knitpack_repo',
3326
1601
'RepositoryFormatKnitPack4',
3328
1603
format_registry.register_lazy(
3329
1604
'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
3330
'bzrlib.repofmt.pack_repo',
1605
'bzrlib.repofmt.knitpack_repo',
3331
1606
'RepositoryFormatKnitPack5',
3333
1608
format_registry.register_lazy(
3334
1609
'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
3335
'bzrlib.repofmt.pack_repo',
1610
'bzrlib.repofmt.knitpack_repo',
3336
1611
'RepositoryFormatKnitPack5RichRoot',
3338
1613
format_registry.register_lazy(
3339
1614
'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
3340
'bzrlib.repofmt.pack_repo',
1615
'bzrlib.repofmt.knitpack_repo',
3341
1616
'RepositoryFormatKnitPack5RichRootBroken',
3343
1618
format_registry.register_lazy(
3344
1619
'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
3345
'bzrlib.repofmt.pack_repo',
1620
'bzrlib.repofmt.knitpack_repo',
3346
1621
'RepositoryFormatKnitPack6',
3348
1623
format_registry.register_lazy(
3349
1624
'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
3350
'bzrlib.repofmt.pack_repo',
1625
'bzrlib.repofmt.knitpack_repo',
3351
1626
'RepositoryFormatKnitPack6RichRoot',
1628
format_registry.register_lazy(
1629
'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
1630
'bzrlib.repofmt.groupcompress_repo',
1631
'RepositoryFormat2a',

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )
format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
        'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
format_registry.register_lazy(
    'Bazaar development format 8\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.
    """

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :return: None.
        """
        ui.ui_factory.warn_experimental_format_fetch(self)
        from bzrlib.fetch import RepoFetcher
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        find_ghosts=find_ghosts)

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
1714
raise NotImplementedError(self.search_missing_revision_ids)
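
    # Usage sketch (hypothetical repositories): the returned SearchResult's
    # keys are exactly what a subsequent fetch would copy.
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   search = inter.search_missing_revision_ids(revision_id='rev-tip-id',
    #       find_ghosts=False)
    #   to_fetch = search.get_keys()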

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            # different rich-root support
            return False
        if source._serializer != target._serializer:
            # different serializers
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplemented):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('Copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we then want all revisions the other repository needs to satisfy
        # revision_id checked, but not those that we already have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # This is the root entry.
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
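
    # Selection sketch (hypothetical values): deltas are sorted by length
    # first, so the shortest delta and its basis win.
    #
    #   deltas = [(12, 'parent-a', delta_a), (3, 'parent-b', delta_b)]
    #   deltas.sort()                    # (3, 'parent-b', delta_b) sorts first
    #   basis_id, delta = deltas[0][1:]  # -> 'parent-b', delta_b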

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root entry.
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy it.
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts.
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))
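
    # Batching sketch (hypothetical sizes): 250 revisions with batch_size=100
    # produce three independently committed write groups, so an interrupted
    # conversion loses at most one partial batch.
    #
    #   for offset in range(0, 250, 100):   # offsets 0, 100, 200
    #       ... start_write_group / _fetch_batch / commit_write_group ...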

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        ui.ui_factory.warn_experimental_format_fetch(self)
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of repo, giving it the current pb.

        :param repo: The repository to convert.
        :param pb: a progress bar to use for progress information.
        """
        pb = ui.ui_factory.nested_progress_bar()
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        pb.update('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        pb.update('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            pb.update('Copying content')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        pb.update('Deleting old repository content')
        self.repo_dir.transport.delete_tree('repository.backup')
        ui.ui_factory.note('repository converted')
        pb.finished()


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
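
# Example (sketch): predefined entities and numeric character references are
# decoded to UTF-8 encoded bytestrings:
#
#   _unescape_xml('a &amp; b &lt; c &#169;')
#   # -> 'a & b < c \xc2\xa9'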


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None, ancestors=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references, ancestors=ancestors)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct. dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        local_progress = None
        if progress_bar is None:
            local_progress = ui.ui_factory.nested_progress_bar()
            progress_bar = local_progress
        try:
            return self._check_file_version_parents(texts, progress_bar)
        finally:
            if local_progress:
                local_progress.finished()

    def _check_file_version_parents(self, texts, progress_bar):
        """See check_file_version_parents."""
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
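
# Example (sketch, hypothetical graph): the NULL_REVISION key is removed and
# parents not present in the graph (ghosts) are filtered from parent tuples:
#
#   g = {'rev-a': ('null:',), 'rev-b': ('rev-a', 'ghost-x')}
#   _strip_NULL_ghosts(g)
#   # -> {'rev-a': (), 'rev-b': ('rev-a',)}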


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                # conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what we are
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()
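
    # Resume-protocol sketch (hypothetical objects): non-empty missing_keys
    # means the write group was suspended rather than committed; feeding the
    # returned tokens back in completes the original insertion.
    #
    #   tokens, missing = sink.insert_stream(stream, src_format, [])
    #   if missing:
    #       extra = source.get_stream_for_missing_keys(missing)
    #       sink.insert_stream(extra, src_format, tokens)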

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format
        self._record_counter = RecordCounter()

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)
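
    # Sketch: metadata deltas are only safe when both ends serialise
    # identically, e.g. (hypothetical combinations):
    #
    #   pack-0.92 -> pack-0.92  => deltas permitted on revisions/signatures
    #   pack-0.92 -> 2a         => full texts (serializers differ)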

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for an
        # older fetch bug.
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories can
                # reference them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink. (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)
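
    # A non-rich-root to rich-root fetch forces 'topological' order so that
    # synthesized root texts are inserted before the inventories that refer
    # to them; otherwise the target format's preferred order is used.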

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventorydeltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
               delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
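
    # Consumption sketch (hypothetical objects): each yielded factory carries
    # one serialised inventory delta against the chosen basis.
    #
    #   for factory in source._stream_invs_as_deltas(['rev-1', 'rev-2']):
    #       delta_bytes = factory.get_bytes_as('fulltext')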


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index