        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parent's revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r)." % (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)
        store = False
        # now we check to see if we need to write a new record to the
        # file graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                # We can't trust a check of the file length because of content
                # filtering.
                if (# if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            else:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
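
    # Illustrative sketch only (not bzrlib API): how a caller can drive
    # record_entry_contents and interpret the returned tuple. `builder`,
    # `work_tree`, `parent_invs` and `entry_iter` are hypothetical stand-ins;
    # entry_iter yields (path, inventory_entry, content_summary) triples.
    def _example_record_entries(builder, work_tree, parent_invs, entry_iter):
        any_recorded = False
        for path, ie, content_summary in entry_iter:
            delta, version_recorded, fs_hash = builder.record_entry_contents(
                ie, parent_invs, path, work_tree, content_summary)
            if version_recorded:
                # A new version of the entry's text/metadata was stored.
                any_recorded = True
            if fs_hash is not None:
                # (sha1, stat_value) pair, usable to update a stat/hash cache.
                sha1, stat_value = fs_hash
        return any_recorded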
    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        changes = {}
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]}
        parent_entries = {}
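        # For example (illustrative ids only), after scanning two merge
        # parents one might have:
        #   merged_ids = {'file-a': ['rev-p1', 'rev-p2']}
        #   parent_entries = {'file-a': {'rev-p1': <entry>, 'rev-p2': <entry>}}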
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent that iter_changes was called against is
                        # the one that is the per-file head, so any change is
                        # relevant and iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
                new_path = change[1][1]
                inv_delta.append((change[1][0], new_path, change[0], entry))
                if new_path == '':
                    seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and if not do a delta
            # against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
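
    # Illustrative sketch only (not bzrlib API): record_iter_changes is a
    # generator, so it must be fully consumed for the recording to take
    # effect. `builder`, `work_tree` and `basis_id` are hypothetical
    # stand-ins supplied by the caller.
    def _example_record_iter_changes(builder, work_tree, basis_id):
        changes = work_tree.iter_changes(work_tree.basis_tree())
        for file_id, relpath, fs_hash in builder.record_iter_changes(
                work_tree, basis_id, changes):
            # fs_hash is (sha1, stat_value); it may be fed back into the
            # tree's observed-sha cache.
            work_tree._observed_sha1(file_id, relpath, fs_hash)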
    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
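
    # For reference: text keys are (file_id, revision_id) 2-tuples, so the
    # text above is stored under (file_id, self._new_revision_id) with one
    # parent key (file_id, parent) per per-file parent revision.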
class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.
class RepositoryWriteLockResult(LogicalLockResult):

        return InterRepository._assert_same_model(self, repository)
    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
            is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise errors.RootMissing()
        return self._add_inventory_checked(revision_id, inv, parents)
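
    # Illustrative sketch only (not bzrlib API): add_inventory must be called
    # inside a write group. `repo` and `inv` are hypothetical stand-ins; the
    # inventory's revision_id must be None or equal to revision_id.
    def _example_add_inventory(repo, inv, revision_id, parent_ids):
        repo.lock_write()
        try:
            repo.start_write_group()
            try:
                validator = repo.add_inventory(revision_id, inv, parent_ids)
            except:
                repo.abort_write_group()
                raise
            else:
                repo.commit_write_group()
        finally:
            repo.unlock()
        return validator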
    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serializer.write_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)
    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details.)
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
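
    # Illustrative sketch only (not bzrlib API): adding an inventory as a
    # delta against an existing revision. Delta entries follow the
    # (old_path, new_path, file_id, new_entry) convention; all ids here are
    # hypothetical, and a real file entry would also carry text_sha1 and
    # text_size for its stored text.
    def _example_add_inventory_by_delta(repo, basis_id, new_id, parent_ids):
        from bzrlib.inventory import InventoryFile
        new_entry = InventoryFile('file-id-1', 'hello.txt', 'root-id-1')
        new_entry.revision = new_id
        delta = [(None, 'hello.txt', 'file-id-1', new_entry)]   # an add
        validator, new_inv = repo.add_inventory_by_delta(
            basis_id, delta, new_id, parent_ids)
        return validator, new_inv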
    def _inventory_add_lines(self, revision_id, parents, lines,
                             check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        result = self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]
        self.inventories._access.flush()
        return result
    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
                    up in the inventory store.
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)
    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))
    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

    def break_lock(self):
        self.control_files.break_lock()
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()
    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()
def _do_check_inventories(self, checker, bar):
1212
"""Helper for _check_inventories."""
1214
keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
1215
kinds = ['chk_bytes', 'texts']
1216
count = len(checker.pending_keys)
1217
bar.update("inventories", 0, 2)
1218
current_keys = checker.pending_keys
1219
checker.pending_keys = {}
1220
# Accumulate current checks.
1221
for key in current_keys:
1222
if key[0] != 'inventories' and key[0] not in kinds:
1223
checker._report_items.append('unknown key type %r' % (key,))
1224
keys[key[0]].add(key[1:])
1225
if keys['inventories']:
1226
# NB: output order *should* be roughly sorted - topo or
1227
# inverse topo depending on repository - either way decent
1228
# to just delta against. However, pre-CHK formats didn't
1229
# try to optimise inventory layout on disk. As such the
1230
# pre-CHK code path does not use inventory deltas.
1232
for record in self.inventories.check(keys=keys['inventories']):
1233
if record.storage_kind == 'absent':
1234
checker._report_items.append(
1235
'Missing inventory {%s}' % (record.key,))
1237
last_object = self._check_record('inventories', record,
1238
checker, last_object,
1239
current_keys[('inventories',) + record.key])
1240
del keys['inventories']
1243
bar.update("texts", 1)
1244
while (checker.pending_keys or keys['chk_bytes']
1246
# Something to check.
1247
current_keys = checker.pending_keys
1248
checker.pending_keys = {}
1249
# Accumulate current checks.
1250
for key in current_keys:
1251
if key[0] not in kinds:
1252
checker._report_items.append('unknown key type %r' % (key,))
1253
keys[key[0]].add(key[1:])
1254
# Check the outermost kind only - inventories || chk_bytes || texts
1258
for record in getattr(self, kind).check(keys=keys[kind]):
1259
if record.storage_kind == 'absent':
1260
checker._report_items.append(
1261
'Missing %s {%s}' % (kind, record.key,))
1263
last_object = self._check_record(kind, record,
1264
checker, last_object, current_keys[(kind,) + record.key])
1268
    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))
    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
    @staticmethod
    def create(controldir):
        """Construct the current default format repository in controldir."""
        return RepositoryFormat.get_default_format().initialize(controldir)
    def __init__(self, _format, controldir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param controldir: The ControlDir of the repository.
        :param control_files: Control files to use for locking, etc.
        """
        # In the future we will have a single api for all stores for
        # getting file texts, inventories and revisions, then
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    def add_signature_text(self, revision_id, signature):
        """Store a signature text for a revision.

        :param revision_id: Revision id of the revision
        :param signature: Signature text.
        """
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))
    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()
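
    # Example of the returned shape (illustrative ids only): a text key is
    # mapped to True when the inventory of its own revision refers to it:
    #   {('file-a', 'rev-1'): True, ('file-b', 'rev-0'): False}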
    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')
            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result
    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid
    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
            line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result
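
    # Example of the returned shape (illustrative ids only): if file 'f-1'
    # was altered in 'rev-a' and 'rev-b', the result is
    #   {'f-1': set(['rev-a', 'rev-b'])}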
    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
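
    # Illustrative sketch only (not bzrlib API): the identifier element is
    # opaque to the repository, so a caller can use it to route the returned
    # chunks - here, as hypothetical output file names.
    def _example_iter_files_bytes(repo):
        desired_files = [('file-id-1', 'rev-1', 'out-1.txt'),
                         ('file-id-2', 'rev-1', 'out-2.txt')]
        for identifier, bytes_iterator in repo.iter_files_bytes(desired_files):
            out = open(identifier, 'wb')
            try:
                for chunk in bytes_iterator:
                    out.write(chunk)
            finally:
                out.close()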
    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()
    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for revision_id in to_query:
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index
    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result
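
    # Illustrative sketch only (not bzrlib API): consuming the stream in the
    # yielded order keeps file texts ahead of inventories, signatures and
    # revisions, which is what makes insertion in that order safe.
    def _example_count_introduced_keys(repo, revision_ids):
        counts = {}
        for knit_kind, file_id, versions in repo.item_keys_introduced_by(
                revision_ids):
            counts[knit_kind] = counts.get(knit_kind, 0) + len(list(versions))
        return counts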
    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed.
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("Fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()
    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)
    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)
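
    # Illustrative sketch only (not bzrlib API): with ordering=None the
    # inventories come back in the requested order (buffering as needed);
    # passing e.g. 'topological' lets the storage layer pick a cheaper order.
    def _example_collect_root_revisions(repo, revision_ids):
        roots = {}
        for inv in repo.iter_inventories(revision_ids):
            roots[inv.revision_id] = inv.root.revision
        return roots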
    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def get_serializer_format(self):
        return self._serializer.format_num
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text
    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        except UnicodeDecodeError:
            raise errors.NonAsciiRevisionId(method, self)
    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)
# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')
def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])
def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
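
# Illustrative sketch only (not bzrlib API): install_revisions manages the
# write group itself, so the caller only needs to hold a write lock.
# `source_repo`, `target_repo` and `revision_ids` are hypothetical stand-ins.
def _example_copy_revisions(source_repo, target_repo, revision_ids):
    triples = []
    for revision_id in revision_ids:
        rev = source_repo.get_revision(revision_id)
        tree = source_repo.revision_tree(revision_id)
        triples.append((rev, tree, None))    # no detached signature texts
    target_repo.lock_write()
    try:
        install_revisions(target_repo, triples, num_revisions=len(triples))
    finally:
        target_repo.unlock()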
def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)
class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
# Check their docstrings to see if/when they are obsolete.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
    ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )
format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
    'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )
format_registry.register_lazy(
    'Bazaar development format 8\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
            content is copied.
        :return: None.
        """
        ui.ui_factory.warn_experimental_format_fetch(self)
        from bzrlib.fetch import RepoFetcher
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        find_ghosts=find_ghosts)
    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()
    @needs_read_lock
    def search_missing_revision_ids(self,
            revision_id=symbol_versioning.DEPRECATED_PARAMETER,
            find_ghosts=True, revision_ids=None, if_present_ids=None,
            limit=None):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param revision_ids: return revision ids included by these
            revision_ids.  NoSuchRevision will be raised if any of these
            revisions are not present.
        :param if_present_ids: like revision_ids, but will not cause
            NoSuchRevision if any of these are absent, instead they will simply
            not be in the result.  This is useful for e.g. finding revisions
            to fetch for tags, which may reference absent revisions.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :param limit: Maximum number of revisions to return, topologically
            sorted.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplementedError):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('Copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # We want all revisions to satisfy revision_id in source, but we
        # don't want to stat every file here and there.
        # What we want, then, is all the revisions the other side needs to
        # satisfy revision_id checked, minus those that we already have
        # locally.
        # So the first thing is to get a subset of the revisions that
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # This is slow on a high-latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # Now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision, as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # Since we used get_ancestry to determine source_ids, we are
            # assured that all referenced revisions are present: they are
            # installed in topological order, and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # We only grabbed the possibly-available ids, so we have just an
            # estimate of what's available; validate that estimate against
            # the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
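
    # Illustrative sketch (not from the original source) of the set algebra
    # above, using plain sets in place of repository queries and assuming
    # every possibly-present revision turns out to actually be present:
    #
    #   source_ids_set = set(['a', 'b', 'c'])   # worst case we may pull
    #   target_ids = set(['b', 'x'])
    #   possibly_present = target_ids & source_ids_set   # set(['b'])
    #   required = source_ids_set - possibly_present     # set(['a', 'c'])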


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # Now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision, as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # Since we used get_ancestry to determine source_ids, we are
            # assured that all referenced revisions are present: they are
            # installed in topological order, and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # We only grabbed the possibly-available ids, so we have just an
            # estimate of what's available; validate that estimate against
            # the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however
        # that raises an exception, and we just want to say "False" as in we
        # won't support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
                and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target.  IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
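
    # Illustrative note (not from the original source): deltas is a list of
    # (length, basis_id, delta) tuples, so sorting it and taking
    # deltas[0][1:] selects the (basis_id, delta) pair with the fewest
    # changed entries, i.e. the cheapest basis to transmit against.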

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured.  (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert.  We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch too.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id
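
    # Illustrative note (not from the original source): within a batch the
    # insertion order is deliberate -- file texts first, then inventory
    # deltas, then signatures and revisions last -- so that a revision is
    # never present in the target without the data it references.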

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))
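
    # Illustrative sketch (not from the original source) of the write-group
    # batching pattern used above, with a hypothetical `insert_batch`:
    #
    #   repo.start_write_group()
    #   try:
    #       insert_batch(...)
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()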

    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        ui.ui_factory.warn_experimental_format_fetch(self)
        if (not self.source.supports_rich_root()
                and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
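
# Illustrative usage (not from the original source): InterRepository.get()
# selects a suitable registered optimiser for a (source, target) pair by
# consulting each class's is_compatible(), falling back to the generic
# InterRepository code path when none matches:
#
#   inter = InterRepository.get(source_repo, target_repo)
#   inter.fetch(revision_id='some-rev-id')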


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving it a new format.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        pb = ui.ui_factory.nested_progress_bar()
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        pb.update('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        pb.update('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            pb.update('Copying content')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        pb.update('Deleting old repository content')
        self.repo_dir.transport.delete_tree('repository.backup')
        ui.ui_factory.note('repository converted')
        pb.finished()
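
# Illustrative usage (not from the original source), assuming `repo` is a
# repository in a metadir layout and `fmt` is the desired RepositoryFormat;
# the pb argument is replaced internally by a nested progress bar:
#
#   converter = CopyConverter(fmt)
#   converter.convert(repo, pb=None)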


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile(r'&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
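
# Illustrative example (not from the original source):
#
#   _unescape_xml('a &amp; b &#38; c')   =>  'a & b & c'
#
# Named entities are looked up in _unescape_map; '#NNN' forms are decoded
# as numeric character references and re-encoded as UTF-8.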


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None, ancestors=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references, ancestors=ancestors)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventory.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this
            versioned file, but not used by the corresponding inventory.
        """
        local_progress = None
        if progress_bar is None:
            local_progress = ui.ui_factory.nested_progress_bar()
            progress_bar = local_progress
        try:
            return self._check_file_version_parents(texts, progress_bar)
        finally:
            if local_progress is not None:
                local_progress.finished()

    def _check_file_version_parents(self, texts, progress_bar):
        """See check_file_version_parents."""
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys
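
# Illustrative usage (not from the original source), assuming `repo` is a
# locked repository:
#
#   checker = _VersionedFileChecker(repo)
#   wrong, dangling = checker.check_file_version_parents(repo.texts)
#   # `wrong` maps text keys to (stored_parents, correct_parents)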


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError(
                    'unexpected substream type %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        if missing_keys:
            # suspend the write group and tell the caller what is
            # missing. We know we can suspend or else we would not have
            # entered this code path. (All repositories that can handle
            # missing keys can handle suspending a write group).
            write_group_tokens = self.target_repo.suspend_write_group()
            return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
                self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()
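
    # Illustrative sketch (not from the original source) of the caller-side
    # protocol around insert_stream and missing keys:
    #
    #   tokens, missing = sink.insert_stream(stream, src_format, [])
    #   if missing:
    #       # stream the missing records, then resume with the tokens
    #       extra = source.get_stream_for_missing_keys(missing)
    #       tokens, missing = sink.insert_stream(extra, src_format, tokens)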

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError(
                    'revision id mismatch: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format
        self._record_counter = RecordCounter()

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for the
        # same problem.
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink. (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream
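
    # Illustrative note (not from the original source): each missing key is a
    # tuple whose first element names the substream it belongs to, e.g.
    # ('inventories', 'rev-id-1') or ('texts', 'file-id-1', 'rev-id-1');
    # keys[key[0]].add(key[1:]) above groups the keys by substream kind.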

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
                from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventorydeltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fall back to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
                                       delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                            len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
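
    # Illustrative note (not from the original source): because inventories
    # are walked in topological order and invs_sent_so_far tracks what the
    # receiver must already hold, every delta is expressed against a basis
    # the remote side can reconstruct; when no already-sent parent is
    # available, the delta is taken against the empty (NULL_REVISION)
    # inventory instead.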


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index