commit to be valid, deletes against the basis MUST be recorded via
builder.record_delete().
raise NotImplementedError(self.will_record_deletes)
def record_iter_changes(self, tree, basis_revision_id, iter_changes):
self._recording_deletes = True
basis_id = self.parents[0]
basis_id = _mod_revision.NULL_REVISION
self.basis_delta_revision = basis_id
def record_entry_contents(self, ie, parent_invs, path, tree,
"""Record the content of ie from tree into the commit if needed.
Side effect: sets ie.revision when unchanged
:param ie: An inventory entry present in the commit.
:param parent_invs: The inventories of the parent revisions of the
:param path: The path the entry is at in the tree.
:param tree: The tree which contains this entry and should be used to
:param content_summary: Summary data from the tree about the paths
content - stat, length, exec, sha/link target. This is only
accessed when the entry has a revision of None - that is when it is
a candidate to commit.
:return: A tuple (change_delta, version_recorded, fs_hash).
change_delta is an inventory_delta change for this entry against
the basis tree of the commit, or None if no change occurred against
version_recorded is True if a new version of the entry has been
recorded. For instance, committing a merge where a file was only
changed on the other side will return (delta, False).
fs_hash is either None, or the hash details for the path (currently
a tuple of the contents sha1 and the statvalue returned by
tree.get_file_with_stat()).
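An illustrative usage sketch (assumed caller, not part of the
original docstring)::

    delta, version_recorded, fs_hash = builder.record_entry_contents(
        ie, parent_invs, path, tree, content_summary)
    if version_recorded:
        any_changes = True
    if fs_hash is not None:
        sha1, stat_value = fs_hash  # feed back into a stat cache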
if self.new_inventory.root is None:
if ie.parent_id is not None:
raise errors.RootMissing()
self._check_root(ie, parent_invs, tree)
if ie.revision is None:
kind = content_summary[0]
# ie is carried over from a prior commit
# XXX: repository specific check for nested tree support goes here - if
# the repo doesn't want nested trees we skip it ?
if (kind == 'tree-reference' and
not self.repository._format.supports_tree_reference):
# mismatch between commit builder logic and repository:
# this needs the entry creation pushed down into the builder.
raise NotImplementedError('Missing repository subtree support.')
self.new_inventory.add(ie)
# TODO: slow, take it out of the inner loop.
basis_inv = parent_invs[0]
basis_inv = Inventory(root_id=None)
# ie.revision is always None if the InventoryEntry is considered
# for committing. We may record the previous parent's revision if the
# content is actually unchanged against a sole head.
if ie.revision is not None:
if not self._versioned_root and path == '':
# repositories that do not version the root set the root's
# revision to the new commit even when no change occurs (more
# specifically, they do not record a revision on the root; and
# the rev id is assigned to the root during deserialisation -
# this masks when a change may have occurred against the basis.
# To match this we always issue a delta, because the revision
# of the root will always be changing.
if ie.file_id in basis_inv:
delta = (basis_inv.id2path(ie.file_id), path,
delta = (None, path, ie.file_id, ie)
self._basis_delta.append(delta)
return delta, False, None
# we don't need to commit this, because the caller already
# determined that an existing revision of this file is
# appropriate. If it's not being considered for committing then
# it and all its parents to the root must be unaltered so
# no-change against the basis.
if ie.revision == self._new_revision_id:
raise AssertionError("Impossible situation, a skipped "
"inventory entry (%r) claims to be modified in this "
"commit (%r)." % (ie, self._new_revision_id))
return None, False, None
# XXX: Friction: parent_candidates should return a list not a dict
# so that we don't have to walk the inventories again.
parent_candidate_entries = ie.parent_candidates(parent_invs)
head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
for inv in parent_invs:
if ie.file_id in inv:
old_rev = inv[ie.file_id].revision
if old_rev in head_set:
heads.append(inv[ie.file_id].revision)
head_set.remove(inv[ie.file_id].revision)
# now we check to see if we need to write a new record to the
# We write a new entry unless there is one head to the ancestors, and
# the kind-derived content is unchanged.
# Cheapest check first: no ancestors, or more than one head in the
# ancestors, we write a new node.
# There is a single head, look it up for comparison
parent_entry = parent_candidate_entries[heads[0]]
# if the non-content specific data has changed, we'll be writing a
if (parent_entry.parent_id != ie.parent_id or
parent_entry.name != ie.name):
# now we need to do content specific checks:
# if the kind changed the content obviously has
if kind != parent_entry.kind:
# Stat cache fingerprint feedback for the caller - None as we usually
# don't generate one.
if content_summary[2] is None:
raise ValueError("Files must not have executable = None")
# We can't trust a check of the file length because of content
if (# if the exec bit has changed we have to store:
parent_entry.executable != content_summary[2]):
elif parent_entry.text_sha1 == content_summary[3]:
# all meta and content is unchanged (using a hash cache
# hit to check the sha)
ie.revision = parent_entry.revision
ie.text_size = parent_entry.text_size
ie.text_sha1 = parent_entry.text_sha1
ie.executable = parent_entry.executable
return self._get_delta(ie, basis_inv, path), False, None
# Either there is only a hash change(no hash cache entry,
# or same size content change), or there is no change on
# Provide the parent's hash to the store layer, so that the
# content is unchanged we will not store a new node.
nostore_sha = parent_entry.text_sha1
# We want to record a new node regardless of the presence or
# absence of a content change in the file.
ie.executable = content_summary[2]
file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
text = file_obj.read()
ie.text_sha1, ie.text_size = self._add_text_to_weave(
ie.file_id, text, heads, nostore_sha)
# Let the caller know we generated a stat fingerprint.
fingerprint = (ie.text_sha1, stat_value)
except errors.ExistingContent:
# Turns out that the file content was unchanged, and we were
# only going to store a new node if it was changed. Carry over
ie.revision = parent_entry.revision
ie.text_size = parent_entry.text_size
ie.text_sha1 = parent_entry.text_sha1
ie.executable = parent_entry.executable
return self._get_delta(ie, basis_inv, path), False, None
elif kind == 'directory':
# all data is meta here, nothing specific to directory, so
ie.revision = parent_entry.revision
return self._get_delta(ie, basis_inv, path), False, None
self._add_text_to_weave(ie.file_id, '', heads, None)
elif kind == 'symlink':
current_link_target = content_summary[3]
# symlink target is not generic metadata, check if it has
if current_link_target != parent_entry.symlink_target:
# unchanged, carry over.
ie.revision = parent_entry.revision
ie.symlink_target = parent_entry.symlink_target
return self._get_delta(ie, basis_inv, path), False, None
ie.symlink_target = current_link_target
self._add_text_to_weave(ie.file_id, '', heads, None)
elif kind == 'tree-reference':
if content_summary[3] != parent_entry.reference_revision:
# unchanged, carry over.
ie.reference_revision = parent_entry.reference_revision
ie.revision = parent_entry.revision
return self._get_delta(ie, basis_inv, path), False, None
ie.reference_revision = content_summary[3]
if ie.reference_revision is None:
raise AssertionError("invalid content_summary for nested tree: %r"
% (content_summary,))
self._add_text_to_weave(ie.file_id, '', heads, None)
raise NotImplementedError('unknown kind')
ie.revision = self._new_revision_id
self._any_changes = True
return self._get_delta(ie, basis_inv, path), True, fingerprint
def record_iter_changes(self, tree, basis_revision_id, iter_changes,
_entry_factory=entry_factory):
"""Record a new tree via iter_changes.
:param tree: The tree to obtain text contents from for changed objects.
to basis_revision_id. The iterator must not include any items with
a current kind of None - missing items must be either filtered out
or errored-on before record_iter_changes sees the item.
:param _entry_factory: Private method to bind entry_factory locally for
:return: A generator of (file_id, relpath, fs_hash) tuples for use with
tree._observed_sha1.
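An illustrative sketch (assumed caller, not part of the original
docstring); the generator is typically consumed in full so that the
recording takes effect::

    for file_id, relpath, fs_hash in builder.record_iter_changes(
            tree, basis_revision_id, tree.iter_changes(basis_tree)):
        tree._observed_sha1(file_id, relpath, fs_hash)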
raise NotImplementedError(self.record_iter_changes)
class RepositoryWriteLockResult(LogicalLockResult):
"""The result of write locking a repository.
:ivar repository_token: The token obtained from the underlying lock, or
:ivar unlock: A callable which will unlock the lock.
def __init__(self, unlock, repository_token):
LogicalLockResult.__init__(self, unlock)
self.repository_token = repository_token
return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
# Create an inventory delta based on deltas between all the parents and
# deltas between all the parent inventories. We use inventory deltas
# between the inventory objects because iter_changes masks
# last-changed-field only changes.
# file_id -> change map, change is fileid, paths, changed, versioneds,
# parents, names, kinds, executables
# {file_id -> revision_id -> inventory entry, for entries in parent
# trees that are not parents[0]
revtrees = list(self.repository.revision_trees(self.parents))
except errors.NoSuchRevision:
# one or more ghosts, slow path.
for revision_id in self.parents:
revtrees.append(self.repository.revision_tree(revision_id))
except errors.NoSuchRevision:
basis_revision_id = _mod_revision.NULL_REVISION
revtrees.append(self.repository.revision_tree(
_mod_revision.NULL_REVISION))
# The basis inventory from a repository
basis_inv = revtrees[0].inventory
basis_inv = self.repository.revision_tree(
_mod_revision.NULL_REVISION).inventory
if len(self.parents) > 0:
if basis_revision_id != self.parents[0] and not ghost_basis:
"arbitrary basis parents not yet supported with merges")
for revtree in revtrees[1:]:
for change in revtree.inventory._make_delta(basis_inv):
if change[1] is None:
# Not present in this parent.
if change[2] not in merged_ids:
if change[0] is not None:
basis_entry = basis_inv[change[2]]
merged_ids[change[2]] = [
basis_entry.revision,
parent_entries[change[2]] = {
basis_entry.revision:basis_entry,
change[3].revision:change[3],
merged_ids[change[2]] = [change[3].revision]
parent_entries[change[2]] = {change[3].revision:change[3]}
merged_ids[change[2]].append(change[3].revision)
parent_entries[change[2]][change[3].revision] = change[3]
# Setup the changes from the tree:
# changes maps file_id -> (change, [parent revision_ids])
for change in iter_changes:
# This probably looks up in basis_inv way too much.
if change[1][0] is not None:
head_candidate = [basis_inv[change[0]].revision]
changes[change[0]] = change, merged_ids.get(change[0],
unchanged_merged = set(merged_ids) - set(changes)
# Extend the changes dict with synthetic changes to record merges of
for file_id in unchanged_merged:
# Record a merged version of these items that did not change vs the
# basis. This can be either identical parallel changes, or a revert
# of a specific file after a merge. The recorded content will be
# that of the current tree (which is the same as the basis), but
# the per-file graph will reflect a merge.
# NB:XXX: We are reconstructing path information we had, this
# should be preserved instead.
# inv delta change: (file_id, (path_in_source, path_in_target),
# changed_content, versioned, parent, name, kind,
basis_entry = basis_inv[file_id]
except errors.NoSuchId:
# a change from basis->some_parents but file_id isn't in basis
# so was new in the merge, which means it must have changed
# from basis -> current, and as it hasn't the add was reverted
# by the user. So we discard this change.
(basis_inv.id2path(file_id), tree.id2path(file_id)),
(basis_entry.parent_id, basis_entry.parent_id),
(basis_entry.name, basis_entry.name),
(basis_entry.kind, basis_entry.kind),
(basis_entry.executable, basis_entry.executable))
changes[file_id] = (change, merged_ids[file_id])
# changes contains tuples with the change and a set of inventory
# candidates for the file.
# old_path, new_path, file_id, new_inventory_entry
seen_root = False # Is the root in the basis delta?
inv_delta = self._basis_delta
modified_rev = self._new_revision_id
for change, head_candidates in changes.values():
if change[3][1]: # versioned in target.
# Several things may be happening here:
# We may have a fork in the per-file graph
# - record a change with the content from tree
# We may have a change against < all trees
# - carry over the tree that hasn't changed
# We may have a change against all trees
# - record the change with the content from tree
entry = _entry_factory[kind](file_id, change[5][1],
head_set = self._heads(change[0], set(head_candidates))
for head_candidate in head_candidates:
if head_candidate in head_set:
heads.append(head_candidate)
head_set.remove(head_candidate)
# Could be a carry-over situation:
parent_entry_revs = parent_entries.get(file_id, None)
if parent_entry_revs:
parent_entry = parent_entry_revs.get(heads[0], None)
if parent_entry is None:
# The parent iter_changes was called against is the one
# that is the per-file head, so any change is relevant
# iter_changes is valid.
carry_over_possible = False
# could be a carry over situation
# A change against the basis may just indicate a merge,
# we need to check the content against the source of the
# merge to determine if it was changed after the merge
if (parent_entry.kind != entry.kind or
parent_entry.parent_id != entry.parent_id or
parent_entry.name != entry.name):
# Metadata common to all entries has changed
# against per-file parent
carry_over_possible = False
carry_over_possible = True
# per-type checks for changes against the parent_entry
# Cannot be a carry-over situation
carry_over_possible = False
# Populate the entry in the delta
# XXX: There is still a small race here: If someone reverts the content of a file
# after iter_changes examines and decides it has changed,
# we will unconditionally record a new version even if some
# other process reverts it while commit is running (with
# the revert happening after iter_changes did its
entry.executable = True
entry.executable = False
if (carry_over_possible and
parent_entry.executable == entry.executable):
# Check the file length, content hash after reading
nostore_sha = parent_entry.text_sha1
file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
text = file_obj.read()
entry.text_sha1, entry.text_size = self._add_text_to_weave(
file_id, text, heads, nostore_sha)
yield file_id, change[1][1], (entry.text_sha1, stat_value)
except errors.ExistingContent:
# No content change against a carry_over parent
# Perhaps this should also yield a fs hash update?
entry.text_size = parent_entry.text_size
entry.text_sha1 = parent_entry.text_sha1
elif kind == 'symlink':
entry.symlink_target = tree.get_symlink_target(file_id)
if (carry_over_possible and
parent_entry.symlink_target == entry.symlink_target):
self._add_text_to_weave(change[0], '', heads, None)
elif kind == 'directory':
if carry_over_possible:
# Nothing to set on the entry.
# XXX: split into the Root and nonRoot versions.
if change[1][1] != '' or self.repository.supports_rich_root():
self._add_text_to_weave(change[0], '', heads, None)
elif kind == 'tree-reference':
if not self.repository._format.supports_tree_reference:
# This isn't quite sane as an error, but we shouldn't
# ever see this code path in practice: trees don't
# permit references when the repo doesn't support tree
raise errors.UnsupportedOperation(tree.add_reference,
reference_revision = tree.get_reference_revision(change[0])
entry.reference_revision = reference_revision
if (carry_over_possible and
parent_entry.reference_revision == reference_revision):
self._add_text_to_weave(change[0], '', heads, None)
raise AssertionError('unknown kind %r' % kind)
entry.revision = modified_rev
entry.revision = parent_entry.revision
new_path = change[1][1]
inv_delta.append((change[1][0], new_path, change[0], entry))
self.new_inventory = None
# This should perhaps be guarded by a check that the basis we
# commit against is the basis for the commit and if not do a delta
self._any_changes = True
# housekeeping root entry changes do not affect no-change commits.
self._require_root_change(tree)
self.basis_delta_revision = basis_revision_id
def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
parent_keys = tuple([(file_id, parent) for parent in parents])
return self.repository.texts._add_text(
(file_id, self._new_revision_id), parent_keys, new_text,
nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
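# Illustrative note (not part of the original source): text keys are
# (file_id, revision_id) 2-tuples, so for parents ['rev-1', 'rev-2']
# parent_keys is (('file-id', 'rev-1'), ('file-id', 'rev-2')) and the
# new text is stored under ('file-id', self._new_revision_id).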
class RootCommitBuilder(CommitBuilder):
"""This commit builder actually records the root id."""
# the root entry gets versioned properly by this builder.
_versioned_root = True
def _check_root(self, ie, parent_invs, tree):
"""Helper for record_entry_contents.
:param ie: An entry being added.
:param parent_invs: The inventories of the parent revisions of the
:param tree: The tree that is being committed.
def _require_root_change(self, tree):
"""Enforce an appropriate root object change.
This is called once when record_iter_changes is called, if and only if
the root was not in the delta calculated by record_iter_changes.
:param tree: The tree which is being committed.
# versioned roots do not change unless the tree found a change.
######################################################################
class Repository(_RelockDebugMixin, controldir.ControlComponent):
class Repository(object):
"""Repository holding history for one or more branches.
The repository holds and retrieves historical information including
revisions and file history. It's normally accessed only by the Branch,
which views a particular line of development through that history.
See VersionedFileRepository in bzrlib.vf_repository for the
base class for most Bazaar repositories.
The Repository builds on top of some byte storage facilities (the revisions,
signatures, inventories, texts and chk_bytes attributes) and a Transport,
which respectively provide byte storage and a means to access the (possibly
The byte storage facilities are addressed via tuples, which we refer to
as 'keys' throughout the code base. Revision_keys, inventory_keys and
signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
(file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
byte string made up of a hash identifier and a hash value.
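For example (illustrative only)::

    revision_key = ('rev-id-1',)
    text_key = ('file-id-1', 'rev-id-1')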
We use this interface because it allows low friction with the underlying
code that implements disk indices, network encoding and other parts of
:ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
the serialised revisions for the repository. This can be used to obtain
revision graph information or to access raw serialised revisions.
The result of trying to insert data into the repository via this store
is undefined: it should be considered read-only except for implementors
:ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
the serialised signatures for the repository. This can be used to
obtain access to raw serialised signatures. The result of trying to
insert data into the repository via this store is undefined: it should
be considered read-only except for implementors of repositories.
:ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
the serialised inventories for the repository. This can be used to
obtain unserialised inventories. The result of trying to insert data
into the repository via this store is undefined: it should be
considered read-only except for implementors of repositories.
:ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
texts of files and directories for the repository. This can be used to
obtain file texts or file graphs. Note that Repository.iter_file_bytes
is usually a better interface for accessing file texts.
The result of trying to insert data into the repository via this store
is undefined: it should be considered read-only except for implementors
:ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
any data the repository chooses to store or have indexed by its hash.
The result of trying to insert data into the repository via this store
is undefined: it should be considered read-only except for implementors
:ivar _transport: Transport for file access to repository, typically
pointing to .bzr/repository.
# What class to use for a CommitBuilder. Often it's simpler to change this
# in a Repository class subclass rather than to override
# get_commit_builder.
_commit_builder_class = CommitBuilder
# The search regex used by xml based repositories to determine what things
# were changed in a single commit.
_file_ids_altered_regex = lazy_regex.lazy_compile(
r'file_id="(?P<file_id>[^"]+)"'
r'.* revision="(?P<revision_id>[^"]+)"'
def abort_write_group(self, suppress_errors=False):
"""Abort the current write group, discarding the contents accrued within it.
return InterRepository._assert_same_model(self, repository)
def add_inventory(self, revision_id, inv, parents):
"""Add the inventory inv to the repository as revision_id.
:param parents: The revision ids of the parents that revision_id
is known to have and are in the repository already.
:returns: The validator(which is a sha1 digest, though what is sha'd is
repository format specific) of the serialized inventory.
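A minimal usage sketch (illustrative only; the surrounding locking
and write group handling is assumed)::

    repository.lock_write()
    repository.start_write_group()
    validator = repository.add_inventory('rev-id-1', inv, [])
    repository.commit_write_group()
    repository.unlock()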
if not self.is_in_write_group():
raise AssertionError("%r not in write group" % (self,))
_mod_revision.check_not_reserved_id(revision_id)
if not (inv.revision_id is None or inv.revision_id == revision_id):
raise AssertionError(
"Mismatch between inventory revision"
" id and insertion revid (%r, %r)"
% (inv.revision_id, revision_id))
if inv.root is None:
raise AssertionError()
return self._add_inventory_checked(revision_id, inv, parents)
def _add_inventory_checked(self, revision_id, inv, parents):
"""Add inv to the repository after checking the inputs.
This function can be overridden to allow different inventory styles.
:seealso: add_inventory, for the contract.
inv_lines = self._serialise_inventory_to_lines(inv)
return self._inventory_add_lines(revision_id, parents,
inv_lines, check_content=False)
def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
parents, basis_inv=None, propagate_caches=False):
"""Add a new inventory expressed as a delta against another revision.
See the inventory developers documentation for the theory behind
:param basis_revision_id: The inventory id the delta was created
against. (This does not have to be a direct parent.)
:param delta: The inventory delta (see Inventory.apply_delta for
:param new_revision_id: The revision id that the inventory is being
:param parents: The revision ids of the parents that revision_id is
known to have and are in the repository already. These are supplied
for repositories that depend on the inventory graph for revision
graph access, as well as for those that pun ancestry with delta
:param basis_inv: The basis inventory if it is already known,
:param propagate_caches: If True, the caches for this inventory are
copied to and updated for the result if possible.
:returns: (validator, new_inv)
The validator(which is a sha1 digest, though what is sha'd is
repository format specific) of the serialized inventory, and the
resulting inventory.
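An inventory delta is a sequence of (old_path, new_path, file_id,
new_entry) tuples. For example (illustrative names)::

    delta = [(None, 'foo', 'foo-file-id', new_file_entry)]

would add 'foo', while ('foo', None, 'foo-file-id', None) would
record its removal.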
if not self.is_in_write_group():
raise AssertionError("%r not in write group" % (self,))
_mod_revision.check_not_reserved_id(new_revision_id)
basis_tree = self.revision_tree(basis_revision_id)
basis_tree.lock_read()
# Note that this mutates the inventory of basis_tree, which not all
# inventory implementations may support: A better idiom would be to
# return a new inventory, but as there is no revision tree cache in
# repository this is safe for now - RBC 20081013
if basis_inv is None:
basis_inv = basis_tree.inventory
basis_inv.apply_delta(delta)
basis_inv.revision_id = new_revision_id
return (self.add_inventory(new_revision_id, basis_inv, parents),
def _inventory_add_lines(self, revision_id, parents, lines,
check_content=True):
"""Store lines in inv_vf and return the sha1 of the inventory."""
parents = [(parent,) for parent in parents]
result = self.inventories.add_lines((revision_id,), parents, lines,
check_content=check_content)[0]
self.inventories._access.flush()
def add_revision(self, revision_id, rev, inv=None, config=None):
"""Add rev to the revision store as revision_id.
:param revision_id: the revision id to use.
:param rev: The revision object.
:param inv: The inventory for the revision. if None, it will be looked
up in the inventory store
:param config: If None no digital signature will be created.
If supplied its signature_needed method will be used
to determine if a signature should be made.
# TODO: jam 20070210 Shouldn't we check rev.revision_id and
_mod_revision.check_not_reserved_id(revision_id)
if config is not None and config.signature_needed():
inv = self.get_inventory(revision_id)
plaintext = Testament(rev, inv).as_short_text()
self.store_revision_signature(
gpg.GPGStrategy(config), plaintext, revision_id)
# check inventory present
if not self.inventories.get_parent_map([(revision_id,)]):
raise errors.WeaveRevisionNotPresent(revision_id,
# yes, this is not suitable for adding with ghosts.
rev.inventory_sha1 = self.add_inventory(revision_id, inv,
key = (revision_id,)
rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
self._add_revision(rev)
def _add_revision(self, revision):
text = self._serializer.write_revision_to_string(revision)
key = (revision.revision_id,)
parents = tuple((parent,) for parent in revision.parent_ids)
self.revisions.add_lines(key, parents, osutils.split_lines(text))
def all_revision_ids(self):
"""Returns a list of all the revision ids in the repository.
self.control_files.break_lock()
def _eliminate_revisions_not_present(self, revision_ids):
"""Check every revision id in revision_ids to see if we have it.
Returns a set of the present revisions.
graph = self.get_graph()
parent_map = graph.get_parent_map(revision_ids)
# The old API returned a list, should this actually be a set?
return parent_map.keys()
def _check_inventories(self, checker):
"""Check the inventories found from the revision scan.
This is responsible for verifying the sha1 of inventories and
creating a pending_keys set that covers data referenced by inventories.
bar = ui.ui_factory.nested_progress_bar()
self._do_check_inventories(checker, bar)
def _do_check_inventories(self, checker, bar):
"""Helper for _check_inventories."""
keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
kinds = ['chk_bytes', 'texts']
count = len(checker.pending_keys)
bar.update("inventories", 0, 2)
current_keys = checker.pending_keys
checker.pending_keys = {}
# Accumulate current checks.
for key in current_keys:
if key[0] != 'inventories' and key[0] not in kinds:
checker._report_items.append('unknown key type %r' % (key,))
keys[key[0]].add(key[1:])
if keys['inventories']:
# NB: output order *should* be roughly sorted - topo or
# inverse topo depending on repository - either way decent
# to just delta against. However, pre-CHK formats didn't
# try to optimise inventory layout on disk. As such the
# pre-CHK code path does not use inventory deltas.
for record in self.inventories.check(keys=keys['inventories']):
if record.storage_kind == 'absent':
checker._report_items.append(
'Missing inventory {%s}' % (record.key,))
last_object = self._check_record('inventories', record,
checker, last_object,
current_keys[('inventories',) + record.key])
del keys['inventories']
bar.update("texts", 1)
while (checker.pending_keys or keys['chk_bytes']
# Something to check.
current_keys = checker.pending_keys
checker.pending_keys = {}
# Accumulate current checks.
for key in current_keys:
if key[0] not in kinds:
checker._report_items.append('unknown key type %r' % (key,))
keys[key[0]].add(key[1:])
# Check the outermost kind only - inventories || chk_bytes || texts
for record in getattr(self, kind).check(keys=keys[kind]):
if record.storage_kind == 'absent':
checker._report_items.append(
'Missing %s {%s}' % (kind, record.key,))
last_object = self._check_record(kind, record,
checker, last_object, current_keys[(kind,) + record.key])
def _check_record(self, kind, record, checker, last_object, item_data):
"""Check a single text from this repository."""
if kind == 'inventories':
rev_id = record.key[0]
inv = self.deserialise_inventory(rev_id,
record.get_bytes_as('fulltext'))
if last_object is not None:
delta = inv._make_delta(last_object)
for old_path, path, file_id, ie in delta:
ie.check(checker, rev_id, inv)
for path, ie in inv.iter_entries():
ie.check(checker, rev_id, inv)
if self._format.fast_deltas:
elif kind == 'chk_bytes':
# No code written to check chk_bytes for this repo format.
checker._report_items.append(
'unsupported key type chk_bytes for %s' % (record.key,))
elif kind == 'texts':
self._check_text(record, checker, item_data)
checker._report_items.append(
'unknown key type %s for %s' % (kind, record.key))
def _check_text(self, record, checker, item_data):
"""Check a single text."""
# Check it is extractable.
# TODO: check length.
if record.storage_kind == 'chunked':
chunks = record.get_bytes_as(record.storage_kind)
sha1 = osutils.sha_strings(chunks)
length = sum(map(len, chunks))
content = record.get_bytes_as('fulltext')
sha1 = osutils.sha_string(content)
length = len(content)
if item_data and sha1 != item_data[1]:
checker._report_items.append(
'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
(record.key, sha1, item_data[1], item_data[2]))
def create(controldir):
"""Construct the current default format repository in controldir."""
return RepositoryFormat.get_default_format().initialize(controldir)
def create(a_bzrdir):
"""Construct the current default format repository in a_bzrdir."""
return RepositoryFormat.get_default_format().initialize(a_bzrdir)
def __init__(self, _format, controldir, control_files):
def __init__(self, _format, a_bzrdir, control_files):
"""Instantiate a Repository.
:param _format: The format of the repository on disk.
:param controldir: The ControlDir of the repository.
:param control_files: Control files to use for locking, etc.
:param a_bzrdir: The BzrDir of the repository.
In the future we will have a single api for all stores for
getting file texts, inventories and revisions, then
this construct will accept instances of those things.
# In the future we will have a single api for all stores for
# getting file texts, inventories and revisions, then
# this construct will accept instances of those things.
super(Repository, self).__init__()
self._format = _format
# the following are part of the public API for Repository:
self.bzrdir = controldir
self.bzrdir = a_bzrdir
self.control_files = control_files
self._transport = control_files._transport
self.base = self._transport.base
self._reconcile_does_inventory_gc = True
self._reconcile_fixes_text_parents = False
self._reconcile_backsup_inventory = True
# not right yet - should be more semantically clear ?
# TODO: make sure to construct the right store classes, etc, depending
# on whether escaping is required.
self._warn_if_deprecated()
self._write_group = None
# Additional places to query for data.
self._fallback_repositories = []
def user_transport(self):
return self.bzrdir.user_transport
def control_transport(self):
return self._transport
# An InventoryEntry cache, used during deserialization
self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
def __repr__(self):
if self._fallback_repositories:
signature = gpg_strategy.sign(plaintext)
self.add_signature_text(revision_id, signature)
def add_signature_text(self, revision_id, signature):
"""Store a signature text for a revision.
:param revision_id: Revision id of the revision
:param signature: Signature text.
raise NotImplementedError(self.add_signature_text)
self.signatures.add_lines((revision_id,), (),
osutils.split_lines(signature))
def find_text_key_references(self):
"""Find the text key references within the repository.
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
to whether they were referred to by the inventory of the
revision_id that they contain. The inventory texts from all present
revision ids are assessed to generate this report.
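For example (illustrative only)::

    {('file-id-1', 'rev-id-1'): True,
     ('file-id-1', 'rev-id-0'): False}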
revision_keys = self.revisions.keys()
w = self.inventories
pb = ui.ui_factory.nested_progress_bar()
return self._find_text_key_references_from_xml_inventory_lines(
w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
def _find_text_key_references_from_xml_inventory_lines(self,
"""Core routine for extracting references to texts from inventories.
This performs the translation of xml lines to revision ids.
:param line_iterator: An iterator of lines, origin_version_id
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
to whether they were referred to by the inventory of the
revision_id that they contain. Note that if that revision_id was
not part of the line_iterator's output then False will be given -
even though it may actually refer to that key.
if not self._serializer.support_altered_by_hack:
raise AssertionError(
"_find_text_key_references_from_xml_inventory_lines only "
"supported for branches which store inventory as unnested xml"
", not on %r" % self)
# this code needs to read every new line in every inventory for the
# inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
# not present in one of those inventories is unnecessary but not
# harmful because we are filtering by the revision id marker in the
# inventory lines : we only select file ids altered in one of those
# revisions. We don't need to see all lines in the inventory because
# only those added in an inventory in rev X can contain a revision=X
unescape_revid_cache = {}
unescape_fileid_cache = {}
# jam 20061218 In a big fetch, this handles hundreds of thousands
# of lines, so it has had a lot of inlining and optimizing done.
# Sorry that it is a little bit messy.
# Move several functions to be local variables, since this is a long
search = self._file_ids_altered_regex.search
unescape = _unescape_xml
setdefault = result.setdefault
for line, line_key in line_iterator:
match = search(line)
# One call to match.group() returning multiple items is quite a
# bit faster than 2 calls to match.group() each returning 1
file_id, revision_id = match.group('file_id', 'revision_id')
# Inlining the cache lookups helps a lot when you make 170,000
# lines and 350k ids, versus 8.4 unique ids.
# Using a cache helps in 2 ways:
# 1) Avoids unnecessary decoding calls
# 2) Re-uses cached strings, which helps in future set and
# (2) is enough that removing encoding entirely along with
# the cache (so we are using plain strings) results in no
# performance improvement.
revision_id = unescape_revid_cache[revision_id]
unescaped = unescape(revision_id)
unescape_revid_cache[revision_id] = unescaped
revision_id = unescaped
# Note that unconditionally unescaping means that we deserialise
# every fileid, which for general 'pull' is not great, but we don't
# really want to have so many fulltexts that this matters anyway.
file_id = unescape_fileid_cache[file_id]
unescaped = unescape(file_id)
unescape_fileid_cache[file_id] = unescaped
key = (file_id, revision_id)
setdefault(key, False)
if revision_id == line_key[-1]:
def _inventory_xml_lines_for_keys(self, keys):
"""Get a line iterator of the sort needed for finding references.
Not relevant for non-xml inventory repositories.
Ghosts in revision_keys are ignored.
:param revision_keys: The revision keys for the inventories to inspect.
:return: An iterator over (inventory line, revid) for the fulltexts of
all of the xml inventories specified by revision_keys.
stream = self.inventories.get_record_stream(keys, 'unordered', True)
for record in stream:
if record.storage_kind != 'absent':
chunks = record.get_bytes_as('chunked')
revid = record.key[-1]
lines = osutils.chunks_to_lines(chunks)
def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
"""Helper routine for fileids_altered_by_revision_ids.
This performs the translation of xml lines to revision ids.
:param line_iterator: An iterator of lines, origin_version_id
:param revision_keys: The revision ids to filter for. This should be a
set or other type which supports efficient __contains__ lookups, as
the revision key from each parsed line will be looked up in the
revision_keys filter.
:return: a dictionary mapping altered file-ids to an iterable of
revision_ids. Each altered file-id has the exact revision_ids that
altered it listed explicitly.
seen = set(self._find_text_key_references_from_xml_inventory_lines(
line_iterator).iterkeys())
parent_keys = self._find_parent_keys_of_revisions(revision_keys)
parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
self._inventory_xml_lines_for_keys(parent_keys)))
new_keys = seen - parent_seen
setdefault = result.setdefault
for key in new_keys:
setdefault(key[0], set()).add(key[-1])
def _find_parent_ids_of_revisions(self, revision_ids):
"""Find all parent ids that are mentioned in the revision graph.
uniquely identify the file version in the caller's context. (Examples:
an index number or a TreeTransform trans_id.)
bytes_iterator is an iterable of bytestrings for the file. The
kind of iterable and length of the bytestrings are unspecified, but for
this implementation, it is a list of bytes produced by
VersionedFile.get_record_stream().
:param desired_files: a list of (file_id, revision_id, identifier)
raise NotImplementedError(self.iter_files_bytes)
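# Illustrative sketch (not part of the original source): identifiers are
# opaque to the repository and are handed back with each file's bytes,
# e.g.:
#
#     desired_files = [('file-id-1', 'rev-id-1', 'my-token')]
#     for identifier, bytes_iterator in repo.iter_files_bytes(desired_files):
#         text = ''.join(bytes_iterator)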
for file_id, revision_id, callable_data in desired_files:
text_keys[(file_id, revision_id)] = callable_data
for record in self.texts.get_record_stream(text_keys, 'unordered', True):
if record.storage_kind == 'absent':
raise errors.RevisionNotPresent(record.key, self)
yield text_keys[record.key], record.get_bytes_as('chunked')
def _generate_text_key_index(self, text_key_references=None,
"""Generate a new text key index for the repository.
This is an expensive function that will take considerable time to run.
:return: A dict mapping text keys ((file_id, revision_id) tuples) to a
list of parents, also text keys. When a given key has no parents,
the parents list will be [NULL_REVISION].
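For example (illustrative only)::

    {('file-id-1', 'rev-2'): [('file-id-1', 'rev-1')],
     ('file-id-1', 'rev-1'): [NULL_REVISION]}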
# All revisions, to find inventory parents.
if ancestors is None:
graph = self.get_graph()
ancestors = graph.get_parent_map(self.all_revision_ids())
if text_key_references is None:
text_key_references = self.find_text_key_references()
pb = ui.ui_factory.nested_progress_bar()
return self._do_generate_text_key_index(ancestors,
text_key_references, pb)
def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
"""Helper for _generate_text_key_index to avoid deep nesting."""
revision_order = tsort.topo_sort(ancestors)
invalid_keys = set()
for revision_id in revision_order:
revision_keys[revision_id] = set()
text_count = len(text_key_references)
# a cache of the text keys to allow reuse; costs a dict of all the
# keys, but saves a 2-tuple for every child of a given key.
for text_key, valid in text_key_references.iteritems():
invalid_keys.add(text_key)
revision_keys[text_key[1]].add(text_key)
text_key_cache[text_key] = text_key
del text_key_references
text_graph = graph.Graph(graph.DictParentsProvider(text_index))
NULL_REVISION = _mod_revision.NULL_REVISION
# Set a cache with a size of 10 - this suffices for bzr.dev but may be
# too small for large or very branchy trees. However, for 55K path
# trees, it would be easy to use too much memory trivially. Ideally we
# could gauge this by looking at available real memory etc, but this is
# always a tricky proposition.
inventory_cache = lru_cache.LRUCache(10)
batch_size = 10 # should be ~150MB on a 55K path tree
batch_count = len(revision_order) / batch_size + 1
pb.update("Calculating text parents", processed_texts, text_count)
for offset in xrange(batch_count):
to_query = revision_order[offset * batch_size:(offset + 1) *
for revision_id in to_query:
parent_ids = ancestors[revision_id]
for text_key in revision_keys[revision_id]:
pb.update("Calculating text parents", processed_texts)
processed_texts += 1
candidate_parents = []
for parent_id in parent_ids:
parent_text_key = (text_key[0], parent_id)
check_parent = parent_text_key not in \
revision_keys[parent_id]
# the parent parent_id is a ghost:
check_parent = False
# truncate the derived graph against this ghost.
parent_text_key = None
# look at the parent commit details inventories to
# determine possible candidates in the per file graph.
inv = inventory_cache[parent_id]
inv = self.revision_tree(parent_id).inventory
inventory_cache[parent_id] = inv
parent_entry = inv[text_key[0]]
except (KeyError, errors.NoSuchId):
if parent_entry is not None:
text_key[0], parent_entry.revision)
parent_text_key = None
if parent_text_key is not None:
candidate_parents.append(
text_key_cache[parent_text_key])
parent_heads = text_graph.heads(candidate_parents)
new_parents = list(parent_heads)
new_parents.sort(key=lambda x:candidate_parents.index(x))
if new_parents == []:
new_parents = [NULL_REVISION]
text_index[text_key] = new_parents
for text_key in invalid_keys:
text_index[text_key] = [NULL_REVISION]
def item_keys_introduced_by(self, revision_ids, _files_pb=None):
"""Get an iterable listing the keys of all the data introduced by a set
The keys will be ordered so that the corresponding items can be safely
fetched and inserted in that order.
:returns: An iterable producing tuples of (knit-kind, file-id,
versions). knit-kind is one of 'file', 'inventory', 'signatures',
'revisions'. file-id is None unless knit-kind is 'file'.
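For example (illustrative only), a caller might see::

    ('file', 'file-id-1', set(['rev-1', 'rev-2']))
    ('inventory', None, ['rev-1', 'rev-2'])
    ('signatures', None, set(['rev-2']))
    ('revisions', None, ['rev-1', 'rev-2'])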
for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
for result in self._find_non_file_keys_to_fetch(revision_ids):
def _find_file_keys_to_fetch(self, revision_ids, pb):
# XXX: it's a bit weird to control the inventory weave caching in this
# generator. Ideally the caching would be done in fetch.py I think. Or
# maybe this generator should explicitly have the contract that it
# should not be iterated until the previously yielded item has been
inv_w = self.inventories
# file ids that changed
file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
num_file_ids = len(file_ids)
for file_id, altered_versions in file_ids.iteritems():
pb.update("fetch texts", count, num_file_ids)
yield ("file", file_id, altered_versions)
def _find_non_file_keys_to_fetch(self, revision_ids):
yield ("inventory", None, revision_ids)
# XXX: Note ATM no callers actually pay attention to this return
# instead they just use the list of revision ids and ignore
# missing sigs. Consider removing this work entirely
revisions_with_signatures = set(self.signatures.get_parent_map(
[(r,) for r in revision_ids]))
revisions_with_signatures = set(
[r for (r,) in revisions_with_signatures])
revisions_with_signatures.intersection_update(revision_ids)
yield ("signatures", None, revisions_with_signatures)
yield ("revisions", None, revision_ids)
def get_inventory(self, revision_id):
"""Get Inventory object by revision id."""
return self.iter_inventories([revision_id]).next()
def iter_inventories(self, revision_ids, ordering=None):
"""Get many inventories by revision_ids.
This will buffer some or all of the texts used in constructing the
inventories in memory, but will only parse a single inventory at a
:param revision_ids: The expected revision ids of the inventories.
:param ordering: optional ordering, e.g. 'topological'. If not
specified, the order of revision_ids will be preserved (by
buffering if necessary).
:return: An iterator of inventories.
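A usage sketch (illustrative only; process is a placeholder for the
caller's own handling)::

    for inv in repository.iter_inventories(['rev-1', 'rev-2']):
        process(inv.revision_id)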
2372
if ((None in revision_ids)
2373
or (_mod_revision.NULL_REVISION in revision_ids)):
2374
raise ValueError('cannot get null revision inventory')
2375
return self._iter_inventories(revision_ids, ordering)
2377
def _iter_inventories(self, revision_ids, ordering):
2378
"""single-document based inventory iteration."""
2379
inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
2380
for text, revision_id in inv_xmls:
2381
yield self.deserialise_inventory(revision_id, text)
2383
def _iter_inventory_xmls(self, revision_ids, ordering):
2384
if ordering is None:
2385
order_as_requested = True
2386
ordering = 'unordered'
2388
order_as_requested = False
2389
keys = [(revision_id,) for revision_id in revision_ids]
2392
if order_as_requested:
2393
key_iter = iter(keys)
2394
next_key = key_iter.next()
2395
stream = self.inventories.get_record_stream(keys, ordering, True)
2397
for record in stream:
2398
if record.storage_kind != 'absent':
2399
chunks = record.get_bytes_as('chunked')
2400
if order_as_requested:
2401
text_chunks[record.key] = chunks
2403
yield ''.join(chunks), record.key[-1]
2405
raise errors.NoSuchRevision(self, record.key)
2406
if order_as_requested:
2407
# Yield as many results as we can while preserving order.
2408
while next_key in text_chunks:
2409
chunks = text_chunks.pop(next_key)
2410
yield ''.join(chunks), next_key[-1]
2412
next_key = key_iter.next()
2413
except StopIteration:
2414
# We still want to fully consume the get_record_stream,
2415
# just in case it is not actually finished at this point
2419
def deserialise_inventory(self, revision_id, xml):
2420
"""Transform the xml into an inventory object.
2422
:param revision_id: The expected revision id of the inventory.
2423
:param xml: A serialised inventory.
2425
result = self._serializer.read_inventory_from_string(xml, revision_id,
2426
entry_cache=self._inventory_entry_cache)
2427
if result.revision_id != revision_id:
2428
raise AssertionError('revision id mismatch %s != %s' % (
2429
result.revision_id, revision_id))
2432
def serialise_inventory(self, inv):
2433
return self._serializer.write_inventory_to_string(inv)
2435
def _serialise_inventory_to_lines(self, inv):
2436
return self._serializer.write_inventory_to_lines(inv)
2438
def get_serializer_format(self):
2439
return self._serializer.format_num
2442
def get_inventory_xml(self, revision_id):
2443
"""Get inventory XML as a file object."""
2444
texts = self._iter_inventory_xmls([revision_id], 'unordered')
2446
text, revision_id = texts.next()
2447
except StopIteration:
2448
raise errors.HistoryMissing(self, 'inventory', revision_id)
2452
def get_inventory_sha1(self, revision_id):
2453
"""Return the sha1 hash of the inventory entry
2455
return self.get_revision(revision_id).inventory_sha1
949
2457
def get_rev_id_for_revno(self, revno, known_pair):
950
2458
"""Return the revision id of a revno, given a later (revno, revid)
1254
2797
except UnicodeDecodeError:
1255
2798
raise errors.NonAsciiRevisionId(method, self)
2800
def revision_graph_can_have_wrong_parents(self):
2801
"""Is it possible for this repository to have a revision graph with
2804
If True, then this repository must also implement
2805
_find_inconsistent_revision_parents so that check and reconcile can
2806
check for inconsistencies before proceeding with other checks that may
2807
depend on the revision index being consistent.
2809
raise NotImplementedError(self.revision_graph_can_have_wrong_parents)
2812
# remove these delegates a while after bzr 0.15
2813
def __make_delegated(name, from_module):
2814
def _deprecated_repository_forwarder():
2815
symbol_versioning.warn('%s moved to %s in bzr 0.15'
2816
% (name, from_module),
2819
m = __import__(from_module, globals(), locals(), [name])
2821
return getattr(m, name)
2822
except AttributeError:
2823
raise AttributeError('module %s has no name %s'
2825
globals()[name] = _deprecated_repository_forwarder
2828
'AllInOneRepository',
2829
'WeaveMetaDirRepository',
2830
'PreSplitOutRepositoryFormat',
2831
'RepositoryFormat4',
2832
'RepositoryFormat5',
2833
'RepositoryFormat6',
2834
'RepositoryFormat7',
2836
__make_delegated(_name, 'bzrlib.repofmt.weaverepo')
2840
'RepositoryFormatKnit',
2841
'RepositoryFormatKnit1',
2843
__make_delegated(_name, 'bzrlib.repofmt.knitrepo')
2846
def install_revision(repository, rev, revision_tree):
2847
"""Install all revision data into a repository."""
2848
install_revisions(repository, [(rev, revision_tree, None)])
2851
def install_revisions(repository, iterable, num_revisions=None, pb=None):
2852
"""Install all revision data into a repository.
2854
Accepts an iterable of revision, tree, signature tuples. The signature
2857
repository.start_write_group()
2859
inventory_cache = lru_cache.LRUCache(10)
2860
for n, (revision, revision_tree, signature) in enumerate(iterable):
2861
_install_revision(repository, revision, revision_tree, signature,
2864
pb.update('Transferring revisions', n + 1, num_revisions)
2866
repository.abort_write_group()
2869
repository.commit_write_group()
2872
def _install_revision(repository, rev, revision_tree, signature,
2874
"""Install all revision data into a repository."""
2875
present_parents = []
2877
for p_id in rev.parent_ids:
2878
if repository.has_revision(p_id):
2879
present_parents.append(p_id)
2880
parent_trees[p_id] = repository.revision_tree(p_id)
2882
parent_trees[p_id] = repository.revision_tree(
2883
_mod_revision.NULL_REVISION)
2885
inv = revision_tree.inventory
2886
entries = inv.iter_entries()
2887
# backwards compatibility hack: skip the root id.
2888
if not repository.supports_rich_root():
2889
path, root = entries.next()
2890
if root.revision != rev.revision_id:
2891
raise errors.IncompatibleRevision(repr(repository))
2893
for path, ie in entries:
2894
text_keys[(ie.file_id, ie.revision)] = ie
2895
text_parent_map = repository.texts.get_parent_map(text_keys)
2896
missing_texts = set(text_keys) - set(text_parent_map)
2897
# Add the texts that are not already present
2898
for text_key in missing_texts:
2899
ie = text_keys[text_key]
2901
# FIXME: TODO: The following loop overlaps/duplicates that done by
2902
# commit to determine parents. There is a latent/real bug here where
2903
# the parents inserted are not those commit would do - in particular
2904
# they are not filtered by heads(). RBC, AB
2905
for revision, tree in parent_trees.iteritems():
2906
if ie.file_id not in tree:
2908
parent_id = tree.inventory[ie.file_id].revision
2909
if parent_id in text_parents:
2911
text_parents.append((ie.file_id, parent_id))
2912
lines = revision_tree.get_file(ie.file_id).readlines()
2913
repository.texts.add_lines(text_key, text_parents, lines)
2915
# install the inventory
2916
if repository._format._commit_inv_deltas and len(rev.parent_ids):
2917
# Cache this inventory
2918
inventory_cache[rev.revision_id] = inv
2920
basis_inv = inventory_cache[rev.parent_ids[0]]
2922
repository.add_inventory(rev.revision_id, inv, present_parents)
2924
delta = inv._make_delta(basis_inv)
2925
repository.add_inventory_by_delta(rev.parent_ids[0], delta,
2926
rev.revision_id, present_parents)
2928
repository.add_inventory(rev.revision_id, inv, present_parents)
2929
except errors.RevisionAlreadyPresent:
2931
if signature is not None:
2932
repository.add_signature_text(rev.revision_id, signature)
2933
repository.add_revision(rev.revision_id, rev, inv)
1258
2936
class MetaDirRepository(Repository):
1259
2937
"""Repositories in the new meta-dir layout.


# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )

# Development formats.
# Check their docstrings to see if/when they are obsolete.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.knitpack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )
format_registry.register_lazy(
    'Bazaar development format 8\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2aSubtree',
    )
# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
    ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )
format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
    'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
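
# Illustrative sketch, not part of bzrlib: register_lazy() stores only the
# format string plus a module and class name, and the module is imported on
# first lookup.  A lookup therefore uses the full format string, trailing
# newline included.  This helper is hypothetical:
def _example_lookup_2a():
    return format_registry.get(
        'Bazaar repository format 2a (needs bzr 1.16 or later)\n')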


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like fetch and contain references to the
    source and target repositories these operations can be carried out on.
    """

    _optimisers = []
    _walk_to_common_revisions_batch_size = 50

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        :param revision_id: Only copy the content needed to construct
            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(
                self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
            provided a default one will be created.
        :return: None.
        """
        from bzrlib.fetch import RepoFetcher
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        pb=pb, find_ghosts=find_ghosts)
    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked
            # for them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point,
                # make sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()
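
    # Illustrative sketch, not part of bzrlib: the batching idea above in
    # miniature.  `searcher` yields sets of revision ids breadth-first and
    # `have` is the set the target already has; all names are hypothetical.
    #
    #   missing = set()
    #   while True:
    #       batch, exhausted = set(), False
    #       while len(batch) < batch_size:
    #           try:
    #               batch.update(searcher.next())
    #           except StopIteration:
    #               exhausted = True
    #               break
    #       missing.update(batch - have)
    #       searcher.stop_searching_any(batch & have)
    #       if exhausted:
    #           break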

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
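
    # Illustrative sketch, not part of bzrlib: the typical call pattern from
    # client code.  `tip` is a hypothetical revision id; InterRepository.get
    # is this module's factory.
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   missing = inter.search_missing_revision_ids(
    #       revision_id=tip).get_keys()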

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            # different rich-root support
            return False
        if source._serializer != target._serializer:
            # different serializers
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(
                self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplementedError):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want, then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(
                possibly_present_revisions))
        required_revisions = source_ids_set.difference(
            actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are
            # assured all revisions referenced are present as they are
            # installed in topological order. and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to
            # validate that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(
                    required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(
                possibly_present_revisions))
        required_revisions = source_ids_set.difference(
            actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are
            # assured all revisions referenced are present as they are
            # installed in topological order. and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to
            # validate that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(
                    required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however
        # that raises an exception, and we just want to say "False" as in we
        # won't support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
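
    # Illustrative note, not part of bzrlib: deltas.sort() orders the
    # (len(delta), basis_id, delta) tuples by delta length first, so
    # deltas[0][1:] is the (basis_id, delta) pair with the fewest changed
    # entries.  For example (hypothetical values):
    #
    #   deltas = [(3, 'rev-b', delta_b), (1, 'rev-a', delta_a)]
    #   deltas.sort()
    #   basis_id, delta = deltas[0][1:]    # -> ('rev-a', delta_a)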

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            from bzrlib.fetch import _new_root_data_stream
            root_stream = _new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in
            # an earlier batch.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
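
# Illustrative sketch, not part of bzrlib: InterRepository.get() walks the
# registered optimisers and returns the first compatible one, falling back
# to the generic class.  A hedged miniature of that resolution (the real
# logic lives in InterObject.get); this helper is hypothetical:
def _example_resolve_inter(source, target):
    for provider in InterRepository._optimisers:
        if provider.is_compatible(source, target):
            return provider(source, target)
    return InterRepository(source, target)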


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving it the current state
        of the document.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        pb = ui.ui_factory.nested_progress_bar()
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        pb.update(gettext('Moving repository to repository.backup'))
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        pb.update(gettext('Creating new repository'))
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            pb.update(gettext('Copying content'))
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        pb.update(gettext('Deleting old repository content'))
        self.repo_dir.transport.delete_tree('repository.backup')
        ui.ui_factory.note(gettext('repository converted'))
        pb.finished()
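
# Illustrative sketch, not part of bzrlib: a hypothetical driver for
# CopyConverter.  `repo` is an open repository and `pb` a progress bar from
# ui.ui_factory, as in real callers.
def _example_convert(repo, target_format, pb):
    converter = CopyConverter(target_format)
    converter.convert(repo, pb)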


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
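
# Illustrative example, not part of bzrlib: named entities come from
# _unescape_map, numeric character references go through unichr(), e.g.
#   _unescape_xml('&amp;b &#65;') == '&b A'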


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None, ancestors=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references, ancestors=ancestors)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)
def check_file_version_parents(self, texts, progress_bar=None):
4142
"""Check the parents stored in a versioned file are correct.
4144
It also detects file versions that are not referenced by their
4145
corresponding revision's inventory.
4147
:returns: A tuple of (wrong_parents, dangling_file_versions).
4148
wrong_parents is a dict mapping {revision_id: (stored_parents,
4149
correct_parents)} for each revision_id where the stored parents
4150
are not correct. dangling_file_versions is a set of (file_id,
4151
revision_id) tuples for versions that are present in this versioned
4152
file, but not used by the corresponding inventory.
4154
local_progress = None
4155
if progress_bar is None:
4156
local_progress = ui.ui_factory.nested_progress_bar()
4157
progress_bar = local_progress
4159
return self._check_file_version_parents(texts, progress_bar)
4162
local_progress.finished()

    def _check_file_version_parents(self, texts, progress_bar):
        """See check_file_version_parents."""
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys
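
# Illustrative sketch, not part of bzrlib: a hypothetical driver that checks
# every file text's parents in `repo` and returns the problems found.
def _example_check_texts(repo):
    repo.lock_read()
    try:
        checker = _VersionedFileChecker(repo)
        return checker.check_file_version_parents(repo.texts)
    finally:
        repo.unlock()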


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set
            # the write cache size on the new pack. This avoids poor
            # performance on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should
            # read back from the target repository while a stream with
            # matching serialization is being inserted.
            # The exception is that a delta record from the source that
            # should be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care
            # to explicitly flush any buffered writes first in that rare
            # case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new
        # pack (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest
            # of the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()
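
# Illustrative sketch, not part of bzrlib: a hypothetical end-to-end pairing
# of StreamSource (below) and StreamSink.  `search` is a SearchResult such
# as the one returned by search_missing_revision_ids; a real fetch also
# feeds the returned missing_keys back via get_stream_for_missing_keys and
# resumes with the tokens.
def _example_stream_copy(source_repo, target_repo, search):
    source = StreamSource(source_repo, target_repo._format)
    sink = StreamSink(target_repo)
    stream = source.get_stream(search)
    tokens, missing_keys = sink.insert_stream(
        stream, source_repo._format, [])
    return tokens, missing_keys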


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []
    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories references
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(
                    keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink.
                # (Think e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round
            # trips after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The source is using CHKs, but the target either doesn't or it has a
        # different serializer. The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
               delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision,
        # not per-repo (e.g. streaming a non-rich-root revision out of a
        # rich-root repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta
                # against each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis,
                        # so we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index