                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
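    # Illustrative note (not part of bzrlib): record_entry_contents returns a
    # (delta_item, version_recorded, fingerprint) triple, as the two return
    # statements above show. A hedged usage sketch, with `builder`, `ie` and
    # the other names assumed to be in scope:
    #
    #   delta_item, recorded, _ = builder.record_entry_contents(
    #       ie, parent_invs, path, tree, content_summary)
    #   if recorded:
    #       any_entries_changed = True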
    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                basis_entry.revision,
                                change[3].revision]
                            parent_entries[change[2]] = {
                                basis_entry.revision:basis_entry,
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            # changed_content, versioned, parent, name, kind,
            # executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and if not do a delta
            # against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
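    # Illustrative sketch (not part of bzrlib): how a caller is expected to
    # drive record_iter_changes. `builder`, `work_tree` and `basis_tree` are
    # hypothetical, and the generator must be fully consumed so every change
    # is recorded:
    #
    #   changes = work_tree.iter_changes(basis_tree)
    #   for file_id, relpath, fs_hash in builder.record_iter_changes(
    #           work_tree, basis_tree.get_revision_id(), changes):
    #       work_tree._observed_sha1(file_id, relpath, fs_hash)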
    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
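    # Illustrative note (not part of bzrlib): text graph keys are
    # (file_id, revision_id) pairs, so for per-file graph heads 'rev-a' and
    # 'rev-b' the construction above yields:
    #
    #   parent_keys = (('file-1', 'rev-a'), ('file-1', 'rev-b'))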
class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

        # The old API returned a list, should this actually be a set?
        return parent_map.keys()
    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
            or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object, current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break
    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))
    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
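    # Illustrative note (not part of bzrlib): the chunked and fulltext paths
    # above hash the same bytes, e.g.:
    #
    #   chunks = ['line one\n', 'line two\n']
    #   assert osutils.sha_strings(chunks) == osutils.sha_string(''.join(chunks))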
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.

        :return: it may return an opaque hint that can be passed to 'pack'.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        result = self._commit_write_group()
        self._write_group = None
        return result
    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)
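    # Illustrative sketch (not part of bzrlib): the write-group protocol that
    # commit_write_group/suspend_write_group take part in, for a hypothetical
    # write-locked repository `repo`:
    #
    #   repo.start_write_group()
    #   try:
    #       ...  # insert texts, inventories, revisions
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()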
    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')
        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.satisfy_refs_for_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys
    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)
    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy. This
            allows copying multiple heads at once. Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        # TODO: lift out to somewhere common with RemoteRepository
        # <https://bugs.edge.launchpad.net/bzr/+bug/401646>
        if (self.has_same_location(source)
            and fetch_spec is None
            and self._has_same_fallbacks(source)):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)
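    # Illustrative usage (not part of bzrlib): callers that need a specific
    # order pass it explicitly; otherwise the order of revision_ids is
    # preserved, buffering if necessary. `repo` is hypothetical:
    #
    #   for inv in repo.iter_inventories(revision_ids, ordering='topological'):
    #       ...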
    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def _serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)
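    # Illustrative note (not part of bzrlib): for a given serializer the two
    # methods above are inverses, e.g.:
    #
    #   xml = repo._serialise_inventory(inv)
    #   assert repo._deserialise_inventory(inv.revision_id, xml).revision_id \
    #       == inv.revision_id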
    def _serialise_inventory_to_lines(self, inv):
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):
    def _get_repo_format_to_test(self):
        return None

    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True
    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees
    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
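    # Illustrative note (not part of bzrlib): deltas holds
    # (len(delta), basis_id, delta) tuples, so after the sort deltas[0][1:]
    # is the (basis_id, delta) pair with the fewest changed entries - e.g. a
    # 3-entry delta against one parent beats a 10-entry delta against another.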
    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree
    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id
    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        if self._converting_to_rich_root and len(revision_ids) > 100:
            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
                revision_ids)
        else:
            a_graph = None
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                    len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache,
                    a_graph=a_graph)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
            len(revision_ids))
    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        # nb this is only active for local-local fetches; other things using
        ui.ui_factory.warn_cross_format_fetch(self.source._format,
            self.target._format)
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0
    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree
InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
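# Illustrative note (not part of bzrlib): InterRepository.get() walks the
# registered optimisers and returns the first whose is_compatible() accepts
# the (source, target) pair, falling back to a generic implementation:
#
#   inter = InterRepository.get(source_repo, target_repo)  # names hypothetical
#   inter.fetch(revision_id=rev_id)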
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format, is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                ui.ui_factory.warn_cross_format_fetch(src_format,
                    self.target_repo._format)
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                # conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()
4368
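    # Illustrative sketch (not part of the API): how a caller might drive the
    # suspend/resume protocol implemented above. The names `sink`, `source`,
    # `stream` and `src_format` are hypothetical stand-ins for the real fetch
    # plumbing, which reaches _locked_insert_stream via insert_stream.
    #
    #   resume_tokens, missing_keys = sink.insert_stream(
    #       stream, src_format, [])
    #   while missing_keys:
    #       # Stream just the missing records, then retry with the tokens so
    #       # the suspended write group is resumed rather than restarted.
    #       fill = source.get_stream_for_missing_keys(missing_keys)
    #       resume_tokens, missing_keys = sink.insert_stream(
    #           fill, src_format, resume_tokens)
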
    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it) and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is, on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

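    # For example, when fetching between two repositories of the same pack
    # format (matching serializers and _fetch_uses_deltas set), revision and
    # signature records may stay as deltas on the wire; any serializer
    # mismatch forces fulltexts so the sink can re-serialize each record as
    # it inserts it.
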
    def _fetch_revision_texts(self, revs):
        # Fetch signatures first and then the revision texts; this
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # bug 261339
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # We fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

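    # Illustrative consumer (with a hypothetical `source` and `search`): each
    # item yielded by get_stream is a (substream_type, record_stream) pair,
    # matched by the dispatch loop in StreamSink._locked_insert_stream above.
    #
    #   for substream_type, substream in source.get_stream(search):
    #       # substream_type is e.g. 'texts', 'inventories',
    #       # 'inventory-deltas', 'signatures' or 'revisions'.
    #       handle(substream_type, substream)  # hypothetical handler
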
    def get_stream_for_missing_keys(self, missing_keys):
        # Missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink. (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)

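    # To summarise the dispatch above: matching network names (or equal
    # serializers between two non-chk formats) let inventory records be
    # streamed as-is via _get_simple_inventory_stream; any other combination,
    # or the 'forceinvdeltas' debug flag, converts through
    # _get_convertable_inventory_stream instead.
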
    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

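    # Note: the final argument to get_record_stream above is its
    # include_delta_closure flag. Passing True (as the missing-keys path
    # does) asks the source to include enough compression-parent data for
    # the sink to reconstruct fulltexts without further round trips.
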
    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventory deltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to convert on the target,
        # so we need to put bytes-on-the-wire that can be converted. That
        # means inventory deltas (if the remote is <1.19, RemoteStreamSink
        # will fall back to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
                                       delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)

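    # Worked example of the basis selection above: streaming revision A (no
    # parents previously sent) and then B (whose parent is A), A is sent as
    # a delta against NULL_REVISION (effectively a full inventory); after
    # that A is in invs_sent_so_far and the LRU cache, so B can usually be
    # sent as a much smaller delta against A.

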
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # Skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration: