ie.revision = parent_entry.revision
return self._get_delta(ie, basis_inv, path), False, None
ie.reference_revision = content_summary[3]
if ie.reference_revision is None:
raise AssertionError("invalid content_summary for nested tree: %r"
% (content_summary,))
self._add_text_to_weave(ie.file_id, '', heads, None)
self._add_text_to_weave(ie.file_id, lines, heads, None)
raise NotImplementedError('unknown kind')
ie.revision = self._new_revision_id
self._any_changes = True
return self._get_delta(ie, basis_inv, path), True, fingerprint
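# Editorial sketch, not from the original source: both return statements
# above hand back an inventory-delta entry in the (old_path, new_path,
# file_id, new_inventory_entry) form used throughout this module, plus a
# flag for whether a new text version was recorded and an optional
# fingerprint. A hypothetical delta entry for a rename looks like:
#
#   delta_item = ('old/name.txt', 'new/name.txt', 'file-id-1', ie)
#   # old_path is None for an add, new_path is None for a delete.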
def record_iter_changes(self, tree, basis_revision_id, iter_changes,
_entry_factory=entry_factory):
"""Record a new tree via iter_changes.

:param tree: The tree to obtain text contents from for changed objects.
:param basis_revision_id: The revision id of the tree the iter_changes
has been generated against. Currently assumed to be the same
as self.parents[0] - if it is not, errors may occur.
:param iter_changes: An iter_changes iterator with the changes to apply
to basis_revision_id. The iterator must not include any items with
a current kind of None - missing items must be either filtered out
or errored on before record_iter_changes sees the item.
:param _entry_factory: Private method to bind entry_factory locally for
:return: A generator of (file_id, relpath, fs_hash) tuples for use with
# Create an inventory delta based on deltas between all the parents and
# deltas between all the parent inventories. We use inventory deltas
# between the inventory objects because iter_changes masks
# last-changed-field only changes.
# file_id -> change map, change is fileid, paths, changed, versioneds,
# parents, names, kinds, executables
# {file_id -> revision_id -> inventory entry, for entries in parent
# trees that are not parents[0]
602
revtrees = list(self.repository.revision_trees(self.parents))
603
except errors.NoSuchRevision:
604
# one or more ghosts, slow path.
606
for revision_id in self.parents:
608
revtrees.append(self.repository.revision_tree(revision_id))
609
except errors.NoSuchRevision:
611
basis_revision_id = _mod_revision.NULL_REVISION
613
revtrees.append(self.repository.revision_tree(
614
_mod_revision.NULL_REVISION))
615
# The basis inventory from a repository
617
basis_inv = revtrees[0].inventory
619
basis_inv = self.repository.revision_tree(
620
_mod_revision.NULL_REVISION).inventory
621
if len(self.parents) > 0:
622
if basis_revision_id != self.parents[0] and not ghost_basis:
624
"arbitrary basis parents not yet supported with merges")
625
for revtree in revtrees[1:]:
626
for change in revtree.inventory._make_delta(basis_inv):
627
if change[1] is None:
628
# Not present in this parent.
630
if change[2] not in merged_ids:
631
if change[0] is not None:
632
basis_entry = basis_inv[change[2]]
633
merged_ids[change[2]] = [
635
basis_entry.revision,
638
parent_entries[change[2]] = {
640
basis_entry.revision:basis_entry,
642
change[3].revision:change[3],
645
merged_ids[change[2]] = [change[3].revision]
646
parent_entries[change[2]] = {change[3].revision:change[3]}
648
merged_ids[change[2]].append(change[3].revision)
649
parent_entries[change[2]][change[3].revision] = change[3]
652
# Setup the changes from the tree:
653
# changes maps file_id -> (change, [parent revision_ids])
655
for change in iter_changes:
656
# This probably looks up in basis_inv way too much.
657
if change[1][0] is not None:
658
head_candidate = [basis_inv[change[0]].revision]
661
changes[change[0]] = change, merged_ids.get(change[0],
663
unchanged_merged = set(merged_ids) - set(changes)
664
# Extend the changes dict with synthetic changes to record merges of
666
for file_id in unchanged_merged:
667
# Record a merged version of these items that did not change vs the
668
# basis. This can be either identical parallel changes, or a revert
669
# of a specific file after a merge. The recorded content will be
670
# that of the current tree (which is the same as the basis), but
671
# the per-file graph will reflect a merge.
672
# NB:XXX: We are reconstructing path information we had, this
673
# should be preserved instead.
674
# inv delta change: (file_id, (path_in_source, path_in_target),
675
# changed_content, versioned, parent, name, kind,
678
basis_entry = basis_inv[file_id]
679
except errors.NoSuchId:
680
# a change from basis->some_parents but file_id isn't in basis
681
# so was new in the merge, which means it must have changed
682
# from basis -> current, and as it hasn't the add was reverted
683
# by the user. So we discard this change.
687
(basis_inv.id2path(file_id), tree.id2path(file_id)),
689
(basis_entry.parent_id, basis_entry.parent_id),
690
(basis_entry.name, basis_entry.name),
691
(basis_entry.kind, basis_entry.kind),
692
(basis_entry.executable, basis_entry.executable))
693
changes[file_id] = (change, merged_ids[file_id])
694
# changes contains tuples with the change and a set of inventory
695
# candidates for the file.
697
# old_path, new_path, file_id, new_inventory_entry
698
seen_root = False # Is the root in the basis delta?
699
inv_delta = self._basis_delta
700
modified_rev = self._new_revision_id
701
for change, head_candidates in changes.values():
702
if change[3][1]: # versioned in target.
703
# Several things may be happening here:
704
# We may have a fork in the per-file graph
705
# - record a change with the content from tree
706
# We may have a change against < all trees
707
# - carry over the tree that hasn't changed
708
# We may have a change against all trees
709
# - record the change with the content from tree
712
entry = _entry_factory[kind](file_id, change[5][1],
714
head_set = self._heads(change[0], set(head_candidates))
717
for head_candidate in head_candidates:
718
if head_candidate in head_set:
719
heads.append(head_candidate)
720
head_set.remove(head_candidate)
723
# Could be a carry-over situation:
724
parent_entry_revs = parent_entries.get(file_id, None)
725
if parent_entry_revs:
726
parent_entry = parent_entry_revs.get(heads[0], None)
729
if parent_entry is None:
730
# The parent that iter_changes was called against is the one
# that is the per-file head, so any change is relevant and
# iter_changes is valid.
733
carry_over_possible = False
735
# could be a carry over situation
736
# A change against the basis may just indicate a merge,
737
# we need to check the content against the source of the
738
# merge to determine if it was changed after the merge
740
if (parent_entry.kind != entry.kind or
741
parent_entry.parent_id != entry.parent_id or
742
parent_entry.name != entry.name):
743
# Metadata common to all entries has changed
744
# against per-file parent
745
carry_over_possible = False
747
carry_over_possible = True
748
# per-type checks for changes against the parent_entry
751
# Cannot be a carry-over situation
752
carry_over_possible = False
753
# Populate the entry in the delta
755
# XXX: There is still a small race here: If someone reverts the content of a file
756
# after iter_changes examines and decides it has changed,
757
# we will unconditionally record a new version even if some
758
# other process reverts it while commit is running (with
759
# the revert happening after iter_changes did its
762
entry.executable = True
764
entry.executable = False
765
if (carry_over_possible and
766
parent_entry.executable == entry.executable):
767
# Check the file length, content hash after reading
769
nostore_sha = parent_entry.text_sha1
772
file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
774
text = file_obj.read()
778
entry.text_sha1, entry.text_size = self._add_text_to_weave(
779
file_id, text, heads, nostore_sha)
780
yield file_id, change[1][1], (entry.text_sha1, stat_value)
781
except errors.ExistingContent:
782
# No content change against a carry_over parent
783
# Perhaps this should also yield a fs hash update?
785
entry.text_size = parent_entry.text_size
786
entry.text_sha1 = parent_entry.text_sha1
787
elif kind == 'symlink':
789
entry.symlink_target = tree.get_symlink_target(file_id)
790
if (carry_over_possible and
791
parent_entry.symlink_target == entry.symlink_target):
794
self._add_text_to_weave(change[0], '', heads, None)
795
elif kind == 'directory':
796
if carry_over_possible:
799
# Nothing to set on the entry.
800
# XXX: split into the Root and nonRoot versions.
801
if change[1][1] != '' or self.repository.supports_rich_root():
802
self._add_text_to_weave(change[0], '', heads, None)
803
elif kind == 'tree-reference':
804
if not self.repository._format.supports_tree_reference:
805
# This isn't quite sane as an error, but we shouldn't
806
# ever see this code path in practice: trees don't
807
# permit references when the repo doesn't support tree
809
raise errors.UnsupportedOperation(tree.add_reference,
811
reference_revision = tree.get_reference_revision(change[0])
812
entry.reference_revision = reference_revision
813
if (carry_over_possible and
814
parent_entry.reference_revision == reference_revision):
817
self._add_text_to_weave(change[0], '', heads, None)
819
raise AssertionError('unknown kind %r' % kind)
821
entry.revision = modified_rev
823
entry.revision = parent_entry.revision
826
new_path = change[1][1]
827
inv_delta.append((change[1][0], new_path, change[0], entry))
830
self.new_inventory = None
832
# This should perhaps be guarded by a check that the basis we
833
# commit against is the basis for the commit and if not do a delta
835
self._any_changes = True
837
# housekeeping root entry changes do not affect no-change commits.
838
self._require_root_change(tree)
839
self.basis_delta_revision = basis_revision_id
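# Editorial sketch (hypothetical names, not part of bzrlib): a commit
# front-end drives record_iter_changes from inside a write group and must
# consume the generator fully before finishing the inventory.
#
#   builder = repo.get_commit_builder(branch, parents, config)
#   for file_id, relpath, fs_hash in builder.record_iter_changes(
#           tree, basis_revision_id, tree.iter_changes(basis_tree)):
#       # fs_hash is the (sha1, stat_value) pair yielded above; a caller
#       # may feed it back into the working tree's stat cache.
#       pass
#   builder.finish_inventory()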
841
def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
842
parent_keys = tuple([(file_id, parent) for parent in parents])
843
return self.repository.texts._add_text(
844
(file_id, self._new_revision_id), parent_keys, new_text,
845
nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
502
def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
503
# Note: as we read the content directly from the tree, we know it's not
504
# been turned into unicode or badly split - but a broken tree
505
# implementation could give us bad output from readlines() so this is
506
# not a guarantee of safety. What would be better is always checking
507
# the content during test suite execution. RBC 20070912
508
parent_keys = tuple((file_id, parent) for parent in parents)
509
return self.repository.texts.add_lines(
510
(file_id, self._new_revision_id), parent_keys, new_lines,
511
nostore_sha=nostore_sha, random_id=self.random_revid,
512
check_content=False)[0:2]
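# Editorial note (assumption, not from the source): the first
# _add_text_to_weave variant above passes a single text string to
# texts._add_text(), while this second variant passes a list of lines to
# texts.add_lines(); both build the same per-file parent keys:
#
#   file_id, parents = 'file-id-1', ['rev-a', 'rev-b']
#   parent_keys = tuple((file_id, parent) for parent in parents)
#   # -> (('file-id-1', 'rev-a'), ('file-id-1', 'rev-b'))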
848
515
class RootCommitBuilder(CommitBuilder):
1196
796
# The old API returned a list, should this actually be a set?
1197
797
return parent_map.keys()
1199
def _check_inventories(self, checker):
1200
"""Check the inventories found from the revision scan.
1202
This is responsible for verifying the sha1 of inventories and
1203
creating a pending_keys set that covers data referenced by inventories.
1205
bar = ui.ui_factory.nested_progress_bar()
1207
self._do_check_inventories(checker, bar)
1211
def _do_check_inventories(self, checker, bar):
1212
"""Helper for _check_inventories."""
1214
keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
1215
kinds = ['chk_bytes', 'texts']
1216
count = len(checker.pending_keys)
1217
bar.update("inventories", 0, 2)
1218
current_keys = checker.pending_keys
1219
checker.pending_keys = {}
1220
# Accumulate current checks.
1221
for key in current_keys:
1222
if key[0] != 'inventories' and key[0] not in kinds:
1223
checker._report_items.append('unknown key type %r' % (key,))
1224
keys[key[0]].add(key[1:])
1225
if keys['inventories']:
1226
# NB: output order *should* be roughly sorted - topo or
1227
# inverse topo depending on repository - either way decent
1228
# to just delta against. However, pre-CHK formats didn't
1229
# try to optimise inventory layout on disk. As such the
1230
# pre-CHK code path does not use inventory deltas.
1232
for record in self.inventories.check(keys=keys['inventories']):
1233
if record.storage_kind == 'absent':
1234
checker._report_items.append(
1235
'Missing inventory {%s}' % (record.key,))
1237
last_object = self._check_record('inventories', record,
1238
checker, last_object,
1239
current_keys[('inventories',) + record.key])
1240
del keys['inventories']
1243
bar.update("texts", 1)
1244
while (checker.pending_keys or keys['chk_bytes']
1246
# Something to check.
1247
current_keys = checker.pending_keys
1248
checker.pending_keys = {}
1249
# Accumulate current checks.
1250
for key in current_keys:
1251
if key[0] not in kinds:
1252
checker._report_items.append('unknown key type %r' % (key,))
1253
keys[key[0]].add(key[1:])
1254
# Check the outermost kind only - inventories || chk_bytes || texts
1258
for record in getattr(self, kind).check(keys=keys[kind]):
1259
if record.storage_kind == 'absent':
1260
checker._report_items.append(
1261
'Missing %s {%s}' % (kind, record.key,))
1263
last_object = self._check_record(kind, record,
1264
checker, last_object, current_keys[(kind,) + record.key])
1268
def _check_record(self, kind, record, checker, last_object, item_data):
1269
"""Check a single text from this repository."""
1270
if kind == 'inventories':
1271
rev_id = record.key[0]
1272
inv = self._deserialise_inventory(rev_id,
1273
record.get_bytes_as('fulltext'))
1274
if last_object is not None:
1275
delta = inv._make_delta(last_object)
1276
for old_path, path, file_id, ie in delta:
1279
ie.check(checker, rev_id, inv)
1281
for path, ie in inv.iter_entries():
1282
ie.check(checker, rev_id, inv)
1283
if self._format.fast_deltas:
1285
elif kind == 'chk_bytes':
1286
# No code written to check chk_bytes for this repo format.
1287
checker._report_items.append(
1288
'unsupported key type chk_bytes for %s' % (record.key,))
1289
elif kind == 'texts':
1290
self._check_text(record, checker, item_data)
1292
checker._report_items.append(
1293
'unknown key type %s for %s' % (kind, record.key))
1295
def _check_text(self, record, checker, item_data):
1296
"""Check a single text."""
1297
# Check it is extractable.
1298
# TODO: check length.
1299
if record.storage_kind == 'chunked':
1300
chunks = record.get_bytes_as(record.storage_kind)
1301
sha1 = osutils.sha_strings(chunks)
1302
length = sum(map(len, chunks))
1304
content = record.get_bytes_as('fulltext')
1305
sha1 = osutils.sha_string(content)
1306
length = len(content)
1307
if item_data and sha1 != item_data[1]:
1308
checker._report_items.append(
1309
'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
1310
(record.key, sha1, item_data[1], item_data[2]))
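# Editorial sketch (hypothetical data): the two branches above compute the
# same sha1 for the same content, whether it arrives chunked or as a
# single fulltext string:
#
#   chunks = ['line one\n', 'line two\n']
#   assert osutils.sha_strings(chunks) == osutils.sha_string(''.join(chunks))
#   length = sum(map(len, chunks))   # == len(''.join(chunks))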
1313
800
def create(a_bzrdir):
1314
801
"""Construct the current default format repository in a_bzrdir."""
1613
1084
def suspend_write_group(self):
1614
1085
raise errors.UnsuspendableWriteGroup(self)
1616
def get_missing_parent_inventories(self, check_for_missing_texts=True):
1617
"""Return the keys of missing inventory parents for revisions added in
1620
A revision is not complete if the inventory delta for that revision
1621
cannot be calculated. Therefore if the parent inventories of a
1622
revision are not present, the revision is incomplete, and e.g. cannot
1623
be streamed by a smart server. This method finds missing inventory
1624
parents for revisions added in this write group.
1626
if not self._format.supports_external_lookups:
1627
# This is only an issue for stacked repositories
1629
if not self.is_in_write_group():
1630
raise AssertionError('not in a write group')
1632
# XXX: We assume that every added revision already has its
1633
# corresponding inventory, so we only check for parent inventories that
1634
# might be missing, rather than all inventories.
1635
parents = set(self.revisions._index.get_missing_parents())
1636
parents.discard(_mod_revision.NULL_REVISION)
1637
unstacked_inventories = self.inventories._index
1638
present_inventories = unstacked_inventories.get_parent_map(
1639
key[-1:] for key in parents)
1640
parents.difference_update(present_inventories)
1641
if len(parents) == 0:
1642
# No missing parent inventories.
1644
if not check_for_missing_texts:
1645
return set(('inventories', rev_id) for (rev_id,) in parents)
1646
# Ok, now we have a list of missing inventories. But these only matter
1647
# if the inventories that reference them are missing some texts they
1648
# appear to introduce.
1649
# XXX: Texts referenced by all added inventories need to be present,
1650
# but at the moment we're only checking for texts referenced by
1651
# inventories at the graph's edge.
1652
key_deps = self.revisions._index._key_dependencies
1653
key_deps.satisfy_refs_for_keys(present_inventories)
1654
referrers = frozenset(r[0] for r in key_deps.get_referrers())
1655
file_ids = self.fileids_altered_by_revision_ids(referrers)
1656
missing_texts = set()
1657
for file_id, version_ids in file_ids.iteritems():
1658
missing_texts.update(
1659
(file_id, version_id) for version_id in version_ids)
1660
present_texts = self.texts.get_parent_map(missing_texts)
1661
missing_texts.difference_update(present_texts)
1662
if not missing_texts:
1663
# No texts are missing, so all revisions and their deltas are
1666
# Alternatively the text versions could be returned as the missing
1667
# keys, but this is likely to be less data.
1668
missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
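# Editorial note (hypothetical revision ids): missing parent inventories
# are reported in the same ('inventories', revision_id) shape used at both
# return points of this method, e.g.:
#
#   parents = set([('rev-missing-1',), ('rev-missing-2',)])
#   missing = set(('inventories', rev_id) for (rev_id,) in parents)
#   # -> set([('inventories', 'rev-missing-1'),
#   #         ('inventories', 'rev-missing-2')])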
1671
def refresh_data(self):
1672
"""Re-read any data needed to synchronise with disk.
1674
This method is intended to be called after another repository instance
1675
(such as one used by a smart server) has inserted data into the
1676
repository. On all repositories this will work outside of write groups.
1677
Some repository formats (pack and newer for bzrlib native formats)
1678
support refresh_data inside write groups. If called inside a write
1679
group on a repository that does not support refreshing in a write group
1680
IsInWriteGroupError will be raised.
1682
self._refresh_data()
1684
1087
def resume_write_group(self, tokens):
1685
1088
if not self.is_write_locked():
1686
1089
raise errors.NotWriteLocked(self)
1895
1273
@needs_read_lock
1896
1274
def get_revisions(self, revision_ids):
1897
"""Get many revisions at once.
1899
Repositories that need to check data on every revision read should
1900
subclass this method.
1275
"""Get many revisions at once."""
1902
1276
return self._get_revisions(revision_ids)
1904
1278
@needs_read_lock
1905
1279
def _get_revisions(self, revision_ids):
1906
1280
"""Core work logic to get many revisions without sanity checks."""
1281
for rev_id in revision_ids:
1282
if not rev_id or not isinstance(rev_id, basestring):
1283
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
1284
keys = [(key,) for key in revision_ids]
1285
stream = self.revisions.get_record_stream(keys, 'unordered', True)
1908
for revid, rev in self._iter_revisions(revision_ids):
1910
raise errors.NoSuchRevision(self, revid)
1287
for record in stream:
1288
if record.storage_kind == 'absent':
1289
raise errors.NoSuchRevision(self, record.key[0])
1290
text = record.get_bytes_as('fulltext')
1291
rev = self._serializer.read_revision_from_string(text)
1292
revs[record.key[0]] = rev
1912
1293
return [revs[revid] for revid in revision_ids]
1914
def _iter_revisions(self, revision_ids):
1915
"""Iterate over revision objects.
1917
:param revision_ids: An iterable of revisions to examine. None may be
1918
passed to request all revisions known to the repository. Note that
1919
not all repositories can find unreferenced revisions; for those
1920
repositories only referenced ones will be returned.
1921
:return: An iterator of (revid, revision) tuples. Absent revisions (
1922
those asked for but not available) are returned as (revid, None).
1924
if revision_ids is None:
1925
revision_ids = self.all_revision_ids()
1927
for rev_id in revision_ids:
1928
if not rev_id or not isinstance(rev_id, basestring):
1929
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
1930
keys = [(key,) for key in revision_ids]
1931
stream = self.revisions.get_record_stream(keys, 'unordered', True)
1932
for record in stream:
1933
revid = record.key[0]
1934
if record.storage_kind == 'absent':
1937
text = record.get_bytes_as('fulltext')
1938
rev = self._serializer.read_revision_from_string(text)
1941
def get_deltas_for_revisions(self, revisions, specific_fileids=None):
1296
def get_revision_xml(self, revision_id):
1297
# TODO: jam 20070210 This shouldn't be necessary since get_revision
1298
# would have already done it.
1299
# TODO: jam 20070210 Just use _serializer.write_revision_to_string()
1300
rev = self.get_revision(revision_id)
1301
rev_tmp = cStringIO.StringIO()
1302
# the current serializer..
1303
self._serializer.write_revision(rev, rev_tmp)
1305
return rev_tmp.getvalue()
1307
def get_deltas_for_revisions(self, revisions):
1942
1308
"""Produce a generator of revision deltas.
1944
1310
Note that the input is a sequence of REVISIONS, not revision_ids.
1945
1311
Trees will be held in memory until the generator exits.
1946
1312
Each delta is relative to the revision's lefthand predecessor.
1948
:param specific_fileids: if not None, the result is filtered
1949
so that only those file-ids, their parents and their
1950
children are included.
1952
# Get the revision-ids of interest
1953
1314
required_trees = set()
1954
1315
for revision in revisions:
1955
1316
required_trees.add(revision.revision_id)
1956
1317
required_trees.update(revision.parent_ids[:1])
1958
# Get the matching filtered trees. Note that it's more
1959
# efficient to pass filtered trees to changes_from() rather
1960
# than doing the filtering afterwards. changes_from() could
1961
# arguably do the filtering itself but it's path-based, not
1962
# file-id based, so filtering before or afterwards is
1964
if specific_fileids is None:
1965
trees = dict((t.get_revision_id(), t) for
1966
t in self.revision_trees(required_trees))
1968
trees = dict((t.get_revision_id(), t) for
1969
t in self._filtered_revision_trees(required_trees,
1972
# Calculate the deltas
1318
trees = dict((t.get_revision_id(), t) for
1319
t in self.revision_trees(required_trees))
1973
1320
for revision in revisions:
1974
1321
if not revision.parent_ids:
1975
1322
old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
2099
1444
result[key] = True
2102
def _inventory_xml_lines_for_keys(self, keys):
2103
"""Get a line iterator of the sort needed for findind references.
2105
Not relevant for non-xml inventory repositories.
2107
Ghosts in revision_keys are ignored.
2109
:param revision_keys: The revision keys for the inventories to inspect.
2110
:return: An iterator over (inventory line, revid) for the fulltexts of
2111
all of the xml inventories specified by revision_keys.
2113
stream = self.inventories.get_record_stream(keys, 'unordered', True)
2114
for record in stream:
2115
if record.storage_kind != 'absent':
2116
chunks = record.get_bytes_as('chunked')
2117
revid = record.key[-1]
2118
lines = osutils.chunks_to_lines(chunks)
2122
1447
def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
2124
1449
"""Helper routine for fileids_altered_by_revision_ids.
2126
1451
This performs the translation of xml lines to revision ids.
2128
1453
:param line_iterator: An iterator of lines, origin_version_id
2129
:param revision_keys: The revision ids to filter for. This should be a
1454
:param revision_ids: The revision ids to filter for. This should be a
2130
1455
set or other type which supports efficient __contains__ lookups, as
2131
the revision key from each parsed line will be looked up in the
2132
revision_keys filter.
1456
the revision id from each parsed line will be looked up in the
1457
revision_ids filter.
2133
1458
:return: a dictionary mapping altered file-ids to an iterable of
2134
1459
revision_ids. Each altered file-ids has the exact revision_ids that
2135
1460
altered it listed explicitly.
2137
seen = set(self._find_text_key_references_from_xml_inventory_lines(
2138
line_iterator).iterkeys())
2139
parent_keys = self._find_parent_keys_of_revisions(revision_keys)
2140
parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
2141
self._inventory_xml_lines_for_keys(parent_keys)))
2142
new_keys = seen - parent_seen
2144
1463
setdefault = result.setdefault
2145
for key in new_keys:
2146
setdefault(key[0], set()).add(key[-1])
1465
self._find_text_key_references_from_xml_inventory_lines(
1466
line_iterator).iterkeys():
1467
# once data is all ensured-consistent; then this is
1468
# if revision_id == version_id
1469
if key[-1:] in revision_ids:
1470
setdefault(key[0], set()).add(key[-1])
2149
def _find_parent_ids_of_revisions(self, revision_ids):
2150
"""Find all parent ids that are mentioned in the revision graph.
2152
:return: set of revisions that are parents of revision_ids which are
2153
not part of revision_ids themselves
2155
parent_map = self.get_parent_map(revision_ids)
2157
map(parent_ids.update, parent_map.itervalues())
2158
parent_ids.difference_update(revision_ids)
2159
parent_ids.discard(_mod_revision.NULL_REVISION)
2162
def _find_parent_keys_of_revisions(self, revision_keys):
2163
"""Similar to _find_parent_ids_of_revisions, but used with keys.
2165
:param revision_keys: An iterable of revision_keys.
2166
:return: The parents of all revision_keys that are not already in
2169
parent_map = self.revisions.get_parent_map(revision_keys)
2171
map(parent_keys.update, parent_map.itervalues())
2172
parent_keys.difference_update(revision_keys)
2173
parent_keys.discard(_mod_revision.NULL_REVISION)
2176
1473
def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
2177
1474
"""Find the file ids and versions affected by revisions.
2382
1676
"""Get Inventory object by revision id."""
2383
1677
return self.iter_inventories([revision_id]).next()
2385
def iter_inventories(self, revision_ids, ordering=None):
1679
def iter_inventories(self, revision_ids):
2386
1680
"""Get many inventories by revision_ids.
2388
1682
This will buffer some or all of the texts used in constructing the
2389
1683
inventories in memory, but will only parse a single inventory at a
2392
:param revision_ids: The expected revision ids of the inventories.
2393
:param ordering: optional ordering, e.g. 'topological'. If not
2394
specified, the order of revision_ids will be preserved (by
2395
buffering if necessary).
2396
1686
:return: An iterator of inventories.
2398
1688
if ((None in revision_ids)
2399
1689
or (_mod_revision.NULL_REVISION in revision_ids)):
2400
1690
raise ValueError('cannot get null revision inventory')
2401
return self._iter_inventories(revision_ids, ordering)
1691
return self._iter_inventories(revision_ids)
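# Editorial usage sketch (hypothetical revision ids): ordering only
# affects the order inventories are yielded in; passing None preserves
# the order of revision_ids by buffering as needed.
#
#   for inv in repo.iter_inventories(['rev-1', 'rev-2'], 'topological'):
#       handle(inv)   # handle() is hypothetical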
2403
def _iter_inventories(self, revision_ids, ordering):
1693
def _iter_inventories(self, revision_ids):
2404
1694
"""single-document based inventory iteration."""
2405
inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
2406
for text, revision_id in inv_xmls:
2407
yield self._deserialise_inventory(revision_id, text)
1695
for text, revision_id in self._iter_inventory_xmls(revision_ids):
1696
yield self.deserialise_inventory(revision_id, text)
2409
def _iter_inventory_xmls(self, revision_ids, ordering):
2410
if ordering is None:
2411
order_as_requested = True
2412
ordering = 'unordered'
2414
order_as_requested = False
1698
def _iter_inventory_xmls(self, revision_ids):
2415
1699
keys = [(revision_id,) for revision_id in revision_ids]
2418
if order_as_requested:
2419
key_iter = iter(keys)
2420
next_key = key_iter.next()
2421
stream = self.inventories.get_record_stream(keys, ordering, True)
1700
stream = self.inventories.get_record_stream(keys, 'unordered', True)
2422
1701
text_chunks = {}
2423
1702
for record in stream:
2424
1703
if record.storage_kind != 'absent':
2425
chunks = record.get_bytes_as('chunked')
2426
if order_as_requested:
2427
text_chunks[record.key] = chunks
2429
yield ''.join(chunks), record.key[-1]
1704
text_chunks[record.key] = record.get_bytes_as('chunked')
2431
1706
raise errors.NoSuchRevision(self, record.key)
2432
if order_as_requested:
2433
# Yield as many results as we can while preserving order.
2434
while next_key in text_chunks:
2435
chunks = text_chunks.pop(next_key)
2436
yield ''.join(chunks), next_key[-1]
2438
next_key = key_iter.next()
2439
except StopIteration:
2440
# We still want to fully consume the get_record_stream,
2441
# just in case it is not actually finished at this point
1708
chunks = text_chunks.pop(key)
1709
yield ''.join(chunks), key[-1]
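# Editorial sketch (simplified, hypothetical data) of the buffering
# pattern above: records arrive in stream order, are parked in
# text_chunks, and are drained in the caller's requested key order.
#
#   requested = [('rev-1',), ('rev-2',)]
#   parked = {}
#   for key in [('rev-2',), ('rev-1',)]:              # stream order
#       parked[key] = '<xml for %s>' % key[0]
#   texts = [parked.pop(key) for key in requested]    # requested order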
2445
def _deserialise_inventory(self, revision_id, xml):
1711
def deserialise_inventory(self, revision_id, xml):
2446
1712
"""Transform the xml into an inventory object.
2448
1714
:param revision_id: The expected revision id of the inventory.
2449
1715
:param xml: A serialised inventory.
2451
1717
result = self._serializer.read_inventory_from_string(xml, revision_id,
2452
entry_cache=self._inventory_entry_cache,
2453
return_from_cache=self._safe_to_return_from_cache)
1718
entry_cache=self._inventory_entry_cache)
2454
1719
if result.revision_id != revision_id:
2455
1720
raise AssertionError('revision id mismatch %s != %s' % (
2456
1721
result.revision_id, revision_id))
1724
def serialise_inventory(self, inv):
1725
return self._serializer.write_inventory_to_string(inv)
1727
def _serialise_inventory_to_lines(self, inv):
1728
return self._serializer.write_inventory_to_lines(inv)
2459
1730
def get_serializer_format(self):
2460
1731
return self._serializer.format_num
2462
1733
@needs_read_lock
2463
def _get_inventory_xml(self, revision_id):
2464
"""Get serialized inventory as a string."""
2465
texts = self._iter_inventory_xmls([revision_id], 'unordered')
1734
def get_inventory_xml(self, revision_id):
1735
"""Get inventory XML as a file object."""
1736
texts = self._iter_inventory_xmls([revision_id])
2467
1738
text, revision_id = texts.next()
2468
1739
except StopIteration:
2469
1740
raise errors.HistoryMissing(self, 'inventory', revision_id)
2472
def get_rev_id_for_revno(self, revno, known_pair):
2473
"""Return the revision id of a revno, given a later (revno, revid)
2474
pair in the same history.
2476
:return: if found (True, revid). If the available history ran out
2477
before reaching the revno, then this returns
2478
(False, (closest_revno, closest_revid)).
1744
def get_inventory_sha1(self, revision_id):
1745
"""Return the sha1 hash of the inventory entry
2480
known_revno, known_revid = known_pair
2481
partial_history = [known_revid]
2482
distance_from_known = known_revno - revno
2483
if distance_from_known < 0:
2485
'requested revno (%d) is later than given known revno (%d)'
2486
% (revno, known_revno))
2489
self, partial_history, stop_index=distance_from_known)
2490
except errors.RevisionNotPresent, err:
2491
if err.revision_id == known_revid:
2492
# The start revision (known_revid) wasn't found.
2494
# This is a stacked repository with no fallbacks, or there's a
2495
# left-hand ghost. Either way, even though the revision named in
2496
# the error isn't in this repo, we know it's the next step in this
2497
# left-hand history.
2498
partial_history.append(err.revision_id)
2499
if len(partial_history) <= distance_from_known:
2500
# Didn't find enough history to get a revid for the revno.
2501
earliest_revno = known_revno - len(partial_history) + 1
2502
return (False, (earliest_revno, partial_history[-1]))
2503
if len(partial_history) - 1 > distance_from_known:
2504
raise AssertionError('_iter_for_revno returned too much history')
2505
return (True, partial_history[-1])
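# Editorial usage sketch (hypothetical values): walking back from a known
# (revno, revid) pair to an earlier revno on the same left-hand history.
#
#   found, result = repo.get_rev_id_for_revno(10, (15, 'rev-at-15'))
#   if found:
#       revid_for_10 = result
#   else:
#       closest_revno, closest_revid = result   # history ran out early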
1747
return self.get_revision(revision_id).inventory_sha1
2507
1749
def iter_reverse_revision_history(self, revision_id):
2508
1750
"""Iterate backwards through revision ids in the lefthand history
3741
2949
return self.source.revision_ids_to_search_result(result_set)
3744
class InterDifferingSerializer(InterRepository):
2952
class InterPackRepo(InterSameDataRepository):
2953
"""Optimised code paths between Pack based repositories."""
2956
def _get_repo_format_to_test(self):
2957
from bzrlib.repofmt import pack_repo
2958
return pack_repo.RepositoryFormatKnitPack1()
2961
def is_compatible(source, target):
2962
"""Be compatible with known Pack formats.
2964
We don't test for the stores being of specific types because that
2965
could lead to confusing results, and there is no need to be
2968
from bzrlib.repofmt.pack_repo import RepositoryFormatPack
2970
are_packs = (isinstance(source._format, RepositoryFormatPack) and
2971
isinstance(target._format, RepositoryFormatPack))
2972
except AttributeError:
2974
return are_packs and InterRepository._same_model(source, target)
2977
def fetch(self, revision_id=None, pb=None, find_ghosts=False,
2979
"""See InterRepository.fetch()."""
2980
if (len(self.source._fallback_repositories) > 0 or
2981
len(self.target._fallback_repositories) > 0):
2982
# The pack layer is not aware of fallback repositories, so when
2983
# fetching from a stacked repository or into a stacked repository
2984
# we use the generic fetch logic which uses the VersionedFiles
2985
# attributes on repository.
2986
from bzrlib.fetch import RepoFetcher
2987
fetcher = RepoFetcher(self.target, self.source, revision_id,
2988
pb, find_ghosts, fetch_spec=fetch_spec)
2989
mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
2990
self.source, self.source._format, self.target, self.target._format)
2991
if fetch_spec is not None:
2992
revision_id = fetch_spec.start_key
2994
if revision_id is None:
2996
# everything to do - use pack logic
2997
# to fetch from all packs to one without
2998
# inventory parsing etc, IFF nothing to be copied is in the target.
3000
source_revision_ids = frozenset(self.source.all_revision_ids())
3001
revision_ids = source_revision_ids - \
3002
frozenset(self.target_get_parent_map(source_revision_ids))
3003
revision_keys = [(revid,) for revid in revision_ids]
3004
target_pack_collection = self._get_target_pack_collection()
3005
index = target_pack_collection.revision_index.combined_index
3006
present_revision_ids = set(item[1][0] for item in
3007
index.iter_entries(revision_keys))
3008
revision_ids = set(revision_ids) - present_revision_ids
3009
# implementing the TODO will involve:
3010
# - detecting when all of a pack is selected
3011
# - avoiding as much as possible pre-selection, so the
3012
# more-core routines such as create_pack_from_packs can filter in
3013
# a just-in-time fashion. (though having a HEADS list on a
3014
# repository might make this a lot easier, because we could
3015
# sensibly detect 'new revisions' without doing a full index scan.
3016
elif _mod_revision.is_null(revision_id):
3021
revision_ids = self.search_missing_revision_ids(revision_id,
3022
find_ghosts=find_ghosts).get_keys()
3023
except errors.NoSuchRevision:
3024
raise errors.InstallFailed([revision_id])
3025
if len(revision_ids) == 0:
3027
return self._pack(self.source, self.target, revision_ids)
3029
def _pack(self, source, target, revision_ids):
3030
from bzrlib.repofmt.pack_repo import Packer
3031
target_pack_collection = self._get_target_pack_collection()
3032
packs = source._pack_collection.all_packs()
3033
pack = Packer(target_pack_collection, packs, '.fetch',
3034
revision_ids).pack()
3035
if pack is not None:
3036
target_pack_collection._save_pack_names()
3037
copied_revs = pack.get_revision_count()
3038
# Trigger an autopack. This may duplicate effort as we've just done
3039
# a pack creation, but for now it is simpler to think about as
3040
# 'upload data, then repack if needed'.
3042
return (copied_revs, [])
3046
def _autopack(self):
3047
self.target._pack_collection.autopack()
3049
def _get_target_pack_collection(self):
3050
return self.target._pack_collection
3053
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True,
3055
"""See InterRepository.missing_revision_ids().
3057
:param find_ghosts: Find ghosts throughout the ancestry of
3060
rev_specified = (revision_id is not None and fetch_spec is not None)
3061
if not find_ghosts and rev_specified:
3062
if fetch_spec is not None:
3065
return self._walk_to_common_revisions([revision_id])
3066
elif fetch_spec is not None:
3067
raise AssertionError("not implemented yet...")
3068
elif revision_id is not None:
3069
# Find ghosts: search for revisions pointing from one repository to
3070
# the other, and vice versa, anywhere in the history of revision_id.
3071
graph = self.target_get_graph(other_repository=self.source)
3072
searcher = graph._make_breadth_first_searcher([revision_id])
3076
next_revs, ghosts = searcher.next_with_ghosts()
3077
except StopIteration:
3079
if revision_id in ghosts:
3080
raise errors.NoSuchRevision(self.source, revision_id)
3081
found_ids.update(next_revs)
3082
found_ids.update(ghosts)
3083
found_ids = frozenset(found_ids)
3084
# Double query here: should be able to avoid this by changing the
3085
# graph api further.
3086
result_set = found_ids - frozenset(
3087
self.target_get_parent_map(found_ids))
3089
source_ids = self.source.all_revision_ids()
3090
# source_ids is the worst possible case we may need to pull.
3091
# now we want to filter source_ids against what we actually
3092
# have in target, but don't try to check for existence where we know
3093
# we do not have a revision as that would be pointless.
3094
target_ids = set(self.target.all_revision_ids())
3095
result_set = set(source_ids).difference(target_ids)
3096
return self.source.revision_ids_to_search_result(result_set)
3099
class InterDifferingSerializer(InterKnitRepo):
3747
3102
def _get_repo_format_to_test(self):
3751
3106
def is_compatible(source, target):
3752
3107
"""Be compatible with Knit2 source and Knit3 target"""
3753
# This is redundant with format.check_conversion_target(), however that
3754
# raises an exception, and we just want to say "False" as in we won't
3755
# support converting between these formats.
3756
if 'IDS_never' in debug.debug_flags:
3758
if source.supports_rich_root() and not target.supports_rich_root():
3760
if (source._format.supports_tree_reference
3761
and not target._format.supports_tree_reference):
3763
if target._fallback_repositories and target._format.supports_chks:
3764
# IDS doesn't know how to copy CHKs for the parent inventories it
3765
# adds to stacked repos.
3767
if 'IDS_always' in debug.debug_flags:
3769
# Only use this code path for local source and target. IDS does far
3770
# too much IO (both bandwidth and roundtrips) over a network.
3771
if not source.bzrdir.transport.base.startswith('file:///'):
3773
if not target.bzrdir.transport.base.startswith('file:///'):
3108
if source.supports_rich_root() != target.supports_rich_root():
3110
# Ideally, we'd support fetching if the source had no tree references
3111
# even if it supported them...
3112
if (getattr(source, '_format.supports_tree_reference', False) and
3113
not getattr(target, '_format.supports_tree_reference', False)):
3777
def _get_trees(self, revision_ids, cache):
3779
for rev_id in revision_ids:
3781
possible_trees.append((rev_id, cache[rev_id]))
3783
# Not cached, but inventory might be present anyway.
3785
tree = self.source.revision_tree(rev_id)
3786
except errors.NoSuchRevision:
3787
# Nope, parent is ghost.
3790
cache[rev_id] = tree
3791
possible_trees.append((rev_id, tree))
3792
return possible_trees
3794
def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
3117
def _get_delta_for_revision(self, tree, parent_ids, basis_id, cache):
3795
3118
"""Get the best delta and base for this revision.
3797
3120
:return: (basis_id, delta)
3122
possible_trees = [(parent_id, cache[parent_id])
3123
for parent_id in parent_ids
3124
if parent_id in cache]
3125
if len(possible_trees) == 0:
3126
# There either aren't any parents, or the parents aren't in the
3127
# cache, so just use the last converted tree
3128
possible_trees.append((basis_id, cache[basis_id]))
3800
# Generate deltas against each tree, to find the shortest.
3801
texts_possibly_new_in_tree = set()
3802
3130
for basis_id, basis_tree in possible_trees:
3803
3131
delta = tree.inventory._make_delta(basis_tree.inventory)
3804
for old_path, new_path, file_id, new_entry in delta:
3805
if new_path is None:
3806
# This file_id isn't present in the new rev, so we don't
3810
# Rich roots are handled elsewhere...
3812
kind = new_entry.kind
3813
if kind != 'directory' and kind != 'file':
3814
# No text record associated with this inventory entry.
3816
# This is a directory or file that has changed somehow.
3817
texts_possibly_new_in_tree.add((file_id, new_entry.revision))
3818
3132
deltas.append((len(delta), basis_id, delta))
3820
3134
return deltas[0][1:]
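# Editorial note (assumes the omitted lines sort the list): deltas holds
# (len(delta), basis_id, delta) tuples, so after sorting, deltas[0] is the
# shortest candidate and [1:] strips the length, leaving the returned
# (basis_id, delta) pair.
#
#   deltas = [(3, 'rev-b', delta_b), (1, 'rev-a', delta_a)]
#   deltas.sort()
#   basis_id, delta = deltas[0][1:]   # -> ('rev-a', delta_a)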
3822
def _fetch_parent_invs_for_stacking(self, parent_map, cache):
3823
"""Find all parent revisions that are absent, but for which the
3824
inventory is present, and copy those inventories.
3826
This is necessary to preserve correctness when the source is stacked
3827
without fallbacks configured. (Note that in cases like upgrade the
3828
# source may not have _fallback_repositories even though it is
3832
for parents in parent_map.values():
3833
parent_revs.update(parents)
3834
present_parents = self.source.get_parent_map(parent_revs)
3835
absent_parents = set(parent_revs).difference(present_parents)
3836
parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
3837
(rev_id,) for rev_id in absent_parents)
3838
parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
3839
for parent_tree in self.source.revision_trees(parent_inv_ids):
3840
current_revision_id = parent_tree.get_revision_id()
3841
parents_parents_keys = parent_invs_keys_for_stacking[
3842
(current_revision_id,)]
3843
parents_parents = [key[-1] for key in parents_parents_keys]
3844
basis_id = _mod_revision.NULL_REVISION
3845
basis_tree = self.source.revision_tree(basis_id)
3846
delta = parent_tree.inventory._make_delta(basis_tree.inventory)
3847
self.target.add_inventory_by_delta(
3848
basis_id, delta, current_revision_id, parents_parents)
3849
cache[current_revision_id] = parent_tree
3851
def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
3136
def _fetch_batch(self, revision_ids, basis_id, cache):
3852
3137
"""Fetch across a few revisions.
3854
3139
:param revision_ids: The revisions to copy
3855
3140
:param basis_id: The revision_id of a tree that must be in cache, used
3856
3141
as a basis for delta when no other base is available
3857
3142
:param cache: A cache of RevisionTrees that we can use.
3858
:param a_graph: A Graph object to determine the heads() of the
3859
rich-root data stream.
3860
3143
:return: The revision_id of the last converted tree. The RevisionTree
3861
3144
for it will be in cache
3863
3146
# Walk through all revisions; get inventory deltas, copy referenced
3864
3147
# texts that delta references, insert the delta, revision and
3866
root_keys_to_create = set()
3867
3149
text_keys = set()
3868
3150
pending_deltas = []
3869
3151
pending_revisions = []
3870
3152
parent_map = self.source.get_parent_map(revision_ids)
3871
self._fetch_parent_invs_for_stacking(parent_map, cache)
3872
self.source._safe_to_return_from_cache = True
3873
3153
for tree in self.source.revision_trees(revision_ids):
3874
# Find an inventory delta for this revision.
3875
# Find text entries that need to be copied, too.
3876
3154
current_revision_id = tree.get_revision_id()
3877
3155
parent_ids = parent_map.get(current_revision_id, ())
3878
parent_trees = self._get_trees(parent_ids, cache)
3879
possible_trees = list(parent_trees)
3880
if len(possible_trees) == 0:
3881
# There either aren't any parents, or the parents are ghosts,
3882
# so just use the last converted tree.
3883
possible_trees.append((basis_id, cache[basis_id]))
3884
3156
basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
3158
# Find text entries that need to be copied
3159
for old_path, new_path, file_id, entry in delta:
3160
if new_path is not None:
3161
if not (new_path or self.target.supports_rich_root()):
3162
# We don't copy the text for the root node unless the
3163
# target supports_rich_root.
3166
# "if entry.revision == current_revision_id" ?
3167
if entry.revision == current_revision_id:
3168
text_keys.add((file_id, entry.revision))
3886
3169
revision = self.source.get_revision(current_revision_id)
3887
3170
pending_deltas.append((basis_id, delta,
3888
3171
current_revision_id, revision.parent_ids))
3889
if self._converting_to_rich_root:
3890
self._revision_id_to_root_id[current_revision_id] = \
3892
# Determine which texts are in present in this revision but not in
3893
# any of the available parents.
3894
texts_possibly_new_in_tree = set()
3895
for old_path, new_path, file_id, entry in delta:
3896
if new_path is None:
3897
# This file_id isn't present in the new rev
3901
if not self.target.supports_rich_root():
3902
# The target doesn't support rich root, so we don't
3905
if self._converting_to_rich_root:
3906
# This can't be copied normally, we have to insert
3908
root_keys_to_create.add((file_id, entry.revision))
3911
texts_possibly_new_in_tree.add((file_id, entry.revision))
3912
for basis_id, basis_tree in possible_trees:
3913
basis_inv = basis_tree.inventory
3914
for file_key in list(texts_possibly_new_in_tree):
3915
file_id, file_revision = file_key
3917
entry = basis_inv[file_id]
3918
except errors.NoSuchId:
3920
if entry.revision == file_revision:
3921
texts_possibly_new_in_tree.remove(file_key)
3922
text_keys.update(texts_possibly_new_in_tree)
3923
3172
pending_revisions.append(revision)
3924
3173
cache[current_revision_id] = tree
3925
3174
basis_id = current_revision_id
3926
self.source._safe_to_return_from_cache = False
3927
3175
# Copy file texts
3928
3176
from_texts = self.source.texts
3929
3177
to_texts = self.target.texts
3930
if root_keys_to_create:
3931
root_stream = _mod_fetch._new_root_data_stream(
3932
root_keys_to_create, self._revision_id_to_root_id, parent_map,
3933
self.source, graph=a_graph)
3934
to_texts.insert_record_stream(root_stream)
3935
3178
to_texts.insert_record_stream(from_texts.get_record_stream(
3936
3179
text_keys, self.target._format._fetch_order,
3937
3180
not self.target._format._fetch_uses_deltas))
3938
# insert inventory deltas
3939
3182
for delta in pending_deltas:
3940
3183
self.target.add_inventory_by_delta(*delta)
3941
if self.target._fallback_repositories:
3942
# Make sure this stacked repository has all the parent inventories
3943
# for the new revisions that we are about to insert. We do this
3944
# before adding the revisions so that no revision is added until
3945
# all the inventories it may depend on are added.
3946
# Note that this is overzealous, as we may have fetched these in an
3949
revision_ids = set()
3950
for revision in pending_revisions:
3951
revision_ids.add(revision.revision_id)
3952
parent_ids.update(revision.parent_ids)
3953
parent_ids.difference_update(revision_ids)
3954
parent_ids.discard(_mod_revision.NULL_REVISION)
3955
parent_map = self.source.get_parent_map(parent_ids)
3956
# we iterate over parent_map and not parent_ids because we don't
3957
# want to try copying any revision which is a ghost
3958
for parent_tree in self.source.revision_trees(parent_map):
3959
current_revision_id = parent_tree.get_revision_id()
3960
parents_parents = parent_map[current_revision_id]
3961
possible_trees = self._get_trees(parents_parents, cache)
3962
if len(possible_trees) == 0:
3963
# There either aren't any parents, or the parents are
3964
# ghosts, so just use the last converted tree.
3965
possible_trees.append((basis_id, cache[basis_id]))
3966
basis_id, delta = self._get_delta_for_revision(parent_tree,
3967
parents_parents, possible_trees)
3968
self.target.add_inventory_by_delta(
3969
basis_id, delta, current_revision_id, parents_parents)
3970
3184
# insert signatures and revisions
3971
3185
for revision in pending_revisions:
4087
3268
return basis_id, basis_tree
3271
class InterOtherToRemote(InterRepository):
3272
"""An InterRepository that simply delegates to the 'real' InterRepository
3273
calculated for (source, target._real_repository).
3276
_walk_to_common_revisions_batch_size = 50
3278
def __init__(self, source, target):
3279
InterRepository.__init__(self, source, target)
3280
self._real_inter = None
3283
def is_compatible(source, target):
3284
if isinstance(target, remote.RemoteRepository):
3288
def _ensure_real_inter(self):
3289
if self._real_inter is None:
3290
self.target._ensure_real()
3291
real_target = self.target._real_repository
3292
self._real_inter = InterRepository.get(self.source, real_target)
3293
# Make _real_inter use the RemoteRepository for get_parent_map
3294
self._real_inter.target_get_graph = self.target.get_graph
3295
self._real_inter.target_get_parent_map = self.target.get_parent_map
3297
def copy_content(self, revision_id=None):
3298
self._ensure_real_inter()
3299
self._real_inter.copy_content(revision_id=revision_id)
3301
def fetch(self, revision_id=None, pb=None, find_ghosts=False,
3303
self._ensure_real_inter()
3304
return self._real_inter.fetch(revision_id=revision_id, pb=pb,
3305
find_ghosts=find_ghosts, fetch_spec=fetch_spec)
3308
def _get_repo_format_to_test(self):
3312
class InterRemoteToOther(InterRepository):
3314
def __init__(self, source, target):
3315
InterRepository.__init__(self, source, target)
3316
self._real_inter = None
3319
def is_compatible(source, target):
3320
if not isinstance(source, remote.RemoteRepository):
3322
return InterRepository._same_model(source, target)
3324
def _ensure_real_inter(self):
3325
if self._real_inter is None:
3326
self.source._ensure_real()
3327
real_source = self.source._real_repository
3328
self._real_inter = InterRepository.get(real_source, self.target)
3331
def fetch(self, revision_id=None, pb=None, find_ghosts=False,
3333
"""See InterRepository.fetch()."""
3334
# Always fetch using the generic streaming fetch code, to allow
3335
# streaming fetching from remote servers.
3336
from bzrlib.fetch import RepoFetcher
3337
fetcher = RepoFetcher(self.target, self.source, revision_id,
3338
pb, find_ghosts, fetch_spec=fetch_spec)
3340
def copy_content(self, revision_id=None):
3341
self._ensure_real_inter()
3342
self._real_inter.copy_content(revision_id=revision_id)
3345
def _get_repo_format_to_test(self):
3350
class InterPackToRemotePack(InterPackRepo):
3351
"""A specialisation of InterPackRepo for a target that is a
3354
This will use the get_parent_map RPC rather than plain readvs, and also
3355
uses an RPC for autopacking.
3358
_walk_to_common_revisions_batch_size = 50
3361
def is_compatible(source, target):
3362
from bzrlib.repofmt.pack_repo import RepositoryFormatPack
3363
if isinstance(source._format, RepositoryFormatPack):
3364
if isinstance(target, remote.RemoteRepository):
3365
target._format._ensure_real()
3366
if isinstance(target._format._custom_format,
3367
RepositoryFormatPack):
3368
if InterRepository._same_model(source, target):
3372
def _autopack(self):
3373
self.target.autopack()
3376
def fetch(self, revision_id=None, pb=None, find_ghosts=False,
3378
"""See InterRepository.fetch()."""
3379
# Always fetch using the generic streaming fetch code, to allow
3380
# streaming fetching into remote servers.
3381
from bzrlib.fetch import RepoFetcher
3382
fetcher = RepoFetcher(self.target, self.source, revision_id,
3383
pb, find_ghosts, fetch_spec=fetch_spec)
3385
def _get_target_pack_collection(self):
3386
return self.target._real_repository._pack_collection
3389
def _get_repo_format_to_test(self):
4090
3393
InterRepository.register_optimiser(InterDifferingSerializer)
4091
3394
InterRepository.register_optimiser(InterSameDataRepository)
4092
3395
InterRepository.register_optimiser(InterWeaveRepo)
4093
3396
InterRepository.register_optimiser(InterKnitRepo)
3397
InterRepository.register_optimiser(InterPackRepo)
3398
InterRepository.register_optimiser(InterOtherToRemote)
3399
InterRepository.register_optimiser(InterRemoteToOther)
3400
InterRepository.register_optimiser(InterPackToRemotePack)
4096
3403
class CopyConverter(object):
4385
3640
# missing keys can handle suspending a write group).
4386
3641
write_group_tokens = self.target_repo.suspend_write_group()
4387
3642
return write_group_tokens, missing_keys
4388
hint = self.target_repo.commit_write_group()
4389
if (to_serializer != src_serializer and
4390
self.target_repo._format.pack_compresses):
4391
self.target_repo.pack(hint=hint)
3643
self.target_repo.commit_write_group()
4392
3644
return [], set()
4394
def _extract_and_insert_inventory_deltas(self, substream, serializer):
4395
target_rich_root = self.target_repo._format.rich_root_data
4396
target_tree_refs = self.target_repo._format.supports_tree_reference
4397
for record in substream:
4398
# Insert the delta directly
4399
inventory_delta_bytes = record.get_bytes_as('fulltext')
4400
deserialiser = inventory_delta.InventoryDeltaDeserializer()
4402
parse_result = deserialiser.parse_text_bytes(
4403
inventory_delta_bytes)
4404
except inventory_delta.IncompatibleInventoryDelta, err:
4405
trace.mutter("Incompatible delta: %s", err.msg)
4406
raise errors.IncompatibleRevision(self.target_repo._format)
4407
basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
4408
revision_id = new_id
4409
parents = [key[0] for key in record.parents]
4410
self.target_repo.add_inventory_by_delta(
4411
basis_id, inv_delta, revision_id, parents)
4413
def _extract_and_insert_inventories(self, substream, serializer,
3646
def _extract_and_insert_inventories(self, substream, serializer):
4415
3647
"""Generate a new inventory versionedfile in target, converting data.
4417
3649
The inventory is retrieved from the source, (deserializing it), and
4418
3650
stored in the target (reserializing it in a different format).
4420
target_rich_root = self.target_repo._format.rich_root_data
4421
target_tree_refs = self.target_repo._format.supports_tree_reference
4422
3652
for record in substream:
4423
# It's not a delta, so it must be a fulltext in the source
4424
# serializer's format.
4425
3653
bytes = record.get_bytes_as('fulltext')
4426
3654
revision_id = record.key[0]
4427
3655
inv = serializer.read_inventory_from_string(bytes, revision_id)
4428
3656
parents = [key[0] for key in record.parents]
4429
3657
self.target_repo.add_inventory(revision_id, inv, parents)
4430
# No need to keep holding this full inv in memory when the rest of
4431
# the substream is likely to be all deltas.
4434
3659
def _extract_and_insert_revisions(self, substream, serializer):
4435
3660
for record in substream:
4599
3808
return (not self.from_repository._format.rich_root_data and
4600
3809
self.to_format.rich_root_data)
4602
def _get_inventory_stream(self, revision_ids, missing=False):
4603
from_format = self.from_repository._format
4604
if (from_format.supports_chks and self.to_format.supports_chks and
4605
from_format.network_name() == self.to_format.network_name()):
4606
raise AssertionError(
4607
"this case should be handled by GroupCHKStreamSource")
4608
elif 'forceinvdeltas' in debug.debug_flags:
4609
return self._get_convertable_inventory_stream(revision_ids,
4610
delta_versus_null=missing)
4611
elif from_format.network_name() == self.to_format.network_name():
4613
return self._get_simple_inventory_stream(revision_ids,
4615
elif (not from_format.supports_chks and not self.to_format.supports_chks
4616
and from_format._serializer == self.to_format._serializer):
4617
# Essentially the same format.
4618
return self._get_simple_inventory_stream(revision_ids,
4621
# Any time we switch serializations, we want to use an
4622
# inventory-delta based approach.
4623
return self._get_convertable_inventory_stream(revision_ids,
4624
delta_versus_null=missing)
4626
def _get_simple_inventory_stream(self, revision_ids, missing=False):
4627
# NB: This currently reopens the inventory weave in source;
4628
# using a single stream interface instead would avoid this.
4629
from_weave = self.from_repository.inventories
4631
delta_closure = True
4633
delta_closure = not self.delta_on_metadata()
4634
yield ('inventories', from_weave.get_record_stream(
4635
[(rev_id,) for rev_id in revision_ids],
4636
self.inventory_fetch_order(), delta_closure))
4638
def _get_convertable_inventory_stream(self, revision_ids,
4639
delta_versus_null=False):
4640
# The two formats are sufficiently different that there is no fast
4641
# path, so we need to send just inventory deltas, which any
4642
# sufficiently modern client can insert into any repository.
4643
# The StreamSink code expects to be able to
4644
# convert on the target, so we need to put bytes-on-the-wire that can
4645
# be converted. That means inventory deltas (if the remote is <1.19,
4646
# RemoteStreamSink will fallback to VFS to insert the deltas).
4647
yield ('inventory-deltas',
4648
self._stream_invs_as_deltas(revision_ids,
4649
delta_versus_null=delta_versus_null))
4651
def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
4652
"""Return a stream of inventory-deltas for the given rev ids.
4654
:param revision_ids: The list of inventories to transmit
4655
:param delta_versus_null: Don't try to find a minimal delta for this
4656
entry, instead compute the delta versus the NULL_REVISION. This
4657
effectively streams a complete inventory. Used for stuff like
4658
filling in missing parents, etc.
4660
from_repo = self.from_repository
4661
revision_keys = [(rev_id,) for rev_id in revision_ids]
4662
parent_map = from_repo.inventories.get_parent_map(revision_keys)
4663
# XXX: possibly repos could implement a more efficient iter_inv_deltas
4665
inventories = self.from_repository.iter_inventories(
4666
revision_ids, 'topological')
4667
format = from_repo._format
4668
invs_sent_so_far = set([_mod_revision.NULL_REVISION])
4669
inventory_cache = lru_cache.LRUCache(50)
4670
null_inventory = from_repo.revision_tree(
4671
_mod_revision.NULL_REVISION).inventory
4672
# XXX: ideally the rich-root/tree-refs flags would be per-revision, not
4673
# per-repo (e.g. streaming a non-rich-root revision out of a rich-root
4674
# repo back into a non-rich-root repo ought to be allowed)
4675
serializer = inventory_delta.InventoryDeltaSerializer(
4676
versioned_root=format.rich_root_data,
4677
tree_references=format.supports_tree_reference)
4678
for inv in inventories:
4679
key = (inv.revision_id,)
4680
parent_keys = parent_map.get(key, ())
4682
if not delta_versus_null and parent_keys:
4683
# The caller did not ask for complete inventories and we have
4684
# some parents that we can delta against. Make a delta against
4685
# each parent so that we can find the smallest.
4686
parent_ids = [parent_key[0] for parent_key in parent_keys]
4687
for parent_id in parent_ids:
4688
if parent_id not in invs_sent_so_far:
4689
# We don't know that the remote side has this basis, so
4692
if parent_id == _mod_revision.NULL_REVISION:
4693
parent_inv = null_inventory
4695
parent_inv = inventory_cache.get(parent_id, None)
4696
if parent_inv is None:
4697
parent_inv = from_repo.get_inventory(parent_id)
4698
candidate_delta = inv._make_delta(parent_inv)
4699
if (delta is None or
4700
len(delta) > len(candidate_delta)):
4701
delta = candidate_delta
4702
basis_id = parent_id
4704
# Either none of the parents ended up being suitable, or we
4705
# were asked to delta against NULL
4706
basis_id = _mod_revision.NULL_REVISION
4707
delta = inv._make_delta(null_inventory)
4708
invs_sent_so_far.add(inv.revision_id)
4709
inventory_cache[inv.revision_id] = inv
4710
delta_serialized = ''.join(
4711
serializer.delta_to_lines(basis_id, key[-1], delta))
4712
yield versionedfile.FulltextContentFactory(
4713
key, parent_keys, None, delta_serialized)
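# Editorial note (simplified): each inventory is shipped as the smallest
# delta against a basis the receiver is known to have (a parent already
# sent, or NULL_REVISION when delta_versus_null is set), serialised to
# lines and wrapped as a fulltext record, exactly as done just above:
#
#   lines = serializer.delta_to_lines(basis_id, inv.revision_id, delta)
#   record = versionedfile.FulltextContentFactory(
#       (inv.revision_id,), parent_keys, None, ''.join(lines))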
4716
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
4717
stop_revision=None):
4718
"""Extend the partial history to include a given index
4720
If a stop_index is supplied, stop when that index has been reached.
4721
If a stop_revision is supplied, stop when that revision is
4722
encountered. Otherwise, stop when the beginning of history is
4725
:param stop_index: The index which should be present. When it is
4726
present, history extension will stop.
4727
:param stop_revision: The revision id which should be present. When
4728
it is encountered, history extension will stop.
4730
start_revision = partial_history_cache[-1]
4731
iterator = repo.iter_reverse_revision_history(start_revision)
4733
# Skip the last revision in the list
4736
if (stop_index is not None and
4737
len(partial_history_cache) > stop_index):
4739
if partial_history_cache[-1] == stop_revision:
4741
revision_id = iterator.next()
4742
partial_history_cache.append(revision_id)
4743
except StopIteration: