        # The old API returned a list, should this actually be a set?
        return parent_map.keys()
    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()
    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes': set(), 'inventories': set(), 'texts': set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
               or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object, current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break
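    # Illustrative sketch (not part of bzrlib): the loop above drains
    # checker.pending_keys in waves, because checking one kind of record can
    # queue up keys of another kind. The same fixed-point pattern in
    # isolation, with a hypothetical check_one callback:
    def _example_drain_pending(initial_keys, check_one):
        """Check keys in waves; check_one(key) returns any newly referenced keys."""
        pending = set(initial_keys)
        while pending:
            current, pending = pending, set()
            for key in current:
                pending.update(check_one(key))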
    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))
    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
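    # Illustrative sketch (not part of bzrlib): the check above amounts to
    # hashing whatever byte representation the record offers and comparing it
    # with the sha1 recorded by the referencing inventory. With plain hashlib:
    def _example_verify_text(content_chunks, expected_sha1):
        """Return True if the concatenated chunks hash to expected_sha1."""
        import hashlib
        s = hashlib.sha1()
        for chunk in content_chunks:
            s.update(chunk)
        return s.hexdigest() == expected_sha1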
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]
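    # Illustrative sketch (not part of bzrlib): per the docstring above, a
    # repository that needs to validate data on every revision read can
    # override get_revisions() and delegate to _get_revisions() for the actual
    # retrieval. The mixin below and its _verify_revision hook are
    # hypothetical, not real bzrlib classes:
    class _ExampleVerifyingRepositoryMixin(object):

        def get_revisions(self, revision_ids):
            # Retrieve as usual, then run a per-revision verification hook.
            revs = self._get_revisions(revision_ids)
            for rev in revs:
                self._verify_revision(rev)  # hypothetical check hook
            return revs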
    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)
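    # Illustrative sketch (not part of bzrlib): _iter_revisions() reports
    # absent revisions as (revid, None) instead of raising, which lets a
    # caller separate what is present from what is missing in one pass:
    def _example_partition_revisions(repo, revision_ids):
        """Split revision_ids into (present_revision_objects, missing_revids)."""
        present, missing = [], []
        for revid, rev in repo._iter_revisions(revision_ids):
            if rev is None:
                missing.append(revid)
            else:
                present.append(rev)
        return present, missing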
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()
    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.
        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)
    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)
    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
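    # Illustrative sketch (not part of bzrlib): the ordering dance above is a
    # general pattern - consume an unordered stream, buffer out-of-order
    # results, and emit them in the caller's requested order. In isolation:
    def _example_emit_in_order(requested_keys, unordered_pairs):
        """Yield values for requested_keys, in order, from unordered (key, value) pairs."""
        buffered = {}
        key_iter = iter(requested_keys)
        next_key = next(key_iter, None)
        for key, value in unordered_pairs:
            buffered[key] = value
            while next_key is not None and next_key in buffered:
                yield buffered.pop(next_key)
                next_key = next(key_iter, None)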
    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result
    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)
    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry.
        """
        return self.get_revision(revision_id).inventory_sha1
    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.
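    # Illustrative sketch (not part of bzrlib): given a later (revno, revid)
    # pair on the same mainline, one way to locate the revid for an earlier
    # revno is to walk left-hand parents backwards, decrementing as we go.
    # The loop below is an assumption about the approach, not the actual
    # implementation; get_parent_map mirrors the Repository API used above.
    def _example_rev_id_for_revno(repo, revno, known_pair):
        known_revno, revid = known_pair
        while known_revno > revno:
            parents = repo.get_parent_map([revid]).get(revid, ())
            if not parents:
                return None  # mainline is shorter than expected
            revid = parents[0]  # follow the left-hand (mainline) parent
            known_revno -= 1
        return revid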
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True
    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees
    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere, so skip the root here.
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
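    # Illustrative sketch (not part of bzrlib): the selection above is simply
    # "make a delta against every candidate basis and keep the smallest one".
    # The same idea over a generic make_delta callable:
    def _example_smallest_delta(make_delta, candidate_bases):
        """Return (basis_id, delta) for the basis producing the fewest changes."""
        deltas = []
        for basis_id, basis_tree in candidate_bases:
            delta = make_delta(basis_tree)
            deltas.append((len(delta), basis_id, delta))
        deltas.sort(key=lambda entry: entry[0])
        length, basis_id, delta = deltas[0]
        return basis_id, delta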
    def _get_parent_keys(self, root_key, parent_map):
        """Get the parent keys for a given root id."""
        root_id, rev_id = root_key
        # Include direct parents of the revision, but only if they used
        # the same root_id and are heads.
        parent_keys = []
        for parent_id in parent_map[rev_id]:
            if parent_id == _mod_revision.NULL_REVISION:
                continue
            if parent_id not in self._revision_id_to_root_id:
                # We probably didn't read this revision, go spend the
                # extra effort to actually check
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # Ghost, fill out _revision_id_to_root_id in case we
                    # encounter this again.
                    # But set parent_root_id to None since we don't really know
                    parent_root_id = None
                else:
                    parent_root_id = tree.get_root_id()
                self._revision_id_to_root_id[parent_id] = None
            else:
                parent_root_id = self._revision_id_to_root_id[parent_id]
            if root_id == parent_root_id:
                # With stacking we _might_ want to refer to a non-local
                # revision, but this code path only applies when we have the
                # full content available, so ghosts really are ghosts, not just
                # the edge of local data.
                parent_keys.append((parent_id,))
            else:
                # root_id may be in the parent anyway.
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # ghost, can't refer to it.
                    pass
                else:
                    try:
                        parent_keys.append((tree.inventory[root_id].revision,))
                    except errors.NoSuchId:
                        pass
        g = graph.Graph(self.source.revisions)
        heads = g.heads(parent_keys)
        selected_keys = []
        for key in parent_keys:
            if key in heads and key not in selected_keys:
                selected_keys.append(key)
        return tuple([(root_id,) + key for key in selected_keys])
    def _new_root_data_stream(self, root_keys_to_create, parent_map):
        for root_key in root_keys_to_create:
            parent_keys = self._get_parent_keys(root_key, parent_map)
            yield versionedfile.FulltextContentFactory(root_key,
                parent_keys, None, '')
    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree
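    # Illustrative sketch (not part of bzrlib): the candidate selection above
    # is set arithmetic over the parent graph - gather every referenced
    # parent, drop those whose revision objects are present, and keep those
    # that still have an inventory. All three inputs here are plain sets/dicts
    # standing in for the repository queries used above:
    def _example_stacking_candidates(parent_map, present_revids, inventory_revids):
        referenced = set()
        for parents in parent_map.values():
            referenced.update(parents)
        absent = referenced.difference(present_revids)
        return absent.intersection(inventory_revids)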
    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                kind = entry.kind
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
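    # Illustrative sketch (not part of bzrlib): the per-revision text selection
    # above keeps a (file_id, revision) key only if no candidate basis already
    # carries that exact text. With each basis reduced to a plain
    # {file_id: last_changed_revision} mapping (an assumption for brevity):
    def _example_new_text_keys(candidate_keys, basis_file_revisions):
        new_keys = set(candidate_keys)
        for basis in basis_file_revisions:
            for file_id, file_revision in list(new_keys):
                if basis.get(file_id) == file_revision:
                    new_keys.discard((file_id, file_revision))
        return new_keys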
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)
    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
            and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)
    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))
    def _get_chk_inventory_stream(self, revision_ids):
        """Fetch the inventory texts, along with the associated chk maps."""
        # We want an inventory outside of the search set, so that we can filter
        # out uninteresting chk pages. For now we use
        # _find_revision_outside_set, but if we had a Search with cut_revs, we
        # could use that instead.
        start_rev_id = self.from_repository._find_revision_outside_set(
            revision_ids)
        start_rev_key = (start_rev_id,)
        inv_keys_to_fetch = [(rev_id,) for rev_id in revision_ids]
        if start_rev_id != _mod_revision.NULL_REVISION:
            inv_keys_to_fetch.append((start_rev_id,))
        # Any repo that supports chk_bytes must also support out-of-order
        # insertion. At least, that is how we expect it to work
        # We use get_record_stream instead of iter_inventories because we want
        # to be able to insert the stream as well. We could instead fetch
        # allowing deltas, and then iter_inventories, but we don't know whether
        # source or target is more 'local' anyway.
        inv_stream = self.from_repository.inventories.get_record_stream(
            inv_keys_to_fetch, 'unordered',
            True) # We need them as full-texts so we can find their references
        uninteresting_chk_roots = set()
        interesting_chk_roots = set()
        def filter_inv_stream(inv_stream):
            for idx, record in enumerate(inv_stream):
                ### child_pb.update('fetch inv', idx, len(inv_keys_to_fetch))
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(
                    self.from_repository.chk_bytes, bytes, record.key)
                if record.key == start_rev_key:
                    uninteresting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        uninteresting_chk_roots.add(p_id_map.key())
                else:
                    yield record
                    interesting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        interesting_chk_roots.add(p_id_map.key())
        ### pb.update('fetch inventory', 0, 2)
        yield ('inventories', filter_inv_stream(inv_stream))
        # Now that we have worked out all of the interesting root nodes, grab
        # all of the interesting pages and insert them
        ### pb.update('fetch inventory', 1, 2)
        interesting = chk_map.iter_interesting_nodes(
            self.from_repository.chk_bytes, interesting_chk_roots,
            uninteresting_chk_roots)
        def to_stream_adapter():
            """Adapt the iter_interesting_nodes result to a single stream.

            iter_interesting_nodes returns records as it processes them, along
            with keys. However, we only want to return the records themselves.
            """
            for record, items in interesting:
                if record is not None:
                    yield record
        # XXX: We could instead call get_record_stream(records.keys())
        #      ATM, this will always insert the records as fulltexts, and
        #      requires that you can hang on to records once you have gone
        #      on to the next one. Further, it causes the target to
        #      recompress the data. Testing shows it to be faster than
        #      requesting the records again, though.
        yield ('chk_bytes', to_stream_adapter())
        ### pb.update('fetch inventory', 2, 2)
def _get_convertable_inventory_stream(self, revision_ids):
4323
# XXX: One of source or target is using chks, and they don't have
4324
# compatible serializations. The StreamSink code expects to be
4325
# able to convert on the target, so we need to put
4326
# bytes-on-the-wire that can be converted
4327
yield ('inventories', self._stream_invs_as_fulltexts(revision_ids))
4329
def _stream_invs_as_fulltexts(self, revision_ids):
4610
self.inventory_fetch_order(), delta_closure))
4612
def _get_convertable_inventory_stream(self, revision_ids,
4613
delta_versus_null=False):
4614
# The two formats are sufficiently different that there is no fast
4615
# path, so we need to send just inventorydeltas, which any
4616
# sufficiently modern client can insert into any repository.
4617
# The StreamSink code expects to be able to
4618
# convert on the target, so we need to put bytes-on-the-wire that can
4619
# be converted. That means inventory deltas (if the remote is <1.19,
4620
# RemoteStreamSink will fallback to VFS to insert the deltas).
4621
yield ('inventory-deltas',
4622
self._stream_invs_as_deltas(revision_ids,
4623
delta_versus_null=delta_versus_null))
    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
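    # Illustrative sketch (not part of bzrlib): the loop above only considers
    # a parent as a delta basis if it has already been sent in this stream
    # (so the receiver is known to have it); among those it then keeps the
    # smallest delta. The eligibility filter alone reduces to:
    def _example_eligible_bases(parent_ids, sent_so_far):
        """Return the parents the receiver already has; empty means delta against null."""
        return [parent_id for parent_id in parent_ids if parent_id in sent_so_far]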
def _iter_for_revno(repo, partial_history_cache, stop_index=None,