        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes': set(), 'inventories': set(), 'texts': set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
               or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object,
                                current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break
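
    # Illustrative sketch (not part of bzrlib): the loop above is a worklist
    # algorithm. Checking one batch of keys may enqueue further keys on
    # checker.pending_keys, which are drained on the next pass, roughly:
    #
    #   pending = dict(checker.pending_keys)
    #   while pending:
    #       current, pending = pending, {}
    #       for key in current:
    #           pass  # checking a key may add new entries to `pending`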

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))
        return None

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))
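
    # Example (illustrative): the two storage kinds above hash identically
    # for the same content, since sha_strings() hashes the concatenation:
    #
    #   from bzrlib import osutils
    #   assert (osutils.sha_strings(['ab', 'cd']) ==
    #           osutils.sha_string('abcd'))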

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]

    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)
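
    # Usage sketch (illustrative; assumes `repo` is a read-locked Repository
    # and both revision ids exist):
    #
    #   rev_a, rev_b = repo.get_revisions(['rev-a', 'rev-b'])
    #   print rev_a.revision_id, rev_a.parent_ids
    #
    # get_revisions() raises NoSuchRevision for absent ids, whereas
    # _iter_revisions() yields (revid, None) and leaves that policy to the
    # caller.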

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas."""

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break
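
    # Illustrative sketch (not a bzrlib API): the order_as_requested branch
    # above is a generic "reorder an unordered stream" pattern; the real code
    # additionally keeps consuming the stream after the keys run out:
    #
    #   def reorder(pairs, keys):
    #       buffered = {}
    #       key_iter = iter(keys)
    #       next_key = key_iter.next()
    #       for key, value in pairs:
    #           buffered[key] = value
    #           while next_key in buffered:
    #               yield buffered.pop(next_key)
    #               try:
    #                   next_key = key_iter.next()
    #               except StopIteration:
    #                   return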

    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result
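
    # Usage sketch (illustrative): the revision id is passed in redundantly
    # and then verified, so a mismatched index entry cannot silently hand
    # back the wrong inventory:
    #
    #   xml = repo._get_inventory_xml('rev-a')
    #   inv = repo._deserialise_inventory('rev-a', xml)
    #   # raises AssertionError if the serialised text was for another
    #   # revision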

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.
        """

        return InterRepository._same_model(source, target)

class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplemented):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # We want all revisions to satisfy revision_id in source,
        # but we don't want to stat every file here and there.
        # So we want all revisions the other side needs to satisfy
        # revision_id checked, but not those that we have locally.
        # The first thing is therefore to get a subset of the revisions that
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # This is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # If we used get_ancestry to determine source_ids, then we are
            # assured all revisions referenced are present, as they are
            # installed in topological order; and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # If we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to
            # validate that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
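
    # Worked example (illustrative) of the set algebra above:
    #
    #   source_ids_set = set(['a', 'b', 'c', 'd'])
    #   target_ids = set(['b', 'd', 'e'])
    #   possibly_present = target_ids.intersection(source_ids_set)  # b, d
    #   # suppose checking the revision records shows only 'b' is real:
    #   actually_present = set(['b'])
    #   required = source_ids_set.difference(actually_present)      # a, c, d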

class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # If we used get_ancestry to determine source_ids, then we are
            # assured all revisions referenced are present, as they are
            # installed in topological order; and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # If we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to
            # validate that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)

class InterDifferingSerializer(InterRepository):

    @staticmethod
    def is_compatible(source, target):
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
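
    # Illustrative example (not part of bzrlib): sorting the
    # (len(delta), basis_id, delta) tuples and taking deltas[0][1:] selects
    # the basis whose delta touches the fewest inventory entries:
    #
    #   deltas = [(3, 'basis-b', delta_b), (1, 'basis-a', delta_a)]
    #   deltas.sort()
    #   basis_id, delta = deltas[0][1:]   # -> 'basis-a', delta_a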

    def _get_parent_keys(self, root_key, parent_map):
        """Get the parent keys for a given root id."""
        root_id, rev_id = root_key
        # Include direct parents of the revision, but only if they used
        # the same root_id and are heads.
        parent_keys = []
        for parent_id in parent_map[rev_id]:
            if parent_id == _mod_revision.NULL_REVISION:
                continue
            if parent_id not in self._revision_id_to_root_id:
                # We probably didn't read this revision, go spend the
                # extra effort to actually check
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # Ghost, fill out _revision_id_to_root_id in case we
                    # encounter this again.
                    # But set parent_root_id to None since we don't really know
                    parent_root_id = None
                else:
                    parent_root_id = tree.get_root_id()
                self._revision_id_to_root_id[parent_id] = None
            else:
                parent_root_id = self._revision_id_to_root_id[parent_id]
            if root_id == parent_root_id:
                # With stacking we _might_ want to refer to a non-local
                # revision, but this code path only applies when we have the
                # full content available, so ghosts really are ghosts, not just
                # the edge of local data.
                parent_keys.append((parent_id,))
            else:
                # root_id may be in the parent anyway.
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # ghost, can't refer to it.
                    pass
                else:
                    try:
                        parent_keys.append((tree.inventory[root_id].revision,))
                    except errors.NoSuchId:
                        # not in the tree
                        pass
        g = graph.Graph(self.source.revisions)
        heads = g.heads(parent_keys)
        selected_keys = []
        for key in parent_keys:
            if key in heads and key not in selected_keys:
                selected_keys.append(key)
        return tuple([(root_id,)+ key for key in selected_keys])
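
    # Illustrative example of the heads() filtering above (assuming p1 is an
    # ancestor of p2 in source.revisions):
    #
    #   g = graph.Graph(self.source.revisions)
    #   g.heads([('p1',), ('p2',)])   # -> set([('p2',)])
    #
    # so a redundant ancestor never becomes a second parent of the new root
    # text.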

    def _new_root_data_stream(self, root_keys_to_create, parent_map):
        for root_key in root_keys_to_create:
            parent_keys = self._get_parent_keys(root_key, parent_map)
            yield versionedfile.FulltextContentFactory(root_key,
                parent_keys, None, '')

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                kind = entry.kind
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
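
    # Usage sketch (illustrative; bzrlib's real driver is
    # _fetch_all_revisions, and _get_basis is assumed here): conversion walks
    # the revisions in topological order, in batches, threading basis_id and
    # a bounded tree cache through each call:
    #
    #   basis_id, basis_tree = self._get_basis(revision_ids[0])
    #   cache = lru_cache.LRUCache(100)
    #   cache[basis_id] = basis_tree
    #   for offset in range(0, len(revision_ids), batch_size):
    #       batch = revision_ids[offset:offset + batch_size]
    #       basis_id = self._fetch_batch(batch, basis_id, cache)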

        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
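
    # Summary of the dispatch above (illustrative):
    #
    #   chk -> chk with same network name   : AssertionError (handled by
    #                                         GroupCHKStreamSource instead)
    #   'forceinvdeltas' debug flag         : inventory-deltas stream
    #   same network name                   : raw inventory records
    #   non-chk -> non-chk, same serializer : raw inventory records
    #   everything else                     : inventory-deltas stream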
4271
def _get_simple_inventory_stream(self, revision_ids):
4429
def _get_simple_inventory_stream(self, revision_ids, missing=False):
4430
# NB: This currently reopens the inventory weave in source;
4431
# using a single stream interface instead would avoid this.
4272
4432
from_weave = self.from_repository.inventories
4434
delta_closure = True
4436
delta_closure = not self.delta_on_metadata()
4273
4437
yield ('inventories', from_weave.get_record_stream(
4274
4438
[(rev_id,) for rev_id in revision_ids],
4275
self.inventory_fetch_order(),
4276
not self.delta_on_metadata()))

    def _get_chk_inventory_stream(self, revision_ids):
        """Fetch the inventory texts, along with the associated chk maps."""
        # We want an inventory outside of the search set, so that we can filter
        # out uninteresting chk pages. For now we use
        # _find_revision_outside_set, but if we had a Search with cut_revs, we
        # could use that instead.
        start_rev_id = self.from_repository._find_revision_outside_set(
            revision_ids)
        start_rev_key = (start_rev_id,)
        inv_keys_to_fetch = [(rev_id,) for rev_id in revision_ids]
        if start_rev_id != _mod_revision.NULL_REVISION:
            inv_keys_to_fetch.append((start_rev_id,))
        # Any repo that supports chk_bytes must also support out-of-order
        # insertion. At least, that is how we expect it to work
        # We use get_record_stream instead of iter_inventories because we want
        # to be able to insert the stream as well. We could instead fetch
        # allowing deltas, and then iter_inventories, but we don't know whether
        # source or target is more 'local' anyway.
        inv_stream = self.from_repository.inventories.get_record_stream(
            inv_keys_to_fetch, 'unordered',
            True) # We need them as full-texts so we can find their references
        uninteresting_chk_roots = set()
        interesting_chk_roots = set()
        def filter_inv_stream(inv_stream):
            for idx, record in enumerate(inv_stream):
                ### child_pb.update('fetch inv', idx, len(inv_keys_to_fetch))
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(
                    self.from_repository.chk_bytes, bytes, record.key)
                if record.key == start_rev_key:
                    uninteresting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        uninteresting_chk_roots.add(p_id_map.key())
                else:
                    yield record
                    interesting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        interesting_chk_roots.add(p_id_map.key())
        ### pb.update('fetch inventory', 0, 2)
        yield ('inventories', filter_inv_stream(inv_stream))
        # Now that we have worked out all of the interesting root nodes, grab
        # all of the interesting pages and insert them
        ### pb.update('fetch inventory', 1, 2)
        interesting = chk_map.iter_interesting_nodes(
            self.from_repository.chk_bytes, interesting_chk_roots,
            uninteresting_chk_roots)
        def to_stream_adapter():
            """Adapt the iter_interesting_nodes result to a single stream.

            iter_interesting_nodes returns records as it processes them, along
            with keys. However, we only want to return the records themselves.
            """
            for record, items in interesting:
                if record is not None:
                    yield record
        # XXX: We could instead call get_record_stream(records.keys())
        #      ATM, this will always insert the records as fulltexts, and
        #      requires that you can hang on to records once you have gone
        #      on to the next one. Further, it causes the target to
        #      recompress the data. Testing shows it to be faster than
        #      requesting the records again, though.
        yield ('chk_bytes', to_stream_adapter())
        ### pb.update('fetch inventory', 2, 2)

    def _stream_invs_as_fulltexts(self, revision_ids):
        from_repo = self.from_repository
        from_serializer = from_repo._format._serializer
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        for inv in self.from_repository.iter_inventories(revision_ids):
            # XXX: This is a bit hackish, but it works. Basically,
            #      CHKSerializer 'accidentally' supports
            #      read/write_inventory_to_string, even though that is never
            #      the format that is stored on disk. It *does* give us a
            #      single string representation for an inventory, so live with
            #      it for now.
            #      This would be far better if we had a 'serialized inventory
            #      delta' form. Then we could use 'inventory._make_delta', and
            #      transmit that. This would both be faster to generate, and
            #      result in fewer bytes-on-the-wire.
            as_bytes = from_serializer.write_inventory_to_string(inv)
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, as_bytes)

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventorydeltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
            self._stream_invs_as_deltas(revision_ids,
                delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
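
    # Illustrative sketch (not a separate bzrlib API): each record yielded
    # above carries a serialised inventory *delta* rather than a full
    # inventory:
    #
    #   delta = inv._make_delta(parent_inv)
    #   lines = serializer.delta_to_lines(basis_id, inv.revision_id, delta)
    #   payload = ''.join(lines)
    #
    # Clients older than 1.19 cannot insert such records directly, which is
    # why RemoteStreamSink falls back to VFS insertion for them.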

def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):