        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))
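
    # Illustrative sketch (not from the original source) of what the
    # validation above accepts and rejects, assuming ``builder`` is a
    # CommitBuilder instance:
    #
    #   builder._validate_revprops({'branch-nick': u'trunk'})   # passes
    #   builder._validate_revprops({'note': u'bad\rline'})      # ValueError: '\r'
    #   builder._validate_revprops({'count': 42})               # ValueError: not a string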

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
            timestamp=self._timestamp,
            timezone=self._timezone,
            # ...

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None
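
    # Shape of the delta tuples built above, shown for orientation (an
    # illustrative summary, not part of the original source):
    #
    #   add:    (None,       'new/path', file_id, new_entry)
    #   modify: ('old/path', 'new/path', file_id, new_entry)
    #   delete: ('old/path', None,       file_id, None)   # see record_delete()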

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
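
    # Putting the delta-recording API together - an illustrative sketch only;
    # obtaining ``builder`` via Repository.get_commit_builder() is assumed and
    # not shown in this excerpt:
    #
    #   builder.will_record_deletes()
    #   builder.record_delete('doomed.txt', 'doomed-file-id')
    #   # ...record_entry_contents() for each changed entry...
    #   delta = builder.get_basis_delta()
    #   # delta is now suitable for Repository.add_inventory_by_delta().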

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        # ...
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r)." % (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        # ...
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            # ... (symlink handling elided)
        elif kind == 'tree-reference':
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        return self._get_delta(ie, basis_inv, path), True, fingerprint
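
    # How a caller typically consumes the 3-tuple above - an illustrative
    # sketch only (``builder`` and the entry variables are assumed):
    #
    #   delta, version_recorded, fs_hash = builder.record_entry_contents(
    #       ie, parent_invs, path, tree, content_summary)
    #   if version_recorded:
    #       ...  # a new version of this entry was stored
    #   if fs_hash is not None:
    #       sha1, stat_value = fs_hash  # from tree.get_file_with_stat()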

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        # Note: as we read the content directly from the tree, we know it's not

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        self._fetch_order = 'topological'
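
    # Illustrative sketch of stacking (not from the original source): a
    # stacked branch would end up doing roughly this, assuming ``repo`` and
    # ``base_repo`` are already-open repositories whose formats support
    # external lookups:
    #
    #   repo.add_fallback_repository(base_repo)
    #   # texts/inventories/revisions/signatures lookups now fall back to
    #   # base_repo, and fetches are forced into topological order.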

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        ...
        """
        # ...
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details.)
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
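
    # Illustrative sketch tying this to the commit-builder delta API above
    # (assumed names; ``repo`` must be in a write group):
    #
    #   builder.will_record_deletes()
    #   # ...record_entry_contents()/record_delete() calls...
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       basis_revision_id, builder.get_basis_delta(),
    #       new_revision_id, parents)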

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # What order should fetch operations request streams in?
        # The default is unordered as that is the cheapest for an origin to
        # provide.
        self._fetch_order = 'unordered'
        # Does this repository use deltas that can be fetched as-deltas?
        # (E.g. knits, where the knit deltas can be transplanted intact.)
        # We default to False, which will ensure that enough data to get
        # a full text out of any fetch stream will be grabbed.
        self._fetch_uses_deltas = False
        # Should fetch trigger a reconcile after the fetch? Only needed for
        # some repository formats that can suffer internal inconsistencies.
        self._fetch_reconcile = False
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
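
    # Subclasses tune these policy attributes rather than overriding fetch
    # itself. An illustrative sketch with a hypothetical subclass (not from
    # the original source):
    #
    #   class KnitBackedRepository(Repository):
    #       def __init__(self, *args, **kwargs):
    #           Repository.__init__(self, *args, **kwargs)
    #           self._fetch_order = 'topological'  # knit deltas need parents first
    #           self._fetch_uses_deltas = True     # transplant deltas intact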

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, revision_ids):
        """See graph._StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple(parent_revid
                    for (parent_revid,) in parent_keys)
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result
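
    # Illustrative example of the wrap/unwrap above (hypothetical ids):
    #
    #   repo.get_parent_map(['rev-2', 'ghost'])
    #   # internally queries self.revisions with [('rev-2',), ('ghost',)]
    #   # and returns {'rev-2': ('rev-1',)} - absent ids simply drop out,
    #   # and a present id with no parents maps to (NULL_REVISION,).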

    def _make_parents_provider(self):
        # ...

    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# 1.7->1.8 go below here
format_registry.register_lazy(
    "Bazaar development format 2 (needs bzr.dev from before 1.8)\n",
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2',
    )
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )
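
# The registration pattern above, spelled out - an illustrative sketch for a
# hypothetical format (the names below are placeholders, not real bzrlib
# classes):
#
#   format_registry.register_lazy(
#       'Bazaar ExampleFormat (demo)\n',   # format marker string on disk
#       'bzrlib.repofmt.example_repo',     # module imported only on first use
#       'RepositoryFormatExample',         # class name within that module
#       )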


class InterRepository(InterObject):
    def _walk_to_common_revisions(self, revision_ids):
        # ...
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()
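
    # The batching idea above in miniature - an illustrative sketch, assuming
    # a searcher-like object exposing next_with_ghosts():
    #
    #   batch = set()
    #   while len(batch) < batch_size:
    #       try:
    #           revs, ghosts = searcher.next_with_ghosts()
    #       except StopIteration:
    #           break
    #       batch.update(revs)
    #   # ...then issue one target_graph.get_parent_map(batch) round trip,
    #   # instead of one query per revision.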

    @deprecated_method(one_two)
    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        # ...

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories, e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


class InterSameDataRepository(InterRepository):
    # ...

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        if (len(self.source._fallback_repositories) > 0 or
            len(self.target._fallback_repositories) > 0):
            # The pack layer is not aware of fallback repositories, so when
            # fetching from a stacked repository or into a stacked repository
            # we use the generic fetch logic which uses the VersionedFiles
            # attributes on repository.
            from bzrlib.fetch import RepoFetcher
            fetcher = RepoFetcher(self.target, self.source, revision_id,
                pb, find_ghosts)
            return fetcher.count_copied, fetcher.failed_revisions
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        self.count_copied = 0
        # ...
        try:
            revision_ids = self.search_missing_revision_ids(revision_id,
                find_ghosts=find_ghosts).get_keys()
        except errors.NoSuchRevision:
            raise errors.InstallFailed([revision_id])
        if len(revision_ids) == 0:
            return 0, 0
        return self._pack(self.source, self.target, revision_ids)

    def _pack(self, source, target, revision_ids):
        from bzrlib.repofmt.pack_repo import Packer
        target_pack_collection = self._get_target_pack_collection()
        packs = source._pack_collection.all_packs()
        pack = Packer(target_pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            target_pack_collection._save_pack_names()
            copied_revs = pack.get_revision_count()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self._autopack()
            return (copied_revs, [])
        else:
            return (0, [])

    def _autopack(self):
        self.target._pack_collection.autopack()

    def _get_target_pack_collection(self):
        return self.target._pack_collection

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts anywhere in the ancestry, and
            include revision_ids.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            # Find ghosts: search for revisions pointing from one repository to
            # the other, and vice versa, anywhere in the history of revision_id.
            graph = self.target_get_graph(other_repository=self.source)
            searcher = graph._make_breadth_first_searcher([revision_id])
            found_ids = set()
            while True:
                try:
                    next_revs, ghosts = searcher.next_with_ghosts()
                except StopIteration:
                    break
                if revision_id in ghosts:
                    raise errors.NoSuchRevision(self.source, revision_id)
                found_ids.update(next_revs)
                found_ids.update(ghosts)
            found_ids = frozenset(found_ids)
            # Double query here: should be able to avoid this by changing the
            # graph api further.
            result_set = found_ids - frozenset(
                self.target_get_parent_map(found_ids))
        else:
            source_ids = self.source.all_revision_ids()
            # source_ids is the worst possible case we may need to pull.
            # now we want to filter source_ids against what we actually
            # have in target, but don't try to check for existence where we know
            # we do not have a revision as that would be pointless.
            target_ids = set(self.target.all_revision_ids())
            result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
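
    # Typical call (illustrative, hypothetical ids) - this is how fetch()
    # decides what to copy:
    #
    #   result = inter.search_missing_revision_ids('rev-id', find_ghosts=True)
    #   revision_ids = result.get_keys()  # revisions absent from the target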

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        try:
            from bzrlib.repofmt.knitrepo import (
                RepositoryFormatKnit1,
                RepositoryFormatKnit3,
                )
            from bzrlib.repofmt.pack_repo import (
                RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3,
                RepositoryFormatKnitPack4,
                RepositoryFormatKnitPack5,
                RepositoryFormatKnitPack5RichRoot,
                RepositoryFormatKnitPack6,
                RepositoryFormatKnitPack6RichRoot,
                RepositoryFormatPackDevelopment2,
                RepositoryFormatPackDevelopment2Subtree,
                )
            norichroot = (
                RepositoryFormatKnit1,            # no rr, no subtree
                RepositoryFormatKnitPack1,        # no rr, no subtree
                RepositoryFormatPackDevelopment2, # no rr, no subtree
                RepositoryFormatKnitPack5,        # no rr, no subtree
                RepositoryFormatKnitPack6,        # no rr, no subtree
                )
            richroot = (
                RepositoryFormatKnit3,            # rr, subtree
                RepositoryFormatKnitPack3,        # rr, subtree
                RepositoryFormatKnitPack4,        # rr, no subtree
                RepositoryFormatKnitPack5RichRoot,  # rr, no subtree
                RepositoryFormatKnitPack6RichRoot,  # rr, no subtree
                RepositoryFormatPackDevelopment2Subtree,  # rr, subtree
                )
            for format in norichroot:
                if format.rich_root_data:
                    raise AssertionError('Format %s is a rich-root format'
                        ' but is included in the non-rich-root list'
                        % (format,))
            for format in richroot:
                if not format.rich_root_data:
                    raise AssertionError('Format %s is not a rich-root format'
                        ' but is included in the rich-root list'
                        % (format,))
            # TODO: One alternative is to just check format.rich_root_data,
            #       instead of keeping membership lists. However, the formats
            #       *also* have to use the same 'Knit' style of storage
            #       (line-deltas, fulltexts, etc.)
            return (isinstance(source._format, norichroot) and
                isinstance(target._format, richroot))
        except AttributeError:
            return False

    def _fetch_batch(self, revision_ids, basis_id, basis_tree):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of basis_tree
        :param basis_tree: A tree that is not in revision_ids which should
            already exist in the target.
        :return: (basis_id, basis_tree) A new basis to use now that these trees
            have been copied.
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        for tree in self.source.revision_trees(revision_ids):
            current_revision_id = tree.get_revision_id()
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, entry in delta:
                if new_path is not None:
                    if not (new_path or self.target.supports_rich_root()):
                        # We leave the inventory delta in, because that
                        # will have the deserialised inventory root
                        # pointer.
                        continue
                    # TODO: Do we need:
                    #       "if entry.revision == current_revision_id" ?
                    if entry.revision == current_revision_id:
                        text_keys.add((file_id, entry.revision))
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            pending_revisions.append(revision)
            basis_id = current_revision_id
            basis_tree = tree
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._fetch_order,
            not self.target._fetch_uses_deltas))
        # insert deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id, basis_tree

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressBar
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id, basis_tree = self._fetch_batch(batch,
                    basis_id, basis_tree)
            except:
                self.target.abort_write_group()
                raise
            else:
                self.target.commit_write_group()
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


class InterOtherToRemote(InterRepository):
    """An InterRepository that simply delegates to the 'real' InterRepository
    calculated for (source, target._real_repository).
    """

    _walk_to_common_revisions_batch_size = 50

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)
            # Make _real_inter use the RemoteRepository for get_parent_map
            self._real_inter.target_get_graph = self.target.get_graph
            self._real_inter.target_get_parent_map = self.target.get_parent_map

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        # Is source's model compatible with target's model?
        source._ensure_real()
        real_source = source._real_repository
        if isinstance(real_source, remote.RemoteRepository):
            raise NotImplementedError(
                "We don't support remote repos backed by remote repos yet.")
        return InterRepository._same_model(real_source, target)

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.source._ensure_real()
            real_source = self.source._real_repository
            self._real_inter = InterRepository.get(real_source, self.target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterPackToRemotePack(InterPackRepo):
    """A specialisation of InterPackRepo for a target that is a
    RemoteRepository.

    This will use the get_parent_map RPC rather than plain readvs, and also
    uses an RPC for autopacking.
    """

    _walk_to_common_revisions_batch_size = 50

    @staticmethod
    def is_compatible(source, target):
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        if isinstance(source._format, RepositoryFormatPack):
            if isinstance(target, remote.RemoteRepository):
                target._ensure_real()
                if isinstance(target._real_repository._format,
                              RepositoryFormatPack):
                    if InterRepository._same_model(source, target):
                        return True
        return False

    def _autopack(self):
        self.target.autopack()

    def _get_target_pack_collection(self):
        return self.target._real_repository._pack_collection

    @classmethod
    def _get_repo_format_to_test(self):
        return None