"""Copying of history from one branch to another.

The basic plan is that every branch knows the history of everything
that has merged into it.  As the first step of a merge, pull, or
branch operation we copy history from the source into the destination
branch.

The copying is done in a slightly complicated order.  We don't want to
add a revision to the store until everything it refers to is also
stored, so that if a revision is present we can totally recreate it.
However, we can't know what files are included in a revision until we
read its inventory.  So we query the inventory store of the source for
the ids we need, and then pull those ids and finally actually join
the inventories.
"""
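
# Example (a minimal sketch, not part of the original module): callers do not
# normally build a RepoFetcher themselves; they go through Repository.fetch(),
# which picks an InterRepository and delegates to the machinery below.
# ``source`` and ``target`` are assumed to be already-open bzrlib Repository
# objects and ``stop_revision`` an arbitrary revision id present in ``source``.
#
#     target.lock_write()
#     try:
#         target.fetch(source, revision_id=stop_revision)
#     finally:
#         target.unlock()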

from __future__ import absolute_import

import operator

import bzrlib
import bzrlib.errors as errors
import bzrlib.ui
from bzrlib.errors import (InstallFailed,
                           )
from bzrlib.progress import ProgressPhase
from bzrlib.revision import is_null, NULL_REVISION
from bzrlib.symbol_versioning import (deprecated_function,
                                      )
from bzrlib.trace import mutter

# Heavier modules that are only needed once a fetch actually runs are
# loaded lazily.
from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    tsort,
    ui,
    versionedfile,
    vf_search,
    )
""")
from bzrlib.i18n import gettext

# TODO: Avoid repeatedly opening weaves so many times.

# XXX: This doesn't handle ghost (not present in branch) revisions at
# all yet.  I'm not sure they really should be supported.

# NOTE: This doesn't copy revisions which may be present but not
# merged into the last revision.  I'm not sure we want to do that.

# - get a list of revisions that need to be pulled in
# - for each one, pull in that revision file
#   and get the inventory, and store the inventory with the right
#   parents
# - and get the ancestry, and store that with the right parents too
# - and keep a note of all file ids and versions seen
# - then go through all files; for each one get the weave,
#   and add in all file versions


class RepoFetcher(object):
    """Pull revisions and texts from one repository to another.

    last_revision
        if set, try to limit to the data this revision references.

    count_copied -- number of revisions copied

    This should not be used directly; it's essentially an object to
    encapsulate the logic in InterRepository.fetch().
    """

    def __init__(self, to_repository, from_repository, last_revision=None,
                 find_ghosts=True, fetch_spec=None, pb=None):
        """Create a repo fetcher.

        :param last_revision: If set, try to limit to the data this revision
            references.
        :param fetch_spec: A SearchResult specifying which revisions to fetch.
            If set, this overrides last_revision.
        :param find_ghosts: If True search the entire history for ghosts.
        """
        # repository.fetch has the responsibility for short-circuiting
        # attempts to copy between a repository and itself.
        self.failed_revisions = []
        self.count_copied = 0
        if to_repository.has_same_location(from_repository):
            # repository.fetch should be taking care of this case.
            raise errors.BzrError('RepoFetcher run '
                'between two objects at the same location: '
                '%r and %r' % (to_repository, from_repository))
        self.to_repository = to_repository
        self.from_repository = from_repository
        self.sink = to_repository._get_sink()
        # must not mutate self._last_revision as it's potentially a shared instance
        self._last_revision = last_revision
        self._fetch_spec = fetch_spec
        self.find_ghosts = find_ghosts
        if pb is None:
            self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
            self.nested_pb = self.pb
        else:
            self.pb = pb
            self.nested_pb = None
        self.from_repository.lock_read()
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.from_repository, self.from_repository._format,
               self.to_repository, self.to_repository._format)
        try:
            self.to_repository.lock_write()
            try:
                self.to_repository.start_write_group()
                try:
                    self.__fetch()
                except:
                    self.to_repository.abort_write_group()
                    raise
                else:
                    self.to_repository.commit_write_group()
            finally:
                if self.nested_pb is not None:
                    self.nested_pb.finished()
                self.to_repository.unlock()
        finally:
            self.from_repository.unlock()

    def __fetch(self):
        """Primary worker function.

        This initialises all the needed variables, and then fetches the
        requested revisions, finally clearing the progress bar.
        """
        # Roughly this is what we're aiming for fetch to become:
        #
        # missing = self.sink.insert_stream(self.source.get_stream(search))
        # if missing:
        #     missing = self.sink.insert_stream(self.source.get_items(missing))
        # assert not missing
        self.to_weaves = self.to_repository.weave_store
        self.from_weaves = self.from_repository.weave_store
        self.count_total = 0
        self.file_ids_names = {}
        pb = ui.ui_factory.nested_progress_bar()
        pb.show_pct = pb.show_count = False
        pp = ProgressPhase('Fetch phase', 4, self.pb)
        try:
            pb.update(gettext("Finding revisions"), 0, 2)
            search_result = self._revids_to_fetch()
            mutter('fetching: %s', search_result)
            if search_result.is_empty():
                return
            pb.update(gettext("Fetching revisions"), 1, 2)
            self._fetch_everything_for_search(search_result)
        finally:
            pb.finished()

    def _fetch_everything_for_search(self, search):
        """Fetch all data for the given set of revisions."""
        if (self.from_repository._format.rich_root_data and
            not self.to_repository._format.rich_root_data):
            raise errors.IncompatibleRepositories(
                self.from_repository, self.to_repository,
                "different rich-root support")
        pb = ui.ui_factory.nested_progress_bar()
        try:
            pb.update("Get stream source")
            source = self.from_repository._get_source(
                self.to_repository._format)
            stream = source.get_stream(search)
            from_format = self.from_repository._format
            pb.update("Inserting stream")
            resume_tokens, missing_keys = self.sink.insert_stream(
                stream, from_format, [])
            if missing_keys:
                pb.update("Missing keys")
                stream = source.get_stream_for_missing_keys(missing_keys)
                pb.update("Inserting missing keys")
                resume_tokens, missing_keys = self.sink.insert_stream(
                    stream, from_format, resume_tokens)
            if missing_keys:
                raise AssertionError(
                    "second push failed to complete a fetch %r." % (
                        missing_keys,))
            if resume_tokens:
                raise AssertionError(
                    "second push failed to commit the fetch %r." % (
                        resume_tokens,))
            pb.update("Finishing stream")
            self.sink.finished()
        finally:
            pb.finished()

    def _fetch_everything_for_revisions(self, revs, pp):
        """Fetch all data for the given set of revisions."""
        # The first phase is "file".  We pass the progress bar for it directly
        # into item_keys_introduced_by, which has more information about how
        # that phase is progressing than we do.
        # XXX: item_keys_introduced_by should have a richer API than it does at
        # the moment, so that it can feed the progress information back to this
        # function.
        phase = 'file'
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            data_to_fetch = self.from_repository.item_keys_introduced_by(revs, pb)
            for knit_kind, file_id, revisions in data_to_fetch:
                if knit_kind != phase:
                    phase = knit_kind
                    # Make a new progress bar for this phase
                    pb.finished()
                    pp.next_phase()
                    pb = bzrlib.ui.ui_factory.nested_progress_bar()
                if knit_kind == "file":
                    self._fetch_weave_text(file_id, revisions)
                elif knit_kind == "inventory":
                    # Once we've processed all the files, then we generate the
                    # root texts (if necessary), then we process the inventory.
                    # It's a bit distasteful to have knit_kind == "inventory"
                    # mean this, perhaps it should happen on the first
                    # non-"file" knit, in case it's not always inventory?
                    self._generate_root_texts(revs)
                    self._fetch_inventory_weave(revs, pb)
                elif knit_kind == "signatures":
                    # Nothing to do here; this will be taken care of when
                    # _fetch_revision_texts happens.
                    pass
                elif knit_kind == "revisions":
                    self._fetch_revision_texts(revs, pb)
                else:
                    raise AssertionError("Unknown knit kind %r" % knit_kind)
        finally:
            pb.finished()
        self.count_copied += len(revs)
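
    # For reference (a sketch derived from the loop above, not original text):
    # item_keys_introduced_by yields (knit_kind, file_id, versions) tuples
    # grouped by kind, roughly in this order:
    #     ('file', 'file-id-1', set(['rev-2'])), ('file', 'file-id-2', ...),
    #     ('inventory', None, set(['rev-2', ...])),
    #     ('signatures', None, set([...])),
    #     ('revisions', None, set([...]))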

    def _revids_to_fetch(self):
        """Determines the exact revisions needed from self.from_repository to
        install self._last_revision in self.to_repository.

        :returns: A SearchResult of some sort.  (Possibly a
            PendingAncestryResult, EmptySearchResult, etc.)  If no revisions
            need to be fetched, the result is empty.
        """
        mutter('fetch up to rev {%s}', self._last_revision)
        if self._fetch_spec is not None:
            # The fetch spec is already a concrete search result.
            return self._fetch_spec
        elif self._last_revision == NULL_REVISION:
            # fetch_spec is None + last_revision is null => empty fetch.
            # explicit limit of no revisions needed
            return vf_search.EmptySearchResult()
        elif self._last_revision is not None:
            return vf_search.NotInOtherForRevs(self.to_repository,
                self.from_repository, [self._last_revision],
                find_ghosts=self.find_ghosts).execute()
        else: # self._last_revision is None:
            return vf_search.EverythingNotInOther(self.to_repository,
                self.from_repository,
                find_ghosts=self.find_ghosts).execute()
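
    # Example (a sketch, not in the original): the type of result depends on
    # what the caller supplied:
    #     fetch_spec given                 -> returned unchanged
    #     last_revision == NULL_REVISION   -> vf_search.EmptySearchResult()
    #     last_revision == 'some-rev-id'   -> NotInOtherForRevs(...).execute()
    #     neither given                    -> EverythingNotInOther(...).execute()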

    def _fetch_weave_text(self, file_id, required_versions):
        to_weave = self.to_weaves.get_weave_or_empty(file_id,
            self.to_repository.get_transaction())
        from_weave = self.from_weaves.get_weave(file_id,
            self.from_repository.get_transaction())
        # we fetch all the texts, because texts do
        # not reference anything, and it's cheap enough
        to_weave.join(from_weave, version_ids=required_versions)
        # we don't need *all* of this data anymore, but we don't know
        # which parts we do.  This cache clearing will result in a new read
        # of the knit data when we do the checkout, but probably we
        # want to emit the needed data on the fly rather than at the
        # end anyhow.
        # the from weave should know not to cache data being joined,
        # but it's ok to ask it to clear.
        from_weave.clear_cache()
        to_weave.clear_cache()

    def _fetch_inventory_weave(self, revs, pb):
        pb.update("fetch inventory", 0, 2)
        to_weave = self.to_repository.get_inventory_weave()
        child_pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            # just merge, this is optimisable and it means we don't
            # copy unreferenced data such as not-needed inventories.
            pb.update("fetch inventory", 1, 3)
            from_weave = self.from_repository.get_inventory_weave()
            pb.update("fetch inventory", 2, 3)
            # we fetch only the referenced inventories because we do not
            # know for unselected inventories whether all their required
            # texts are present in the other repository - it could be
            # corrupt.
            to_weave.join(from_weave, pb=child_pb, msg='merge inventory',
                          version_ids=revs)
            from_weave.clear_cache()
        finally:
            child_pb.finished()

    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        pass


class GenericRepoFetcher(RepoFetcher):
    """This is a generic repo to repo fetcher.

    This makes minimal assumptions about repo layout and contents.
    It triggers a reconciliation after fetching to ensure integrity.
    """

    def _fetch_revision_texts(self, revs, pb):
        """Fetch revision object texts"""
        to_txn = self.to_transaction = self.to_repository.get_transaction()
        count = 0
        total = len(revs)
        to_store = self.to_repository._revision_store
        for rev in revs:
            pb.update('copying revisions', count, total)
            try:
                sig_text = self.from_repository.get_signature_text(rev)
                to_store.add_revision_signature_text(rev, sig_text, to_txn)
            except errors.NoSuchRevision:
                # not signed.
                pass
            to_store.add_revision(self.from_repository.get_revision(rev),
                                  to_txn)
            count += 1
        # fixup inventory if needed:
        # this is expensive because we have no inverse index to current ghosts.
        # but on local disk it's a few seconds and sftp push is already insane.
        # FIXME: repository should inform if this is needed.
        self.to_repository.reconcile()


class KnitRepoFetcher(RepoFetcher):
    """This is a knit format repository specific fetcher.

    This differs from the GenericRepoFetcher by not doing a
    reconciliation after copying, and using knit joining to
    copy revision texts.
    """

    def _fetch_revision_texts(self, revs, pb):
        # may need to be an InterRevisionStore call here.
        from_transaction = self.from_repository.get_transaction()
        to_transaction = self.to_repository.get_transaction()
        to_sf = self.to_repository._revision_store.get_signature_file(
            to_transaction)
        from_sf = self.from_repository._revision_store.get_signature_file(
            from_transaction)
        to_sf.join(from_sf, version_ids=revs, ignore_missing=True)
        to_rf = self.to_repository._revision_store.get_revision_file(
            to_transaction)
        from_rf = self.from_repository._revision_store.get_revision_file(
            from_transaction)
        to_rf.join(from_rf, version_ids=revs)


class Inter1and2Helper(object):
    """Helper for operations that convert data from model 1 and 2.

    This is for use by fetchers and converters.
    """

    # This is a class variable so that the test suite can override it.
    known_graph_threshold = 100

    def __init__(self, source, target=None):
        """Constructor.

        :param source: The repository data comes from
        :param target: The repository data goes to
        """
        self.source = source
        self.target = target

    def iter_rev_trees(self, revs):
        """Iterate through RevisionTrees efficiently.

        Trees are retrieved in batches of 100, and then yielded in the
        order they were requested.

        :param revs: A list of revision ids
        """
        # In case that revs is not a list.
        revs = list(revs)
        while revs:
            for tree in self.source.revision_trees(revs[:100]):
                if tree.root_inventory.revision_id is None:
                    tree.root_inventory.revision_id = tree.get_revision_id()
                yield tree
            revs = revs[100:]

    def _find_root_ids(self, revs, parent_map, graph):
        revision_root = {}
        for tree in self.iter_rev_trees(revs):
            root_id = tree.get_root_id()
            revision_id = tree.get_file_revision(root_id, u"")
            revision_root[revision_id] = root_id
        # Find out which parents we don't already know root ids for
        parents = set()
        for revision_parents in parent_map.itervalues():
            parents.update(revision_parents)
        parents.difference_update(revision_root.keys() + [NULL_REVISION])
        # Limit to revisions present in the versionedfile
        parents = graph.get_parent_map(parents).keys()
        for tree in self.iter_rev_trees(parents):
            root_id = tree.get_root_id()
            revision_root[tree.get_revision_id()] = root_id
        return revision_root

    def generate_root_texts(self, revs):
        """Generate VersionedFiles for all root ids.

        :param revs: the revisions to include
        """
        graph = self.source.get_graph()
        parent_map = graph.get_parent_map(revs)
        rev_order = tsort.topo_sort(parent_map)
        rev_id_to_root_id = self._find_root_ids(revs, parent_map, graph)
        root_id_order = [(rev_id_to_root_id[rev_id], rev_id) for rev_id in
            rev_order]
        # Guaranteed stable, this groups all the file id operations together
        # retaining topological order within the revisions of a file id.
        # File id splits and joins would invalidate this, but they don't exist
        # yet, and are unlikely to in non-rich-root environments anyway.
        root_id_order.sort(key=operator.itemgetter(0))
        # Create a record stream containing the roots to create.
        if len(revs) > self.known_graph_threshold:
            graph = self.source.get_known_graph_ancestry(revs)
        new_roots_stream = _new_root_data_stream(
            root_id_order, rev_id_to_root_id, parent_map, self.source, graph)
        return [('texts', new_roots_stream)]

    def regenerate_inventory(self, revs):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).

        :param revs: The revisions to include
        """
        inventory_weave = self.source.get_inventory_weave()
        for tree in self.iter_rev_trees(revs):
            parents = inventory_weave.get_parents(tree.get_revision_id())
            self.target.add_inventory(tree.get_revision_id(), tree.inventory,
                                      parents)


def _new_root_data_stream(
    root_keys_to_create, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Generate a texts substream of synthesised root entries.

    Used in fetches that do rich-root upgrades.

    :param root_keys_to_create: iterable of (root_id, rev_id) pairs describing
        the root entries to create.
    :param rev_id_to_root_id_map: dict of known rev_id -> root_id mappings for
        calculating the parents.  If a parent rev_id is not found here then it
        will be recalculated.
    :param parent_map: a parent map for all the revisions in
        root_keys_to_create.
    :param graph: a graph to use instead of repo.get_graph().
    """
    for root_key in root_keys_to_create:
        root_id, rev_id = root_key
        parent_keys = _parent_keys_for_root_version(
            root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph)
        yield versionedfile.FulltextContentFactory(
            root_key, parent_keys, None, '')
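
# Example (a minimal sketch, not from the original source): each record the
# generator above yields is an empty fulltext keyed by (root_id, rev_id), so a
# rich-root target could insert the substream directly into its texts store.
# The literal ids below are hypothetical.
#
#     stream = _new_root_data_stream(
#         [('TREE_ROOT', 'rev-2')],        # root/revision pairs to synthesise
#         {'rev-1': 'TREE_ROOT'},          # known root id of the parent
#         {'rev-2': ('rev-1',)},           # parent map covering 'rev-2'
#         source_repo)                     # repository holding 'rev-1'
#     target_repo.texts.insert_record_stream(stream)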


def _parent_keys_for_root_version(
    root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Get the parent keys for a given root id.

    A helper function for _new_root_data_stream.
    """
    # Include direct parents of the revision, but only if they used the same
    # root_id and are heads.
    rev_parents = parent_map[rev_id]
    parent_ids = []
    for parent_id in rev_parents:
        if parent_id == NULL_REVISION:
            continue
        if parent_id not in rev_id_to_root_id_map:
            # We probably didn't read this revision, go spend the extra effort
            # to actually check.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # Ghost, fill out rev_id_to_root_id in case we encounter this
                # again.
                # But set parent_root_id to None since we don't really know
                parent_root_id = None
            else:
                parent_root_id = tree.get_root_id()
            rev_id_to_root_id_map[parent_id] = None
            # XXX: why not:
            #   rev_id_to_root_id_map[parent_id] = parent_root_id
            # memory consumption maybe?
        else:
            parent_root_id = rev_id_to_root_id_map[parent_id]
        if root_id == parent_root_id:
            # With stacking we _might_ want to refer to a non-local revision,
            # but this code path only applies when we have the full content
            # available, so ghosts really are ghosts, not just the edge of
            # local data.
            parent_ids.append(parent_id)
        else:
            # root_id may be in the parent anyway.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # ghost, can't refer to it.
                pass
            else:
                try:
                    parent_ids.append(tree.get_file_revision(root_id))
                except errors.NoSuchId:
                    # not in the tree
                    pass
    # Drop non-head parents
    if graph is None:
        graph = repo.get_graph()
    heads = graph.heads(parent_ids)
    selected_ids = []
    for parent_id in parent_ids:
        if parent_id in heads and parent_id not in selected_ids:
            selected_ids.append(parent_id)
    parent_keys = [(root_id, parent_id) for parent_id in selected_ids]
    return parent_keys


class TargetRepoKinds(object):
    """An enum-like set of constants.

    They are the possible values of FetchSpecFactory.target_repo_kind.
    """

    PREEXISTING = 'preexisting'
    STREAMING = 'streaming'
    EMPTY = 'empty'


class FetchSpecFactory(object):
    """A helper for building the best fetch spec for a sprout call.

    Factors that go into determining the sort of fetch to perform:
     * did the caller specify any revision IDs?
     * did the caller specify a source branch (need to fetch its
       heads_to_fetch(), usually the tip + tags)
     * is there an existing target repo (don't need to refetch revs it
       already has)
     * target is stacked?  (similar to pre-existing target repo: even if
       the target itself is new don't want to refetch existing revs)

    :ivar source_branch: the source branch if one specified, else None.
    :ivar source_branch_stop_revision_id: fetch up to this revision of
        source_branch, rather than its tip.
    :ivar source_repo: the source repository if one found, else None.
    :ivar target_repo: the target repository acquired by sprout.
    :ivar target_repo_kind: one of the TargetRepoKinds constants.
    """

    def __init__(self):
        self._explicit_rev_ids = set()
        self.source_branch = None
        self.source_branch_stop_revision_id = None
        self.source_repo = None
        self.target_repo = None
        self.target_repo_kind = None
        self.limit = None

    def add_revision_ids(self, revision_ids):
        """Add revision_ids to the set of revision_ids to be fetched."""
        self._explicit_rev_ids.update(revision_ids)

    def make_fetch_spec(self):
        """Build a SearchResult or PendingAncestryResult or etc."""
        if self.target_repo_kind is None or self.source_repo is None:
            raise AssertionError(
                'Incomplete FetchSpecFactory: %r' % (self.__dict__,))
        if len(self._explicit_rev_ids) == 0 and self.source_branch is None:
            if self.limit is not None:
                raise NotImplementedError(
                    "limit is only supported with a source branch set")
            # Caller hasn't specified any revisions or source branch
            if self.target_repo_kind == TargetRepoKinds.EMPTY:
                return vf_search.EverythingResult(self.source_repo)
            else:
                # We want everything not already in the target (or target's
                # fallbacks).
                return vf_search.EverythingNotInOther(
                    self.target_repo, self.source_repo).execute()
        heads_to_fetch = set(self._explicit_rev_ids)
        if self.source_branch is not None:
            must_fetch, if_present_fetch = self.source_branch.heads_to_fetch()
            if self.source_branch_stop_revision_id is not None:
                # Replace the tip rev from must_fetch with the stop revision
                # XXX: this might be wrong if the tip rev is also in the
                # must_fetch set for other reasons (e.g. it's the tip of
                # multiple loom threads?), but then it's pretty unclear what it
                # should mean to specify a stop_revision in that case anyway.
                must_fetch.discard(self.source_branch.last_revision())
                must_fetch.add(self.source_branch_stop_revision_id)
            heads_to_fetch.update(must_fetch)
        else:
            if_present_fetch = set()
        if self.target_repo_kind == TargetRepoKinds.EMPTY:
            # PendingAncestryResult does not raise errors if a requested head
            # is absent.  Ideally it would support the
            # required_ids/if_present_ids distinction, but in practice
            # heads_to_fetch will almost certainly be present so this doesn't
            # matter much.
            all_heads = heads_to_fetch.union(if_present_fetch)
            ret = vf_search.PendingAncestryResult(all_heads, self.source_repo)
            if self.limit is not None:
                graph = self.source_repo.get_graph()
                topo_order = list(graph.iter_topo_order(ret.get_keys()))
                result_set = topo_order[:self.limit]
                ret = self.source_repo.revision_ids_to_search_result(result_set)
            return ret
        else:
            return vf_search.NotInOtherForRevs(self.target_repo, self.source_repo,
                required_ids=heads_to_fetch, if_present_ids=if_present_fetch,
                limit=self.limit).execute()
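

# Example (a minimal sketch, not part of the original module): sprout-style
# callers fill in the factory attributes and then ask it for a fetch spec.
# ``source_branch`` and ``target_repo`` are assumed to be an open bzrlib
# Branch and Repository respectively.
#
#     factory = FetchSpecFactory()
#     factory.source_branch = source_branch
#     factory.source_repo = source_branch.repository
#     factory.target_repo = target_repo
#     factory.target_repo_kind = TargetRepoKinds.EMPTY
#     spec = factory.make_fetch_spec()
#     target_repo.fetch(factory.source_repo, fetch_spec=spec)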


class Model1toKnit2Fetcher(GenericRepoFetcher):
    """Fetch from a Model1 repository into a Knit2 repository
    """

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        GenericRepoFetcher.__init__(self, to_repository, from_repository,
                                    last_revision=last_revision, pb=pb)

    def _generate_root_texts(self, revs):
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs, pb):
        self.helper.regenerate_inventory(revs)


class Knit1to2Fetcher(KnitRepoFetcher):
    """Fetch from a Knit1 repository into a Knit2 repository"""

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        KnitRepoFetcher.__init__(self, to_repository, from_repository,
                                 last_revision=last_revision, pb=pb)

    def _generate_root_texts(self, revs):
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs, pb):
        self.helper.regenerate_inventory(revs)


class RemoteToOtherFetcher(GenericRepoFetcher):

    def _fetch_everything_for_revisions(self, revs, pp):
        data_stream = self.from_repository.get_data_stream(revs)
        self.to_repository.insert_data_stream(data_stream)