    def __init__(self, to_repository, from_repository, last_revision=None,
        pb=None, find_ghosts=True, fetch_spec=None):
        """Create a repo fetcher.

        :param last_revision: If set, try to limit to the data this revision
            references.
        :param fetch_spec: A SearchResult specifying which revisions to fetch.
            If set, this overrides last_revision.
        :param find_ghosts: If True search the entire history for ghosts.
        :param pb: ProgressBar object to use; deprecated and ignored.
            This method will just create one on top of the stack.
        """
        if pb is not None:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to RepoFetcher.__init__")
            # and for simplicity it is in fact ignored
        # repository.fetch has the responsibility for short-circuiting
        # attempts to copy between a repository and itself.
        if to_repository.has_same_location(from_repository):
            # repository.fetch should be taking care of this case.
            raise errors.BzrError('RepoFetcher run '
                'between two objects at the same location: '
                '%r and %r' % (to_repository, from_repository))
        self.to_repository = to_repository
        self.from_repository = from_repository
        self.sink = to_repository._get_sink()
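        # Illustrative construction (hypothetical values, not from the
        # original source):
        #   RepoFetcher(to_repo, from_repo, last_revision='some-rev-id',
        #       find_ghosts=False)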

    def _revids_to_fetch(self):
        """Determines the exact revisions needed from self.from_repository to
        install self._last_revision in self.to_repository.

        :returns: A SearchResult of some sort.  (Possibly a
            PendingAncestryResult, EmptySearchResult, etc.)
        """
        if self._fetch_spec is not None:
            # The fetch spec is already a concrete search result.
            return self._fetch_spec
        elif self._last_revision == NULL_REVISION:
            # fetch_spec is None + last_revision is null => empty fetch.
            # explicit limit of no revisions needed
            return vf_search.EmptySearchResult()
        elif self._last_revision is not None:
            return vf_search.NotInOtherForRevs(self.to_repository,
                self.from_repository, [self._last_revision],
                find_ghosts=self.find_ghosts).execute()
        else: # self._last_revision is None:
            return vf_search.EverythingNotInOther(self.to_repository,
                self.from_repository,
                find_ghosts=self.find_ghosts).execute()

    def _parent_inventories(self, revision_ids):
        # Find all the parent revisions referenced by the stream, but
        # not present in the stream, and make sure we send their
        # inventories.
        parent_maps = self.to_repository.get_parent_map(revision_ids)
        parents = set()
        map(parents.update, parent_maps.itervalues())
        parents.discard(NULL_REVISION)
        parents.difference_update(revision_ids)
        missing_keys = set(('inventories', rev_id) for rev_id in parents)
        return missing_keys
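    # Worked example for _parent_inventories (illustrative only, not from the
    # original source): with revision_ids == set(['B', 'C']), where B's parent
    # is A and C's parent is B, parents becomes set(['A', 'B']); discarding
    # NULL_REVISION and the revisions already in the stream leaves set(['A']),
    # so the method returns set([('inventories', 'A')]).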


class Inter1and2Helper(object):

        # yet, and are unlikely to in non-rich-root environments anyway.
        root_id_order.sort(key=operator.itemgetter(0))
        # Create a record stream containing the roots to create.
        if len(revs) > self.known_graph_threshold:
            graph = self.source.get_known_graph_ancestry(revs)
        new_roots_stream = _new_root_data_stream(
            root_id_order, rev_id_to_root_id, parent_map, self.source, graph)
        return [('texts', new_roots_stream)]
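
# The module-level helpers below synthesise root-directory text records for
# rich-root upgrades: _new_root_data_stream yields one empty fulltext per
# (root_id, rev_id) key, using parent keys computed by
# _parent_keys_for_root_version.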

def _new_root_data_stream(
    root_keys_to_create, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Generate a texts substream of synthesised root entries.

    Used in fetches that do rich-root upgrades.

    :param root_keys_to_create: iterable of (root_id, rev_id) pairs describing
        the root entries to create.
    :param rev_id_to_root_id_map: dict of known rev_id -> root_id mappings for
        calculating the parents.  If a parent rev_id is not found here then it
        will be recalculated.
    :param parent_map: a parent map for all the revisions in
        root_keys_to_create.
    :param graph: a graph to use instead of repo.get_graph().
    """
    for root_key in root_keys_to_create:
        root_id, rev_id = root_key
        parent_keys = _parent_keys_for_root_version(
            root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph)
        yield versionedfile.FulltextContentFactory(
            root_key, parent_keys, None, '')


def _parent_keys_for_root_version(
    root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Get the parent keys for a given root id.

    A helper function for _new_root_data_stream.
    """
    # Include direct parents of the revision, but only if they used the same
    # root_id and are heads.
    rev_parents = parent_map[rev_id]
    parent_ids = []
    for parent_id in rev_parents:
        if parent_id == NULL_REVISION:
            continue
        if parent_id not in rev_id_to_root_id_map:
            # We probably didn't read this revision, go spend the extra effort
            # to actually check.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # Ghost, fill out rev_id_to_root_id in case we encounter this
                # again.
                # But set parent_root_id to None since we don't really know
                parent_root_id = None
            else:
                parent_root_id = tree.get_root_id()
            rev_id_to_root_id_map[parent_id] = None
            # XXX: why not:
            #   rev_id_to_root_id_map[parent_id] = parent_root_id
            # memory consumption maybe?
        else:
            parent_root_id = rev_id_to_root_id_map[parent_id]
        if root_id == parent_root_id:
            # With stacking we _might_ want to refer to a non-local revision,
            # but this code path only applies when we have the full content
            # available, so ghosts really are ghosts, not just the edge of
            # local data.
            parent_ids.append(parent_id)
        else:
            # root_id may be in the parent anyway.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # ghost, can't refer to it.
                pass
            else:
                try:
                    parent_ids.append(tree.get_file_revision(root_id))
                except errors.NoSuchId:
                    # not in the tree
                    pass
    # Drop non-head parents
    if graph is None:
        graph = repo.get_graph()
    heads = graph.heads(parent_ids)
    selected_ids = []
    for parent_id in parent_ids:
        if parent_id in heads and parent_id not in selected_ids:
            selected_ids.append(parent_id)
    parent_keys = [(root_id, parent_id) for parent_id in selected_ids]
    return parent_keys
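

# Illustrative sketch (not part of the original source): one way the two
# helpers above can be composed for a set of revisions already present in
# `repo`.  The function name and its arguments are hypothetical.
def _example_root_stream_for_revs(repo, revs):
    graph = repo.get_graph()
    parent_map = graph.get_parent_map(revs)
    # Map each revision to its root id by reading its revision tree.
    rev_id_to_root_id = dict(
        (rev_id, repo.revision_tree(rev_id).get_root_id()) for rev_id in revs)
    root_keys = [(rev_id_to_root_id[rev_id], rev_id) for rev_id in revs]
    return _new_root_data_stream(
        root_keys, rev_id_to_root_id, parent_map, repo, graph)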


class TargetRepoKinds(object):
    """An enum-like set of constants.

    They are the possible values of FetchSpecFactory.target_repo_kinds.
    """

    PREEXISTING = 'preexisting'
    STACKED = 'stacked'
    EMPTY = 'empty'


class FetchSpecFactory(object):
    """A helper for building the best fetch spec for a sprout call.

    Factors that go into determining the sort of fetch to perform:

     * did the caller specify any revision IDs?
     * did the caller specify a source branch (need to fetch its
       heads_to_fetch(), usually the tip + tags)
     * is there an existing target repo (don't need to refetch revs it
       already has)
     * target is stacked?  (similar to pre-existing target repo: even if
       the target itself is new don't want to refetch existing revs)

    :ivar source_branch: the source branch if one specified, else None.
    :ivar source_branch_stop_revision_id: fetch up to this revision of
        source_branch, rather than its tip.
    :ivar source_repo: the source repository if one found, else None.
    :ivar target_repo: the target repository acquired by sprout.
    :ivar target_repo_kind: one of the TargetRepoKinds constants.
    """

    def __init__(self):
        self._explicit_rev_ids = set()
        self.source_branch = None
        self.source_branch_stop_revision_id = None
        self.source_repo = None
        self.target_repo = None
        self.target_repo_kind = None
        self.limit = None

    def add_revision_ids(self, revision_ids):
        """Add revision_ids to the set of revision_ids to be fetched."""
        self._explicit_rev_ids.update(revision_ids)
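        # Illustrative use (hypothetical ids, not from the original source):
        #   factory.add_revision_ids(['rev-id-1', 'rev-id-2'])
        # The accumulated ids are added to heads_to_fetch in make_fetch_spec().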

    def make_fetch_spec(self):
        """Build a SearchResult or PendingAncestryResult or etc."""
        if self.target_repo_kind is None or self.source_repo is None:
            raise AssertionError(
                'Incomplete FetchSpecFactory: %r' % (self.__dict__,))
        if len(self._explicit_rev_ids) == 0 and self.source_branch is None:
            if self.limit is not None:
                raise NotImplementedError(
                    "limit is only supported with a source branch set")
            # Caller hasn't specified any revisions or source branch
            if self.target_repo_kind == TargetRepoKinds.EMPTY:
                return vf_search.EverythingResult(self.source_repo)
            else:
                # We want everything not already in the target (or target's
                # fallbacks).
                return vf_search.EverythingNotInOther(
                    self.target_repo, self.source_repo).execute()
        heads_to_fetch = set(self._explicit_rev_ids)
        if self.source_branch is not None:
            must_fetch, if_present_fetch = self.source_branch.heads_to_fetch()
            if self.source_branch_stop_revision_id is not None:
                # Replace the tip rev from must_fetch with the stop revision
                # XXX: this might be wrong if the tip rev is also in the
                # must_fetch set for other reasons (e.g. it's the tip of
                # multiple loom threads?), but then it's pretty unclear what it
                # should mean to specify a stop_revision in that case anyway.
                must_fetch.discard(self.source_branch.last_revision())
                must_fetch.add(self.source_branch_stop_revision_id)
            heads_to_fetch.update(must_fetch)
        else:
            if_present_fetch = set()
        if self.target_repo_kind == TargetRepoKinds.EMPTY:
            # PendingAncestryResult does not raise errors if a requested head
            # is absent.  Ideally it would support the
            # required_ids/if_present_ids distinction, but in practice
            # heads_to_fetch will almost certainly be present so this doesn't
            # matter much.
            all_heads = heads_to_fetch.union(if_present_fetch)
            ret = vf_search.PendingAncestryResult(all_heads, self.source_repo)
            if self.limit is not None:
                graph = self.source_repo.get_graph()
                topo_order = list(graph.iter_topo_order(ret.get_keys()))
                result_set = topo_order[:self.limit]
                ret = self.source_repo.revision_ids_to_search_result(result_set)
            return ret
        else:
            return vf_search.NotInOtherForRevs(self.target_repo, self.source_repo,
                required_ids=heads_to_fetch, if_present_ids=if_present_fetch,
                limit=self.limit).execute()
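

def _example_sprout_fetch_spec(source_branch, target_repo):
    """Illustrative sketch, not part of the original source.

    Shows one plausible way a sprout-like caller could drive
    FetchSpecFactory when the target repository is brand new.  The function
    name and its arguments are hypothetical.
    """
    factory = FetchSpecFactory()
    factory.source_branch = source_branch
    factory.source_repo = source_branch.repository
    factory.target_repo = target_repo
    factory.target_repo_kind = TargetRepoKinds.EMPTY
    return factory.make_fetch_spec()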