that has merged into it.  As the first step of a merge, pull, or
branch operation we copy history from the source into the destination
branch.

The copying is done in a slightly complicated order.  We don't want to
add a revision to the store until everything it refers to is also
stored, so that if a revision is present we can totally recreate it.
However, we can't know what files are included in a revision until we
read its inventory.  So we query the inventory store of the source for
the ids we need, and then pull those ids and finally actually join
the inventories, revisions and signatures.
"""
import operator

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    graph as _mod_graph,
    static_tuple,
    tsort,
    ui,
    versionedfile,
    )
""")
import bzrlib
import bzrlib.errors as errors
import bzrlib.ui
from bzrlib.errors import (InstallFailed,
                           )
from bzrlib.progress import ProgressPhase
from bzrlib.revision import is_null, NULL_REVISION
from bzrlib.symbol_versioning import (deprecated_function,
                                      )
from bzrlib.trace import mutter

# TODO: Avoid repeatedly opening weaves so many times.

# XXX: This doesn't handle ghost (not present in branch) revisions at
# all yet.  I'm not sure they really should be supported.

# NOTE: This doesn't copy revisions which may be present but not
# merged into the last revision.  I'm not sure we want to do that.

# - get a list of revisions that need to be pulled in
# - for each one, pull in that revision file
#   and get the inventory, and store the inventory with right
#   parents
# - and get the ancestry, and store that with right parents too
# - and keep a note of all file ids and versions seen
# - then go through all files; for each one get the weave,
#   and add in all file versions
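
# Illustrative sketch only, not part of bzrlib's API: the dependency order
# described in the module docstring and the plan above.  'source',
# 'destination' and the inventory objects are hypothetical duck-typed
# stand-ins (get_inventory, iter_file_versions, add_text, etc. are assumed
# methods, not real bzrlib calls).  A revision is only safe to add once every
# file text and inventory it refers to is already present in the destination.
def _sketch_copy_order(source, destination, revision_ids):
    inventories = [source.get_inventory(rev_id) for rev_id in revision_ids]
    # 1. File texts: they reference nothing else, so they can go first.
    for inv in inventories:
        for file_id, text_revision in inv.iter_file_versions():
            destination.add_text(file_id, text_revision,
                                 source.get_text(file_id, text_revision))
    # 2. Inventories, now that every text they name is stored.
    for rev_id, inv in zip(revision_ids, inventories):
        destination.add_inventory(rev_id, inv)
    # 3. Revisions and signatures last, since they refer to the inventories.
    for rev_id in revision_ids:
        destination.add_revision(rev_id, source.get_revision(rev_id))
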
class RepoFetcher(object):
    """Pull revisions and texts from one repository to another.

    last_revision
        if set, try to limit to the data this revision references.

    count_copied -- number of revisions copied

    This should not be used directly; it's essentially an object to
    encapsulate the logic in InterRepository.fetch().
    """
    def __init__(self, to_repository, from_repository, last_revision=None,
        find_ghosts=True, fetch_spec=None):
        """Create a repo fetcher.

        :param last_revision: If set, try to limit to the data this revision
            references.
        :param find_ghosts: If True search the entire history for ghosts.
        """
        self.failed_revisions = []
        self.count_copied = 0
        if to_repository.has_same_location(from_repository):
            # repository.fetch should be taking care of this case: it has the
            # responsibility for short-circuiting attempts to copy between a
            # repository and itself.
            raise errors.BzrError('RepoFetcher run '
                'between two objects at the same location: '
                '%r and %r' % (to_repository, from_repository))
        self.to_repository = to_repository
        self.from_repository = from_repository
        self.sink = to_repository._get_sink()
        # must not mutate self._last_revision as it's potentially a shared instance
        self._last_revision = last_revision
        self._fetch_spec = fetch_spec
        self.find_ghosts = find_ghosts
        self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
        self.nested_pb = self.pb
        self.from_repository.lock_read()
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.from_repository, self.from_repository._format,
               self.to_repository, self.to_repository._format)
        try:
            self.to_repository.lock_write()
            try:
                self.to_repository.start_write_group()
                try:
                    self.__fetch()
                except:
                    self.to_repository.abort_write_group()
                    raise
                else:
                    self.to_repository.commit_write_group()
            finally:
                if self.nested_pb is not None:
                    self.nested_pb.finished()
                self.to_repository.unlock()
        finally:
            self.from_repository.unlock()
    def __fetch(self):
        """Primary worker function.

        This initialises all the needed variables, and then fetches the
        requested revisions, finally clearing the progress bar.
        """
        # Roughly this is what we're aiming for fetch to become:
        #
        # missing = self.sink.insert_stream(self.source.get_stream(search))
        # missing = self.sink.insert_stream(self.source.get_items(missing))
        #
        # (A standalone sketch of this two-pass flow follows this class.)
        self.to_weaves = self.to_repository.weave_store
        self.from_weaves = self.from_repository.weave_store
        self.count_total = 0
        self.file_ids_names = {}
        pp = ProgressPhase('Transferring', 4, self.pb)
        pb = ui.ui_factory.nested_progress_bar()
        pb.show_pct = pb.show_count = False
        try:
            pb.update("Finding revisions", 0, 2)
            search = self._revids_to_fetch()
            if search is None:
                return
            pb.update("Fetching revisions", 1, 2)
            self._fetch_everything_for_search(search)
        finally:
            pb.finished()
    def _fetch_everything_for_revisions(self, revs, pp):
        """Fetch all data for the given set of revisions."""
        # The first phase is "file".  We pass the progress bar for it directly
        # into item_keys_introduced_by, which has more information about how
        # that phase is progressing than we do.  Progress updates for the
        # other phases are taken care of in this function.
        # XXX: there should be a clear owner of the progress reporting.  Perhaps
        # item_keys_introduced_by should have a richer API than it does at the
        # moment, so that it can feed the progress information back to this
        # function?
        phase = 'file'
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            data_to_fetch = self.from_repository.item_keys_introduced_by(revs, pb)
            for knit_kind, file_id, revisions in data_to_fetch:
                if knit_kind != phase:
                    phase = knit_kind
                    # Make a new progress bar for this phase
                    pb.finished()
                    pp.next_phase()
                    pb = bzrlib.ui.ui_factory.nested_progress_bar()
                if knit_kind == "file":
                    self._fetch_weave_text(file_id, revisions)
                elif knit_kind == "inventory":
                    # Once we've processed all the files, then we generate the root
                    # texts (if necessary), then we process the inventory.  It's a
                    # bit distasteful to have knit_kind == "inventory" mean this,
                    # perhaps it should happen on the first non-"file" knit, in case
                    # it's not always inventory?
                    self._generate_root_texts(revs)
                    self._fetch_inventory_weave(revs, pb)
                elif knit_kind == "signatures":
                    # Nothing to do here; this will be taken care of when
                    # _fetch_revision_texts happens.
                    pass
                elif knit_kind == "revisions":
                    self._fetch_revision_texts(revs, pb)
                else:
                    raise AssertionError("Unknown knit kind %r" % knit_kind)
        finally:
            pb.finished()
        self.count_copied += len(revs)

    def _fetch_everything_for_search(self, search):
        """Fetch all data for the given search of revisions."""
        if (self.from_repository._format.rich_root_data and
            not self.to_repository._format.rich_root_data):
            raise errors.IncompatibleRepositories(
                self.from_repository, self.to_repository,
                "different rich-root support")
        pb = ui.ui_factory.nested_progress_bar()
        try:
            pb.update("Get stream source")
            source = self.from_repository._get_source(
                self.to_repository._format)
            stream = source.get_stream(search)
            from_format = self.from_repository._format
            pb.update("Inserting stream")
            resume_tokens, missing_keys = self.sink.insert_stream(
                stream, from_format, [])
            if self.to_repository._fallback_repositories:
                missing_keys.update(
                    self._parent_inventories(search.get_keys()))
            if missing_keys:
                pb.update("Missing keys")
                stream = source.get_stream_for_missing_keys(missing_keys)
                pb.update("Inserting missing keys")
                resume_tokens, missing_keys = self.sink.insert_stream(
                    stream, from_format, resume_tokens)
            if missing_keys:
                raise AssertionError(
                    "second push failed to complete a fetch %r." % (
                        missing_keys,))
            if resume_tokens:
                raise AssertionError(
                    "second push failed to commit the fetch %r." % (
                        resume_tokens,))
            pb.update("Finishing stream")
            self.sink.finished()
        finally:
            pb.finished()
    def _revids_to_fetch(self):
        """Determines the exact revisions needed from self.from_repository to
        install self._last_revision in self.to_repository.

        If no revisions need to be fetched, then this just returns None.
        """
        if self._fetch_spec is not None:
            return self._fetch_spec
        mutter('fetch up to rev {%s}', self._last_revision)
        if self._last_revision is NULL_REVISION:
            # explicit limit of no revisions needed
            return None
        if (self._last_revision is not None and
            self.to_repository.has_revision(self._last_revision)):
            return None
        try:
            # XXX: this gets the full graph on both sides, and will make sure
            # that ghosts are filled whether or not you care about them.
            return self.to_repository.missing_revision_ids(self.from_repository,
                self._last_revision)
        except errors.NoSuchRevision:
            raise InstallFailed([self._last_revision])
    def _fetch_weave_text(self, file_id, required_versions):
        to_weave = self.to_weaves.get_weave_or_empty(file_id,
            self.to_repository.get_transaction())
        from_weave = self.from_weaves.get_weave(file_id,
            self.from_repository.get_transaction())
        # we fetch all the texts, because texts do
        # not reference anything, and it's cheap enough
        to_weave.join(from_weave, version_ids=required_versions)
        # we don't need *all* of this data anymore, but we don't know
        # what we do need.  This cache clearing will result in a new read
        # of the knit data when we do the checkout, but probably we
        # want to emit the needed data on the fly rather than at the
        # end anyhow.
        # the from weave should know not to cache data being joined,
        # but it's ok to ask it to clear.
        from_weave.clear_cache()
        to_weave.clear_cache()
    def _fetch_inventory_weave(self, revs, pb):
        pb.update("fetch inventory", 0, 2)
        to_weave = self.to_repository.get_inventory_weave()
        child_pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            # just merge, this is optimisable and it means we don't
            # copy unreferenced data such as not-needed inventories.
            pb.update("fetch inventory", 1, 3)
            from_weave = self.from_repository.get_inventory_weave()
            pb.update("fetch inventory", 2, 3)
            # we fetch only the referenced inventories because we do not
            # know for unselected inventories whether all their required
            # texts are present in the other repository - it could be
            # corrupt.
            to_weave.join(from_weave, pb=child_pb, msg='merge inventory',
                version_ids=revs)
            from_weave.clear_cache()
        finally:
            child_pb.finished()
    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        pass

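# Illustrative sketch only, referenced from RepoFetcher.__fetch above: the
# two-pass stream/sink flow that _fetch_everything_for_search implements.
# 'source' and 'sink' are hypothetical duck-typed stand-ins for the objects
# returned by _get_source() and _get_sink(); this is a sketch of the shape of
# the protocol, not a definitive implementation.
def _sketch_stream_fetch(source, sink, search, from_format):
    # First pass: stream everything the search selects into the sink.
    stream = source.get_stream(search)
    resume_tokens, missing_keys = sink.insert_stream(stream, from_format, [])
    if missing_keys:
        # Second pass: send whatever the sink reported it still needs.
        stream = source.get_stream_for_missing_keys(missing_keys)
        resume_tokens, missing_keys = sink.insert_stream(
            stream, from_format, resume_tokens)
    if missing_keys or resume_tokens:
        raise AssertionError("fetch did not complete: %r %r"
                             % (missing_keys, resume_tokens))
    sink.finished()
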
class GenericRepoFetcher(RepoFetcher):
    """This is a generic repo to repo fetcher.

    This makes minimal assumptions about repo layout and contents.
    It triggers a reconciliation after fetching to ensure integrity.
    """
    def _fetch_revision_texts(self, revs, pb):
        """Fetch revision object texts"""
        to_txn = self.to_transaction = self.to_repository.get_transaction()
        count = 0
        total = len(revs)
        to_store = self.to_repository._revision_store
        for rev in revs:
            pb.update('copying revisions', count, total)
            try:
                sig_text = self.from_repository.get_signature_text(rev)
                to_store.add_revision_signature_text(rev, sig_text, to_txn)
            except errors.NoSuchRevision:
                # not signed.
                pass
            to_store.add_revision(self.from_repository.get_revision(rev),
                to_txn)
            count += 1
        # fixup inventory if needed:
        # this is expensive because we have no inverse index to current ghosts.
        # but on local disk it's a few seconds and sftp push is already insane.
        # FIXME: repository should inform if this is needed.
        self.to_repository.reconcile()

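# Illustrative sketch only: the per-revision copy pattern used by
# GenericRepoFetcher above.  Signatures are optional, so a missing one is
# tolerated; the revision object itself is always copied.  'source' and
# 'target' are hypothetical duck-typed repositories (add_signature_text and
# add_revision here are assumed methods for the sketch, not exact bzrlib
# signatures).
def _sketch_copy_revision_with_signature(source, target, rev_id):
    try:
        target.add_signature_text(rev_id, source.get_signature_text(rev_id))
    except errors.NoSuchRevision:
        # Unsigned revision: nothing to copy.
        pass
    target.add_revision(rev_id, source.get_revision(rev_id))
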
class KnitRepoFetcher(RepoFetcher):
    """This is a knit format repository specific fetcher.

    This differs from the GenericRepoFetcher by not doing a
    reconciliation after copying, and using knit joining to
    copy revision texts.
    """
    def _fetch_revision_texts(self, revs, pb):
        # may need to be a InterRevisionStore call here.
        from_transaction = self.from_repository.get_transaction()
        to_transaction = self.to_repository.get_transaction()
        to_sf = self.to_repository._revision_store.get_signature_file(
            to_transaction)
        from_sf = self.from_repository._revision_store.get_signature_file(
            from_transaction)
        to_sf.join(from_sf, version_ids=revs, ignore_missing=True)
        to_rf = self.to_repository._revision_store.get_revision_file(
            to_transaction)
        from_rf = self.from_repository._revision_store.get_revision_file(
            from_transaction)
        to_rf.join(from_rf, version_ids=revs)
        return self.to_repository.search_missing_revision_ids(
            self.from_repository, self._last_revision,
            find_ghosts=self.find_ghosts)

    def _parent_inventories(self, revision_ids):
        # Find all the parent revisions referenced by the stream, but
        # not present in the stream, and make sure we send their
        # inventories (see the sketch below).
        parent_maps = self.to_repository.get_parent_map(revision_ids)
        parents = set()
        map(parents.update, parent_maps.itervalues())
        parents.discard(NULL_REVISION)
        parents.difference_update(revision_ids)
        missing_keys = set(('inventories', rev_id) for rev_id in parents)
        return missing_keys

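# Illustrative sketch only: the same set arithmetic as _parent_inventories
# above, on plain dicts, without a repository.  Given the parent map of the
# revisions being streamed, the result is the inventory keys of parents that
# are referenced by the stream but not part of it.
def _sketch_parent_inventory_keys(parent_map, streamed_revision_ids):
    parents = set()
    for revision_parents in parent_map.itervalues():
        parents.update(revision_parents)
    parents.discard(NULL_REVISION)
    parents.difference_update(streamed_revision_ids)
    return set(('inventories', rev_id) for rev_id in parents)
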
class Inter1and2Helper(object):
    """Helper for operations that convert data from model 1 and 2

    This is for use by fetchers and converters.
    """
    def __init__(self, source, target):
        """Constructor.

        :param source: The repository data comes from
        :param target: The repository data goes to
        """
        self.source = source
        self.target = target
    def iter_rev_trees(self, revs):
        """Iterate through RevisionTrees efficiently.

        Trees are retrieved in batches of 100, and then yielded in the order
        they were requested.

        :param revs: A list of revision ids
        """
        revs = list(revs)
        while revs:
            for tree in self.source.revision_trees(revs[:100]):
                yield tree
            revs = revs[100:]
    def _find_root_ids(self, revs, parent_map, graph):
        revision_root = {}
        for tree in self.iter_rev_trees(revs):
            revision_id = tree.inventory.root.revision
            root_id = tree.get_root_id()
            revision_root[revision_id] = root_id
        # Find out which parents we don't already know root ids for
        parents = set()
        for revision_parents in parent_map.itervalues():
            parents.update(revision_parents)
        parents.difference_update(revision_root.keys() + [NULL_REVISION])
        # Limit to revisions present in the versionedfile
        parents = graph.get_parent_map(parents).keys()
        for tree in self.iter_rev_trees(parents):
            root_id = tree.get_root_id()
            revision_root[tree.get_revision_id()] = root_id
        return revision_root
    def generate_root_texts(self, revs):
        """Generate VersionedFiles for all root ids.

        :param revs: the revisions to include
        """
        inventory_weave = self.source.get_inventory_weave()
        parent_texts = {}
        versionedfile = {}
        to_store = self.target.weave_store
        for tree in self.iter_rev_trees(revs):
            revision_id = tree.inventory.root.revision
            root_id = tree.get_root_id()
            parents = inventory_weave.get_parents(revision_id)
            if root_id not in versionedfile:
                versionedfile[root_id] = to_store.get_weave_or_empty(root_id,
                    self.target.get_transaction())
            _, _, parent_texts[root_id] = versionedfile[root_id].add_lines(
                revision_id, parents, [], parent_texts)
    def regenerate_inventory(self, revs):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it), and
        stored in the target (reserializing it in a different format).

        :param revs: The revisions to include
        """
        inventory_weave = self.source.get_inventory_weave()
        for tree in self.iter_rev_trees(revs):
            parents = inventory_weave.get_parents(tree.get_revision_id())
            self.target.add_inventory(tree.get_revision_id(), tree.inventory,
                parents)

class Model1toKnit2Fetcher(GenericRepoFetcher):
    """Fetch from a Model1 repository into a Knit2 repository
    """
    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        GenericRepoFetcher.__init__(self, to_repository, from_repository,
                                    last_revision, pb)

    def _generate_root_texts(self, revs):
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs, pb):
        self.helper.regenerate_inventory(revs)

class Knit1to2Fetcher(KnitRepoFetcher):
    """Fetch from a Knit1 repository into a Knit2 repository"""

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        KnitRepoFetcher.__init__(self, to_repository, from_repository,
                                 last_revision, pb)

    def _generate_root_texts(self, revs):
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs, pb):
        self.helper.regenerate_inventory(revs)

class RemoteToOtherFetcher(GenericRepoFetcher):

    def _fetch_everything_for_revisions(self, revs, pp):
        data_stream = self.from_repository.get_data_stream(revs)
        self.to_repository.insert_data_stream(data_stream)

def generate_root_texts(self, revs):
    graph = self.source.get_graph()
    parent_map = graph.get_parent_map(revs)
    rev_order = tsort.topo_sort(parent_map)
    rev_id_to_root_id = self._find_root_ids(revs, parent_map, graph)
    root_id_order = [(rev_id_to_root_id[rev_id], rev_id) for rev_id in
        rev_order]
    # Guaranteed stable, this groups all the file id operations together
    # retaining topological order within the revisions of a file id.
    # File id splits and joins would invalidate this, but they don't exist
    # yet, and are unlikely to in non-rich-root environments anyway.
    # (See the illustrative sketch after this function.)
    root_id_order.sort(key=operator.itemgetter(0))
    # Create a record stream containing the roots to create.
    if len(revs) > 100:
        # XXX: not covered by tests, should have a flag to always run
        # this. -- mbp 20100129
        graph = _get_rich_root_heads_graph(self.source, revs)
    new_roots_stream = _new_root_data_stream(
        root_id_order, rev_id_to_root_id, parent_map, self.source, graph)
    return [('texts', new_roots_stream)]

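# Illustrative sketch only: the stable-sort property relied on above.
# Sorting (root_id, rev_id) pairs by root_id with a stable sort groups all
# work for one file id together while preserving the topological order of
# revisions within each group.
def _demo_stable_root_id_grouping():
    # rev ids listed in topological order, two interleaved root ids.
    root_id_order = [('root-A', 'rev-1'), ('root-B', 'rev-2'),
                     ('root-A', 'rev-3'), ('root-B', 'rev-4')]
    root_id_order.sort(key=operator.itemgetter(0))
    # All 'root-A' entries come first, still in rev-1, rev-3 order;
    # then 'root-B' in rev-2, rev-4 order.
    return root_id_order
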
def _get_rich_root_heads_graph(source_repo, revision_ids):
    """Get a Graph object suitable for asking heads() for new rich roots."""
    st = static_tuple.StaticTuple
    revision_keys = [st(r_id).intern() for r_id in revision_ids]
    known_graph = source_repo.revisions.get_known_graph_ancestry(
        revision_keys)
    return _mod_graph.GraphThunkIdsToKeys(known_graph)

def _new_root_data_stream(
    root_keys_to_create, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Generate a texts substream of synthesised root entries.

    Used in fetches that do rich-root upgrades.

    :param root_keys_to_create: iterable of (root_id, rev_id) pairs describing
        the root entries to create.
    :param rev_id_to_root_id_map: dict of known rev_id -> root_id mappings for
        calculating the parents.  If a parent rev_id is not found here then it
        will be recalculated.
    :param parent_map: a parent map for all the revisions in
        root_keys_to_create.
    :param graph: a graph to use instead of repo.get_graph().
    """
    for root_key in root_keys_to_create:
        root_id, rev_id = root_key
        parent_keys = _parent_keys_for_root_version(
            root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph)
        yield versionedfile.FulltextContentFactory(
            root_key, parent_keys, None, '')

def _parent_keys_for_root_version(
    root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Get the parent keys for a given root id.

    A helper function for _new_root_data_stream.
    """
    # Include direct parents of the revision, but only if they used the same
    # root_id and are heads.
    rev_parents = parent_map[rev_id]
    parent_ids = []
    for parent_id in rev_parents:
        if parent_id == NULL_REVISION:
            continue
        if parent_id not in rev_id_to_root_id_map:
            # We probably didn't read this revision, go spend the extra effort
            # to actually check.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # Ghost, fill out rev_id_to_root_id in case we encounter this
                # again.
                # But set parent_root_id to None since we don't really know.
                parent_root_id = None
            else:
                parent_root_id = tree.get_root_id()
            rev_id_to_root_id_map[parent_id] = None
            # XXX: why not:
            #   rev_id_to_root_id_map[parent_id] = parent_root_id
            # memory consumption maybe?
        else:
            parent_root_id = rev_id_to_root_id_map[parent_id]
        if root_id == parent_root_id:
            # With stacking we _might_ want to refer to a non-local revision,
            # but this code path only applies when we have the full content
            # available, so ghosts really are ghosts, not just the edge of
            # local data.
            parent_ids.append(parent_id)
        else:
            # root_id may be in the parent anyway.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # ghost, can't refer to it.
                pass
            else:
                try:
                    parent_ids.append(tree.inventory[root_id].revision)
                except errors.NoSuchId:
                    # not in the tree
                    pass
    # Drop non-head parents
    if graph is None:
        graph = repo.get_graph()
    heads = graph.heads(parent_ids)
    selected_ids = []
    for parent_id in parent_ids:
        if parent_id in heads and parent_id not in selected_ids:
            selected_ids.append(parent_id)
    parent_keys = [(root_id, parent_id) for parent_id in selected_ids]
    return parent_keys
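

# Illustrative usage sketch only, not part of bzrlib: exercising
# _new_root_data_stream with plain dicts and a minimal stand-in graph, to
# show the synthesised root records it yields.  _StubGraph and
# _demo_new_root_data_stream are hypothetical; a real fetch passes a bzrlib
# Graph (repo.get_graph() or the thunk from _get_rich_root_heads_graph).
class _StubGraph(object):

    def heads(self, revision_ids):
        # Pretend every candidate parent is a head.
        return set(revision_ids)


def _demo_new_root_data_stream():
    parent_map = {'rev-1': (), 'rev-2': ('rev-1',)}
    rev_id_to_root_id = {'rev-1': 'root-id', 'rev-2': 'root-id'}
    root_keys = [('root-id', 'rev-1'), ('root-id', 'rev-2')]
    stream = _new_root_data_stream(
        root_keys, rev_id_to_root_id, parent_map, None, _StubGraph())
    for factory in stream:
        # rev-1 has no parents; rev-2 gets ('root-id', 'rev-1') as parent key.
        print factory.key, factory.parents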