# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
time

from bzrlib import (
    bzrdir,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    osutils,
    revision as _mod_revision,
    ui,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib.lock import _RelockDebugMixin
from bzrlib import registry
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
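
    # An illustrative sketch of the builder lifecycle (names such as `repo`,
    # `branch`, `parents` and `config` are assumed, not defined here):
    # commit() must follow finish_inventory(), inside an open write group.
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   ... record entries via record_entry_contents/record_iter_changes ...
    #   builder.finish_inventory()
    #   new_rev_id = builder.commit('commit message')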

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            # We ignore the 'inventory' returned by add_inventory_by_delta
            # because self.new_inventory is used to hint to the rest of the
            # system what code path was taken
            self.inv_sha1, _ = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents)
        return self._new_revision_id

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete inventory delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
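
    # Sketch of the delete-recording protocol described above (illustrative
    # path and id values): will_record_deletes() must be called first,
    # otherwise record_delete() and get_basis_delta() raise AssertionError.
    #
    #   builder.will_record_deletes()
    #   builder.record_delete('doc/old.txt', 'old-file-id')
    #   delta = builder.get_basis_delta()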

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If its not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r).", (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                # We can't trust a check of the file length because of content
                # filtering.
                if (# if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
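
    # Illustrative sketch of consuming the 3-tuple documented above (the
    # surrounding names are assumed to exist): a merge-only change yields
    # version_recorded == False, and fs_hash is only set for file texts.
    #
    #   delta, version_recorded, fs_hash = builder.record_entry_contents(
    #       ie, parent_invs, path, tree, content_summary)
    #   if fs_hash is not None:
    #       sha1, stat_value = fs_hash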

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory delta's
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # Working data:
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        else:
            merged_ids = {}
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and if not do a delta
            # against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
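
    # Key-shape sketch for the call above (illustrative values): texts are
    # keyed by (file_id, revision_id) 2-tuples, so parents ['rev-1', 'rev-2']
    # of file 'f-id' become:
    #
    #   parent_keys = (('f-id', 'rev-1'), ('f-id', 'rev-2'))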


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


######################################################################
# Repositories


class Repository(_RelockDebugMixin):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories, texts and chk_bytes attributes) and a
    Transport, which respectively provide byte storage and a means to access
    the (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
    byte string made up of a hash identifier and a hash value.

    We use this interface because it allows low friction with the underlying
    code that implements disk indices, network encoding and other parts of
    bzrlib.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures. The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
        any data the repository chooses to store or have indexed by its hash.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """
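
    # Key addressing sketch for the stores described above (illustrative
    # ids): revision, inventory and signature keys are 1-tuples, text keys
    # are 2-tuples.
    #
    #   repo.revisions.get_parent_map([('rev-id-1',)])
    #   repo.texts.get_record_stream([('file-id', 'rev-id-1')],
    #       'unordered', True)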

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )

    def abort_write_group(self, suppress_errors=False):
        """Abort the contents accrued within the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate. Defaults to False.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            if suppress_errors:
                mutter(
                '(suppressed) mismatched lock context and write group. %r, %r',
                self._write_group, self.get_transaction())
                return
            raise errors.BzrError(
                'mismatched lock context and write group. %r, %r' %
                (self._write_group, self.get_transaction()))
        try:
            self._abort_write_group()
        except Exception, exc:
            self._write_group = None
            if not suppress_errors:
                raise
            mutter('abort_write_group failed')
            log_exception_quietly()
            note('bzr: ERROR (ignored): %s', exc)
        self._write_group = None
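
    # Write group protocol sketch (illustrative): data insertion happens
    # between start_write_group and commit_write_group; abort_write_group
    # discards the accrued state when something goes wrong.
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   try:
    #       ... insert data ...
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()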

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        if self.is_locked():
            # This repository will call fallback.unlock() when we transition to
            # the unlocked state, so we make sure to increment the lock count
            repository.lock_read()
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
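
    # Delta-shape sketch for the `delta` parameter above: each item is
    # (old_path, new_path, file_id, new_entry), e.g. an add is
    # (None, 'a', 'a-id', <entry>) and a delete is ('a', None, 'a-id', None).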

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        result = self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]
        self.inventories._access.flush()
        return result

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. if None, it will be looked
                    up in the inventory store
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present. There is no direct replacement
        method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        represent.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
               or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object, current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self.deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
        # Is it safe to return inventory entries directly from the entry cache,
        # rather than copying them?
        self._safe_to_return_from_cache = False

    def __repr__(self):
        if self._fallback_repositories:
            return '%s(%r, fallback_repositories=%r)' % (
                self.__class__.__name__,
                self.base,
                self._fallback_repositories)
        else:
            return '%s(%r)' % (self.__class__.__name__,
                               self.base)

    def _has_same_fallbacks(self, other_repo):
        """Returns true if the repositories have the same fallbacks."""
        my_fb = self._fallback_repositories
        other_fb = other_repo._fallback_repositories
        if len(my_fb) != len(other_fb):
            return False
        for f, g in zip(my_fb, other_fb):
            if not f.has_same_location(g):
                return False
        return True

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        locked = self.is_locked()
        result = self.control_files.lock_write(token=token)
        if not locked:
            self._warn_if_deprecated()
            self._note_lock('w')
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return result

    def lock_read(self):
        locked = self.is_locked()
        self.control_files.lock_read()
        if not locked:
            self._warn_if_deprecated()
            self._note_lock('r')
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimate disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
            # result['size'] = t
        return result
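
    # Illustrative call (assumes 'rev-id-1' exists in the repository):
    #
    #   stats = repo.gather_stats(revid='rev-id-1', committers=True)
    #   stats['revisions'], stats['firstrev'], stats['latestrev']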

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    @needs_read_lock
    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.

        :return: it may return an opaque hint that can be passed to 'pack'.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        result = self._commit_write_group()
        self._write_group = None
        return result

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.satisfy_refs_for_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy. This
            allows copying multiple heads at once. Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        # TODO: lift out to somewhere common with RemoteRepository
        # <https://bugs.edge.launchpad.net/bzr/+bug/401646>
        if (self.has_same_location(source)
            and fetch_spec is None
            and self._has_same_fallbacks(source)):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
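
    # Illustrative sketch (editorial): a minimal fetch between two opened
    # repositories, assuming `source` and `target` are compatible Repository
    # instances and 'rev-id' is a hypothetical revision present in `source`:
    #
    #   target.lock_write()
    #   try:
    #       target.fetch(source, revision_id='rev-id')
    #   finally:
    #       target.unlock()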

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        if self._fallback_repositories:
            raise errors.BzrError("Cannot commit from a lightweight checkout "
                "to a stacked branch. See "
                "https://bugs.launchpad.net/bzr/+bug/375013 for details.")
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result
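
    # Illustrative sketch (editorial): get_commit_builder() enters a write
    # group, so a caller must end it via the commit machinery or abort it.
    # A hypothetical caller, assuming `repo` is write-locked and `branch`
    # and `cfg` are available:
    #
    #   builder = repo.get_commit_builder(branch, [], cfg,
    #       committer='Jane <jane@example.com>')
    #   ...record tree contents against builder...
    #   rev_id = builder.commit('message')
    #   # the surrounding commit machinery then ends the write group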

    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        if self.control_files._lock_count == 0:
            self._inventory_entry_cache.clear()
            for repo in self._fallback_repositories:
                repo.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    @needs_read_lock
    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result
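
    # Illustrative sketch (editorial, hypothetical ids): has_revisions()
    # returns the subset of the queried ids that are present:
    #
    #   present = repo.has_revisions(['rev-a', 'rev-b', 'rev-missing'])
    #   # -> set(['rev-a', 'rev-b']) if only 'rev-missing' is absent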

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]

    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r],
            specific_fileids=specific_fileids))[0]
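
    # Illustrative sketch (editorial, hypothetical id): a delta for one
    # revision relative to its left-hand parent, assuming `repo` holds a
    # read lock and 'rev-id' is present:
    #
    #   delta = repo.get_revision_delta('rev-id')
    #   for path, file_id, kind in delta.added:
    #       print 'added', path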

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.

        :return: set of revisions that are parents of revision_ids which are
            not part of revision_ids themselves
        """
        parent_map = self.get_parent_map(revision_ids)
        parent_ids = set()
        map(parent_ids.update, parent_map.itervalues())
        parent_ids.difference_update(revision_ids)
        parent_ids.discard(_mod_revision.NULL_REVISION)
        return parent_ids

    def _find_parent_keys_of_revisions(self, revision_keys):
        """Similar to _find_parent_ids_of_revisions, but used with keys.

        :param revision_keys: An iterable of revision_keys.
        :return: The parents of all revision_keys that are not already in
            revision_keys
        """
        parent_map = self.revisions.get_parent_map(revision_keys)
        parent_keys = set()
        map(parent_keys.update, parent_map.itervalues())
        parent_keys.difference_update(revision_keys)
        parent_keys.discard(_mod_revision.NULL_REVISION)
        return parent_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files. No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller as part of desired_files. It should
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
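
    # Illustrative sketch (editorial, hypothetical keys): the identifier in
    # each desired_files triple is opaque and simply round-tripped:
    #
    #   wanted = [('file-id-1', 'rev-a', 'tag1'),
    #             ('file-id-2', 'rev-b', 'tag2')]
    #   for identifier, byte_chunks in repo.iter_files_bytes(wanted):
    #       text = ''.join(byte_chunks)  # 'tag1'/'tag2' identify the file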

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for revision_id in to_query:
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x: candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents

        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions). knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'. file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("Fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'. If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)
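
    # Illustrative sketch (editorial, hypothetical ids): the default preserves
    # the caller's ordering, while an explicit ordering yields inventories in
    # stream order:
    #
    #   wanted = ['rev-a', 'rev-b']
    #   for inv in repo.iter_inventories(wanted):
    #       ...  # 'rev-a' first, buffering if the stream disagrees
    #   for inv in repo.iter_inventories(wanted, ordering='topological'):
    #       ...  # stream (topological) order instead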

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid). If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost. Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
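
    # Illustrative sketch (editorial, hypothetical values): with a known pair
    # (10, 'rev-ten'), asking for revno 8 walks two steps of left-hand
    # history:
    #
    #   repo.get_rev_id_for_revno(8, (10, 'rev-ten'))
    #   # -> (True, 'rev-eight'), or (False, (closest_revno, closest_revid))
    #   #    if the available history ran out first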

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with. All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            try:
                parents = graph.get_parent_map([next_id])[next_id]
            except KeyError:
                raise errors.RevisionNotPresent(next_id, self)
            yield next_id
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]

    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
            a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
            a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision. This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)

        parent_map = graph.get_parent_map(keys)
        keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)

    def pack(self, hint=None):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long-running call and it's reasonable
        to implicitly lock for the user.

        :param hint: If not supplied, the whole repository is packed.
            If supplied, the repository may use the hint parameter as a
            hint for the parts of the repository to pack. A hint can be
            obtained from the result of commit_write_group(). Out of
            date hints are simply ignored, because concurrent operations
            can obsolete them rapidly.
        """

    def get_transaction(self):
        return self.control_files.get_transaction()

    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple([parent_revid
                    for (parent_revid,) in parent_keys])
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result
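
    # Illustrative sketch (editorial, hypothetical ids): ghosts are simply
    # absent from the result, and an initial revision maps to the null
    # revision:
    #
    #   repo.get_parent_map(['rev-b', 'rev-root', 'rev-ghost'])
    #   # -> {'rev-b': ('rev-a',), 'rev-root': ('null:',)}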

    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph.StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self, text_key_references=None,
        ancestors=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            calculated.
        :param ancestors: Optional result from
            self.get_graph().get_parent_map(self.all_revision_ids()) if already
            available.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references, ancestors=ancestors)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')

    @needs_read_lock
    def check(self, revision_ids=None, callback_refs=None, check_repo=True):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
            will be checked. Typically the last revision_id of a branch.
        :param callback_refs: A dict of check-refs to resolve and callback
            the check/_check method on the items listed as wanting the ref.
            see bzrlib.check.
        :param check_repo: If False do not check the repository contents, just
            calculate the data callback_refs requires and call them back.
        """
        return self._check(revision_ids, callback_refs=callback_refs,
            check_repo=check_repo)

    def _check(self, revision_ids, callback_refs, check_repo):
        result = check.Check(self, check_repo=check_repo)
        result.check(callback_refs)
        return result

    def _warn_if_deprecated(self, branch=None):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        try:
            if branch is None:
                conf = config.GlobalConfig()
            else:
                conf = branch.get_config()
            if conf.suppress_warning('format_deprecation'):
                return
            warning("Format %s for %s is deprecated -"
                    " please use 'bzr upgrade' to get better performance"
                    % (self._format, self.bzrdir.transport.base))
        finally:
            _deprecation_warning_done = True

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples. The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
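
# Illustrative sketch (editorial): copying a handful of revisions into a
# write-locked repository with install_revisions(), assuming `revs` is a
# list of (revision, revision_tree, signature_or_None) tuples:
#
#   target.lock_write()
#   try:
#       install_revisions(target, revs, num_revisions=len(revs))
#   finally:
#       target.unlock()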


def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
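
# Illustrative sketch (editorial, hypothetical key): both registries are keyed
# by byte strings, so a format instance (or a factory for one) can be
# recovered from a disk or network format string:
#
#   fmt = format_registry.get('Bazaar-NG Knit Repository Format 1')
#   repo = fmt.open(a_bzrdir)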


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should commit add an inventory, or an inventory delta to the repository.
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas ?
    # (E.g. knits, where the knit deltas can be transplanted intact.
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # compressor.
    pack_compresses = False
    # Does the repository inventory storage understand references to trees?
    supports_tree_reference = None

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository. Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get_bytes("format")
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        if self.rich_root_data and not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format,
                from_format=self)
        if (self.supports_tree_reference and
            not getattr(target_format, 'supports_tree_reference', False)):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format,
                from_format=self)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()
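
# For metadir formats the network name is thus byte-for-byte the content
# of .bzr/repository/format. Illustrative sketch (assuming ``fmt`` is any
# MetaDirRepositoryFormat instance):
#
#   fmt.network_name() == fmt.get_format_string()   # always True here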


# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directories
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
)
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
)

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry. They're
# all in bzrlib.repofmt.weaverepo now. When an instance of one of these is
# needed, it's constructed directly by the BzrDir. Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )

format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
        'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )
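
# Example (illustrative sketch): format_registry maps each on-disk format
# string to a lazily imported format object, so the 2a format registered
# above can be obtained without importing bzrlib.repofmt up front:
#
#   fmt = format_registry.get(
#       'Bazaar repository format 2a (needs bzr 1.16 or later)\n')
#   fmt.get_format_description()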


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _walk_to_common_revisions_batch_size = 50
    _optimisers = []
    """The available optimised InterRepository types."""

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.
        :return: None.
        """
        f = _mod_fetch.RepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               fetch_spec=fetch_spec,
                               pb=pb, find_ghosts=find_ghosts)
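
    # Example (illustrative sketch, assuming ``repo_a`` and ``repo_b`` are
    # already-locked Repository objects; 'some-rev-id' is a placeholder):
    #
    #   inter = InterRepository.get(repo_a, repo_b)
    #   inter.copy_content()                    # mirror everything
    #   inter.fetch(revision_id='some-rev-id')  # one revision's ancestry
    #
    # Repository convenience methods such as fetch() forward here.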

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
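
    # Example (illustrative sketch, continuing with an ``inter`` object):
    #
    #   result = inter.search_missing_revision_ids(find_ghosts=True)
    #   missing = result.get_keys()
    #
    # Passing find_ghosts=False together with a revision_id takes the
    # cheaper _walk_to_common_revisions() path instead of the generic set
    # difference over all_revision_ids().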

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
                RepositoryFormat5,
                RepositoryFormat6,
                RepositoryFormat7,
                )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplementedError):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('Copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # We want all revisions needed to satisfy revision_id in source,
        # but we don't want to stat every file here and there.
        # So: find the subset of revisions the target needs in order to
        # satisfy revision_id, checked, minus those that we already have
        # locally.
        # This is slow on high latency connections to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # if we used get_ancestry to determine source_ids then we are
            # assured all revisions referenced are present as they are
            # installed in topological order, and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # if we used get_ancestry to determine source_ids then we are
            # assured all revisions referenced are present as they are
            # installed in topological order, and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target. IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
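
    # Worked example of the selection above: with parents P1 and P2, one
    # candidate delta is built per parent; if len(delta_P1) == 3 and
    # len(delta_P2) == 7, deltas.sort() orders on that length and
    # deltas[0][1:] returns (P1, delta_P1), the cheapest basis to send.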

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured. (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                kind = entry.kind
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        if self._converting_to_rich_root and len(revision_ids) > 100:
            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
                                                            revision_ids)
        else:
            a_graph = None
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache,
                                             a_graph=a_graph)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
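
# InterRepository.get(source, target) consults these registered optimisers
# and picks one whose is_compatible(source, target) is true, falling back
# to the generic InterRepository otherwise. Illustrative sketch:
#
#   inter = InterRepository.get(weave_repo_a, weave_repo_b)
#   # -> an InterWeaveRepo instance when both ends use weave formats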


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content')
        self.repo_dir.transport.delete_tree('repository.backup')
        ui.ui_factory.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)
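
# Example (illustrative sketch, assuming ``repo`` is a metadir repository
# and ``pb`` a progress bar obtained from ui.ui_factory):
#
#   converter = CopyConverter(target_format)
#   converter.convert(repo, pb)
#
# The old content lives on as 'repository.backup' until the copy finishes,
# hence "slow but quite reliable".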


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
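
# Example: both named and numeric character references are handled, so
# _unescape_xml('a &amp; b &#65;') returns 'a & b A' (the numeric
# reference is decoded via unichr() and re-encoded as UTF-8).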


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None, ancestors=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references, ancestors=ancestors)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct. dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        local_progress = None
        if progress_bar is None:
            local_progress = ui.ui_factory.nested_progress_bar()
            progress_bar = local_progress
        try:
            return self._check_file_version_parents(texts, progress_bar)
        finally:
            if local_progress:
                local_progress.finished()

    def _check_file_version_parents(self, texts, progress_bar):
        """See check_file_version_parents."""
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
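
# Example: given {'r1': (), 'r2': ('r1', 'ghost')}, _strip_NULL_ghosts
# returns {'r1': (), 'r2': ('r1',)} - any parent that is not itself a key
# of the graph is dropped, as is a null: entry.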


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of additional keys
            required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format,
                    is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fall back to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError(
                    'mismatched revision id: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()
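
# Example (illustrative sketch of the resume protocol, assuming ``sink``
# wraps a repository that supports suspended write groups and ``source``
# is a matching StreamSource):
#
#   tokens, missing_keys = sink.insert_stream(stream, src_format, [])
#   if tokens:
#       # Stacked target: supply the missing basis data, then resume.
#       extra = source.get_stream_for_missing_keys(missing_keys)
#       sink.insert_stream(extra, src_format, tokens)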


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # bug #261339
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories can
                # reference them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink. (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The source is using CHKs, but the target either doesn't or it has a
        # different serializer. The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fall back to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
                                       delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return