1
# Copyright (C) 2005-2011 Canonical Ltd
1
# Copyright (C) 2005, 2006 Canonical Ltd
3
3
# This program is free software; you can redistribute it and/or modify
4
4
# it under the terms of the GNU General Public License as published by
5
5
# the Free Software Foundation; either version 2 of the License, or
6
6
# (at your option) any later version.
8
8
# This program is distributed in the hope that it will be useful,
9
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
11
# GNU General Public License for more details.
13
13
# You should have received a copy of the GNU General Public License
14
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
from bzrlib.lazy_import import lazy_import
18
lazy_import(globals(), """
32
revision as _mod_revision,
33
testament as _mod_testament,
37
from bzrlib.bundle import serializer
46
from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17
from copy import deepcopy
18
from cStringIO import StringIO
19
from unittest import TestSuite
21
import bzrlib.bzrdir as bzrdir
22
from bzrlib.decorators import needs_read_lock, needs_write_lock
23
import bzrlib.errors as errors
24
from bzrlib.errors import InvalidRevisionId
25
import bzrlib.gpg as gpg
26
from bzrlib.graph import Graph
47
27
from bzrlib.inter import InterObject
48
from bzrlib.lock import _RelockDebugMixin, LogicalLockResult
49
from bzrlib.trace import (
50
log_exception_quietly, note, mutter, mutter_callsite, warning)
53
# Old formats display a warning, but only once
54
_deprecation_warning_done = False
57
class IsInWriteGroupError(errors.InternalBzrError):
59
_fmt = "May not refresh_data of repo %(repo)s while in a write group."
61
def __init__(self, repo):
62
errors.InternalBzrError.__init__(self, repo=repo)
65
class CommitBuilder(object):
66
"""Provides an interface to build up a commit.
68
This allows describing a tree to be committed without needing to
69
know the internals of the format of the repository.
72
# all clients should supply tree roots.
73
record_root_entry = True
74
# whether this commit builder supports the record_entry_contents interface
75
supports_record_entry_contents = False
77
def __init__(self, repository, parents, config, timestamp=None,
78
timezone=None, committer=None, revprops=None,
79
revision_id=None, lossy=False):
80
"""Initiate a CommitBuilder.
82
:param repository: Repository to commit to.
83
:param parents: Revision ids of the parents of the new revision.
84
:param timestamp: Optional timestamp recorded for commit.
85
:param timezone: Optional timezone for timestamp.
86
:param committer: Optional committer to set for commit.
87
:param revprops: Optional dictionary of revision properties.
88
:param revision_id: Optional revision id.
89
:param lossy: Whether to discard data that can not be natively
90
represented, when pushing to a foreign VCS
96
self._committer = self._config.username()
97
elif not isinstance(committer, unicode):
98
self._committer = committer.decode() # throw if non-ascii
100
self._committer = committer
102
self._new_revision_id = revision_id
103
self.parents = parents
104
self.repository = repository
107
if revprops is not None:
108
self._validate_revprops(revprops)
109
self._revprops.update(revprops)
111
if timestamp is None:
112
timestamp = time.time()
113
# Restrict resolution to 1ms
114
self._timestamp = round(timestamp, 3)
117
self._timezone = osutils.local_time_offset()
119
self._timezone = int(timezone)
121
self._generate_revision_if_needed()
123
def any_changes(self):
124
"""Return True if any entries were changed.
126
This includes merge-only changes. It is the core for the --unchanged
129
:return: True if any changes have occured.
131
raise NotImplementedError(self.any_changes)
133
def _validate_unicode_text(self, text, context):
134
"""Verify things like commit messages don't have bogus characters."""
136
raise ValueError('Invalid value for %s: %r' % (context, text))
138
def _validate_revprops(self, revprops):
139
for key, value in revprops.iteritems():
140
# We know that the XML serializers do not round trip '\r'
141
# correctly, so refuse to accept them
142
if not isinstance(value, basestring):
143
raise ValueError('revision property (%s) is not a valid'
144
' (unicode) string: %r' % (key, value))
145
self._validate_unicode_text(value,
146
'revision property (%s)' % (key,))
148
def commit(self, message):
149
"""Make the actual commit.
151
:return: The revision id of the recorded revision.
153
raise NotImplementedError(self.commit)
156
"""Abort the commit that is being built.
158
raise NotImplementedError(self.abort)
160
def revision_tree(self):
161
"""Return the tree that was just committed.
163
After calling commit() this can be called to get a
164
RevisionTree representing the newly committed tree. This is
165
preferred to calling Repository.revision_tree() because that may
166
require deserializing the inventory, while we already have a copy in
169
raise NotImplementedError(self.revision_tree)
171
def finish_inventory(self):
172
"""Tell the builder that the inventory is finished.
174
:return: The inventory id in the repository, which can be used with
175
repository.get_inventory.
177
raise NotImplementedError(self.finish_inventory)
179
def _gen_revision_id(self):
180
"""Return new revision-id."""
181
return generate_ids.gen_revision_id(self._committer, self._timestamp)
183
def _generate_revision_if_needed(self):
184
"""Create a revision id if None was supplied.
186
If the repository can not support user-specified revision ids
187
they should override this function and raise CannotSetRevisionId
188
if _new_revision_id is not None.
190
:raises: CannotSetRevisionId
192
if self._new_revision_id is None:
193
self._new_revision_id = self._gen_revision_id()
194
self.random_revid = True
196
self.random_revid = False
198
def will_record_deletes(self):
199
"""Tell the commit builder that deletes are being notified.
201
This enables the accumulation of an inventory delta; for the resulting
202
commit to be valid, deletes against the basis MUST be recorded via
203
builder.record_delete().
205
raise NotImplementedError(self.will_record_deletes)
207
def record_iter_changes(self, tree, basis_revision_id, iter_changes):
208
"""Record a new tree via iter_changes.
210
:param tree: The tree to obtain text contents from for changed objects.
211
:param basis_revision_id: The revision id of the tree the iter_changes
212
has been generated against. Currently assumed to be the same
213
as self.parents[0] - if it is not, errors may occur.
214
:param iter_changes: An iter_changes iterator with the changes to apply
215
to basis_revision_id. The iterator must not include any items with
216
a current kind of None - missing items must be either filtered out
217
or errored-on beefore record_iter_changes sees the item.
218
:return: A generator of (file_id, relpath, fs_hash) tuples for use with
221
raise NotImplementedError(self.record_iter_changes)
224
class RepositoryWriteLockResult(LogicalLockResult):
225
"""The result of write locking a repository.
227
:ivar repository_token: The token obtained from the underlying lock, or
229
:ivar unlock: A callable which will unlock the lock.
232
def __init__(self, unlock, repository_token):
233
LogicalLockResult.__init__(self, unlock)
234
self.repository_token = repository_token
237
return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
241
######################################################################
245
class Repository(_RelockDebugMixin, controldir.ControlComponent):
28
from bzrlib.knit import KnitVersionedFile
29
from bzrlib.lockable_files import LockableFiles, TransportLock
30
from bzrlib.lockdir import LockDir
31
from bzrlib.osutils import safe_unicode
32
from bzrlib.revision import NULL_REVISION
33
from bzrlib.store.versioned import VersionedFileStore, WeaveStore
34
from bzrlib.store.text import TextStore
35
from bzrlib.symbol_versioning import *
36
from bzrlib.trace import mutter
37
from bzrlib.tree import RevisionTree
38
from bzrlib.tsort import topo_sort
39
from bzrlib.testament import Testament
40
from bzrlib.tree import EmptyTree
42
from bzrlib.weave import WeaveFile
46
class Repository(object):
246
47
"""Repository holding history for one or more branches.
248
49
The repository holds and retrieves historical information including
249
50
revisions and file history. It's normally accessed only by the Branch,
250
51
which views a particular line of development through that history.
252
See VersionedFileRepository in bzrlib.vf_repository for the
253
base class for most Bazaar repositories.
53
The Repository builds on top of Stores and a Transport, which respectively
54
describe the disk data format and the way of accessing the (possibly
256
def abort_write_group(self, suppress_errors=False):
257
"""Commit the contents accrued within the current write group.
259
:param suppress_errors: if true, abort_write_group will catch and log
260
unexpected errors that happen during the abort, rather than
261
allowing them to propagate. Defaults to False.
263
:seealso: start_write_group.
265
if self._write_group is not self.get_transaction():
266
# has an unlock or relock occured ?
269
'(suppressed) mismatched lock context and write group. %r, %r',
270
self._write_group, self.get_transaction())
272
raise errors.BzrError(
273
'mismatched lock context and write group. %r, %r' %
274
(self._write_group, self.get_transaction()))
276
self._abort_write_group()
277
except Exception, exc:
278
self._write_group = None
279
if not suppress_errors:
281
mutter('abort_write_group failed')
282
log_exception_quietly()
283
note('bzr: ERROR (ignored): %s', exc)
284
self._write_group = None
286
def _abort_write_group(self):
287
"""Template method for per-repository write group cleanup.
289
This is called during abort before the write group is considered to be
290
finished and should cleanup any internal state accrued during the write
291
group. There is no requirement that data handed to the repository be
292
*not* made available - this is not a rollback - but neither should any
293
attempt be made to ensure that data added is fully commited. Abort is
294
invoked when an error has occured so futher disk or network operations
295
may not be possible or may error and if possible should not be
299
def add_fallback_repository(self, repository):
300
"""Add a repository to use for looking up data not held locally.
302
:param repository: A repository.
304
raise NotImplementedError(self.add_fallback_repository)
306
def _check_fallback_repository(self, repository):
307
"""Check that this repository can fallback to repository safely.
309
Raise an error if not.
311
:param repository: A repository to fallback to.
313
return InterRepository._assert_same_model(self, repository)
59
def add_inventory(self, revid, inv, parents):
60
"""Add the inventory inv to the repository as revid.
62
:param parents: The revision ids of the parents that revid
63
is known to have and are in the repository already.
65
returns the sha1 of the serialized inventory.
67
inv_text = bzrlib.xml5.serializer_v5.write_inventory_to_string(inv)
68
inv_sha1 = bzrlib.osutils.sha_string(inv_text)
69
inv_vf = self.control_weaves.get_weave('inventory',
70
self.get_transaction())
71
inv_vf.add_lines(revid, parents, bzrlib.osutils.split_lines(inv_text))
75
def add_revision(self, rev_id, rev, inv=None, config=None):
76
"""Add rev to the revision store as rev_id.
78
:param rev_id: the revision id to use.
79
:param rev: The revision object.
80
:param inv: The inventory for the revision. if None, it will be looked
81
up in the inventory storer
82
:param config: If None no digital signature will be created.
83
If supplied its signature_needed method will be used
84
to determine if a signature should be made.
86
if config is not None and config.signature_needed():
88
inv = self.get_inventory(rev_id)
89
plaintext = Testament(rev, inv).as_short_text()
90
self.store_revision_signature(
91
gpg.GPGStrategy(config), plaintext, rev_id)
92
if not rev_id in self.get_inventory_weave():
94
raise errors.WeaveRevisionNotPresent(rev_id,
95
self.get_inventory_weave())
97
# yes, this is not suitable for adding with ghosts.
98
self.add_inventory(rev_id, inv, rev.parent_ids)
99
self._revision_store.add_revision(rev, self.get_transaction())
102
def _all_possible_ids(self):
103
"""Return all the possible revisions that we could find."""
104
return self.get_inventory_weave().versions()
315
107
def all_revision_ids(self):
316
"""Returns a list of all the revision ids in the repository.
318
This is conceptually deprecated because code should generally work on
319
the graph reachable from a particular revision, and ignore any other
320
revisions that might be present. There is no direct replacement
323
if 'evil' in debug.debug_flags:
324
mutter_callsite(2, "all_revision_ids is linear with history.")
325
return self._all_revision_ids()
327
def _all_revision_ids(self):
328
"""Returns a list of all the revision ids in the repository.
330
These are in as much topological order as the underlying store can
333
raise NotImplementedError(self._all_revision_ids)
335
def break_lock(self):
336
"""Break a lock if one is present from another instance.
338
Uses the ui factory to ask for confirmation if the lock may be from
341
self.control_files.break_lock()
108
"""Returns a list of all the revision ids in the repository.
110
These are in as much topological order as the underlying store can
111
present: for weaves ghosts may lead to a lack of correctness until
112
the reweave updates the parents list.
114
if self._revision_store.text_store.listable():
115
return self._revision_store.all_revision_ids(self.get_transaction())
116
result = self._all_possible_ids()
117
return self._eliminate_revisions_not_present(result)
120
def _eliminate_revisions_not_present(self, revision_ids):
121
"""Check every revision id in revision_ids to see if we have it.
123
Returns a set of the present revisions.
126
for id in revision_ids:
127
if self.has_revision(id):
344
132
def create(a_bzrdir):
345
133
"""Construct the current default format repository in a_bzrdir."""
346
134
return RepositoryFormat.get_default_format().initialize(a_bzrdir)
348
def __init__(self, _format, a_bzrdir, control_files):
136
def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
349
137
"""instantiate a Repository.
351
139
:param _format: The format of the repository on disk.
352
140
:param a_bzrdir: The BzrDir of the repository.
353
:param control_files: Control files to use for locking, etc.
142
In the future we will have a single api for all stores for
143
getting file texts, inventories and revisions, then
144
this construct will accept instances of those things.
355
# In the future we will have a single api for all stores for
356
# getting file texts, inventories and revisions, then
357
# this construct will accept instances of those things.
358
super(Repository, self).__init__()
146
object.__init__(self)
359
147
self._format = _format
360
148
# the following are part of the public API for Repository:
361
149
self.bzrdir = a_bzrdir
362
150
self.control_files = control_files
363
self._transport = control_files._transport
364
self.base = self._transport.base
366
self._write_group = None
367
# Additional places to query for data.
368
self._fallback_repositories = []
371
def user_transport(self):
372
return self.bzrdir.user_transport
375
def control_transport(self):
376
return self._transport
379
if self._fallback_repositories:
380
return '%s(%r, fallback_repositories=%r)' % (
381
self.__class__.__name__,
383
self._fallback_repositories)
385
return '%s(%r)' % (self.__class__.__name__,
388
def _has_same_fallbacks(self, other_repo):
389
"""Returns true if the repositories have the same fallbacks."""
390
my_fb = self._fallback_repositories
391
other_fb = other_repo._fallback_repositories
392
if len(my_fb) != len(other_fb):
394
for f, g in zip(my_fb, other_fb):
395
if not f.has_same_location(g):
399
def has_same_location(self, other):
400
"""Returns a boolean indicating if this repository is at the same
401
location as another repository.
403
This might return False even when two repository objects are accessing
404
the same physical repository via different URLs.
406
if self.__class__ is not other.__class__:
408
return (self._transport.base == other._transport.base)
410
def is_in_write_group(self):
411
"""Return True if there is an open write group.
413
:seealso: start_write_group.
415
return self._write_group is not None
151
self._revision_store = _revision_store
152
self.text_store = text_store
153
# backwards compatability
154
self.weave_store = text_store
155
# not right yet - should be more semantically clear ?
157
self.control_store = control_store
158
self.control_weaves = control_store
160
def lock_write(self):
161
self.control_files.lock_write()
164
self.control_files.lock_read()
417
166
def is_locked(self):
418
167
return self.control_files.is_locked()
420
def is_write_locked(self):
421
"""Return True if this object is write locked."""
422
return self.is_locked() and self.control_files._lock_mode == 'w'
424
def lock_write(self, token=None):
425
"""Lock this repository for writing.
427
This causes caching within the repository obejct to start accumlating
428
data during reads, and allows a 'write_group' to be obtained. Write
429
groups must be used for actual data insertion.
431
A token should be passed in if you know that you have locked the object
432
some other way, and need to synchronise this object's state with that
435
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
437
:param token: if this is already locked, then lock_write will fail
438
unless the token matches the existing lock.
439
:returns: a token if this instance supports tokens, otherwise None.
440
:raises TokenLockingNotSupported: when a token is given but this
441
instance doesn't support using token locks.
442
:raises MismatchedToken: if the specified token doesn't match the token
443
of the existing lock.
444
:seealso: start_write_group.
445
:return: A RepositoryWriteLockResult.
447
locked = self.is_locked()
448
token = self.control_files.lock_write(token=token)
450
self._warn_if_deprecated()
452
for repo in self._fallback_repositories:
453
# Writes don't affect fallback repos
456
return RepositoryWriteLockResult(self.unlock, token)
459
"""Lock the repository for read operations.
461
:return: An object with an unlock method which will release the lock
464
locked = self.is_locked()
465
self.control_files.lock_read()
467
self._warn_if_deprecated()
469
for repo in self._fallback_repositories:
472
return LogicalLockResult(self.unlock)
474
def get_physical_lock_status(self):
475
return self.control_files.get_physical_lock_status()
477
def leave_lock_in_place(self):
478
"""Tell this repository not to release the physical lock when this
481
If lock_write doesn't return a token, then this method is not supported.
483
self.control_files.leave_in_place()
485
def dont_leave_lock_in_place(self):
486
"""Tell this repository to release the physical lock when this
487
object is unlocked, even if it didn't originally acquire it.
489
If lock_write doesn't return a token, then this method is not supported.
491
self.control_files.dont_leave_in_place()
494
def gather_stats(self, revid=None, committers=None):
495
"""Gather statistics from a revision id.
497
:param revid: The revision id to gather statistics from, if None, then
498
no revision specific statistics are gathered.
499
:param committers: Optional parameter controlling whether to grab
500
a count of committers from the revision specific statistics.
501
:return: A dictionary of statistics. Currently this contains:
502
committers: The number of committers if requested.
503
firstrev: A tuple with timestamp, timezone for the penultimate left
504
most ancestor of revid, if revid is not the NULL_REVISION.
505
latestrev: A tuple with timestamp, timezone for revid, if revid is
506
not the NULL_REVISION.
507
revisions: The total revision count in the repository.
508
size: An estimate disk size of the repository in bytes.
511
if revid and committers:
512
result['committers'] = 0
513
if revid and revid != _mod_revision.NULL_REVISION:
514
graph = self.get_graph()
516
all_committers = set()
517
revisions = [r for (r, p) in graph.iter_ancestry([revid])
518
if r != _mod_revision.NULL_REVISION]
521
# ignore the revisions in the middle - just grab first and last
522
revisions = revisions[0], revisions[-1]
523
for revision in self.get_revisions(revisions):
524
if not last_revision:
525
last_revision = revision
527
all_committers.add(revision.committer)
528
first_revision = revision
530
result['committers'] = len(all_committers)
531
result['firstrev'] = (first_revision.timestamp,
532
first_revision.timezone)
533
result['latestrev'] = (last_revision.timestamp,
534
last_revision.timezone)
537
def find_branches(self, using=False):
538
"""Find branches underneath this repository.
540
This will include branches inside other branches.
542
:param using: If True, list only branches using this repository.
544
if using and not self.is_shared():
545
return self.bzrdir.list_branches()
546
class Evaluator(object):
549
self.first_call = True
551
def __call__(self, bzrdir):
552
# On the first call, the parameter is always the bzrdir
553
# containing the current repo.
554
if not self.first_call:
556
repository = bzrdir.open_repository()
557
except errors.NoRepositoryPresent:
560
return False, ([], repository)
561
self.first_call = False
562
value = (bzrdir.list_branches(), None)
566
for branches, repository in bzrdir.BzrDir.find_bzrdirs(
567
self.user_transport, evaluate=Evaluator()):
568
if branches is not None:
570
if not using and repository is not None:
571
ret.extend(repository.find_branches())
575
def search_missing_revision_ids(self, other,
576
revision_id=symbol_versioning.DEPRECATED_PARAMETER,
577
find_ghosts=True, revision_ids=None, if_present_ids=None,
170
def missing_revision_ids(self, other, revision_id=None):
579
171
"""Return the revision ids that other has that this does not.
581
173
These are returned in topological order.
583
175
revision_id: only return revision ids included by revision_id.
585
if symbol_versioning.deprecated_passed(revision_id):
586
symbol_versioning.warn(
587
'search_missing_revision_ids(revision_id=...) was '
588
'deprecated in 2.4. Use revision_ids=[...] instead.',
589
DeprecationWarning, stacklevel=3)
590
if revision_ids is not None:
591
raise AssertionError(
592
'revision_ids is mutually exclusive with revision_id')
593
if revision_id is not None:
594
revision_ids = [revision_id]
595
return InterRepository.get(other, self).search_missing_revision_ids(
596
find_ghosts=find_ghosts, revision_ids=revision_ids,
597
if_present_ids=if_present_ids, limit=limit)
177
return InterRepository.get(other, self).missing_revision_ids(revision_id)
603
183
For instance, if the repository is at URL/.bzr/repository,
604
184
Repository.open(URL) -> a Repository instance.
606
control = bzrdir.BzrDir.open(base)
186
control = bzrlib.bzrdir.BzrDir.open(base)
607
187
return control.open_repository()
609
def copy_content_into(self, destination, revision_id=None):
189
def copy_content_into(self, destination, revision_id=None, basis=None):
610
190
"""Make a complete copy of the content in self into destination.
612
This is a destructive operation! Do not use it on existing
192
This is a destructive operation! Do not use it on existing
615
return InterRepository.get(self, destination).copy_content(revision_id)
617
def commit_write_group(self):
618
"""Commit the contents accrued within the current write group.
620
:seealso: start_write_group.
622
:return: it may return an opaque hint that can be passed to 'pack'.
624
if self._write_group is not self.get_transaction():
625
# has an unlock or relock occured ?
626
raise errors.BzrError('mismatched lock context %r and '
628
(self.get_transaction(), self._write_group))
629
result = self._commit_write_group()
630
self._write_group = None
633
def _commit_write_group(self):
634
"""Template method for per-repository write group cleanup.
636
This is called before the write group is considered to be
637
finished and should ensure that all data handed to the repository
638
for writing during the write group is safely committed (to the
639
extent possible considering file system caching etc).
642
def suspend_write_group(self):
643
raise errors.UnsuspendableWriteGroup(self)
645
def refresh_data(self):
646
"""Re-read any data needed to synchronise with disk.
648
This method is intended to be called after another repository instance
649
(such as one used by a smart server) has inserted data into the
650
repository. On all repositories this will work outside of write groups.
651
Some repository formats (pack and newer for bzrlib native formats)
652
support refresh_data inside write groups. If called inside a write
653
group on a repository that does not support refreshing in a write group
654
IsInWriteGroupError will be raised.
658
def resume_write_group(self, tokens):
659
if not self.is_write_locked():
660
raise errors.NotWriteLocked(self)
661
if self._write_group:
662
raise errors.BzrError('already in a write group')
663
self._resume_write_group(tokens)
664
# so we can detect unlock/relock - the write group is now entered.
665
self._write_group = self.get_transaction()
667
def _resume_write_group(self, tokens):
668
raise errors.UnsuspendableWriteGroup(self)
670
def fetch(self, source, revision_id=None, find_ghosts=False,
195
return InterRepository.get(self, destination).copy_content(revision_id, basis)
197
def fetch(self, source, revision_id=None, pb=None):
672
198
"""Fetch the content required to construct revision_id from source.
674
If revision_id is None and fetch_spec is None, then all content is
677
fetch() may not be used when the repository is in a write group -
678
either finish the current write group before using fetch, or use
679
fetch before starting the write group.
681
:param find_ghosts: Find and copy revisions in the source that are
682
ghosts in the target (and not reachable directly by walking out to
683
the first-present revision in target from revision_id).
684
:param revision_id: If specified, all the content needed for this
685
revision ID will be copied to the target. Fetch will determine for
686
itself which content needs to be copied.
687
:param fetch_spec: If specified, a SearchResult or
688
PendingAncestryResult that describes which revisions to copy. This
689
allows copying multiple heads at once. Mutually exclusive with
692
if fetch_spec is not None and revision_id is not None:
693
raise AssertionError(
694
"fetch_spec and revision_id are mutually exclusive.")
695
if self.is_in_write_group():
696
raise errors.InternalBzrError(
697
"May not fetch while in a write group.")
698
# fast path same-url fetch operations
699
# TODO: lift out to somewhere common with RemoteRepository
700
# <https://bugs.launchpad.net/bzr/+bug/401646>
701
if (self.has_same_location(source)
702
and fetch_spec is None
703
and self._has_same_fallbacks(source)):
704
# check that last_revision is in 'from' and then return a
706
if (revision_id is not None and
707
not _mod_revision.is_null(revision_id)):
708
self.get_revision(revision_id)
710
inter = InterRepository.get(source, self)
711
return inter.fetch(revision_id=revision_id,
712
find_ghosts=find_ghosts, fetch_spec=fetch_spec)
714
def create_bundle(self, target, base, fileobj, format=None):
715
return serializer.write_bundle(self, target, base, fileobj, format)
717
def get_commit_builder(self, branch, parents, config, timestamp=None,
718
timezone=None, committer=None, revprops=None,
719
revision_id=None, lossy=False):
720
"""Obtain a CommitBuilder for this repository.
722
:param branch: Branch to commit to.
723
:param parents: Revision ids of the parents of the new revision.
724
:param config: Configuration to use.
725
:param timestamp: Optional timestamp recorded for commit.
726
:param timezone: Optional timezone for timestamp.
727
:param committer: Optional committer to set for commit.
728
:param revprops: Optional dictionary of revision properties.
729
:param revision_id: Optional revision id.
730
:param lossy: Whether to discard data that can not be natively
731
represented, when pushing to a foreign VCS
733
raise NotImplementedError(self.get_commit_builder)
735
@only_raises(errors.LockNotHeld, errors.LockBroken)
200
If revision_id is None all content is copied.
202
return InterRepository.get(source, self).fetch(revision_id=revision_id,
736
205
def unlock(self):
737
if (self.control_files._lock_count == 1 and
738
self.control_files._lock_mode == 'w'):
739
if self._write_group is not None:
740
self.abort_write_group()
741
self.control_files.unlock()
742
raise errors.BzrError(
743
'Must end write groups before releasing write locks.')
744
206
self.control_files.unlock()
745
if self.control_files._lock_count == 0:
746
for repo in self._fallback_repositories:
750
def clone(self, a_bzrdir, revision_id=None):
209
def clone(self, a_bzrdir, revision_id=None, basis=None):
751
210
"""Clone this repository into a_bzrdir using the current format.
753
212
Currently no check is made that the format of this repository and
754
213
the bzrdir format are compatible. FIXME RBC 20060201.
756
:return: The newly created destination repository.
758
# TODO: deprecate after 0.16; cloning this with all its settings is
759
# probably not very useful -- mbp 20070423
760
dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
761
self.copy_content_into(dest_repo, revision_id)
764
def start_write_group(self):
765
"""Start a write group in the repository.
767
Write groups are used by repositories which do not have a 1:1 mapping
768
between file ids and backend store to manage the insertion of data from
769
both fetch and commit operations.
771
A write lock is required around the start_write_group/commit_write_group
772
for the support of lock-requiring repository formats.
774
One can only insert data into a repository inside a write group.
778
if not self.is_write_locked():
779
raise errors.NotWriteLocked(self)
780
if self._write_group:
781
raise errors.BzrError('already in a write group')
782
self._start_write_group()
783
# so we can detect unlock/relock - the write group is now entered.
784
self._write_group = self.get_transaction()
786
def _start_write_group(self):
787
"""Template method for per-repository write group startup.
789
This is called before the write group is considered to be
794
def sprout(self, to_bzrdir, revision_id=None):
795
"""Create a descendent repository for new development.
797
Unlike clone, this does not copy the settings of the repository.
799
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
800
dest_repo.fetch(self, revision_id=revision_id)
803
def _create_sprouting_repo(self, a_bzrdir, shared):
804
215
if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
805
216
# use target default format.
806
dest_repo = a_bzrdir.create_repository()
217
result = a_bzrdir.create_repository()
218
# FIXME RBC 20060209 split out the repository type to avoid this check ?
219
elif isinstance(a_bzrdir._format,
220
(bzrlib.bzrdir.BzrDirFormat4,
221
bzrlib.bzrdir.BzrDirFormat5,
222
bzrlib.bzrdir.BzrDirFormat6)):
223
result = a_bzrdir.open_repository()
808
# Most control formats need the repository to be specifically
809
# created, but on some old all-in-one formats it's not needed
811
dest_repo = self._format.initialize(a_bzrdir, shared=shared)
812
except errors.UninitializableFormat:
813
dest_repo = a_bzrdir.open_repository()
225
result = self._format.initialize(a_bzrdir, shared=self.is_shared())
226
self.copy_content_into(result, revision_id, basis)
817
230
def has_revision(self, revision_id):
818
231
"""True if this repository has a copy of the revision."""
819
return revision_id in self.has_revisions((revision_id,))
822
def has_revisions(self, revision_ids):
823
"""Probe to find out the presence of multiple revisions.
825
:param revision_ids: An iterable of revision_ids.
826
:return: A set of the revision_ids that were present.
828
raise NotImplementedError(self.has_revisions)
831
def get_revision(self, revision_id):
832
"""Return the Revision object for a named revision."""
833
return self.get_revisions([revision_id])[0]
232
return self._revision_store.has_revision_id(revision_id,
233
self.get_transaction())
835
236
def get_revision_reconcile(self, revision_id):
836
237
"""'reconcile' helper routine that allows access to a revision always.
838
239
This variant of get_revision does not cross check the weave graph
839
240
against the revision one as get_revision does: but it should only
840
241
be used by reconcile, or reconcile-alike commands that are correcting
841
242
or testing the revision graph.
843
raise NotImplementedError(self.get_revision_reconcile)
845
def get_revisions(self, revision_ids):
846
"""Get many revisions at once.
244
if not revision_id or not isinstance(revision_id, basestring):
245
raise InvalidRevisionId(revision_id=revision_id, branch=self)
246
return self._revision_store.get_revision(revision_id,
247
self.get_transaction())
250
def get_revision_xml(self, revision_id):
251
rev = self.get_revision(revision_id)
253
# the current serializer..
254
self._revision_store._serializer.write_revision(rev, rev_tmp)
256
return rev_tmp.getvalue()
259
def get_revision(self, revision_id):
260
"""Return the Revision object for a named revision"""
261
r = self.get_revision_reconcile(revision_id)
262
# weave corruption can lead to absent revision markers that should be
264
# the following test is reasonably cheap (it needs a single weave read)
265
# and the weave is cached in read transactions. In write transactions
266
# it is not cached but typically we only read a small number of
267
# revisions. For knits when they are introduced we will probably want
268
# to ensure that caching write transactions are in use.
269
inv = self.get_inventory_weave()
270
self._check_revision_parents(r, inv)
273
def _check_revision_parents(self, revision, inventory):
274
"""Private to Repository and Fetch.
848
Repositories that need to check data on every revision read should
849
subclass this method.
851
raise NotImplementedError(self.get_revisions)
853
def get_deltas_for_revisions(self, revisions, specific_fileids=None):
854
"""Produce a generator of revision deltas.
856
Note that the input is a sequence of REVISIONS, not revision_ids.
857
Trees will be held in memory until the generator exits.
858
Each delta is relative to the revision's lefthand predecessor.
860
:param specific_fileids: if not None, the result is filtered
861
so that only those file-ids, their parents and their
862
children are included.
864
# Get the revision-ids of interest
865
required_trees = set()
866
for revision in revisions:
867
required_trees.add(revision.revision_id)
868
required_trees.update(revision.parent_ids[:1])
870
# Get the matching filtered trees. Note that it's more
871
# efficient to pass filtered trees to changes_from() rather
872
# than doing the filtering afterwards. changes_from() could
873
# arguably do the filtering itself but it's path-based, not
874
# file-id based, so filtering before or afterwards is
876
if specific_fileids is None:
877
trees = dict((t.get_revision_id(), t) for
878
t in self.revision_trees(required_trees))
880
trees = dict((t.get_revision_id(), t) for
881
t in self._filtered_revision_trees(required_trees,
884
# Calculate the deltas
885
for revision in revisions:
886
if not revision.parent_ids:
887
old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
889
old_tree = trees[revision.parent_ids[0]]
890
yield trees[revision.revision_id].changes_from(old_tree)
893
def get_revision_delta(self, revision_id, specific_fileids=None):
894
"""Return the delta for one revision.
896
The delta is relative to the left-hand predecessor of the
899
:param specific_fileids: if not None, the result is filtered
900
so that only those file-ids, their parents and their
901
children are included.
903
r = self.get_revision(revision_id)
904
return list(self.get_deltas_for_revisions([r],
905
specific_fileids=specific_fileids))[0]
276
This checks the parentage of revision in an inventory weave for
277
consistency and is only applicable to inventory-weave-for-ancestry
278
using repository formats & fetchers.
280
weave_parents = inventory.get_parents(revision.revision_id)
281
weave_names = inventory.versions()
282
for parent_id in revision.parent_ids:
283
if parent_id in weave_names:
284
# this parent must not be a ghost.
285
if not parent_id in weave_parents:
287
raise errors.CorruptRepository(self)
907
289
@needs_write_lock
908
290
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
909
291
signature = gpg_strategy.sign(plaintext)
910
self.add_signature_text(revision_id, signature)
912
def add_signature_text(self, revision_id, signature):
913
"""Store a signature text for a revision.
915
:param revision_id: Revision id of the revision
916
:param signature: Signature text.
918
raise NotImplementedError(self.add_signature_text)
920
def _find_parent_ids_of_revisions(self, revision_ids):
921
"""Find all parent ids that are mentioned in the revision graph.
923
:return: set of revisions that are parents of revision_ids which are
924
not part of revision_ids themselves
926
parent_map = self.get_parent_map(revision_ids)
928
map(parent_ids.update, parent_map.itervalues())
929
parent_ids.difference_update(revision_ids)
930
parent_ids.discard(_mod_revision.NULL_REVISION)
933
def fileids_altered_by_revision_ids(self, revision_ids):
934
"""Find the file ids and versions affected by revisions.
936
:param revisions: an iterable containing revision ids.
937
:return: a dictionary mapping altered file-ids to an iterable of
938
revision_ids. Each altered file-ids has the exact revision_ids
939
that altered it listed explicitly.
941
raise NotImplementedError(self.fileids_altered_by_revision_ids)
943
def iter_files_bytes(self, desired_files):
944
"""Iterate through file versions.
946
Files will not necessarily be returned in the order they occur in
947
desired_files. No specific order is guaranteed.
949
Yields pairs of identifier, bytes_iterator. identifier is an opaque
950
value supplied by the caller as part of desired_files. It should
951
uniquely identify the file version in the caller's context. (Examples:
952
an index number or a TreeTransform trans_id.)
954
:param desired_files: a list of (file_id, revision_id, identifier)
957
raise NotImplementedError(self.iter_files_bytes)
959
def get_rev_id_for_revno(self, revno, known_pair):
960
"""Return the revision id of a revno, given a later (revno, revid)
961
pair in the same history.
963
:return: if found (True, revid). If the available history ran out
964
before reaching the revno, then this returns
965
(False, (closest_revno, closest_revid)).
967
known_revno, known_revid = known_pair
968
partial_history = [known_revid]
969
distance_from_known = known_revno - revno
970
if distance_from_known < 0:
972
'requested revno (%d) is later than given known revno (%d)'
973
% (revno, known_revno))
292
self._revision_store.add_revision_signature_text(revision_id,
294
self.get_transaction())
296
def fileid_involved_between_revs(self, from_revid, to_revid):
297
"""Find file_id(s) which are involved in the changes between revisions.
299
This determines the set of revisions which are involved, and then
300
finds all file ids affected by those revisions.
302
w = self.get_inventory_weave()
303
from_set = set(w.get_ancestry(from_revid))
304
to_set = set(w.get_ancestry(to_revid))
305
changed = to_set.difference(from_set)
306
return self._fileid_involved_by_set(changed)
308
def fileid_involved(self, last_revid=None):
309
"""Find all file_ids modified in the ancestry of last_revid.
311
:param last_revid: If None, last_revision() will be used.
313
w = self.get_inventory_weave()
315
changed = set(w.versions())
317
changed = set(w.get_ancestry(last_revid))
318
return self._fileid_involved_by_set(changed)
320
def fileid_involved_by_set(self, changes):
321
"""Find all file_ids modified by the set of revisions passed in.
323
:param changes: A set() of revision ids
325
# TODO: jam 20060119 This line does *nothing*, remove it.
326
# or better yet, change _fileid_involved_by_set so
327
# that it takes the inventory weave, rather than
328
# pulling it out by itself.
329
return self._fileid_involved_by_set(changes)
331
def _fileid_involved_by_set(self, changes):
332
"""Find the set of file-ids affected by the set of revisions.
334
:param changes: A set() of revision ids.
335
:return: A set() of file ids.
337
This peaks at the Weave, interpreting each line, looking to
338
see if it mentions one of the revisions. And if so, includes
339
the file id mentioned.
340
This expects both the Weave format, and the serialization
341
to have a single line per file/directory, and to have
342
fileid="" and revision="" on that line.
344
assert isinstance(self._format, (RepositoryFormat5,
347
RepositoryFormatKnit1)), \
348
"fileid_involved only supported for branches which store inventory as unnested xml"
350
w = self.get_inventory_weave()
353
# this code needs to read every line in every inventory for the
354
# inventories [changes]. Seeing a line twice is ok. Seeing a line
355
# not pesent in one of those inventories is unnecessary and not
356
# harmful because we are filtering by the revision id marker in the
357
# inventory lines to only select file ids altered in one of those
358
# revisions. We dont need to see all lines in the inventory because
359
# only those added in an inventory in rev X can contain a revision=X
361
for line in w.iter_lines_added_or_present_in_versions(changes):
362
start = line.find('file_id="')+9
363
if start < 9: continue
364
end = line.find('"', start)
366
file_id = _unescape_xml(line[start:end])
368
# check if file_id is already present
369
if file_id in file_ids: continue
371
start = line.find('revision="')+10
372
if start < 10: continue
373
end = line.find('"', start)
375
revision_id = _unescape_xml(line[start:end])
376
if revision_id in changes:
377
file_ids.add(file_id)
381
def get_inventory_weave(self):
382
return self.control_weaves.get_weave('inventory',
383
self.get_transaction())
386
def get_inventory(self, revision_id):
387
"""Get Inventory object by hash."""
388
xml = self.get_inventory_xml(revision_id)
389
return bzrlib.xml5.serializer_v5.read_inventory_from_string(xml)
392
def get_inventory_xml(self, revision_id):
393
"""Get inventory XML as a file object."""
976
self, partial_history, stop_index=distance_from_known)
977
except errors.RevisionNotPresent, err:
978
if err.revision_id == known_revid:
979
# The start revision (known_revid) wasn't found.
981
# This is a stacked repository with no fallbacks, or a there's a
982
# left-hand ghost. Either way, even though the revision named in
983
# the error isn't in this repo, we know it's the next step in this
985
partial_history.append(err.revision_id)
986
if len(partial_history) <= distance_from_known:
987
# Didn't find enough history to get a revid for the revno.
988
earliest_revno = known_revno - len(partial_history) + 1
989
return (False, (earliest_revno, partial_history[-1]))
990
if len(partial_history) - 1 > distance_from_known:
991
raise AssertionError('_iter_for_revno returned too much history')
992
return (True, partial_history[-1])
994
@symbol_versioning.deprecated_method(symbol_versioning.deprecated_in((2, 4, 0)))
995
def iter_reverse_revision_history(self, revision_id):
996
"""Iterate backwards through revision ids in the lefthand history
998
:param revision_id: The revision id to start with. All its lefthand
999
ancestors will be traversed.
1001
graph = self.get_graph()
1002
stop_revisions = (None, _mod_revision.NULL_REVISION)
1003
return graph.iter_lefthand_ancestry(revision_id, stop_revisions)
395
assert isinstance(revision_id, basestring), type(revision_id)
396
iw = self.get_inventory_weave()
397
return iw.get_text(revision_id)
399
raise bzrlib.errors.HistoryMissing(self, 'inventory', revision_id)
402
def get_inventory_sha1(self, revision_id):
403
"""Return the sha1 hash of the inventory entry
405
return self.get_revision(revision_id).inventory_sha1
408
def get_revision_graph(self, revision_id=None):
409
"""Return a dictionary containing the revision graph.
411
:return: a dictionary of revision_id->revision_parents_list.
413
weave = self.get_inventory_weave()
414
all_revisions = self._eliminate_revisions_not_present(weave.versions())
415
entire_graph = dict([(node, weave.get_parents(node)) for
416
node in all_revisions])
417
if revision_id is None:
419
elif revision_id not in entire_graph:
420
raise errors.NoSuchRevision(self, revision_id)
422
# add what can be reached from revision_id
424
pending = set([revision_id])
425
while len(pending) > 0:
427
result[node] = entire_graph[node]
428
for revision_id in result[node]:
429
if revision_id not in result:
430
pending.add(revision_id)
434
def get_revision_graph_with_ghosts(self, revision_ids=None):
435
"""Return a graph of the revisions with ghosts marked as applicable.
437
:param revision_ids: an iterable of revisions to graph or None for all.
438
:return: a Graph object with the graph reachable from revision_ids.
442
pending = set(self.all_revision_ids())
445
pending = set(revision_ids)
446
required = set(revision_ids)
449
revision_id = pending.pop()
451
rev = self.get_revision(revision_id)
452
except errors.NoSuchRevision:
453
if revision_id in required:
456
result.add_ghost(revision_id)
458
for parent_id in rev.parent_ids:
459
# is this queued or done ?
460
if (parent_id not in pending and
461
parent_id not in done):
463
pending.add(parent_id)
464
result.add_node(revision_id, rev.parent_ids)
465
done.add(revision_id)
469
def get_revision_inventory(self, revision_id):
470
"""Return inventory of a past revision."""
471
# TODO: Unify this with get_inventory()
472
# bzr 0.0.6 and later imposes the constraint that the inventory_id
473
# must be the same as its revision, so this is trivial.
474
if revision_id is None:
475
# This does not make sense: if there is no revision,
476
# then it is the current tree inventory surely ?!
477
# and thus get_root_id() is something that looks at the last
478
# commit on the branch, and the get_root_id is an inventory check.
479
raise NotImplementedError
480
# return Inventory(self.get_root_id())
482
return self.get_inventory(revision_id)
1005
485
def is_shared(self):
1006
486
"""Return True if this repository is flagged as a shared repository."""
1007
raise NotImplementedError(self.is_shared)
487
# FIXME format 4-6 cannot be shared, this is technically faulty.
488
return self.control_files._transport.has('shared-storage')
1009
490
@needs_write_lock
1010
def reconcile(self, other=None, thorough=False):
1011
492
"""Reconcile this repository."""
1012
493
from bzrlib.reconcile import RepoReconciler
1013
reconciler = RepoReconciler(self, thorough=thorough)
494
reconciler = RepoReconciler(self)
1014
495
reconciler.reconcile()
1015
496
return reconciler
1017
def _refresh_data(self):
1018
"""Helper called from lock_* to ensure coherency with disk.
1020
The default implementation does nothing; it is however possible
1021
for repositories to maintain loaded indices across multiple locks
1022
by checking inside their implementation of this method to see
1023
whether their indices are still valid. This depends of course on
1024
the disk format being validatable in this manner. This method is
1025
also called by the refresh_data() public interface to cause a refresh
1026
to occur while in a write lock so that data inserted by a smart server
1027
push operation is visible on the client's instance of the physical
1031
498
@needs_read_lock
1032
499
def revision_tree(self, revision_id):
1033
500
"""Return Tree for a revision on this branch.
1035
`revision_id` may be NULL_REVISION for the empty tree revision.
1037
raise NotImplementedError(self.revision_tree)
1039
def revision_trees(self, revision_ids):
1040
"""Return Trees for revisions in this repository.
1042
:param revision_ids: a sequence of revision-ids;
1043
a revision-id may not be None or 'null:'
1045
raise NotImplementedError(self.revision_trees)
502
`revision_id` may be None for the null revision, in which case
503
an `EmptyTree` is returned."""
504
# TODO: refactor this to use an existing revision object
505
# so we don't need to read it in twice.
506
if revision_id is None or revision_id == NULL_REVISION:
509
inv = self.get_revision_inventory(revision_id)
510
return RevisionTree(self, inv, revision_id)
1047
512
@needs_read_lock
1048
@symbol_versioning.deprecated_method(
1049
symbol_versioning.deprecated_in((2, 4, 0)))
1050
def get_ancestry(self, revision_id, topo_sorted=True):
513
def get_ancestry(self, revision_id):
1051
514
"""Return a list of revision-ids integrated by a revision.
1053
The first element of the list is always None, indicating the origin
1054
revision. This might change when we have history horizons, or
1055
perhaps we should have a new API.
1057
516
This is topologically sorted.
1059
if 'evil' in debug.debug_flags:
1060
mutter_callsite(2, "get_ancestry is linear with history.")
1061
if _mod_revision.is_null(revision_id):
518
if revision_id is None:
1063
520
if not self.has_revision(revision_id):
1064
521
raise errors.NoSuchRevision(self, revision_id)
1065
graph = self.get_graph()
1067
search = graph._make_breadth_first_searcher([revision_id])
522
w = self.get_inventory_weave()
523
candidates = w.get_ancestry(revision_id)
524
return [None] + candidates # self._eliminate_revisions_not_present(candidates)
527
def print_file(self, file, revision_id):
528
"""Print `file` to stdout.
530
FIXME RBC 20060125 as John Meinel points out this is a bad api
531
- it writes to stdout, it assumes that that is valid etc. Fix
532
by creating a new more flexible convenience function.
534
tree = self.revision_tree(revision_id)
535
# use inventory as it was in that revision
536
file_id = tree.inventory.path2id(file)
538
raise BzrError("%r is not present in revision %s" % (file, revno))
1070
found, ghosts = search.next_with_ghosts()
1071
except StopIteration:
1074
if _mod_revision.NULL_REVISION in keys:
1075
keys.remove(_mod_revision.NULL_REVISION)
1077
parent_map = graph.get_parent_map(keys)
1078
keys = tsort.topo_sort(parent_map)
1079
return [None] + list(keys)
1081
def pack(self, hint=None, clean_obsolete_packs=False):
1082
"""Compress the data within the repository.
1084
This operation only makes sense for some repository types. For other
1085
types it should be a no-op that just returns.
1087
This stub method does not require a lock, but subclasses should use
1088
@needs_write_lock as this is a long running call it's reasonable to
1089
implicitly lock for the user.
1091
:param hint: If not supplied, the whole repository is packed.
1092
If supplied, the repository may use the hint parameter as a
1093
hint for the parts of the repository to pack. A hint can be
1094
obtained from the result of commit_write_group(). Out of
1095
date hints are simply ignored, because concurrent operations
1096
can obsolete them rapidly.
1098
:param clean_obsolete_packs: Clean obsolete packs immediately after
540
revno = self.revision_id_to_revno(revision_id)
541
except errors.NoSuchRevision:
542
# TODO: This should not be BzrError,
543
# but NoSuchFile doesn't fit either
544
raise BzrError('%r is not present in revision %s'
545
% (file, revision_id))
547
raise BzrError('%r is not present in revision %s'
549
tree.print_file(file_id)
1102
551
def get_transaction(self):
1103
552
return self.control_files.get_transaction()
1105
def get_parent_map(self, revision_ids):
1106
"""See graph.StackedParentsProvider.get_parent_map"""
1107
raise NotImplementedError(self.get_parent_map)
1109
def _get_parent_map_no_fallbacks(self, revision_ids):
1110
"""Same as Repository.get_parent_map except doesn't query fallbacks."""
1111
# revisions index works in keys; this just works in revisions
1112
# therefore wrap and unwrap
1115
for revision_id in revision_ids:
1116
if revision_id == _mod_revision.NULL_REVISION:
1117
result[revision_id] = ()
1118
elif revision_id is None:
1119
raise ValueError('get_parent_map(None) is not valid')
1121
query_keys.append((revision_id ,))
1122
vf = self.revisions.without_fallbacks()
1123
for ((revision_id,), parent_keys) in \
1124
vf.get_parent_map(query_keys).iteritems():
1126
result[revision_id] = tuple([parent_revid
1127
for (parent_revid,) in parent_keys])
1129
result[revision_id] = (_mod_revision.NULL_REVISION,)
1132
def _make_parents_provider(self):
1133
if not self._format.supports_external_lookups:
1135
return graph.StackedParentsProvider(_LazyListJoin(
1136
[self._make_parents_provider_unstacked()],
1137
self._fallback_repositories))
1139
def _make_parents_provider_unstacked(self):
1140
return graph.CallableToParentsProviderAdapter(
1141
self._get_parent_map_no_fallbacks)
1144
def get_known_graph_ancestry(self, revision_ids):
1145
"""Return the known graph for a set of revision ids and their ancestors.
1147
raise NotImplementedError(self.get_known_graph_ancestry)
1149
def get_file_graph(self):
1150
"""Return the graph walker for files."""
1151
raise NotImplementedError(self.get_file_graph)
1153
def get_graph(self, other_repository=None):
1154
"""Return the graph walker for this repository format"""
1155
parents_provider = self._make_parents_provider()
1156
if (other_repository is not None and
1157
not self.has_same_location(other_repository)):
1158
parents_provider = graph.StackedParentsProvider(
1159
[parents_provider, other_repository._make_parents_provider()])
1160
return graph.Graph(parents_provider)
1162
def revision_ids_to_search_result(self, result_set):
1163
"""Convert a set of revision ids to a graph SearchResult."""
1164
result_parents = set()
1165
for parents in self.get_graph().get_parent_map(
1166
result_set).itervalues():
1167
result_parents.update(parents)
1168
included_keys = result_set.intersection(result_parents)
1169
start_keys = result_set.difference(included_keys)
1170
exclude_keys = result_parents.difference(result_set)
1171
result = graph.SearchResult(start_keys, exclude_keys,
1172
len(result_set), result_set)
554
def revision_parents(self, revid):
555
return self.get_inventory_weave().parent_names(revid)
1175
557
@needs_write_lock
1176
558
def set_make_working_trees(self, new_value):
1182
564
:param new_value: True to restore the default, False to disable making
1185
raise NotImplementedError(self.set_make_working_trees)
567
# FIXME: split out into a new class/strategy ?
568
if isinstance(self._format, (RepositoryFormat4,
571
raise NotImplementedError(self.set_make_working_trees)
574
self.control_files._transport.delete('no-working-trees')
575
except errors.NoSuchFile:
578
self.control_files.put_utf8('no-working-trees', '')
1187
580
def make_working_trees(self):
1188
581
"""Returns the policy for making working trees on new branches."""
1189
raise NotImplementedError(self.make_working_trees)
582
# FIXME: split out into a new class/strategy ?
583
if isinstance(self._format, (RepositoryFormat4,
587
return not self.control_files._transport.has('no-working-trees')
1191
589
@needs_write_lock
1192
590
def sign_revision(self, revision_id, gpg_strategy):
1193
testament = _mod_testament.Testament.from_revision(self, revision_id)
1194
plaintext = testament.as_short_text()
591
plaintext = Testament.from_revision(self, revision_id).as_short_text()
1195
592
self.store_revision_signature(gpg_strategy, plaintext, revision_id)
1197
594
@needs_read_lock
1198
def verify_revision(self, revision_id, gpg_strategy):
1199
"""Verify the signature on a revision.
1201
:param revision_id: the revision to verify
1202
:gpg_strategy: the GPGStrategy object to used
1204
:return: gpg.SIGNATURE_VALID or a failed SIGNATURE_ value
1206
if not self.has_signature_for_revision_id(revision_id):
1207
return gpg.SIGNATURE_NOT_SIGNED, None
1208
signature = self.get_signature_text(revision_id)
1210
testament = _mod_testament.Testament.from_revision(self, revision_id)
1211
plaintext = testament.as_short_text()
1213
return gpg_strategy.verify(signature, plaintext)
1215
595
def has_signature_for_revision_id(self, revision_id):
1216
596
"""Query for a revision signature for revision_id in the repository."""
1217
raise NotImplementedError(self.has_signature_for_revision_id)
597
return self._revision_store.has_signature(revision_id,
598
self.get_transaction())
1219
601
def get_signature_text(self, revision_id):
1220
602
"""Return the text for a signature."""
1221
raise NotImplementedError(self.get_signature_text)
1223
def check(self, revision_ids=None, callback_refs=None, check_repo=True):
1224
"""Check consistency of all history of given revision_ids.
1226
Different repository implementations should override _check().
1228
:param revision_ids: A non-empty list of revision_ids whose ancestry
1229
will be checked. Typically the last revision_id of a branch.
1230
:param callback_refs: A dict of check-refs to resolve and callback
1231
the check/_check method on the items listed as wanting the ref.
1233
:param check_repo: If False do not check the repository contents, just
1234
calculate the data callback_refs requires and call them back.
1236
return self._check(revision_ids=revision_ids, callback_refs=callback_refs,
1237
check_repo=check_repo)
1239
def _check(self, revision_ids=None, callback_refs=None, check_repo=True):
1240
raise NotImplementedError(self.check)
1242
def _warn_if_deprecated(self, branch=None):
1243
if not self._format.is_deprecated():
1245
global _deprecation_warning_done
1246
if _deprecation_warning_done:
1250
conf = config.GlobalConfig()
1252
conf = branch.get_config()
1253
if conf.suppress_warning('format_deprecation'):
1255
warning("Format %s for %s is deprecated -"
1256
" please use 'bzr upgrade' to get better performance"
1257
% (self._format, self.bzrdir.transport.base))
1259
_deprecation_warning_done = True
1261
def supports_rich_root(self):
1262
return self._format.rich_root_data
1264
def _check_ascii_revisionid(self, revision_id, method):
1265
"""Private helper for ascii-only repositories."""
1266
# weave repositories refuse to store revisionids that are non-ascii.
1267
if revision_id is not None:
1268
# weaves require ascii revision ids.
1269
if isinstance(revision_id, unicode):
1271
revision_id.encode('ascii')
1272
except UnicodeEncodeError:
1273
raise errors.NonAsciiRevisionId(method, self)
1276
revision_id.decode('ascii')
1277
except UnicodeDecodeError:
1278
raise errors.NonAsciiRevisionId(method, self)
603
return self._revision_store.get_signature_text(revision_id,
604
self.get_transaction())
607
class AllInOneRepository(Repository):
608
"""Legacy support - the repository behaviour for all-in-one branches."""
610
def __init__(self, _format, a_bzrdir, _revision_store, control_store, text_store):
611
# we reuse one control files instance.
612
dir_mode = a_bzrdir._control_files._dir_mode
613
file_mode = a_bzrdir._control_files._file_mode
615
def get_weave(name, prefixed=False):
617
name = safe_unicode(name)
620
relpath = a_bzrdir._control_files._escape(name)
621
weave_transport = a_bzrdir._control_files._transport.clone(relpath)
622
ws = WeaveStore(weave_transport, prefixed=prefixed,
625
if a_bzrdir._control_files._transport.should_cache():
626
ws.enable_cache = True
629
def get_store(name, compressed=True, prefixed=False):
630
# FIXME: This approach of assuming stores are all entirely compressed
631
# or entirely uncompressed is tidy, but breaks upgrade from
632
# some existing branches where there's a mixture; we probably
633
# still want the option to look for both.
634
relpath = a_bzrdir._control_files._escape(name)
635
store = TextStore(a_bzrdir._control_files._transport.clone(relpath),
636
prefixed=prefixed, compressed=compressed,
639
#if self._transport.should_cache():
640
# cache_path = os.path.join(self.cache_root, name)
641
# os.mkdir(cache_path)
642
# store = bzrlib.store.CachedStore(store, cache_path)
645
# not broken out yet because the controlweaves|inventory_store
646
# and text_store | weave_store bits are still different.
647
if isinstance(_format, RepositoryFormat4):
648
# cannot remove these - there is still no consistent api
649
# which allows access to this old info.
650
self.inventory_store = get_store('inventory-store')
651
text_store = get_store('text-store')
652
super(AllInOneRepository, self).__init__(_format, a_bzrdir, a_bzrdir._control_files, _revision_store, control_store, text_store)
1281
655
class MetaDirRepository(Repository):
1282
"""Repositories in the new meta-dir layout.
1284
:ivar _transport: Transport for access to repository control files,
1285
typically pointing to .bzr/repository.
1288
def __init__(self, _format, a_bzrdir, control_files):
1289
super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
1290
self._transport = control_files._transport
1292
def is_shared(self):
1293
"""Return True if this repository is flagged as a shared repository."""
1294
return self._transport.has('shared-storage')
656
"""Repositories in the new meta-dir layout."""
658
def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
659
super(MetaDirRepository, self).__init__(_format,
666
dir_mode = self.control_files._dir_mode
667
file_mode = self.control_files._file_mode
669
def get_weave(name, prefixed=False):
671
name = safe_unicode(name)
674
relpath = self.control_files._escape(name)
675
weave_transport = self.control_files._transport.clone(relpath)
676
ws = WeaveStore(weave_transport, prefixed=prefixed,
679
if self.control_files._transport.should_cache():
680
ws.enable_cache = True
684
class KnitRepository(MetaDirRepository):
685
"""Knit format repository."""
688
def all_revision_ids(self):
689
"""See Repository.all_revision_ids()."""
690
return self._revision_store.all_revision_ids(self.get_transaction())
693
def get_ancestry(self, revision_id):
694
"""Return a list of revision-ids integrated by a revision.
696
This is topologically sorted.
698
if revision_id is None:
700
vf = self._revision_store.get_revision_file(self.get_transaction())
702
return [None] + vf.get_ancestry(revision_id)
703
except errors.RevisionNotPresent:
704
raise errors.NoSuchRevision(self, revision_id)
707
def get_revision(self, revision_id):
708
"""Return the Revision object for a named revision"""
709
return self.get_revision_reconcile(revision_id)
712
def get_revision_graph_with_ghosts(self, revision_ids=None):
713
"""Return a graph of the revisions with ghosts marked as applicable.
715
:param revision_ids: an iterable of revisions to graph or None for all.
716
:return: a Graph object with the graph reachable from revision_ids.
719
vf = self._revision_store.get_revision_file(self.get_transaction())
720
versions = vf.versions()
722
pending = set(self.all_revision_ids())
725
pending = set(revision_ids)
726
required = set(revision_ids)
729
revision_id = pending.pop()
730
if not revision_id in versions:
731
if revision_id in required:
732
raise errors.NoSuchRevision(self, revision_id)
734
result.add_ghost(revision_id)
736
parent_ids = vf.get_parents_with_ghosts(revision_id)
737
for parent_id in parent_ids:
738
# is this queued or done ?
739
if (parent_id not in pending and
740
parent_id not in done):
742
pending.add(parent_id)
743
result.add_node(revision_id, parent_ids)
1296
747
@needs_write_lock
1297
def set_make_working_trees(self, new_value):
1298
"""Set the policy flag for making working trees when creating branches.
1300
This only applies to branches that use this repository.
1302
The default is 'True'.
1303
:param new_value: True to restore the default, False to disable making
1308
self._transport.delete('no-working-trees')
1309
except errors.NoSuchFile:
1312
self._transport.put_bytes('no-working-trees', '',
1313
mode=self.bzrdir._get_file_mode())
1315
def make_working_trees(self):
1316
"""Returns the policy for making working trees on new branches."""
1317
return not self._transport.has('no-working-trees')
1320
class RepositoryFormatRegistry(controldir.ControlComponentFormatRegistry):
1321
"""Repository format registry."""
1323
def get_default(self):
1324
"""Return the current default format."""
1325
from bzrlib import bzrdir
1326
return bzrdir.format_registry.make_bzrdir('default').repository_format
1329
network_format_registry = registry.FormatRegistry()
1330
"""Registry of formats indexed by their network name.
1332
The network name for a repository format is an identifier that can be used when
1333
referring to formats with smart server operations. See
1334
RepositoryFormat.network_name() for more detail.
1338
format_registry = RepositoryFormatRegistry(network_format_registry)
1339
"""Registry of formats, indexed by their BzrDirMetaFormat format string.
1341
This can contain either format instances themselves, or classes/factories that
1342
can be called to obtain one.
1346
#####################################################################
1347
# Repository Formats
1349
class RepositoryFormat(controldir.ControlComponentFormat):
749
"""Reconcile this repository."""
750
from bzrlib.reconcile import KnitReconciler
751
reconciler = KnitReconciler(self)
752
reconciler.reconcile()
756
class RepositoryFormat(object):
1350
757
"""A repository format.
1352
Formats provide four things:
759
Formats provide three things:
1353
760
* An initialization routine to construct repository data on disk.
1354
* a optional format string which is used when the BzrDir supports
761
* a format string which is used when the BzrDir supports versioned
1356
763
* an open routine which returns a Repository instance.
1357
* A network name for referring to the format in smart server RPC
1360
There is one and only one Format subclass for each on-disk format. But
1361
there can be one Repository subclass that is used for several different
1362
formats. The _format attribute on a Repository instance can be used to
1363
determine the disk format.
1365
Formats are placed in a registry by their format string for reference
1366
during opening. These should be subclasses of RepositoryFormat for
765
Formats are placed in an dict by their format string for reference
766
during opening. These should be subclasses of RepositoryFormat
1369
769
Once a format is deprecated, just deprecate the initialize and open
1370
methods on the format class. Do not deprecate the object, as the
1371
object may be created even when a repository instance hasn't been
770
methods on the format class. Do not deprecate the object, as the
771
object will be created every system load.
1374
773
Common instance attributes:
1375
774
_matchingbzrdir - the bzrdir format that the repository format was
1376
775
originally written to work with. This can be used if manually
1377
776
constructing a bzrdir and repository, or more commonly for test suite
1381
# Set to True or False in derived classes. True indicates that the format
1382
# supports ghosts gracefully.
1383
supports_ghosts = None
1384
# Can this repository be given external locations to lookup additional
1385
# data. Set to True or False in derived classes.
1386
supports_external_lookups = None
1387
# Does this format support CHK bytestring lookups. Set to True or False in
1389
supports_chks = None
1390
# Should fetch trigger a reconcile after the fetch? Only needed for
1391
# some repository formats that can suffer internal inconsistencies.
1392
_fetch_reconcile = False
1393
# Does this format have < O(tree_size) delta generation. Used to hint what
1394
# code path for commit, amongst other things.
1396
# Does doing a pack operation compress data? Useful for the pack UI command
1397
# (so if there is one pack, the operation can still proceed because it may
1398
# help), and for fetching when data won't have come from the same
1400
pack_compresses = False
1401
# Does the repository storage understand references to trees?
1402
supports_tree_reference = None
1403
# Is the format experimental ?
1404
experimental = False
1405
# Does this repository format escape funky characters, or does it create
1406
# files with similar names as the versioned files in its contents on disk
1408
supports_funky_characters = None
1409
# Does this repository format support leaving locks?
1410
supports_leaving_lock = None
1411
# Does this format support the full VersionedFiles interface?
1412
supports_full_versioned_files = None
1413
# Does this format support signing revision signatures?
1414
supports_revision_signatures = True
1415
# Can the revision graph have incorrect parents?
1416
revision_graph_can_have_wrong_parents = None
1417
# Does this format support rich root data?
1418
rich_root_data = None
1419
# Does this format support explicitly versioned directories?
1420
supports_versioned_directories = None
1423
return "%s()" % self.__class__.__name__
1425
def __eq__(self, other):
1426
# format objects are generally stateless
1427
return isinstance(other, self.__class__)
1429
def __ne__(self, other):
1430
return not self == other
780
_default_format = None
781
"""The default format used for new repositories."""
784
"""The known formats."""
1433
787
def find_format(klass, a_bzrdir):
1434
"""Return the format for the repository object in a_bzrdir.
1436
This is used by bzr native formats that have a "format" file in
1437
the repository. Other methods may be used by different types of
788
"""Return the format for the repository object in a_bzrdir."""
1441
790
transport = a_bzrdir.get_repository_transport(None)
1442
format_string = transport.get_bytes("format")
1443
return format_registry.get(format_string)
791
format_string = transport.get("format").read()
792
return klass._formats[format_string]
1444
793
except errors.NoSuchFile:
1445
794
raise errors.NoRepositoryPresent(a_bzrdir)
1446
795
except KeyError:
1447
raise errors.UnknownFormatError(format=format_string,
1451
@symbol_versioning.deprecated_method(symbol_versioning.deprecated_in((2, 4, 0)))
1452
def register_format(klass, format):
1453
format_registry.register(format)
1456
@symbol_versioning.deprecated_method(symbol_versioning.deprecated_in((2, 4, 0)))
1457
def unregister_format(klass, format):
1458
format_registry.remove(format)
1461
@symbol_versioning.deprecated_method(symbol_versioning.deprecated_in((2, 4, 0)))
796
raise errors.UnknownFormatError(format_string)
798
def _get_control_store(self, repo_transport, control_files):
799
"""Return the control store for this repository."""
800
raise NotImplementedError(self._get_control_store)
1462
803
def get_default_format(klass):
1463
804
"""Return the current default format."""
1464
return format_registry.get_default()
805
return klass._default_format
1466
807
def get_format_string(self):
1467
808
"""Return the ASCII format string that identifies this format.
1469
Note that in pre format ?? repositories the format string is
810
Note that in pre format ?? repositories the format string is
1470
811
not permitted nor written to disk.
1472
813
raise NotImplementedError(self.get_format_string)
1474
def get_format_description(self):
1475
"""Return the short description for this format."""
1476
raise NotImplementedError(self.get_format_description)
815
def _get_revision_store(self, repo_transport, control_files):
816
"""Return the revision store object for this a_bzrdir."""
817
raise NotImplementedError(self._get_revision_store)
819
def _get_text_rev_store(self,
826
"""Common logic for getting a revision store for a repository.
828
see self._get_revision_store for the subclass-overridable method to
829
get the store for a repository.
831
from bzrlib.store.revision.text import TextRevisionStore
832
dir_mode = control_files._dir_mode
833
file_mode = control_files._file_mode
834
text_store =TextStore(transport.clone(name),
836
compressed=compressed,
839
_revision_store = TextRevisionStore(text_store, serializer)
840
return _revision_store
842
def _get_versioned_file_store(self,
847
versionedfile_class=WeaveFile):
848
weave_transport = control_files._transport.clone(name)
849
dir_mode = control_files._dir_mode
850
file_mode = control_files._file_mode
851
return VersionedFileStore(weave_transport, prefixed=prefixed,
854
versionedfile_class=versionedfile_class)
1478
856
def initialize(self, a_bzrdir, shared=False):
1479
857
"""Initialize a repository of this format in a_bzrdir.
1481
859
:param a_bzrdir: The bzrdir to put the new repository in it.
1482
860
:param shared: The repository should be initialized as a sharable one.
1483
:returns: The new repository object.
1485
862
This may raise UninitializableFormat if shared repository are not
1486
863
compatible the a_bzrdir.
1488
raise NotImplementedError(self.initialize)
1490
866
def is_supported(self):
1491
867
"""Is this format supported?
1493
869
Supported formats must be initializable and openable.
1494
Unsupported formats may not support initialization or committing or
870
Unsupported formats may not support initialization or committing or
1495
871
some other features depending on the reason for not being supported.
1499
def is_deprecated(self):
1500
"""Is this format deprecated?
1502
Deprecated formats may trigger a user-visible warning recommending
1503
the user to upgrade. They are still fully supported.
1507
def network_name(self):
1508
"""A simple byte string uniquely identifying this format for RPC calls.
1510
MetaDir repository formats use their disk format string to identify the
1511
repository over the wire. All in one formats such as bzr < 0.8, and
1512
foreign formats like svn/git and hg should use some marker which is
1513
unique and immutable.
1515
raise NotImplementedError(self.network_name)
1517
def check_conversion_target(self, target_format):
1518
if self.rich_root_data and not target_format.rich_root_data:
1519
raise errors.BadConversionTarget(
1520
'Does not support rich root data.', target_format,
1522
if (self.supports_tree_reference and
1523
not getattr(target_format, 'supports_tree_reference', False)):
1524
raise errors.BadConversionTarget(
1525
'Does not support nested trees', target_format,
1528
875
def open(self, a_bzrdir, _found=False):
1529
876
"""Return an instance of this format for the bzrdir a_bzrdir.
1531
878
_found is a private parameter, do not use it.
1533
880
raise NotImplementedError(self.open)
1535
def _run_post_repo_init_hooks(self, repository, a_bzrdir, shared):
1536
from bzrlib.bzrdir import BzrDir, RepoInitHookParams
1537
hooks = BzrDir.hooks['post_repo_init']
1540
params = RepoInitHookParams(repository, self, a_bzrdir, shared)
883
def register_format(klass, format):
884
klass._formats[format.get_format_string()] = format
887
def set_default_format(klass, format):
888
klass._default_format = format
891
def unregister_format(klass, format):
892
assert klass._formats[format.get_format_string()] is format
893
del klass._formats[format.get_format_string()]
896
class PreSplitOutRepositoryFormat(RepositoryFormat):
897
"""Base class for the pre split out repository formats."""
899
def initialize(self, a_bzrdir, shared=False, _internal=False):
900
"""Create a weave repository.
902
TODO: when creating split out bzr branch formats, move this to a common
903
base for Format5, Format6. or something like that.
905
from bzrlib.weavefile import write_weave_v5
906
from bzrlib.weave import Weave
909
raise errors.IncompatibleFormat(self, a_bzrdir._format)
912
# always initialized when the bzrdir is.
913
return self.open(a_bzrdir, _found=True)
915
# Create an empty weave
917
bzrlib.weavefile.write_weave_v5(Weave(), sio)
918
empty_weave = sio.getvalue()
920
mutter('creating repository in %s.', a_bzrdir.transport.base)
921
dirs = ['revision-store', 'weaves']
922
files = [('inventory.weave', StringIO(empty_weave)),
925
# FIXME: RBC 20060125 dont peek under the covers
926
# NB: no need to escape relative paths that are url safe.
927
control_files = LockableFiles(a_bzrdir.transport, 'branch-lock',
929
control_files.create_lock()
930
control_files.lock_write()
931
control_files._transport.mkdir_multi(dirs,
932
mode=control_files._dir_mode)
934
for file, content in files:
935
control_files.put(file, content)
937
control_files.unlock()
938
return self.open(a_bzrdir, _found=True)
940
def _get_control_store(self, repo_transport, control_files):
941
"""Return the control store for this repository."""
942
return self._get_versioned_file_store('',
947
def _get_text_store(self, transport, control_files):
948
"""Get a store for file texts for this format."""
949
raise NotImplementedError(self._get_text_store)
951
def open(self, a_bzrdir, _found=False):
952
"""See RepositoryFormat.open()."""
954
# we are being called directly and must probe.
955
raise NotImplementedError
957
repo_transport = a_bzrdir.get_repository_transport(None)
958
control_files = a_bzrdir._control_files
959
text_store = self._get_text_store(repo_transport, control_files)
960
control_store = self._get_control_store(repo_transport, control_files)
961
_revision_store = self._get_revision_store(repo_transport, control_files)
962
return AllInOneRepository(_format=self,
964
_revision_store=_revision_store,
965
control_store=control_store,
966
text_store=text_store)
969
class RepositoryFormat4(PreSplitOutRepositoryFormat):
970
"""Bzr repository format 4.
972
This repository format has:
974
- TextStores for texts, inventories,revisions.
976
This format is deprecated: it indexes texts using a text id which is
977
removed in format 5; initializationa and write support for this format
982
super(RepositoryFormat4, self).__init__()
983
self._matchingbzrdir = bzrlib.bzrdir.BzrDirFormat4()
985
def initialize(self, url, shared=False, _internal=False):
986
"""Format 4 branches cannot be created."""
987
raise errors.UninitializableFormat(self)
989
def is_supported(self):
990
"""Format 4 is not supported.
992
It is not supported because the model changed from 4 to 5 and the
993
conversion logic is expensive - so doing it on the fly was not
998
def _get_control_store(self, repo_transport, control_files):
999
"""Format 4 repositories have no formal control store at this point.
1001
This will cause any control-file-needing apis to fail - this is desired.
1005
def _get_revision_store(self, repo_transport, control_files):
1006
"""See RepositoryFormat._get_revision_store()."""
1007
from bzrlib.xml4 import serializer_v4
1008
return self._get_text_rev_store(repo_transport,
1011
serializer=serializer_v4)
1013
def _get_text_store(self, transport, control_files):
1014
"""See RepositoryFormat._get_text_store()."""
1017
class RepositoryFormat5(PreSplitOutRepositoryFormat):
1018
"""Bzr control format 5.
1020
This repository format has:
1021
- weaves for file texts and inventory
1023
- TextStores for revisions and signatures.
1027
super(RepositoryFormat5, self).__init__()
1028
self._matchingbzrdir = bzrlib.bzrdir.BzrDirFormat5()
1030
def _get_revision_store(self, repo_transport, control_files):
1031
"""See RepositoryFormat._get_revision_store()."""
1032
"""Return the revision store object for this a_bzrdir."""
1033
return self._get_text_rev_store(repo_transport,
1038
def _get_text_store(self, transport, control_files):
1039
"""See RepositoryFormat._get_text_store()."""
1040
return self._get_versioned_file_store('weaves', transport, control_files, prefixed=False)
1043
class RepositoryFormat6(PreSplitOutRepositoryFormat):
1044
"""Bzr control format 6.
1046
This repository format has:
1047
- weaves for file texts and inventory
1048
- hash subdirectory based stores.
1049
- TextStores for revisions and signatures.
1053
super(RepositoryFormat6, self).__init__()
1054
self._matchingbzrdir = bzrlib.bzrdir.BzrDirFormat6()
1056
def _get_revision_store(self, repo_transport, control_files):
1057
"""See RepositoryFormat._get_revision_store()."""
1058
return self._get_text_rev_store(repo_transport,
1064
def _get_text_store(self, transport, control_files):
1065
"""See RepositoryFormat._get_text_store()."""
1066
return self._get_versioned_file_store('weaves', transport, control_files)
1545
1069
class MetaDirRepositoryFormat(RepositoryFormat):
1546
"""Common base class for the new repositories using the metadir layout."""
1548
rich_root_data = False
1549
supports_tree_reference = False
1550
supports_external_lookups = False
1551
supports_leaving_lock = True
1554
def _matchingbzrdir(self):
1555
matching = bzrdir.BzrDirMetaFormat1()
1556
matching.repository_format = self
1070
"""Common base class for the new repositories using the metadir layour."""
1559
1072
def __init__(self):
1560
1073
super(MetaDirRepositoryFormat, self).__init__()
1074
self._matchingbzrdir = bzrlib.bzrdir.BzrDirMetaFormat1()
1562
1076
def _create_control_files(self, a_bzrdir):
1563
1077
"""Create the required files and the initial control_files object."""
1564
# FIXME: RBC 20060125 don't peek under the covers
1078
# FIXME: RBC 20060125 dont peek under the covers
1565
1079
# NB: no need to escape relative paths that are url safe.
1566
1080
repository_transport = a_bzrdir.get_repository_transport(self)
1567
control_files = lockable_files.LockableFiles(repository_transport,
1568
'lock', lockdir.LockDir)
1081
control_files = LockableFiles(repository_transport, 'lock', LockDir)
1569
1082
control_files.create_lock()
1570
1083
return control_files
1573
1086
"""Upload the initial blank content."""
1574
1087
control_files = self._create_control_files(a_bzrdir)
1575
1088
control_files.lock_write()
1576
transport = control_files._transport
1578
utf8_files += [('shared-storage', '')]
1580
transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
1581
for (filename, content_stream) in files:
1582
transport.put_file(filename, content_stream,
1583
mode=a_bzrdir._get_file_mode())
1584
for (filename, content_bytes) in utf8_files:
1585
transport.put_bytes_non_atomic(filename, content_bytes,
1586
mode=a_bzrdir._get_file_mode())
1090
control_files._transport.mkdir_multi(dirs,
1091
mode=control_files._dir_mode)
1092
for file, content in files:
1093
control_files.put(file, content)
1094
for file, content in utf8_files:
1095
control_files.put_utf8(file, content)
1097
control_files.put_utf8('shared-storage', '')
1588
1099
control_files.unlock()
1590
def network_name(self):
1591
"""Metadir formats have matching disk and network format strings."""
1592
return self.get_format_string()
1595
# formats which have no format string are not discoverable or independently
1596
# creatable on disk, so are not registered in format_registry. They're
1597
# all in bzrlib.repofmt.knitreponow. When an instance of one of these is
1598
# needed, it's constructed directly by the BzrDir. Non-native formats where
1599
# the repository is not separately opened are similar.
1601
format_registry.register_lazy(
1602
'Bazaar-NG Knit Repository Format 1',
1603
'bzrlib.repofmt.knitrepo',
1604
'RepositoryFormatKnit1',
1607
format_registry.register_lazy(
1608
'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
1609
'bzrlib.repofmt.knitrepo',
1610
'RepositoryFormatKnit3',
1613
format_registry.register_lazy(
1614
'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
1615
'bzrlib.repofmt.knitrepo',
1616
'RepositoryFormatKnit4',
1619
# Pack-based formats. There is one format for pre-subtrees, and one for
1620
# post-subtrees to allow ease of testing.
1621
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
1622
format_registry.register_lazy(
1623
'Bazaar pack repository format 1 (needs bzr 0.92)\n',
1624
'bzrlib.repofmt.knitpack_repo',
1625
'RepositoryFormatKnitPack1',
1627
format_registry.register_lazy(
1628
'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
1629
'bzrlib.repofmt.knitpack_repo',
1630
'RepositoryFormatKnitPack3',
1632
format_registry.register_lazy(
1633
'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
1634
'bzrlib.repofmt.knitpack_repo',
1635
'RepositoryFormatKnitPack4',
1637
format_registry.register_lazy(
1638
'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
1639
'bzrlib.repofmt.knitpack_repo',
1640
'RepositoryFormatKnitPack5',
1642
format_registry.register_lazy(
1643
'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
1644
'bzrlib.repofmt.knitpack_repo',
1645
'RepositoryFormatKnitPack5RichRoot',
1647
format_registry.register_lazy(
1648
'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
1649
'bzrlib.repofmt.knitpack_repo',
1650
'RepositoryFormatKnitPack5RichRootBroken',
1652
format_registry.register_lazy(
1653
'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
1654
'bzrlib.repofmt.knitpack_repo',
1655
'RepositoryFormatKnitPack6',
1657
format_registry.register_lazy(
1658
'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
1659
'bzrlib.repofmt.knitpack_repo',
1660
'RepositoryFormatKnitPack6RichRoot',
1662
format_registry.register_lazy(
1663
'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
1664
'bzrlib.repofmt.groupcompress_repo',
1665
'RepositoryFormat2a',
1668
# Development formats.
1669
# Check their docstrings to see if/when they are obsolete.
1670
format_registry.register_lazy(
1671
("Bazaar development format 2 with subtree support "
1672
"(needs bzr.dev from before 1.8)\n"),
1673
'bzrlib.repofmt.knitpack_repo',
1674
'RepositoryFormatPackDevelopment2Subtree',
1676
format_registry.register_lazy(
1677
'Bazaar development format 8\n',
1678
'bzrlib.repofmt.groupcompress_repo',
1679
'RepositoryFormat2aSubtree',
1102
class RepositoryFormat7(MetaDirRepositoryFormat):
1103
"""Bzr repository 7.
1105
This repository format has:
1106
- weaves for file texts and inventory
1107
- hash subdirectory based stores.
1108
- TextStores for revisions and signatures.
1109
- a format marker of its own
1110
- an optional 'shared-storage' flag
1111
- an optional 'no-working-trees' flag
1114
def _get_control_store(self, repo_transport, control_files):
1115
"""Return the control store for this repository."""
1116
return self._get_versioned_file_store('',
1121
def get_format_string(self):
1122
"""See RepositoryFormat.get_format_string()."""
1123
return "Bazaar-NG Repository format 7"
1125
def _get_revision_store(self, repo_transport, control_files):
1126
"""See RepositoryFormat._get_revision_store()."""
1127
return self._get_text_rev_store(repo_transport,
1134
def _get_text_store(self, transport, control_files):
1135
"""See RepositoryFormat._get_text_store()."""
1136
return self._get_versioned_file_store('weaves',
1140
def initialize(self, a_bzrdir, shared=False):
1141
"""Create a weave repository.
1143
:param shared: If true the repository will be initialized as a shared
1146
from bzrlib.weavefile import write_weave_v5
1147
from bzrlib.weave import Weave
1149
# Create an empty weave
1151
bzrlib.weavefile.write_weave_v5(Weave(), sio)
1152
empty_weave = sio.getvalue()
1154
mutter('creating repository in %s.', a_bzrdir.transport.base)
1155
dirs = ['revision-store', 'weaves']
1156
files = [('inventory.weave', StringIO(empty_weave)),
1158
utf8_files = [('format', self.get_format_string())]
1160
self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
1161
return self.open(a_bzrdir=a_bzrdir, _found=True)
1163
def open(self, a_bzrdir, _found=False, _override_transport=None):
1164
"""See RepositoryFormat.open().
1166
:param _override_transport: INTERNAL USE ONLY. Allows opening the
1167
repository at a slightly different url
1168
than normal. I.e. during 'upgrade'.
1171
format = RepositoryFormat.find_format(a_bzrdir)
1172
assert format.__class__ == self.__class__
1173
if _override_transport is not None:
1174
repo_transport = _override_transport
1176
repo_transport = a_bzrdir.get_repository_transport(None)
1177
control_files = LockableFiles(repo_transport, 'lock', LockDir)
1178
text_store = self._get_text_store(repo_transport, control_files)
1179
control_store = self._get_control_store(repo_transport, control_files)
1180
_revision_store = self._get_revision_store(repo_transport, control_files)
1181
return MetaDirRepository(_format=self,
1183
control_files=control_files,
1184
_revision_store=_revision_store,
1185
control_store=control_store,
1186
text_store=text_store)
1189
class RepositoryFormatKnit1(MetaDirRepositoryFormat):
1190
"""Bzr repository knit format 1.
1192
This repository format has:
1193
- knits for file texts and inventory
1194
- hash subdirectory based stores.
1195
- knits for revisions and signatures
1196
- TextStores for revisions and signatures.
1197
- a format marker of its own
1198
- an optional 'shared-storage' flag
1199
- an optional 'no-working-trees' flag
1203
def _get_control_store(self, repo_transport, control_files):
1204
"""Return the control store for this repository."""
1205
return self._get_versioned_file_store('',
1209
versionedfile_class=KnitVersionedFile)
1211
def get_format_string(self):
1212
"""See RepositoryFormat.get_format_string()."""
1213
return "Bazaar-NG Knit Repository Format 1"
1215
def _get_revision_store(self, repo_transport, control_files):
1216
"""See RepositoryFormat._get_revision_store()."""
1217
from bzrlib.store.revision.knit import KnitRevisionStore
1218
versioned_file_store = VersionedFileStore(
1220
file_mode = control_files._file_mode,
1223
versionedfile_class=KnitVersionedFile)
1224
return KnitRevisionStore(versioned_file_store)
1226
def _get_text_store(self, transport, control_files):
1227
"""See RepositoryFormat._get_text_store()."""
1228
return self._get_versioned_file_store('knits',
1231
versionedfile_class=KnitVersionedFile)
1233
def initialize(self, a_bzrdir, shared=False):
1234
"""Create a knit format 1 repository.
1236
:param shared: If true the repository will be initialized as a shared
1238
XXX NOTE that this current uses a Weave for testing and will become
1239
A Knit in due course.
1241
from bzrlib.weavefile import write_weave_v5
1242
from bzrlib.weave import Weave
1244
# Create an empty weave
1246
bzrlib.weavefile.write_weave_v5(Weave(), sio)
1247
empty_weave = sio.getvalue()
1249
mutter('creating repository in %s.', a_bzrdir.transport.base)
1250
dirs = ['revision-store', 'knits', 'control']
1251
files = [('control/inventory.weave', StringIO(empty_weave)),
1253
utf8_files = [('format', self.get_format_string())]
1255
self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
1256
repo_transport = a_bzrdir.get_repository_transport(None)
1257
control_files = LockableFiles(repo_transport, 'lock', LockDir)
1258
control_store = self._get_control_store(repo_transport, control_files)
1259
transaction = bzrlib.transactions.WriteTransaction()
1260
# trigger a write of the inventory store.
1261
control_store.get_weave_or_empty('inventory', transaction)
1262
_revision_store = self._get_revision_store(repo_transport, control_files)
1263
_revision_store.has_revision_id('A', transaction)
1264
_revision_store.get_signature_file(transaction)
1265
return self.open(a_bzrdir=a_bzrdir, _found=True)
1267
def open(self, a_bzrdir, _found=False, _override_transport=None):
1268
"""See RepositoryFormat.open().
1270
:param _override_transport: INTERNAL USE ONLY. Allows opening the
1271
repository at a slightly different url
1272
than normal. I.e. during 'upgrade'.
1275
format = RepositoryFormat.find_format(a_bzrdir)
1276
assert format.__class__ == self.__class__
1277
if _override_transport is not None:
1278
repo_transport = _override_transport
1280
repo_transport = a_bzrdir.get_repository_transport(None)
1281
control_files = LockableFiles(repo_transport, 'lock', LockDir)
1282
text_store = self._get_text_store(repo_transport, control_files)
1283
control_store = self._get_control_store(repo_transport, control_files)
1284
_revision_store = self._get_revision_store(repo_transport, control_files)
1285
return KnitRepository(_format=self,
1287
control_files=control_files,
1288
_revision_store=_revision_store,
1289
control_store=control_store,
1290
text_store=text_store)
1293
# formats which have no format string are not discoverable
1294
# and not independently creatable, so are not registered.
1295
_default_format = RepositoryFormat7()
1296
RepositoryFormat.register_format(_default_format)
1297
RepositoryFormat.register_format(RepositoryFormatKnit1())
1298
RepositoryFormat.set_default_format(_default_format)
1299
_legacy_formats = [RepositoryFormat4(),
1300
RepositoryFormat5(),
1301
RepositoryFormat6()]
1683
1304
class InterRepository(InterObject):
1684
1305
"""This class represents operations taking place between two repositories.
1686
1307
Its instances have methods like copy_content and fetch, and contain
1687
references to the source and target repositories these operations can be
1308
references to the source and target repositories these operations can be
1688
1309
carried out on.
1690
1311
Often we will provide convenience methods on 'repository' which carry out
1692
1313
InterRepository.get(other).method_name(parameters).
1696
1317
"""The available optimised InterRepository types."""
1698
1319
@needs_write_lock
1699
def copy_content(self, revision_id=None):
1320
def copy_content(self, revision_id=None, basis=None):
1700
1321
"""Make a complete copy of the content in self into destination.
1702
This is a destructive operation! Do not use it on existing
1323
This is a destructive operation! Do not use it on existing
1705
1326
:param revision_id: Only copy the content needed to construct
1706
1327
revision_id and its parents.
1328
:param basis: Copy the needed data preferentially from basis.
1709
1331
self.target.set_make_working_trees(self.source.make_working_trees())
1710
1332
except NotImplementedError:
1334
# grab the basis available data
1335
if basis is not None:
1336
self.target.fetch(basis, revision_id=revision_id)
1337
# but dont bother fetching if we have the needed data now.
1338
if (revision_id not in (None, NULL_REVISION) and
1339
self.target.has_revision(revision_id)):
1712
1341
self.target.fetch(self.source, revision_id=revision_id)
1343
def _double_lock(self, lock_source, lock_target):
1344
"""Take out too locks, rolling back the first if the second throws."""
1349
# we want to ensure that we don't leave source locked by mistake.
1350
# and any error on target should not confuse source.
1351
self.source.unlock()
1714
1354
@needs_write_lock
1715
def fetch(self, revision_id=None, find_ghosts=False,
1355
def fetch(self, revision_id=None, pb=None):
1717
1356
"""Fetch the content required to construct revision_id.
1719
The content is copied from self.source to self.target.
1358
The content is copied from source to target.
1721
1360
:param revision_id: if None all content is copied, if NULL_REVISION no
1722
1361
content is copied.
1725
raise NotImplementedError(self.fetch)
1362
:param pb: optional progress bar to use for progress reports. If not
1363
provided a default one will be created.
1365
Returns the copied revision count and the failed revisions in a tuple:
1368
from bzrlib.fetch import GenericRepoFetcher
1369
mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
1370
self.source, self.source._format, self.target, self.target._format)
1371
f = GenericRepoFetcher(to_repository=self.target,
1372
from_repository=self.source,
1373
last_revision=revision_id,
1375
return f.count_copied, f.failed_revisions
1377
def lock_read(self):
1378
"""Take out a logical read lock.
1380
This will lock the source branch and the target branch. The source gets
1381
a read lock and the target a read lock.
1383
self._double_lock(self.source.lock_read, self.target.lock_read)
1385
def lock_write(self):
    """Take out a logical write lock.

    This will lock the source branch and the target branch. The source gets
    a read lock and the target a write lock.
    """
    self._double_lock(self.source.lock_read, self.target.lock_write)
1727
1393
@needs_read_lock
def search_missing_revision_ids(self,
        revision_id=symbol_versioning.DEPRECATED_PARAMETER,
        find_ghosts=True, revision_ids=None, if_present_ids=None,
        limit=None):
    """Return the revision ids that source has that target does not.

    NOTE(review): this span interleaves the docstring of an older
    ``missing_revision_ids(revision_id=None)`` with this newer abstract
    method; the newer interface is reconstructed here as most of the
    parameter documentation belongs to it.

    :param revision_id: only return revision ids included by this
        revision_id.  (Deprecated -- use ``revision_ids`` instead.)
    :param revision_ids: return revision ids included by these
        revision_ids.  NoSuchRevision will be raised if any of these
        revisions are not present.
    :param if_present_ids: like revision_ids, but will not cause
        NoSuchRevision if any of these are absent, instead they will simply
        not be in the result.  This is useful for e.g. finding revisions
        to fetch for tags, which may reference absent revisions.
    :param find_ghosts: If True find missing revisions in deep history
        rather than just finding the surface difference.
    :param limit: Maximum number of revisions to return, topologically
        ordered.
    :return: A bzrlib.graph.SearchResult.
    """
    raise NotImplementedError(self.search_missing_revision_ids)
1752
@staticmethod
def _same_model(source, target):
    """True if source and target have the same data representation.

    Note: this is always called on the base class; overriding it in a
    subclass will have no effect.
    """
    # NOTE(review): the try/return scaffolding was lost to corruption and
    # is reconstructed here; the unused exception variable (``, e`` --
    # Python 2-only syntax) has been dropped.
    try:
        InterRepository._assert_same_model(source, target)
        return True
    except errors.IncompatibleRepositories:
        return False
def _assert_same_model(source, target):
1766
"""Raise an exception if two repositories do not use the same model.
1768
if source.supports_rich_root() != target.supports_rich_root():
1769
raise errors.IncompatibleRepositories(source, target,
1770
"different rich-root support")
1771
if source._serializer != target._serializer:
1772
raise errors.IncompatibleRepositories(source, target,
1773
"different serializers")
# NOTE(review): this span is an orphaned body fragment of the older generic
# InterRepository.missing_revision_ids implementation.  The bare numeric
# lines (1402, 1403, ...) are artifacts of an interleaved diff/merge and are
# not part of the code; the `def` line and an `else:` introducer were lost
# to the same corruption.  Left byte-identical -- do not assume it parses.
1402
# generic, possibly worst case, slow code path.
1403
target_ids = set(self.target.all_revision_ids())
1404
if revision_id is not None:
1405
source_ids = self.source.get_ancestry(revision_id)
1406
# get_ancestry leads with a None entry that is popped and checked here.
assert source_ids.pop(0) == None
1408
# NOTE(review): an `else:` branch introducer appears to be missing above
# this line.
source_ids = self.source.all_revision_ids()
1409
result_set = set(source_ids).difference(target_ids)
1410
# this may look like a no-op: its not. It preserves the ordering
1411
# other_ids had while only returning the members from other_ids
1412
# that we've decided we need.
1413
return [rev_id for rev_id in source_ids if rev_id in result_set]
def unlock(self):
    """Release the locks on source and target."""
    # NOTE(review): the `def` line and try/finally were lost to corruption;
    # reconstructed so that source is always unlocked even if unlocking
    # target raises (mirroring the rollback in _double_lock).
    try:
        self.target.unlock()
    finally:
        self.source.unlock()
1423
# NOTE(review): the bare numeric lines below (1424, 1426, 1427) are
# artifacts of an interleaved diff/merge, not code.  Class body indentation
# was stripped by the same corruption.
class InterWeaveRepo(InterRepository):
1424
"""Optimised code paths between Weave based repositories."""
1426
# Format used when this optimiser is exercised by the test adapters below.
_matching_repo_format = _default_format
1427
"""Repository format for testing with."""
1430
@staticmethod
def is_compatible(source, target):
    """Be compatible with known Weave formats.

    We dont test for the stores being of specific types becase that
    could lead to confusing results, and there is no need to be
    overly general.

    NOTE(review): the corrupted source shows only RepositoryFormat5 and
    RepositoryFormat7 in the isinstance tuples; an intermediate format
    line may have been dropped by the interleave -- confirm against a
    clean copy.
    """
    try:
        return (isinstance(source._format, (RepositoryFormat5,
                                            RepositoryFormat7)) and
                isinstance(target._format, (RepositoryFormat5,
                                            RepositoryFormat7)))
    except AttributeError:
        # One side has no _format attribute: not a repository we know.
        return False
1448
# NOTE(review): this method is corrupted by an interleaved diff -- the bare
# numeric lines are merge artifacts, and several try:/else: introducers and
# closing keyword arguments were lost.  Left byte-identical; it does not
# parse as-is.
def copy_content(self, revision_id=None, basis=None):
1449
"""See InterRepository.copy_content()."""
1450
# weave specific optimised path:
1451
if basis is not None:
1452
# copy the basis in, then fetch remaining data.
1453
basis.copy_content_into(self.target, revision_id)
1454
# the basis copy_content_into could misset this.
1456
# NOTE(review): a `try:` introducing the next statement is missing here.
self.target.set_make_working_trees(self.source.make_working_trees())
1457
except NotImplementedError:
1459
self.target.fetch(self.source, revision_id=revision_id)
1462
# NOTE(review): an `else:` branch plus another `try:` are missing here.
self.target.set_make_working_trees(self.source.make_working_trees())
1463
except NotImplementedError:
1465
# FIXME do not peek!
1466
if self.source.control_files._transport.listable():
1467
pb = bzrlib.ui.ui_factory.nested_progress_bar()
1469
self.target.weave_store.copy_all_ids(
1470
self.source.weave_store,
1472
from_transaction=self.source.get_transaction(),
1473
to_transaction=self.target.get_transaction())
1474
pb.update('copying inventory', 0, 1)
1475
self.target.control_weaves.copy_multi(
1476
self.source.control_weaves, ['inventory'],
1477
from_transaction=self.source.get_transaction(),
1478
to_transaction=self.target.get_transaction())
1479
self.target._revision_store.text_store.copy_all_ids(
1480
self.source._revision_store.text_store,
1485
# NOTE(review): the call above is unterminated and an `else:` is missing
# before the fallback fetch below.
self.target.fetch(self.source, revision_id=revision_id)
1488
def fetch(self, revision_id=None, pb=None):
    """See InterRepository.fetch().

    :param revision_id: if None all content is copied.
    :param pb: optional progress bar for progress reports.
    :return: (copied revision count, failed revisions) tuple.
    """
    # Function-scope import -- presumably to avoid a module-level import
    # cycle between repository and fetch; TODO confirm.
    from bzrlib.fetch import GenericRepoFetcher
    mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
           self.source, self.source._format, self.target, self.target._format)
    f = GenericRepoFetcher(to_repository=self.target,
                           from_repository=self.source,
                           last_revision=revision_id,
                           pb=pb)
    return f.count_copied, f.failed_revisions
1500
def missing_revision_ids(self, revision_id=None):
    """See InterRepository.missing_revision_ids().

    :param revision_id: if not None, limit the result to the ancestry of
        this revision in source.
    :return: revision ids present in source but not in target, in the
        order source supplied them.
    """
    # we want all revisions to satisfy revision_id in source.
    # but we dont want to stat every file here and there.
    # we want then, all revisions other needs to satisfy revision_id
    # checked, but not those that we have locally.
    # so the first thing is to get a subset of the revisions to
    # satisfy revision_id in source, and then eliminate those that
    # we do already have.
    # this is slow on high latency connection to self, but as as this
    # disk format scales terribly for push anyway due to rewriting
    # inventory.weave, this is considered acceptable.
    if revision_id is not None:
        source_ids = self.source.get_ancestry(revision_id)
        # get_ancestry leads with a synthetic None entry; identity check
        # ('is None') replaces the original '== None'.
        assert source_ids.pop(0) is None
    else:
        source_ids = self.source._all_possible_ids()
    source_ids_set = set(source_ids)
    # source_ids is the worst possible case we may need to pull.
    # now we want to filter source_ids against what we actually
    # have in target, but dont try to check for existence where we know
    # we do not have a revision as that would be pointless.
    target_ids = set(self.target._all_possible_ids())
    possibly_present_revisions = target_ids.intersection(source_ids_set)
    actually_present_revisions = set(
        self.target._eliminate_revisions_not_present(
            possibly_present_revisions))
    required_revisions = source_ids_set.difference(
        actually_present_revisions)
    required_topo_revisions = [rev_id for rev_id in source_ids
                               if rev_id in required_revisions]
    if revision_id is not None:
        # we used get_ancestry to determine source_ids then we are assured all
        # revisions referenced are present as they are installed in topological order.
        # and the tip revision was validated by get_ancestry.
        return required_topo_revisions
    else:
        # if we just grabbed the possibly available ids, then
        # we only have an estimate of whats available and need to validate
        # that against the revision records.
        return self.source._eliminate_revisions_not_present(
            required_topo_revisions)
1540
# NOTE(review): the bare numeric lines below (1541, 1543, 1544) are
# artifacts of an interleaved diff/merge, not code.  Class body indentation
# was stripped by the same corruption.
class InterKnitRepo(InterRepository):
1541
"""Optimised code paths between Knit based repositories."""
1543
# Format used when this optimiser is exercised by the test adapters below.
_matching_repo_format = RepositoryFormatKnit1()
1544
"""Repository format for testing with."""
1547
@staticmethod
def is_compatible(source, target):
    """Be compatible with known Knit formats.

    We dont test for the stores being of specific types becase that
    could lead to confusing results, and there is no need to be
    overly general.
    """
    # NOTE(review): the original wrote `(RepositoryFormatKnit1)` -- a
    # parenthesised single name, not a tuple; the redundant parentheses
    # are dropped here (identical behaviour).
    try:
        return (isinstance(source._format, RepositoryFormatKnit1) and
                isinstance(target._format, RepositoryFormatKnit1))
    except AttributeError:
        # One side has no _format attribute: not a repository we know.
        return False
1561
def fetch(self, revision_id=None, pb=None):
    """See InterRepository.fetch().

    :param revision_id: if None all content is copied.
    :param pb: optional progress bar for progress reports.
    :return: (copied revision count, failed revisions) tuple.
    """
    # Function-scope import -- presumably to avoid a module-level import
    # cycle between repository and fetch; TODO confirm.
    from bzrlib.fetch import KnitRepoFetcher
    mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
           self.source, self.source._format, self.target, self.target._format)
    f = KnitRepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        pb=pb)
    return f.count_copied, f.failed_revisions
1573
def missing_revision_ids(self, revision_id=None):
    """See InterRepository.missing_revision_ids().

    :param revision_id: if not None, limit the result to the ancestry of
        this revision in source.
    :return: revision ids present in source but not in target, in the
        order source supplied them.
    """
    if revision_id is not None:
        source_ids = self.source.get_ancestry(revision_id)
        # get_ancestry leads with a synthetic None entry; identity check
        # ('is None') replaces the original '== None'.
        assert source_ids.pop(0) is None
    else:
        source_ids = self.source._all_possible_ids()
    source_ids_set = set(source_ids)
    # source_ids is the worst possible case we may need to pull.
    # now we want to filter source_ids against what we actually
    # have in target, but dont try to check for existence where we know
    # we do not have a revision as that would be pointless.
    target_ids = set(self.target._all_possible_ids())
    possibly_present_revisions = target_ids.intersection(source_ids_set)
    actually_present_revisions = set(
        self.target._eliminate_revisions_not_present(
            possibly_present_revisions))
    required_revisions = source_ids_set.difference(
        actually_present_revisions)
    required_topo_revisions = [rev_id for rev_id in source_ids
                               if rev_id in required_revisions]
    if revision_id is not None:
        # we used get_ancestry to determine source_ids then we are assured all
        # revisions referenced are present as they are installed in topological order.
        # and the tip revision was validated by get_ancestry.
        return required_topo_revisions
    else:
        # if we just grabbed the possibly available ids, then
        # we only have an estimate of whats available and need to validate
        # that against the revision records.
        return self.source._eliminate_revisions_not_present(
            required_topo_revisions)
1601
# Register the optimised InterRepository implementations so that
# InterRepository.get() can select them for matching repository pairs.
# (The bare numeric line between the two calls is a diff/merge artifact.)
InterRepository.register_optimiser(InterWeaveRepo)
1602
InterRepository.register_optimiser(InterKnitRepo)
1605
class RepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple repository formats at once.

    This is done by copying the test once for each transport and injecting
    the transport_server, transport_readonly_server, and bzrdir_format and
    repository_format classes into each copy. Each copy is also given a new id()
    to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        """Return a TestSuite with one configured copy of test per format."""
        result = TestSuite()
        for repository_format, bzrdir_format in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.bzrdir_format = bzrdir_format
            new_test.repository_format = repository_format
            def make_new_test_id():
                # bind the id string now so later loop iterations cannot
                # rebind new_test/repository_format underneath the lambda.
                new_id = "%s(%s)" % (new_test.id(), repository_format.__class__.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        # the final `return result` was lost to corruption in the source;
        # restored here (result is otherwise built and dropped).
        return result
1635
class InterRepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple inter repository formats.

    This is done by copying the test once for each interrepo provider and injecting
    the transport_server, transport_readonly_server, repository_format and
    repository_to_format classes into each copy.
    Each copy is also given a new id() to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        """Return a TestSuite with one configured copy of test per permutation."""
        result = TestSuite()
        for interrepo_class, repository_format, repository_format_to in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.interrepo_class = interrepo_class
            new_test.repository_format = repository_format
            new_test.repository_format_to = repository_format_to
            def make_new_test_id():
                # bind the id string now so later loop iterations cannot
                # rebind new_test/interrepo_class underneath the lambda.
                new_id = "%s(%s)" % (new_test.id(), interrepo_class.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        # the final `return result` was lost to corruption in the source;
        # restored here (result is otherwise built and dropped).
        return result
1666
def default_test_list():
    """Generate the default list of interrepo permutations to test."""
    # NOTE(review): `result = []`, the tuple closer and the final return
    # were lost to corruption and are restored here; result is otherwise
    # appended to and dropped.
    result = []
    # test the default InterRepository between format 6 and the current
    # default format.
    # XXX: robertc 20060220 reinstate this when there are two supported
    # formats which do not have an optimal code path between them.
    result.append((InterRepository,
                   RepositoryFormat6(),
                   RepositoryFormatKnit1()))
    for optimiser in InterRepository._optimisers:
        result.append((optimiser,
                       optimiser._matching_repo_format,
                       optimiser._matching_repo_format))
    # if there are specific combinations we want to use, we can add them
    # here.
    return result
1776
1686
class CopyConverter(object):
1777
1687
"""A repository conversion tool which just performs a copy of the content.
1779
1689
This is slow but quite reliable.