# Copyright (C) 2005 Canonical Ltd

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

from copy import deepcopy
from cStringIO import StringIO
from unittest import TestSuite
import xml.sax.saxutils

import bzrlib.bzrdir as bzrdir
from bzrlib.decorators import needs_read_lock, needs_write_lock
import bzrlib.errors as errors
from bzrlib.errors import InvalidRevisionId, BzrError
from bzrlib.lockable_files import LockableFiles
from bzrlib.osutils import safe_unicode
from bzrlib.revision import NULL_REVISION
from bzrlib.store import copy_all
from bzrlib.store.weave import WeaveStore
from bzrlib.store.text import TextStore
from bzrlib.symbol_versioning import *
from bzrlib.trace import mutter
from bzrlib.tree import RevisionTree
from bzrlib.testament import Testament
from bzrlib.tree import EmptyTree
import bzrlib.ui
import bzrlib.xml5


class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of Stores and a Transport, which respectively
    describe the disk data format and the way of accessing the (possibly
    remote) disk.
    """

    def _all_possible_ids(self):
        """Return all the possible revisions that we could find."""
        return self.get_inventory_weave().names()

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        present: for weaves ghosts may lead to a lack of correctness until
        the reweave updates the parents list.
        """
        result = self._all_possible_ids()
        return self._eliminate_revisions_not_present(result)

    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        result = []
        for id in revision_ids:
            if self.has_revision(id):
                result.append(id)
        return result

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir):
        object.__init__(self)
        if isinstance(_format, (RepositoryFormat4,
                                RepositoryFormat5,
                                RepositoryFormat6)):
            # legacy: use a common control files.
            self.control_files = a_bzrdir._control_files
        else:
            self.control_files = LockableFiles(a_bzrdir.get_repository_transport(None),
                                               'lock')

        dir_mode = self.control_files._dir_mode
        file_mode = self.control_files._file_mode
        self._format = _format
        self.bzrdir = a_bzrdir

        def get_weave(name, prefixed=False):
            if name:
                name = safe_unicode(name)
            else:
                name = ''
            relpath = self.control_files._escape(name)
            weave_transport = self.control_files._transport.clone(relpath)
            ws = WeaveStore(weave_transport, prefixed=prefixed,
                            dir_mode=dir_mode,
                            file_mode=file_mode)
            if self.control_files._transport.should_cache():
                ws.enable_cache = True
            return ws

        def get_store(name, compressed=True, prefixed=False):
            # FIXME: This approach of assuming stores are all entirely compressed
            # or entirely uncompressed is tidy, but breaks upgrade from
            # some existing branches where there's a mixture; we probably
            # still want the option to look for both.
            if name:
                name = safe_unicode(name)
            else:
                name = ''
            relpath = self.control_files._escape(name)
            store = TextStore(self.control_files._transport.clone(relpath),
                              prefixed=prefixed, compressed=compressed,
                              dir_mode=dir_mode,
                              file_mode=file_mode)
            #if self._transport.should_cache():
            #    cache_path = os.path.join(self.cache_root, name)
            #    os.mkdir(cache_path)
            #    store = bzrlib.store.CachedStore(store, cache_path)
            return store

        if isinstance(self._format, RepositoryFormat4):
            self.inventory_store = get_store('inventory-store')
            self.text_store = get_store('text-store')
            self.revision_store = get_store('revision-store')
        elif isinstance(self._format, RepositoryFormat5):
            self.control_weaves = get_weave('')
            self.weave_store = get_weave('weaves')
            self.revision_store = get_store('revision-store', compressed=False)
        elif isinstance(self._format, RepositoryFormat6):
            self.control_weaves = get_weave('')
            self.weave_store = get_weave('weaves', prefixed=True)
            self.revision_store = get_store('revision-store', compressed=False,
                                            prefixed=True)
        elif isinstance(self._format, RepositoryFormat7):
            self.control_weaves = get_weave('')
            self.weave_store = get_weave('weaves', prefixed=True)
            self.revision_store = get_store('revision-store', compressed=False,
                                            prefixed=True)
        self.revision_store.register_suffix('sig')

    def lock_write(self):
        self.control_files.lock_write()

    def lock_read(self):
        self.control_files.lock_read()

    def missing_revision_ids(self, other, revision_id=None):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).missing_revision_ids(revision_id)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None, basis=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id, basis)

    def fetch(self, source, revision_id=None, pb=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None all content is copied.
        """
        return InterRepository.get(source, self).fetch(revision_id=revision_id,
                                                       pb=pb)

    def unlock(self):
        self.control_files.unlock()

    def clone(self, a_bzrdir, revision_id=None, basis=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.
        """
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            result = a_bzrdir.create_repository()
            # FIXME RBC 20060209 split out the repository type to avoid this check ?
        elif isinstance(a_bzrdir._format,
                        (bzrdir.BzrDirFormat4,
                         bzrdir.BzrDirFormat5,
                         bzrdir.BzrDirFormat6)):
            result = a_bzrdir.open_repository()
        else:
            result = self._format.initialize(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(result, revision_id, basis)
        return result

    def has_revision(self, revision_id):
        """True if this branch has a copy of the revision.

        This does not necessarily imply the revision is merged
        or on the mainline."""
        return (revision_id is None
                or self.revision_store.has_id(revision_id))

    def get_revision_xml_file(self, revision_id):
        """Return XML file object for revision object."""
        if not revision_id or not isinstance(revision_id, basestring):
            raise InvalidRevisionId(revision_id=revision_id, branch=self)
        try:
            return self.revision_store.get(revision_id)
        except (IndexError, KeyError):
            raise bzrlib.errors.NoSuchRevision(self, revision_id)

    def get_revision_xml(self, revision_id):
        return self.get_revision_xml_file(revision_id).read()

    def get_revision(self, revision_id):
        """Return the Revision object for a named revision"""
        xml_file = self.get_revision_xml_file(revision_id)

        try:
            r = bzrlib.xml5.serializer_v5.read_revision(xml_file)
        except SyntaxError, e:
            raise bzrlib.errors.BzrError('failed to unpack revision_xml',
                                         [revision_id,
                                          str(e)])

        assert r.revision_id == revision_id
        return r

    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed. At that
        # point, it is probably best *not* to include the signature
        # in the revision hash. Because that lets you re-sign
        # the revision, (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return bzrlib.osutils.sha_file(self.get_revision_xml_file(revision_id))

    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        self.revision_store.add(StringIO(gpg_strategy.sign(plaintext)),
                                revision_id, "sig")

    def fileid_involved_between_revs(self, from_revid, to_revid):
        """Find file_id(s) which are involved in the changes between revisions.

        This determines the set of revisions which are involved, and then
        finds all file ids affected by those revisions.
        """
        # TODO: jam 20060119 This code assumes that w.inclusions will
        #       always be correct. But because of the presence of ghosts
        #       it is possible to be wrong.
        #       One specific example from Robert Collins:
        #       Two branches, with revisions ABC, and AD
        #       C is a ghost merge of D.
        #       Inclusions doesn't recognize D as an ancestor.
        #       If D is ever merged in the future, the weave
        #       won't be fixed, because AD never saw revision C
        #       to cause a conflict which would force a reweave.
        w = self.get_inventory_weave()
        from_set = set(w.inclusions([w.lookup(from_revid)]))
        to_set = set(w.inclusions([w.lookup(to_revid)]))
        included = to_set.difference(from_set)
        changed = map(w.idx_to_name, included)
        return self._fileid_involved_by_set(changed)

    def fileid_involved(self, last_revid=None):
        """Find all file_ids modified in the ancestry of last_revid.

        :param last_revid: If None, last_revision() will be used.
        """
        w = self.get_inventory_weave()
        if not last_revid:
            changed = set(w._names)
        else:
            included = w.inclusions([w.lookup(last_revid)])
            changed = map(w.idx_to_name, included)
        return self._fileid_involved_by_set(changed)

    def fileid_involved_by_set(self, changes):
        """Find all file_ids modified by the set of revisions passed in.

        :param changes: A set() of revision ids
        """
        # TODO: jam 20060119 This line does *nothing*, remove it.
        #       or better yet, change _fileid_involved_by_set so
        #       that it takes the inventory weave, rather than
        #       pulling it out by itself.
        return self._fileid_involved_by_set(changes)

    def _fileid_involved_by_set(self, changes):
        """Find the set of file-ids affected by the set of revisions.

        :param changes: A set() of revision ids.
        :return: A set() of file ids.

        This peeks at the Weave, interpreting each line, looking to
        see if it mentions one of the revisions. And if so, includes
        the file id mentioned.
        This expects both the Weave format, and the serialization
        to have a single line per file/directory, and to have
        fileid="" and revision="" on that line.
        """
        assert isinstance(self._format, (RepositoryFormat5,
                                         RepositoryFormat6,
                                         RepositoryFormat7)), \
            "fileid_involved only supported for branches which store inventory as unnested xml"

        w = self.get_inventory_weave()
        file_ids = set()
        for line in w._weave:

            # it is ugly, but it is due to the weave structure
            if not isinstance(line, basestring): continue

            start = line.find('file_id="')+9
            if start < 9: continue
            end = line.find('"', start)

            file_id = xml.sax.saxutils.unescape(line[start:end])

            # check if file_id is already present
            if file_id in file_ids: continue

            start = line.find('revision="')+10
            if start < 10: continue
            end = line.find('"', start)

            revision_id = xml.sax.saxutils.unescape(line[start:end])

            if revision_id in changes:
                file_ids.add(file_id)
        return file_ids
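
    # A minimal sketch of the line parsing above (illustrative only; the sample
    # line and ids below are made up, not taken from the original source):
    #
    #   line = '<file file_id="readme-xyz" revision="rev-1" name="README"/>'
    #   start = line.find('file_id="') + 9   # index just past the opening quote
    #   end = line.find('"', start)          # index of the closing quote
    #   line[start:end]                      # -> 'readme-xyz'
    #
    # so a serialized inventory line contributes its file id whenever its
    # revision attribute is in `changes`.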

    def get_inventory_weave(self):
        return self.control_weaves.get_weave('inventory',
            self.get_transaction())

    def get_inventory(self, revision_id):
        """Get Inventory object by hash."""
        xml = self.get_inventory_xml(revision_id)
        return bzrlib.xml5.serializer_v5.read_inventory_from_string(xml)

    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        try:
            assert isinstance(revision_id, basestring), type(revision_id)
            iw = self.get_inventory_weave()
            return iw.get_text(iw.lookup(revision_id))
        except IndexError:
            raise bzrlib.errors.HistoryMissing(self, 'inventory', revision_id)

    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry
        """
        return self.get_revision(revision_id).inventory_sha1

    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        # FIXME format 4-6 cannot be shared, this is technically faulty.
        return self.control_files._transport.has('shared-storage')

    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be None for the null revision, in which case
        an `EmptyTree` is returned."""
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id is None or revision_id == NULL_REVISION:
            return EmptyTree()
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def get_ancestry(self, revision_id):
        """Return a list of revision-ids integrated by a revision.

        This is topologically sorted.
        """
        if revision_id is None:
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        w = self.get_inventory_weave()
        return [None] + map(w.idx_to_name,
                            w.inclusions([w.lookup(revision_id)]))

    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            try:
                revno = self.revision_id_to_revno(revision_id)
            except errors.NoSuchRevision:
                # TODO: This should not be BzrError,
                # but NoSuchFile doesn't fit either
                raise BzrError('%r is not present in revision %s'
                               % (file, revision_id))
            else:
                raise BzrError('%r is not present in revision %s'
                               % (file, revno))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        # FIXME: split out into a new class/strategy ?
        if isinstance(self._format, (RepositoryFormat4,
                                     RepositoryFormat5,
                                     RepositoryFormat6)):
            raise NotImplementedError(self.set_make_working_trees)
        if new_value:
            try:
                self.control_files._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self.control_files.put_utf8('no-working-trees', '')

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        # FIXME: split out into a new class/strategy ?
        if isinstance(self._format, (RepositoryFormat4,
                                     RepositoryFormat5,
                                     RepositoryFormat6)):
            raise NotImplementedError(self.make_working_trees)
        return not self.control_files._transport.has('no-working-trees')

    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)
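

# A hedged usage sketch (illustrative, not part of the original module): a
# Repository is normally reached through a Branch, but it can also be used
# directly.  `url` and `some_revision_id` below are placeholders.
#
#   repository = Repository.open(url)
#   repository.lock_read()
#   try:
#       for revision_id in repository.get_ancestry(some_revision_id)[1:]:
#           revision = repository.get_revision(revision_id)
#   finally:
#       repository.unlock()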


class RepositoryFormat(object):
    """A repository format.

    Formats provide three things:
     * An initialization routine to construct repository data on disk.
     * a format string which is used when the BzrDir supports versioned
       children.
     * an open routine which returns a Repository instance.

    Formats are placed in a dict by their format string for reference
    during opening. These should be subclasses of RepositoryFormat
    for consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every system load.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterisation.
    """

    _default_format = None
    """The default format used for new repositories."""

    _formats = {}
    """The known formats."""

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir."""
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return klass._formats[format_string]
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format_string)

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        return klass._default_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir in which to put the new repository.
        :param shared: The repository should be initialized as a sharable one.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError
        return Repository(_format=self, a_bzrdir=a_bzrdir)

    @classmethod
    def register_format(klass, format):
        klass._formats[format.get_format_string()] = format

    @classmethod
    def set_default_format(klass, format):
        klass._default_format = format

    @classmethod
    def unregister_format(klass, format):
        assert klass._formats[format.get_format_string()] is format
        del klass._formats[format.get_format_string()]
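

# Sketch of the registry round-trip above (illustrative, not from the original
# source): a format's initialize() is expected to write its format string into
# the new repository's 'format' file, and find_format() later reads that file
# back and looks the string up in the dict populated by register_format().  A
# format subclass therefore only becomes discoverable once it is registered:
#
#   RepositoryFormat.register_format(MyRepositoryFormat())   # hypothetical subclass
#   # RepositoryFormat.find_format(a_bzrdir) can now return that instance.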


class PreSplitOutRepositoryFormat(RepositoryFormat):
    """Base class for the pre split out repository formats."""

    def initialize(self, a_bzrdir, shared=False, _internal=False):
        """Create a weave repository.

        TODO: when creating split out bzr branch formats, move this to a common
        base for Format5, Format6. or something like that.
        """
        from bzrlib.weavefile import write_weave_v5
        from bzrlib.weave import Weave

        if shared:
            raise errors.IncompatibleFormat(self, a_bzrdir._format)

        if not _internal:
            # always initialized when the bzrdir is.
            return Repository(_format=self, a_bzrdir=a_bzrdir)

        # Create an empty weave
        sio = StringIO()
        bzrlib.weavefile.write_weave_v5(Weave(), sio)
        empty_weave = sio.getvalue()

        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['revision-store', 'weaves']
        lock_file = 'branch-lock'
        files = [('inventory.weave', StringIO(empty_weave)),
                 ]

        # FIXME: RBC 20060125 dont peek under the covers
        # NB: no need to escape relative paths that are url safe.
        control_files = LockableFiles(a_bzrdir.transport, 'branch-lock')
        control_files.lock_write()
        control_files._transport.mkdir_multi(dirs,
                mode=control_files._dir_mode)
        try:
            for file, content in files:
                control_files.put(file, content)
        finally:
            control_files.unlock()
        return Repository(_format=self, a_bzrdir=a_bzrdir)


class RepositoryFormat4(PreSplitOutRepositoryFormat):
    """Bzr repository format 4.

    This repository format has:
     - flat stores
     - TextStores for texts, inventories, revisions.

    This format is deprecated: it indexes texts using a text id which is
    removed in format 5; initialization and write support for this format
    has been removed.
    """

    def __init__(self):
        super(RepositoryFormat4, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat4()

    def initialize(self, url, shared=False, _internal=False):
        """Format 4 branches cannot be created."""
        raise errors.UninitializableFormat(self)

    def is_supported(self):
        """Format 4 is not supported.

        It is not supported because the model changed from 4 to 5 and the
        conversion logic is expensive - so doing it on the fly was not
        feasible.
        """
        return False


class RepositoryFormat5(PreSplitOutRepositoryFormat):
    """Bzr control format 5.

    This repository format has:
     - weaves for file texts and inventory
     - flat stores
     - TextStores for revisions and signatures.
    """

    def __init__(self):
        super(RepositoryFormat5, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat5()


class RepositoryFormat6(PreSplitOutRepositoryFormat):
    """Bzr control format 6.

    This repository format has:
     - weaves for file texts and inventory
     - hash subdirectory based stores.
     - TextStores for revisions and signatures.
    """

    def __init__(self):
        super(RepositoryFormat6, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat6()


class RepositoryFormat7(RepositoryFormat):
    """Bzr repository format 7.

    This repository format has:
     - weaves for file texts and inventory
     - hash subdirectory based stores.
     - TextStores for revisions and signatures.
     - a format marker of its own
     - an optional 'shared-storage' flag
    """

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar-NG Repository format 7"

    def initialize(self, a_bzrdir, shared=False):
        """Create a weave repository.

        :param shared: If true the repository will be initialized as a shared
                       repository.
        """
        from bzrlib.weavefile import write_weave_v5
        from bzrlib.weave import Weave

        # Create an empty weave
        sio = StringIO()
        bzrlib.weavefile.write_weave_v5(Weave(), sio)
        empty_weave = sio.getvalue()

        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['revision-store', 'weaves']
        files = [('inventory.weave', StringIO(empty_weave)),
                 ]
        utf8_files = [('format', self.get_format_string())]

        # FIXME: RBC 20060125 dont peek under the covers
        # NB: no need to escape relative paths that are url safe.
        lock_file = 'lock'
        repository_transport = a_bzrdir.get_repository_transport(self)
        repository_transport.put(lock_file, StringIO()) # TODO get the file mode from the bzrdir lock files., mode=file_mode)
        control_files = LockableFiles(repository_transport, 'lock')
        control_files.lock_write()
        control_files._transport.mkdir_multi(dirs,
                mode=control_files._dir_mode)
        try:
            for file, content in files:
                control_files.put(file, content)
            for file, content in utf8_files:
                control_files.put_utf8(file, content)
            if shared:
                control_files.put_utf8('shared-storage', '')
        finally:
            control_files.unlock()
        return Repository(_format=self, a_bzrdir=a_bzrdir)

    def __init__(self):
        super(RepositoryFormat7, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirMetaFormat1()


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
_default_format = RepositoryFormat7()
RepositoryFormat.register_format(_default_format)
RepositoryFormat.set_default_format(_default_format)
_legacy_formats = [RepositoryFormat4(),
                   RepositoryFormat5(),
                   RepositoryFormat6()]
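
# Illustrative note (not from the original source): after the registrations
# above, RepositoryFormat.get_default_format() returns the RepositoryFormat7
# instance, so creating a repository in an existing control directory
# (`a_bzrdir` is a placeholder) boils down to:
#
#   repository = Repository.create(a_bzrdir)
#   # i.e. RepositoryFormat.get_default_format().initialize(a_bzrdir)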


class InterRepository(object):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """
    # XXX: FIXME: FUTURE: robertc
    # testing of these probably requires a factory in optimiser type, and
    # then a test adapter to test each type thoroughly.

    _optimisers = set()
    """The available optimised InterRepository types."""

    def __init__(self, source, target):
        """Construct a default InterRepository instance. Please use 'get'.

        Only subclasses of InterRepository should call
        InterRepository.__init__ - clients should call InterRepository.get
        instead which will create an optimised InterRepository if possible.
        """
        self.source = source
        self.target = target

    def copy_content(self, revision_id=None, basis=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        :param basis: Copy the needed data preferentially from basis.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # grab the basis available data
        if basis is not None:
            self.target.fetch(basis, revision_id=revision_id)
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    def _double_lock(self, lock_source, lock_target):
        """Take out two locks, rolling back the first if the second throws."""
        lock_source()
        try:
            # we want to ensure that we don't leave source locked by mistake.
            # and any error on target should not confuse source.
            lock_target()
        except:
            self.source.unlock()
            raise

    def fetch(self, revision_id=None, pb=None):
        """Fetch the content required to construct revision_id.

        The content is copied from source to target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        Returns the copied revision count and the failed revisions in a tuple:
        (copied, failures).
        """
        from bzrlib.fetch import RepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        pb=pb)
        return f.count_copied, f.failed_revisions

    @classmethod
    def get(klass, repository_source, repository_target):
        """Retrieve an InterRepository worker object for these repositories.

        :param repository_source: the repository to be the 'source' member of
                                   the InterRepository instance.
        :param repository_target: the repository to be the 'target' member of
                                   the InterRepository instance.
        If an optimised InterRepository worker exists it will be used otherwise
        a default InterRepository instance will be created.
        """
        for provider in klass._optimisers:
            if provider.is_compatible(repository_source, repository_target):
                return provider(repository_source, repository_target)
        return InterRepository(repository_source, repository_target)

    def lock_read(self):
        """Take out a logical read lock.

        This will lock the source branch and the target branch. The source gets
        a read lock and the target a read lock.
        """
        self._double_lock(self.source.lock_read, self.target.lock_read)

    def lock_write(self):
        """Take out a logical write lock.

        This will lock the source branch and the target branch. The source gets
        a read lock and the target a write lock.
        """
        self._double_lock(self.source.lock_read, self.target.lock_write)

    def missing_revision_ids(self, revision_id=None):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        """
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids.pop(0) == None
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        # this may look like a no-op: its not. It preserves the ordering
        # source_ids had while only returning the members from source_ids
        # that we've decided we need.
        return [rev_id for rev_id in source_ids if rev_id in result_set]

    @classmethod
    def register_optimiser(klass, optimiser):
        """Register an InterRepository optimiser."""
        klass._optimisers.add(optimiser)

    def unlock(self):
        """Release the locks on source and target."""
        try:
            self.target.unlock()
        finally:
            self.source.unlock()

    @classmethod
    def unregister_optimiser(klass, optimiser):
        """Unregister an InterRepository optimiser."""
        klass._optimisers.remove(optimiser)


class InterWeaveRepo(InterRepository):
    """Optimised code paths between Weave based repositories."""

    _matching_repo_format = _default_format
    """Repository format for testing with."""

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    def copy_content(self, revision_id=None, basis=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        if basis is not None:
            # copy the basis in, then fetch remaining data.
            basis.copy_content_into(self.target, revision_id)
            # the basis copy_content_into could misset this.
            try:
                self.target.set_make_working_trees(self.source.make_working_trees())
            except NotImplementedError:
                pass
            self.target.fetch(self.source, revision_id=revision_id)
        else:
            try:
                self.target.set_make_working_trees(self.source.make_working_trees())
            except NotImplementedError:
                pass
            if self.source.control_files._transport.listable():
                pb = bzrlib.ui.ui_factory.progress_bar()
                copy_all(self.source.weave_store,
                         self.target.weave_store, pb=pb)
                pb.update('copying inventory', 0, 1)
                self.target.control_weaves.copy_multi(
                    self.source.control_weaves, ['inventory'])
                copy_all(self.source.revision_store,
                         self.target.revision_store, pb=pb)
            else:
                self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import RepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        pb=pb)
        return f.count_copied, f.failed_revisions

    def missing_revision_ids(self, revision_id=None):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids.pop(0) == None
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


InterRepository.register_optimiser(InterWeaveRepo)
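
# Illustrative sketch (not part of the original source): InterRepository.get()
# walks the registered optimisers, so with the registration above a pair of
# weave-format repositories is handled by InterWeaveRepo while any other pair
# falls back to the generic InterRepository code path.  `source_repository`,
# `target_repository` and `revision_id` below are placeholders:
#
#   inter = InterRepository.get(source_repository, target_repository)
#   missing = inter.missing_revision_ids(revision_id)
#   inter.fetch(revision_id=revision_id)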


class RepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple repository formats at once.

    This is done by copying the test once for each transport and injecting
    the transport_server, transport_readonly_server, and bzrdir_format and
    repository_format classes into each copy. Each copy is also given a new id()
    to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        result = TestSuite()
        for repository_format, bzrdir_format in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.bzrdir_format = bzrdir_format
            new_test.repository_format = repository_format
            def make_new_test_id():
                new_id = "%s(%s)" % (new_test.id(), repository_format.__class__.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        return result


class InterRepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple inter repository formats.

    This is done by copying the test once for each interrepo provider and injecting
    the transport_server, transport_readonly_server, repository_format and
    repository_to_format classes into each copy.
    Each copy is also given a new id() to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        result = TestSuite()
        for interrepo_class, repository_format, repository_format_to in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.interrepo_class = interrepo_class
            new_test.repository_format = repository_format
            new_test.repository_format_to = repository_format_to
            def make_new_test_id():
                new_id = "%s(%s)" % (new_test.id(), interrepo_class.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        return result

    @staticmethod
    def default_test_list():
        """Generate the default list of interrepo permutations to test."""
        result = []
        # test the default InterRepository between format 6 and the current
        # default format.
        # XXX: robertc 20060220 reinstate this when there are two supported
        # formats which do not have an optimal code path between them.
        #result.append((InterRepository, RepositoryFormat6(),
        #               RepositoryFormat.get_default_format()))
        for optimiser in InterRepository._optimisers:
            result.append((optimiser,
                           optimiser._matching_repo_format,
                           optimiser._matching_repo_format
                           ))
        # if there are specific combinations we want to use, we can add them
        # here.
        return result
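
# Hedged usage sketch (illustrative, not from the original source): a test
# loader would typically wire the adapter above into a suite along these
# lines, with the server classes standing in for whatever transport
# implementations the test run wants to exercise:
#
#   adapter = InterRepositoryTestProviderAdapter(
#       SomeTransportServer, SomeReadonlyTransportServer,   # placeholder servers
#       InterRepositoryTestProviderAdapter.default_test_list())
#   suite = adapter.adapt(some_test_case)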