# Copyright (C) 2005, 2006, 2007 Canonical Ltd
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from cStringIO import StringIO

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
    revision as _mod_revision,
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.store.text import TextStore
from bzrlib.testament import Testament
from bzrlib.util import bencode
from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import Inventory, InventoryDirectory, ROOT_ID
from bzrlib.symbol_versioning import (
from bzrlib.trace import mutter, mutter_callsite, note, warning


# Old formats display a warning, but only once
_deprecation_warning_done = False

class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
            self._committer = self._config.username()
            assert isinstance(committer, basestring), type(committer)
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        if revprops is not None:
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

            self._timezone = osutils.local_time_offset()
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self._heads = graph.HeadsCache(repository.get_graph()).heads

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        rev = _mod_revision.Revision(
            timestamp=self._timestamp,
            timezone=self._timezone,
            committer=self._committer,
            inventory_sha1=self.inv_sha1,
            revision_id=self._new_revision_id,
            properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
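
    # Illustrative sketch (not part of this module's API contract): a caller
    # typically drives a CommitBuilder through the repository, roughly as
    # follows, assuming a write-locked repository `repo` and a source of
    # inventory entries such as a working tree:
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   for path, ie in entries_to_commit:
    #       builder.record_entry_contents(ie, parent_invs, path, tree,
    #                                     content_summary)
    #   builder.finish_inventory()
    #   new_revision_id = builder.commit('commit message')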
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        """
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished."""
        if self.new_inventory.root is None:
            symbol_versioning.warn('Root entry should be supplied to'
                ' record_entry_contents, as of bzr 0.10.',
                DeprecationWarning, stacklevel=2)
            self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
        self.new_inventory.revision_id = self._new_revision_id
        self.inv_sha1 = self.repository.add_inventory(
            self._new_revision_id,
    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
            self.random_revid = False

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in root.revision is always
        ie.revision = self._new_revision_id

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            return (None, path, ie.file_id, ie)
        elif ie != basis_inv[ie.file_id]:
            # TODO: avoid this id2path call.
            return (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
    def record_entry_contents(self, ie, parent_invs, path, tree,
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded). change_delta is
            an inventory_delta change for this entry against the basis tree of
            the commit, or None if no change occurred against the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
            # ie is carried over from a prior commit
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
            basis_inv = parent_invs[0]
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs, and
                # this masks when a change may have occurred against the basis,
                # so calculate if one happened.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                    delta = (None, path, ie.file_id, ie)
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                return None, (ie.revision == self._new_revision_id)
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candiate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(parent_candiate_entries.keys())
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        # now we check to see if we need to write a new record to the
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
            # There is a single head, look it up for comparison
            parent_entry = parent_candiate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):

        # now we need to do content specific checks:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
            assert content_summary[2] is not None, \
                "Files must not have executable = None"
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False
                    # Either there is only a hash change(no hash cache entry,
                    # or same size content change), or there is no change on
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
            ie.executable = content_summary[2]
            lines = tree.get_file(ie.file_id, path).readlines()
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False
        elif kind == 'directory':
                # all data is meta here, nothing specific to directory, so
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
                # symlink target is not generic metadata, check if it has
                if current_link_target != parent_entry.symlink_target:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
                if content_summary[3] != parent_entry.reference_revision:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            ie.reference_revision = content_summary[3]
            self._add_text_to_weave(ie.file_id, lines, heads, None)
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        return self._get_delta(ie, basis_inv, path), True
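
    # For reference, the inventory delta tuples produced above via _get_delta()
    # have the shape (old_path, new_path, file_id, new_entry): an add uses
    # (None, path, file_id, ie), while a change against the basis uses
    # (basis_path, path, file_id, ie). A (delta, False) return therefore means
    # the entry was carried over without recording a new text version.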

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        versionedfile = self.repository.weave_store.get_weave_or_empty(
            file_id, self.repository.get_transaction())
        # Don't change this to add_lines - add_lines_with_ghosts is cheaper
        # than add_lines, and allows committing when a parent is ghosted for
        # Note: as we read the content directly from the tree, we know it's not
        # been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this is
        # not a guarantee of safety. What would be better is always checking
        # the content during test suite execution. RBC 20070912
            return versionedfile.add_lines_with_ghosts(
                self._new_revision_id, parents, new_lines,
                nostore_sha=nostore_sha, random_id=self.random_revid,
                check_content=False)[0:2]
            versionedfile.clear_cache()


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
        :param tree: The tree that is being committed.
        """


######################################################################
class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of Stores and a Transport, which respectively
    describe the disk data format and the way of accessing the (possibly
    """

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
    def abort_write_group(self):
        """Abort the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context and write group.')
        self._abort_write_group()
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        """

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        returns the sha1 of the serialized inventory.
        """
        assert self.is_in_write_group()
        _mod_revision.check_not_reserved_id(revision_id)
        assert inv.revision_id is None or inv.revision_id == revision_id, \
            "Mismatch between inventory revision" \
            " id and insertion revid (%r, %r)" % (inv.revision_id, revision_id)
        assert inv.root is not None
        inv_lines = self._serialise_inventory_to_lines(inv)
        inv_vf = self.get_inventory_weave()
        return self._inventory_add_lines(inv_vf, revision_id, parents,
            inv_lines, check_content=False)
    def _inventory_add_lines(self, inv_vf, revision_id, parents, lines,
        """Store lines in inv_vf and return the sha1 of the inventory."""
        for parent in parents:
                final_parents.append(parent)
        return inv_vf.add_lines(revision_id, final_parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. if None, it will be looked
                    up in the inventory store
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
            if not revision_id in self.get_inventory_weave():
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.get_inventory_weave())
            # yes, this is not suitable for adding with ghosts.
            self.add_inventory(revision_id, inv, rev.parent_ids)
        self._revision_store.add_revision(rev, self.get_transaction())

    def _add_revision_text(self, revision_id, text):
        revision = self._revision_store._serializer.read_revision_from_string(
        self._revision_store._add_revision(revision, StringIO(text),
            self.get_transaction())
    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is deprecated because code should generally work on the graph
        reachable from a particular revision, and ignore any other revisions
        that might be present. There is no direct replacement method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        """
        self.control_files.break_lock()

    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        for id in revision_ids:
            if self.has_revision(id):

    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._revision_store = _revision_store
        # backwards compatibility
        self.weave_store = text_store
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        # not right yet - should be more semantically clear ?
        self.control_store = control_store
        self.control_weaves = control_store
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        self.base = control_files._transport.base

        return '%s(%r)' % (self.__class__.__name__,

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
        return (self.control_files._transport.base ==
                other.control_files._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'
    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        result = self.control_files.lock_write(token=token)

        self.control_files.lock_read()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            first_revision = None
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                    all_committers.add(revision.committer)
            last_revision = revision
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        if self.bzrdir.root_transport.listable():
            c, t = self._revision_store.total_size(self.get_transaction())
            result['revisions'] = c
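
        # Example of the kind of dictionary gather_stats() can return for a
        # branch tip (values are illustrative only, see the docstring above):
        #
        #   {'committers': 2,
        #    'firstrev': (1193135432.0, 0),
        #    'latestrev': (1193139200.0, 3600),
        #    'revisions': 120,
        #    'size': 1048576}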

    def get_data_stream(self, revision_ids):
        raise NotImplementedError(self.get_data_stream)

    def insert_data_stream(self, stream):
        """XXX What does this really do?

        Is it a substitute for fetch?
        Should it manage its own write group?
        """
        for item_key, bytes in stream:
            if item_key[0] == 'file':
                (file_id,) = item_key[1:]
                knit = self.weave_store.get_weave_or_empty(
                    file_id, self.get_transaction())
            elif item_key == ('inventory',):
                knit = self.get_inventory_weave()
            elif item_key == ('revisions',):
                knit = self._revision_store.get_revision_file(
                    self.get_transaction())
            elif item_key == ('signatures',):
                knit = self._revision_store.get_signature_file(
                    self.get_transaction())
                raise RepositoryDataStreamError(
                    "Unrecognised data stream key '%s'" % (item_key,))
            decoded_list = bencode.bdecode(bytes)
            format = decoded_list.pop(0)
            for version, options, parents, some_bytes in decoded_list:
                data_list.append((version, options, len(some_bytes), parents))
                knit_bytes += some_bytes
            knit.insert_data_stream(
                (format, data_list, StringIO(knit_bytes).read))

    def missing_revision_ids(self, other, revision_id=None):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).missing_revision_ids(revision_id)

        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                (self.get_transaction(), self._write_group))
        self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def fetch(self, source, revision_id=None, pb=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None all content is copied.
        """
        # fast path same-url fetch operations
        if self.has_same_location(source):
            # check that last_revision is in 'from' and then return a
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
        inter = InterRepository.get(source, self)
            return inter.fetch(revision_id=revision_id, pb=pb)
        except NotImplementedError:
            raise errors.IncompatibleRepositories(source, self)

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)
    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()

        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()

    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
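
    # Illustrative write-group protocol (a sketch of the expected call order,
    # assuming `repo` is this repository; see also install_revision below):
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   try:
    #       ... insert revisions, inventories and texts ...
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()
    #   repo.unlock()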

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        """

    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()

    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        if 'evil' in debug.debug_flags:
            mutter_callsite(3, "has_revision is a LBYL symptom.")
        return self._revision_store.has_revision_id(revision_id,
                                                    self.get_transaction())

    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        revs = self._revision_store.get_revisions(revision_ids,
                                                  self.get_transaction())
            assert not isinstance(rev.revision_id, unicode)
            for parent_id in rev.parent_ids:
                assert not isinstance(parent_id, unicode)

    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        rev = self.get_revision(revision_id)
        rev_tmp = StringIO()
        # the current serializer..
        self._revision_store._serializer.write_revision(rev, rev_tmp)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.
        """
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])
        trees = dict((t.get_revision_id(), t) for
                     t in self.revision_trees(required_trees))
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(None)
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)
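
    # Usage sketch (illustrative only): deltas are produced lazily, so consume
    # the generator while the repository is still read-locked:
    #
    #   revisions = repo.get_revisions(['rev-1', 'rev-2'])
    #   for delta in repo.get_deltas_for_revisions(revisions):
    #       ... inspect delta.added / delta.removed / delta.modified ...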

    def get_revision_delta(self, revision_id):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r]))[0]

    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self._revision_store.add_revision_signature_text(revision_id,
                                                         self.get_transaction())

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines
        :param revision_ids: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision id from each parsed line will be looked up in the
            revision_ids filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line in line_iterator:
            match = search(line)
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
                revision_id = unescape_revid_cache[revision_id]
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            if revision_id in revision_ids:
                    file_id = unescape_fileid_cache[file_id]
                    unescaped = unescape(file_id)
                    unescape_fileid_cache[file_id] = unescaped
                setdefault(file_id, set()).add(revision_id)
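
        # Illustrative example (assumed XML shape, for reference only): an
        # inventory line such as
        #   <file file_id="foo-id" name="foo" revision="rev-123" ... />
        # matches _file_ids_altered_regex, so match.group('file_id',
        # 'revision_id') yields ('foo-id', 'rev-123'); if 'rev-123' is in
        # revision_ids the result accumulates {'foo-id': set(['rev-123'])}.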

    def fileids_altered_by_revision_ids(self, revision_ids):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        assert self._serializer.support_altered_by_hack, \
            ("fileids_altered_by_revision_ids only supported for branches "
             "which store inventory as unnested xml, not on %r" % self)
        selected_revision_ids = set(revision_ids)
        w = self.get_inventory_weave()
        pb = ui.ui_factory.nested_progress_bar()
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_versions(
                    selected_revision_ids, pb=pb),
                selected_revision_ids)
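
    # Usage sketch (illustrative; assumes a read-locked repository `repo`):
    #
    #   altered = repo.fileids_altered_by_revision_ids(['rev-123'])
    #   for file_id, revs in altered.iteritems():
    #       ... fetch or check the text versions of file_id named in revs ...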

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files. No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller as part of desired_files. It should
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of lines produced by
        VersionedFile.get_lines().

        :param desired_files: a list of (file_id, revision_id, identifier)
        """
        transaction = self.get_transaction()
        for file_id, revision_id, callable_data in desired_files:
                weave = self.weave_store.get_weave(file_id, transaction)
            except errors.NoSuchFile:
                raise errors.NoSuchIdInRepository(self, file_id)
            yield callable_data, weave.get_lines(revision_id)
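
    # Usage sketch (illustrative): the identifiers are opaque to the repository
    # and are simply handed back alongside each file's lines:
    #
    #   wanted = [('file-id-1', 'rev-5', 'token-1'),
    #             ('file-id-2', 'rev-9', 'token-2')]
    #   for identifier, lines in repo.iter_files_bytes(wanted):
    #       text = ''.join(lines)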

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions). knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'. file-id is None unless knit-kind is 'file'.
        """
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        inv_w = self.get_inventory_weave()
        inv_w.enable_cache()

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids)
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if _files_pb is not None:
                _files_pb.update("fetch texts", count, num_file_ids)
            yield ("file", file_id, altered_versions)
        # We're done with the files_pb. Note that it is finished by the caller,
        # just as it was created by the caller.

        yield ("inventory", None, revision_ids)

        revisions_with_signatures = set()
        for rev_id in revision_ids:
                self.get_signature_text(rev_id)
            except errors.NoSuchRevision:
                revisions_with_signatures.add(rev_id)
        yield ("signatures", None, revisions_with_signatures)

        yield ("revisions", None, revision_ids)

    def get_inventory_weave(self):
        return self.control_weaves.get_weave('inventory',
            self.get_transaction())
1232
def get_inventory(self, revision_id):
1233
"""Get Inventory object by hash."""
1234
return self.deserialise_inventory(
1235
revision_id, self.get_inventory_xml(revision_id))
1237
def deserialise_inventory(self, revision_id, xml):
1238
"""Transform the xml into an inventory object.
1240
:param revision_id: The expected revision id of the inventory.
1241
:param xml: A serialised inventory.
1243
return self._serializer.read_inventory_from_string(xml, revision_id)
1245
def serialise_inventory(self, inv):
1246
return self._serializer.write_inventory_to_string(inv)
1248
def _serialise_inventory_to_lines(self, inv):
1249
return self._serializer.write_inventory_to_lines(inv)
1251
def get_serializer_format(self):
1252
return self._serializer.format_num
1255
def get_inventory_xml(self, revision_id):
1256
"""Get inventory XML as a file object."""
1258
assert isinstance(revision_id, str), type(revision_id)
1259
iw = self.get_inventory_weave()
1260
return iw.get_text(revision_id)
1262
raise errors.HistoryMissing(self, 'inventory', revision_id)
1265
def get_inventory_sha1(self, revision_id):
1266
"""Return the sha1 hash of the inventory entry
1268
return self.get_revision(revision_id).inventory_sha1
1271
def get_revision_graph(self, revision_id=None):
1272
"""Return a dictionary containing the revision graph.
1274
NB: This method should not be used as it accesses the entire graph all
1275
at once, which is much more data than most operations should require.
1277
:param revision_id: The revision_id to get a graph from. If None, then
1278
the entire revision graph is returned. This is a deprecated mode of
1279
operation and will be removed in the future.
1280
:return: a dictionary of revision_id->revision_parents_list.
1282
raise NotImplementedError(self.get_revision_graph)

    def get_revision_graph_with_ghosts(self, revision_ids=None):
        """Return a graph of the revisions with ghosts marked as applicable.

        :param revision_ids: an iterable of revisions to graph or None for all.
        :return: a Graph object with the graph reachable from revision_ids.
        """
        if 'evil' in debug.debug_flags:
                "get_revision_graph_with_ghosts scales with size of history.")
        result = deprecated_graph.Graph()
        if not revision_ids:
            pending = set(self.all_revision_ids())
            pending = set(revision_ids)
            # special case NULL_REVISION
            if _mod_revision.NULL_REVISION in pending:
                pending.remove(_mod_revision.NULL_REVISION)
            required = set(pending)
            revision_id = pending.pop()
                rev = self.get_revision(revision_id)
            except errors.NoSuchRevision:
                if revision_id in required:
                result.add_ghost(revision_id)
            for parent_id in rev.parent_ids:
                # is this queued or done ?
                if (parent_id not in pending and
                    parent_id not in done):
                    pending.add(parent_id)
            result.add_node(revision_id, rev.parent_ids)
            done.add(revision_id)

    def _get_history_vf(self):
        """Get a versionedfile whose history graph reflects all revisions.

        For weave repositories, this is the inventory weave.
        """
        return self.get_inventory_weave()

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with. All its lefthand
            ancestors will be traversed.
        """
        if revision_id in (None, _mod_revision.NULL_REVISION):
        next_id = revision_id
        versionedfile = self._get_history_vf()
            parents = versionedfile.get_parents(next_id)
            if len(parents) == 0:
                next_id = parents[0]
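
    # Usage sketch (illustrative): walks only the lefthand (mainline) ancestry,
    # starting at the given revision and working backwards:
    #
    #   for rev_id in repo.iter_reverse_revision_history(tip_revision_id):
    #       ... the oldest mainline ancestor is yielded last ...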

    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner.
        """

    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be None for the empty tree revision.
        """
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Tree for a revision on this branch.

        `revision_id` may not be None or 'null:'"""
        assert None not in revision_ids
        assert _mod_revision.NULL_REVISION not in revision_ids
        texts = self.get_inventory_weave().get_texts(revision_ids)
        for text, revision_id in zip(texts, revision_ids):
            inv = self.deserialise_inventory(revision_id, text)
            yield RevisionTree(self, inv, revision_id)

    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision. This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        w = self.get_inventory_weave()
        candidates = w.get_ancestry(revision_id, topo_sorted)
        return [None] + candidates # self._eliminate_revisions_not_present(candidates)

        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock; as this is a long running call, it's reasonable to
        implicitly lock for the user.
        """

    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
            # TODO: jam 20060427 Write a test for this code path
            #       it had a bug in it, and was raising the wrong
            raise errors.BzrError("%r is not present in revision %s" % (file, revision_id))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    def revision_parents(self, revision_id):
        return self.get_inventory_weave().parent_names(revision_id)

    def get_parents(self, revision_ids):
        """See StackedParentsProvider.get_parents"""
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                    parents = self.get_revision(revision_id).parent_ids
                except errors.NoSuchRevision:
                    if len(parents) == 0:
                        parents = [_mod_revision.NULL_REVISION]
            parents_list.append(parents)

    def _make_parents_provider(self):

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            other_repository.bzrdir.transport.base !=
            self.bzrdir.transport.base):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)
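
    # Usage sketch (illustrative): the Graph object built here is the same one
    # CommitBuilder uses for heads() calculations:
    #
    #   graph_walker = repo.get_graph()
    #   heads = graph_walker.heads(candidate_revision_ids)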

    def get_versioned_file_checker(self, revisions, revision_versions_cache):
        return VersionedFileChecker(revisions, revision_versions_cache, self)

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        return self._revision_store.has_signature(revision_id,
                                                  self.get_transaction())

    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        return self._revision_store.get_signature_text(revision_id,
                                                       self.get_transaction())

    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
            will be checked. Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
        m = __import__(from_module, globals(), locals(), [name])
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
    globals()[name] = _deprecated_repository_forwarder

        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    repository.start_write_group()
        _install_revision(repository, rev, revision_tree)
        repository.abort_write_group()
        repository.commit_write_group()


def _install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    present_parents = []
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
            parent_trees[p_id] = repository.revision_tree(None)
    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    # Add the texts that are not already present
    for path, ie in entries:
        w = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
        if ie.revision not in w:
            # FIXME: TODO: The following loop *may* be overlapping/duplicate
            # with InventoryEntry.find_previous_heads(). if it is, then there
            # is a latent bug here where the parents may have ancestors of each
            for revision, tree in parent_trees.iteritems():
                if ie.file_id not in tree:
                parent_id = tree.inventory[ie.file_id].revision
                if parent_id in text_parents:
                text_parents.append(parent_id)
            vfile = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
            lines = revision_tree.get_file(ie.file_id).readlines()
            vfile.add_lines(rev.revision_id, text_parents, lines)
        # install the inventory
        repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout."""

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        super(MetaDirRepository, self).__init__(_format,
                                                a_bzrdir,
                                                control_files,
                                                _revision_store,
                                                control_store,
                                                text_store)
        dir_mode = self.control_files._dir_mode
        file_mode = self.control_files._file_mode

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self.control_files._transport.has('shared-storage')

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self.control_files._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self.control_files.put_utf8('no-working-trees', '')

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self.control_files._transport.has('no-working-trees')
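
    # Illustrative sketch (not part of the original module): flipping the
    # working-tree policy on a hypothetical shared repository 'repo'.
    #
    #   repo.lock_write()
    #   try:
    #       repo.set_make_working_trees(False)
    #   finally:
    #       repo.unlock()
    #   assert repo.make_working_trees() is False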


class RepositoryFormatRegistry(registry.Registry):
    """Registry of RepositoryFormats."""

    def get(self, format_string):
        r = registry.Registry.get(self, format_string)
        if callable(r):
            r = r()
        return r


format_registry = RepositoryFormatRegistry()
"""Registry of formats, indexed by their identifying format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
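
# Illustrative sketch (not part of the original module): looking up a format
# object by the string stored in its on-disk 'format' file.
#
#   fmt = format_registry.get('Bazaar-NG Knit Repository Format 1')
#   print fmt.get_format_description()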


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide three things:
     * An initialization routine to construct repository data on disk.
     * a format string which is used when the BzrDir supports versioned
       children.
     * an open routine which returns a Repository instance.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a dict by their format string for reference
    during opening. These should be subclasses of RepositoryFormat
    for consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every system load.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterisation.
    """

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string)
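
    # Illustrative sketch (not part of the original module): detecting the
    # repository format beneath a control directory at a hypothetical path.
    #
    #   from bzrlib import bzrdir
    #   a_bzrdir = bzrdir.BzrDir.open('path/to/branch')
    #   format = RepositoryFormat.find_format(a_bzrdir)
    #   print format.get_format_description()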

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def _get_control_store(self, repo_transport, control_files):
        """Return the control store for this repository."""
        raise NotImplementedError(self._get_control_store)

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def _get_revision_store(self, repo_transport, control_files):
        """Return the revision store object for this a_bzrdir."""
        raise NotImplementedError(self._get_revision_store)

    def _get_text_rev_store(self,
                            transport,
                            control_files,
                            name,
                            compressed=True,
                            prefixed=False,
                            serializer=None):
        """Common logic for getting a revision store for a repository.

        see self._get_revision_store for the subclass-overridable method to
        get the store for a repository.
        """
        from bzrlib.store.revision.text import TextRevisionStore
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        text_store = TextStore(transport.clone(name),
                               prefixed=prefixed,
                               compressed=compressed,
                               dir_mode=dir_mode,
                               file_mode=file_mode)
        _revision_store = TextRevisionStore(text_store, serializer)
        return _revision_store

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)
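
    # Illustrative sketch (not part of the original module): creating a new
    # shared repository with the default format inside an existing bzrdir;
    # 'a_bzrdir' is an assumed, already-created control directory.
    #
    #   fmt = RepositoryFormat.get_default_format()
    #   repo = fmt.initialize(a_bzrdir, shared=True)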

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    _matchingbzrdir = bzrdir.BzrDirMetaFormat1()

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                                     'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        try:
            control_files._transport.mkdir_multi(dirs,
                mode=control_files._dir_mode)
            for file, content in files:
                control_files.put(file, content)
            for file, content in utf8_files:
                control_files.put_utf8(file, content)
            if shared:
                control_files.put_utf8('shared-storage', '')
        finally:
            control_files.unlock()


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered. They're
# all in bzrlib.repofmt.weaverepo now. When an instance of one of these is
# needed, it's constructed directly by the BzrDir. Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

# KEEP in sync with bzrdir.format_registry default, which controls the overall
# default control directory format
format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.default_key = 'Bazaar-NG Knit Repository Format 1'

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92.
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
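
# Illustrative sketch (not part of the original module): a plugin that ships
# its own repository format could register it lazily in the same way; the
# format string, module path and class name below are hypothetical.
#
#   format_registry.register_lazy(
#       'Example third-party repository format 1\n',
#       'example_plugin.repofmt',
#       'RepositoryFormatExample1',
#       )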


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        raise NotImplementedError(self.copy_content)

    def fetch(self, revision_id=None, pb=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        Returns the copied revision count and the failed revisions in a tuple:
        (copied, failures).
        """
        raise NotImplementedError(self.fetch)
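
    # Illustrative sketch (not part of the original module): callers normally
    # obtain the best optimiser via InterRepository.get() rather than picking
    # a subclass; 'source_repo' and 'target_repo' are assumed open repositories.
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   copied, failures = inter.fetch(revision_id=None)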

    def missing_revision_ids(self, revision_id=None):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        """
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        # this may look like a no-op: it's not. It preserves the ordering
        # other_ids had while only returning the members from other_ids
        # that we've decided we need.
        return [rev_id for rev_id in source_ids if rev_id in result_set]
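
    # Illustrative sketch (not part of the original module) of why the final
    # list comprehension is used instead of returning the set directly: it
    # keeps source_ids' topological ordering while dropping revisions the
    # target already has.
    #
    #   source_ids = ['rev-a', 'rev-b', 'rev-c']      # hypothetical ids
    #   result_set = set(['rev-c', 'rev-a'])
    #   [r for r in source_ids if r in result_set]    # -> ['rev-a', 'rev-c']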

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        if source._serializer != target._serializer:
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This copies both the repository's revision data, and configuration information
        such as the make_working_trees setting.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb)
        return f.count_copied, f.failed_revisions


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # FIXME do not peek!
        if self.source.control_files._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.weave_store.copy_all_ids(
                    self.source.weave_store,
                    pb=pb,
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                pb.update('copying inventory', 0, 1)
                self.target.control_weaves.copy_multi(
                    self.source.control_weaves, ['inventory'],
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                self.target._revision_store.text_store.copy_all_ids(
                    self.source._revision_store.text_store,
                    pb=pb)
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb)
        return f.count_copied, f.failed_revisions

    def missing_revision_ids(self, revision_id=None):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want, then, all revisions the other repository needs to satisfy
        # revision_id checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on a high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids
                                   if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                         isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import KnitRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = KnitRepoFetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb)
        return f.count_copied, f.failed_revisions

    def missing_revision_ids(self, revision_id=None):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids
                                   if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)
2300
class InterPackRepo(InterSameDataRepository):
2301
"""Optimised code paths between Pack based repositories."""
2304
def _get_repo_format_to_test(self):
2305
from bzrlib.repofmt import pack_repo
2306
return pack_repo.RepositoryFormatKnitPack1()
2309
def is_compatible(source, target):
2310
"""Be compatible with known Pack formats.
2312
We don't test for the stores being of specific types because that
2313
could lead to confusing results, and there is no need to be
2316
from bzrlib.repofmt.pack_repo import RepositoryFormatPack
2318
are_packs = (isinstance(source._format, RepositoryFormatPack) and
2319
isinstance(target._format, RepositoryFormatPack))
2320
except AttributeError:
2322
return are_packs and InterRepository._same_model(source, target)

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        self.count_copied = 0
        if revision_id is None:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            revision_ids = self.source.all_revision_ids()
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.missing_revision_ids(revision_id)
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
        packs = self.source._pack_collection.all_packs()
        pack = self.target._pack_collection.create_pack_from_packs(
            packs, '.fetch', revision_ids,
            )
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return pack.get_revision_count()
        else:
            return 0

    def missing_revision_ids(self, revision_id=None):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        return [r for r in source_ids if (r not in target_ids)]


class InterModel1and2(InterRepository):

    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        else:
            return False

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb)
        return f.count_copied, f.failed_revisions

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)


class InterKnit1and2(InterKnitRepo):

    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit3
        try:
            from bzrlib.repofmt.knitrepo import (RepositoryFormatKnit1,
                RepositoryFormatKnit3)
            from bzrlib.repofmt.pack_repo import (RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3)
            return (isinstance(source._format,
                        (RepositoryFormatKnit1, RepositoryFormatKnitPack1)) and
                    isinstance(target._format,
                        (RepositoryFormatKnit3, RepositoryFormatKnitPack3))
                    )
        except AttributeError:
            return False

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb)
        return f.count_copied, f.failed_revisions


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        source._ensure_real()
        real_source = source._real_repository
        # Is source's model compatible with target's model, and are they the
        # same format?  Currently we can only optimise fetching from an
        # identical model & format repo.
        assert not isinstance(real_source, remote.RemoteRepository), (
            "We don't support remote repos backed by remote repos yet.")
        return real_source._format == target._format

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import RemoteToOtherFetcher
        mutter("Using fetch logic to copy between %s(remote) and %s(%s)",
               self.source, self.target, self.target._format)
        # TODO: jam 20070210 This should be an assert, not a translate
        revision_id = osutils.safe_revision_id(revision_id)
        f = RemoteToOtherFetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb)
        return f.count_copied, f.failed_revisions

    def _get_repo_format_to_test(self):
        return None


class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None):
        self._ensure_real_inter()
        self._real_inter.fetch(revision_id=revision_id, pb=pb)

    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterRemoteToOther)
InterRepository.register_optimiser(InterOtherToRemote)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of repo, giving feedback via pb.

        :param repo: The repository to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)
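
    # Illustrative sketch (not part of the original module): converting a
    # hypothetical metadir repository 'repo' in place to the default format,
    # with a progress bar supplied by the ui factory.
    #
    #   pb = ui.ui_factory.nested_progress_bar()
    #   try:
    #       converter = CopyConverter(RepositoryFormat.get_default_format())
    #       converter.convert(repo, pb)
    #   finally:
    #       pb.finished()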


_unescape_map = {
    'apos': "'",
    'quot': '"',
    'amp': '&',
    'lt': '<',
    'gt': '>',
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
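
# Illustrative sketch (not part of the original module): both named and
# numeric character references are handled.
#
#   _unescape_xml('a &amp; b')      # -> 'a & b'
#   _unescape_xml('&#169; 2007')    # -> '\xc2\xa9 2007' (UTF-8 bytes)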


class _RevisionTextVersionCache(object):
    """A cache of the versionedfile versions for revision and file-id."""

    def __init__(self, repository):
        self.repository = repository
        self.revision_versions = {}
        self.revision_parents = {}
        self.repo_graph = self.repository.get_graph()
        # XXX: RBC: I haven't tracked down what uses this, but it would be
        # better to use the headscache directly I think.
        self.heads = graph.HeadsCache(self.repo_graph).heads

    def add_revision_text_versions(self, tree):
        """Cache text version data from the supplied revision tree"""
        inv_revisions = {}
        for path, entry in tree.iter_entries_by_dir():
            inv_revisions[entry.file_id] = entry.revision
        self.revision_versions[tree.get_revision_id()] = inv_revisions
        return inv_revisions

    def get_text_version(self, file_id, revision_id):
        """Determine the text version for a given file-id and revision-id"""
        try:
            inv_revisions = self.revision_versions[revision_id]
        except KeyError:
            try:
                tree = self.repository.revision_tree(revision_id)
            except errors.RevisionNotPresent:
                self.revision_versions[revision_id] = inv_revisions = {}
            else:
                inv_revisions = self.add_revision_text_versions(tree)
        return inv_revisions.get(file_id)

    def prepopulate_revs(self, revision_ids):
        # Filter out versions that we don't have an inventory for, so that the
        # revision_trees() call won't fail.
        inv_weave = self.repository.get_inventory_weave()
        revs = [r for r in revision_ids if inv_weave.has_version(r)]
        # XXX: this loop is very similar to
        # bzrlib.fetch.Inter1and2Helper.iter_rev_trees.
        while revs:
            mutter('%d revisions left to prepopulate', len(revs))
            for tree in self.repository.revision_trees(revs[:100]):
                if tree.inventory.revision_id is None:
                    tree.inventory.revision_id = tree.get_revision_id()
                self.add_revision_text_versions(tree)
            revs = revs[100:]

    def get_parents(self, revision_id):
        try:
            return self.revision_parents[revision_id]
        except KeyError:
            parents = self.repository.get_parents([revision_id])[0]
            self.revision_parents[revision_id] = parents
            return parents

    def used_file_versions(self):
        """Return a set of (revision_id, file_id) pairs for each file version
        referenced by any inventory cached by this _RevisionTextVersionCache.

        If the entire repository has been cached, this can be used to find all
        file versions that are actually referenced by inventories. Thus any
        other file version is completely unused and can be removed safely.
        """
        result = set()
        for inventory_summary in self.revision_versions.itervalues():
            result.update(inventory_summary.items())
        return result
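
    # Illustrative sketch (not part of the original module): pre-filling the
    # cache for a repository and then asking which text version a file had in
    # a given revision; 'repo' and the ids are assumed names.
    #
    #   cache = _RevisionTextVersionCache(repo)
    #   cache.prepopulate_revs(repo.all_revision_ids())
    #   text_version = cache.get_text_version('some-file-id', 'some-rev-id')
    #   referenced = cache.used_file_versions()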


class VersionedFileChecker(object):

    def __init__(self, planned_revisions, revision_versions, repository):
        self.planned_revisions = planned_revisions
        self.revision_versions = revision_versions
        self.repository = repository

    def calculate_file_version_parents(self, revision_id, file_id):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        text_revision = self.revision_versions.get_text_version(
            file_id, revision_id)
        if text_revision is None:
            return None
        parents_of_text_revision = self.revision_versions.get_parents(
            text_revision)
        parents_from_inventories = []
        for parent in parents_of_text_revision:
            if parent == _mod_revision.NULL_REVISION:
                continue
            introduced_in = self.revision_versions.get_text_version(file_id,
                parent)
            if introduced_in is not None:
                parents_from_inventories.append(introduced_in)
        heads = set(self.revision_versions.heads(parents_from_inventories))
        new_parents = []
        for parent in parents_from_inventories:
            if parent in heads and parent not in new_parents:
                new_parents.append(parent)
        return tuple(new_parents)

    def check_file_version_parents(self, weave, file_id):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        dangling_file_versions = set()
        for num, revision_id in enumerate(self.planned_revisions):
            correct_parents = self.calculate_file_version_parents(
                revision_id, file_id)
            if correct_parents is None:
                continue
            text_revision = self.revision_versions.get_text_version(
                file_id, revision_id)
            try:
                knit_parents = tuple(weave.get_parents(revision_id))
            except errors.RevisionNotPresent:
                knit_parents = None
            if text_revision != revision_id:
                # This file version is not referenced by its corresponding
                # revision's inventory.
                dangling_file_versions.add((file_id, revision_id))
            if correct_parents != knit_parents:
                wrong_parents[revision_id] = (knit_parents, correct_parents)
        return wrong_parents, dangling_file_versions
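
    # Illustrative sketch (not part of the original module): checking one
    # file's versionedfile against the cached inventory data; 'repo' and
    # 'file-id-1' are assumed names.
    #
    #   cache = _RevisionTextVersionCache(repo)
    #   revisions = repo.all_revision_ids()
    #   cache.prepopulate_revs(revisions)
    #   checker = VersionedFileChecker(revisions, cache, repo)
    #   weave = repo.weave_store.get_weave('file-id-1', repo.get_transaction())
    #   wrong, dangling = checker.check_file_version_parents(weave, 'file-id-1')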