# Copyright (C) 2005-2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import time

from bzrlib import (
    bzrdir,
    debug,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    osutils,
    revision as _mod_revision,
    ui,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib.lock import _RelockDebugMixin
from bzrlib import registry
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))
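
    # Illustrative sketch of the contract enforced above (the example values
    # are invented for documentation purposes, not taken from the original
    # code):
    #
    #   builder._validate_revprops({'branch-nick': u'trunk'})  # accepted
    #   builder._validate_revprops({'nick': 1})        # ValueError: not a string
    #   builder._validate_revprops({'nick': u'a\rb'})  # ValueError: '\r' does
    #                                                  # not survive the XML
    #                                                  # serializers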

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
            timestamp=self._timestamp,
            timezone=self._timezone,
            committer=self._committer,
            message=message,
            inventory_sha1=self.inv_sha1,
            revision_id=self._new_revision_id,
            properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
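
    # A minimal usage sketch, assuming a write-locked repository with an open
    # write group; builders are normally obtained via
    # Repository.get_commit_builder rather than constructed directly, and the
    # argument list shown here is abbreviated:
    #
    #   builder = repository.get_commit_builder(parents, config, ...)
    #   builder.will_record_deletes()
    #   for hash_info in builder.record_iter_changes(tree, basis_id, changes):
    #       pass  # consume the generator of (file_id, relpath, fs_hash)
    #   builder.finish_inventory()
    #   rev_id = builder.commit('commit message')
    #   # on failure, call builder.abort() instead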

    def abort(self):
        """Abort the commit that is being built."""
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            # We ignore the 'inventory' returned by add_inventory_by_delta
            # because self.new_inventory is used to hint to the rest of the
            # system what code path was taken
            self.inv_sha1, _ = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents)
        return self._new_revision_id

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta
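
    # The delta returned above is a list of 4-tuples, one per changed entry.
    # A sketch of the three shapes that occur (paths and ids invented for
    # illustration):
    #
    #   (None, 'new/path', 'file-id-1', <InventoryEntry>)        # added
    #   ('old/path', 'new/path', 'file-id-2', <InventoryEntry>)  # changed/renamed
    #   ('old/path', None, 'file-id-3', None)                    # deleted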

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r).", (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # repository.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                # We can't trust a check of the file length because of content
                # filtering...
                if (# if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
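
    # Return-shape sketch for record_entry_contents (values illustrative):
    #
    #   delta, version_recorded, fs_hash = builder.record_entry_contents(
    #       ie, parent_invs, 'dir/file.txt', tree, content_summary)
    #   # delta            -> inventory-delta 4-tuple, or None if unchanged
    #   # version_recorded -> True only when a new entry version was stored
    #   # fs_hash          -> (text_sha1, stat_value) or None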

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # Working data:
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        changes = {}
        # file_id -> [revision_ids] of the parents that changed the file:
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                basis_entry.revision,
                                change[3].revision]
                            parent_entries[change[2]] = {
                                basis_entry.revision:basis_entry,
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            # changed_content, versioned, parent, name, kind,
            # executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and if not do a delta
            # against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
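
    # Key-construction sketch for the call above (ids invented for
    # illustration): the new text is stored under (file_id, revision_id),
    # with one parent key per per-file parent revision:
    #
    #   parents == ['rev-1', 'rev-2']
    #   parent_keys == (('file-id', 'rev-1'), ('file-id', 'rev-2'))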


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


######################################################################
# Repositories


class Repository(_RelockDebugMixin):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories, texts and chk_bytes attributes) and a
    Transport, which respectively provide byte storage and a means to access
    the (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
    byte string made up of a hash identifier and a hash value.
    We use this interface because it allows low friction with the underlying
    code that implements disk indices, network encoding and other parts of
    bzrlib.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures. The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
        any data the repository chooses to store or have indexed by its hash.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """
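
    # Addressing sketch for the VersionedFiles attributes described above
    # (ids invented for illustration; get_parent_map and get_record_stream
    # are the standard VersionedFiles accessors):
    #
    #   repo.revisions.get_parent_map([('rev-id-1',)])
    #   repo.texts.get_record_stream([('file-id-1', 'rev-id-1')],
    #       'unordered', True)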

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )

    def abort_write_group(self, suppress_errors=False):
        """Abort the contents accrued within the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate.  Defaults to False.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            if suppress_errors:
                mutter(
                '(suppressed) mismatched lock context and write group. %r, %r',
                self._write_group, self.get_transaction())
                return
            raise errors.BzrError(
                'mismatched lock context and write group. %r, %r' %
                (self._write_group, self.get_transaction()))
        try:
            self._abort_write_group()
        except Exception, exc:
            self._write_group = None
            if not suppress_errors:
                raise
            mutter('abort_write_group failed')
            log_exception_quietly()
            note('bzr: ERROR (ignored): %s', exc)
        self._write_group = None
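
    # Write group lifecycle sketch (all method names are defined on this
    # class; error handling abbreviated):
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           pass  # add revisions, inventories, texts ...
    #       except:
    #           repo.abort_write_group()
    #           raise
    #       else:
    #           repo.commit_write_group()
    #   finally:
    #       repo.unlock()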

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        if self.is_locked():
            # This repository will call fallback.unlock() when we transition to
            # the unlocked state, so we make sure to increment the lock count
            repository.lock_read()
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
            is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serializer.write_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details.)
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        result = self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]
        self.inventories._access.flush()
        return result

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. if None, it will be looked
                    up in the inventory store.
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present. There is no direct replacement
        method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        present.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        keys = {'chk_bytes':set(), 'inventories':set(), 'texts':set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update("inventories", 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update("texts", 1)
        while (checker.pending_keys or keys['chk_bytes']
               or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object, current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
        # Is it safe to return inventory entries directly from the entry cache,
        # rather than copying them?
        self._safe_to_return_from_cache = False

    def __repr__(self):
        if self._fallback_repositories:
            return '%s(%r, fallback_repositories=%r)' % (
                self.__class__.__name__,
                self.base,
                self._fallback_repositories)
        else:
            return '%s(%r)' % (self.__class__.__name__,
                               self.base)

    def _has_same_fallbacks(self, other_repo):
        """Returns true if the repositories have the same fallbacks."""
        my_fb = self._fallback_repositories
        other_fb = other_repo._fallback_repositories
        if len(my_fb) != len(other_fb):
            return False
        for f, g in zip(my_fb, other_fb):
            if not f.has_same_location(g):
                return False
        return True

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        locked = self.is_locked()
        result = self.control_files.lock_write(token=token)
        if not locked:
            self._warn_if_deprecated()
            self._note_lock('w')
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return result

    def lock_read(self):
        locked = self.is_locked()
        self.control_files.lock_read()
        if not locked:
            self._warn_if_deprecated()
            self._note_lock('r')
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
                last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)
        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
            # result['size'] = t
        return result
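
    # Sketch of a possible gather_stats() result for a two-revision repository
    # (all values invented for illustration):
    #
    #   {'revisions': 2,
    #    'firstrev': (1200000000.0, 0),
    #    'latestrev': (1200003600.0, 0)}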

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            return self.bzrdir.list_branches()

        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, ([], repository)
                self.first_call = False
                value = (bzrdir.list_branches(), None)
                return True, value

        ret = []
        for branches, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branches is not None:
                ret.extend(branches)
            if not using and repository is not None:
                ret.extend(repository.find_branches())
        return ret

    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)
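
    # Illustrative sketch, not from the original source: finding what a
    # hypothetical `other_repo` has that `repo` lacks before deciding to
    # fetch from it.
    #
    #   missing = repo.search_missing_revision_ids(other_repo)
    #   if missing:
    #       repo.fetch(other_repo)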

    @staticmethod
    def open(base):
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.

        :return: it may return an opaque hint that can be passed to 'pack'.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        result = self._commit_write_group()
        self._write_group = None
        return result

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)
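
    # Write-group lifecycle sketch (illustrative, not from the original
    # source); `repo` is a hypothetical write-locked repository:
    #
    #   repo.start_write_group()
    #   try:
    #       ...  # insert data (fetch/commit style operations)
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       hint = repo.commit_write_group()  # hint may be passed to pack()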

    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated.  Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server.  This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories.  But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.satisfy_refs_for_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target.  Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy.  This
            allows copying multiple heads at once.  Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        # TODO: lift out to somewhere common with RemoteRepository
        # <https://bugs.edge.launchpad.net/bzr/+bug/401646>
        if (self.has_same_location(source)
            and fetch_spec is None
            and self._has_same_fallbacks(source)):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
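
    # Illustrative sketch, not from the original source: copying everything
    # needed for one revision from a hypothetical `source_repo`:
    #
    #   repo.lock_write()
    #   try:
    #       repo.fetch(source_repo, revision_id=some_revid)
    #   finally:
    #       repo.unlock()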

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        if self._fallback_repositories:
            raise errors.BzrError("Cannot commit from a lightweight checkout "
                "to a stacked branch. See "
                "https://bugs.launchpad.net/bzr/+bug/375013 for details.")
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result
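
    # Illustrative sketch, not from the original source: the builder returned
    # here starts a write group, so commit code roughly does (names
    # hypothetical):
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   ...  # record tree contents against the builder
    #   new_revid = builder.commit('message')
    #   repo.commit_write_group()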

    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        if self.control_files._lock_count == 0:
            self._inventory_entry_cache.clear()
            for repo in self._fallback_repositories:
                repo.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """
def sprout(self, to_bzrdir, revision_id=None):
1787
"""Create a descendent repository for new development.
1789
Unlike clone, this does not copy the settings of the repository.
1791
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
1792
dest_repo.fetch(self, revision_id=revision_id)
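
    # Illustrative sketch, not from the original source: `clone` copies the
    # repository's settings (such as shared-ness) while `sprout` only fetches
    # history into a fresh repository; `a_bzrdir` is a hypothetical target
    # control directory.
    #
    #   new_repo = repo.sprout(a_bzrdir, revision_id=tip_revid)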

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)

    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result
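
    # Illustrative sketch, not from the original source: probing several
    # hypothetical revision ids at once and keeping only the absent ones.
    #
    #   wanted = set(['rev-a', 'rev-b', 'rev-c'])
    #   present = repo.has_revisions(wanted)
    #   missing = wanted - present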

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    def get_revisions(self, revision_ids):
        """Get many revisions at once.

        Repositories that need to check data on every revision read should
        subclass this method.
        """
        return self._get_revisions(revision_ids)

    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        revs = {}
        for revid, rev in self._iter_revisions(revision_ids):
            if rev is None:
                raise errors.NoSuchRevision(self, revid)
            revs[revid] = rev
        return [revs[revid] for revid in revision_ids]

    def _iter_revisions(self, revision_ids):
        """Iterate over revision objects.

        :param revision_ids: An iterable of revisions to examine. None may be
            passed to request all revisions known to the repository. Note that
            not all repositories can find unreferenced revisions; for those
            repositories only referenced ones will be returned.
        :return: An iterator of (revid, revision) tuples. Absent revisions (
            those asked for but not available) are returned as (revid, None).
        """
        if revision_ids is None:
            revision_ids = self.all_revision_ids()
        else:
            for rev_id in revision_ids:
                if not rev_id or not isinstance(rev_id, basestring):
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            revid = record.key[0]
            if record.storage_kind == 'absent':
                yield (revid, None)
            else:
                text = record.get_bytes_as('fulltext')
                rev = self._serializer.read_revision_from_string(text)
                yield (revid, rev)

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r],
            specific_fileids=specific_fileids))[0]
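
    # Illustrative sketch, not from the original source: showing what a
    # hypothetical revision changed relative to its lefthand parent.
    #
    #   delta = repo.get_revision_delta(some_revid)
    #   for path, file_id, kind in delta.added:
    #       print 'added', path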

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
            line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.

        :return: set of revisions that are parents of revision_ids which are
            not part of revision_ids themselves
        """
        parent_map = self.get_parent_map(revision_ids)
        parent_ids = set()
        map(parent_ids.update, parent_map.itervalues())
        parent_ids.difference_update(revision_ids)
        parent_ids.discard(_mod_revision.NULL_REVISION)
        return parent_ids

    def _find_parent_keys_of_revisions(self, revision_keys):
        """Similar to _find_parent_ids_of_revisions, but used with keys.

        :param revision_keys: An iterable of revision_keys.
        :return: The parents of all revision_keys that are not already in
            revision_keys
        """
        parent_map = self.revisions.get_parent_map(revision_keys)
        parent_keys = set()
        map(parent_keys.update, parent_map.itervalues())
        parent_keys.difference_update(revision_keys)
        parent_keys.discard(_mod_revision.NULL_REVISION)
        return parent_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        return self._find_file_ids_from_xml_inventory_lines(
            w.iter_lines_added_or_present_in_keys(
                selected_keys, pb=None),
            selected_keys)

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
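
    # Illustrative sketch, not from the original source: streaming two
    # hypothetical file texts; the identifier (here an index) maps results
    # back to requests, since no ordering is guaranteed.
    #
    #   wanted = [('file-id-1', 'rev-a', 0), ('file-id-2', 'rev-b', 1)]
    #   for identifier, chunks in repo.iter_files_bytes(wanted):
    #       text = ''.join(chunks)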

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for revision_id in to_query:
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed.
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("Fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'.  If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self._deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                raise errors.NoSuchRevision(self, record.key)
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break

    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid).  If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost.  Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
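
    # Illustrative sketch, not from the original source: resolving revno 10
    # given a known later (revno, revid) pair; when history is incomplete the
    # closest reachable revision is returned instead (names hypothetical).
    #
    #   found, result = repo.get_rev_id_for_revno(10, (15, tip_revid))
    #   if found:
    #       revid = result
    #   else:
    #       closest_revno, closest_revid = result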

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            try:
                parents = graph.get_parent_map([next_id])[next_id]
            except KeyError:
                raise errors.RevisionNotPresent(next_id, self)
            yield next_id
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)
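
    # Illustrative sketch, not from the original source: reading a file's
    # text as it was in a hypothetical revision.
    #
    #   tree = repo.revision_tree(some_revid)
    #   file_id = tree.path2id('README')
    #   text = tree.get_file_text(file_id)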

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        parent_map = graph.get_parent_map(keys)
        keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)

    def pack(self, hint=None):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long running call it's reasonable to
        implicitly lock for the user.

        :param hint: If not supplied, the whole repository is packed.
            If supplied, the repository may use the hint parameter as a
            hint for the parts of the repository to pack. A hint can be
            obtained from the result of commit_write_group(). Out of
            date hints are simply ignored, because concurrent operations
            can obsolete them rapidly.
        """

    def get_transaction(self):
        return self.control_files.get_transaction()

    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple([parent_revid
                    for (parent_revid,) in parent_keys])
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result

    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph.StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self, text_key_references=None,
        ancestors=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            calculated.
        :param ancestors: Optional result from
            self.get_graph().get_parent_map(self.all_revision_ids()) if already
            available.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references, ancestors=ancestors)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')
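
    # Illustrative sketch, not from the original source: signing a revision
    # and reading the stored signature back (the gpg strategy name is
    # hypothetical).
    #
    #   repo.sign_revision(some_revid, gpg_strategy)
    #   if repo.has_signature_for_revision_id(some_revid):
    #       sig = repo.get_signature_text(some_revid)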

    def check(self, revision_ids=None, callback_refs=None, check_repo=True):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
            will be checked.  Typically the last revision_id of a branch.
        :param callback_refs: A dict of check-refs to resolve and callback
            the check/_check method on the items listed as wanting the ref.
            see bzrlib.check.
        :param check_repo: If False do not check the repository contents, just
            calculate the data callback_refs requires and call them back.
        """
        return self._check(revision_ids, callback_refs=callback_refs,
            check_repo=check_repo)
2724
def _check(self, revision_ids, callback_refs, check_repo):
2725
result = check.Check(self, check_repo=check_repo)
2726
result.check(callback_refs)

    def _warn_if_deprecated(self, branch=None):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        try:
            if branch is None:
                conf = config.GlobalConfig()
            else:
                conf = branch.get_config()
            if conf.suppress_warning('format_deprecation'):
                return
            warning("Format %s for %s is deprecated -"
                    " please use 'bzr upgrade' to get better performance"
                    % (self._format, self.bzrdir.transport.base))
        finally:
            _deprecation_warning_done = True

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
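
# Illustrative sketch (not part of bzrlib): copying a single revision
# between two repositories.  `source_repo`, `target_repo` and `rev_id` are
# assumed to have been created elsewhere:
#
#   source_repo.lock_read()
#   target_repo.lock_write()
#   try:
#       rev = source_repo.get_revision(rev_id)
#       tree = source_repo.revision_tree(rev_id)
#       install_revision(target_repo, rev, tree)
#   finally:
#       target_repo.unlock()
#       source_repo.unlock()
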
def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)

class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.

        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)

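# Illustrative sketch (not part of bzrlib): the 'no-working-trees' policy
# flag toggled by MetaDirRepository.set_make_working_trees above.  `repo`
# is assumed to be a write-locked MetaDirRepository created elsewhere:
#
#   repo.set_make_working_trees(False)  # writes the 'no-working-trees' file
#   repo.make_working_trees()           # -> False
#   repo.set_make_working_trees(True)   # deletes the flag file again
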
network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""

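
# Illustrative sketch (not part of bzrlib): both registries are keyed by
# byte strings, and for metadir formats the disk format string and the
# network name are identical, so a lookup might read:
#
#   fmt = format_registry.get(
#       'Bazaar repository format 2a (needs bzr 1.16 or later)\n')
#   same = network_format_registry.get(fmt.network_name())
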
#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should commit add an inventory, or an inventory delta to the repository.
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas ?
    # (E.g. knits, where the knit deltas can be transplanted intact.
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # compressor.
    pack_compresses = False
    # Does the repository inventory storage understand references to trees?
    supports_tree_reference = None
    # Is the format experimental ?
    experimental = False

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get_bytes("format")
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format
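
    # Illustrative sketch (not part of bzrlib): the default format is
    # resolved through bzrdir's format registry rather than stored on this
    # class, so querying it might read:
    #
    #   default = RepositoryFormat.get_default_format()
    #   description = default.get_format_description()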

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  transport,
                                  control_files,
                                  name,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        if self.rich_root_data and not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format,
                from_format=self)
        if (self.supports_tree_reference and
            not getattr(target_format, 'supports_tree_reference', False)):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format,
                from_format=self)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)
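
    # Illustrative sketch (not part of bzrlib): check_conversion_target
    # raises BadConversionTarget rather than returning False, so a caller
    # deciding whether an upgrade is possible typically wraps it:
    #
    #   try:
    #       source_format.check_conversion_target(target_format)
    #   except errors.BadConversionTarget, e:
    #       pass  # refuse the conversion, reporting e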


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()

# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directories
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
)
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
)

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )

format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
        'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )
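
# Illustrative sketch (not part of bzrlib): register_lazy means the format
# module is only imported when the format string is actually looked up, so
# a lookup such as the following triggers the import of
# bzrlib.repofmt.pack_repo during the get() call:
#
#   fmt = format_registry.get(
#       'Bazaar pack repository format 1 (needs bzr 0.92)\n')
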
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _walk_to_common_revisions_batch_size = 50
    _optimisers = []
    """The available optimised InterRepository types."""

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: ignored.
        :return: None.
        """
        ui.ui_factory.warn_experimental_format_fetch(self)
        f = _mod_fetch.RepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               fetch_spec=fetch_spec,
                               find_ghosts=find_ghosts)
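    # Illustrative sketch (not part of bzrlib): callers usually go through
    # the InterObject machinery, which selects the best registered optimiser
    # for the (source, target) pair.  `source_repo`, `target_repo` and
    # `rev_id` are assumed to exist and be locked appropriately:
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   inter.fetch(revision_id=rev_id)
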
    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories, e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")

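# Illustrative sketch (not part of bzrlib): _same_model is the quiet
# variant of _assert_same_model, useful for choosing a code path:
#
#   if InterRepository._same_model(source_repo, target_repo):
#       pass  # a plain data copy is sufficient
#   else:
#       pass  # a converting fetch (e.g. InterDifferingSerializer) is needed
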
class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplemented):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('Copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)

class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)

class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target.  IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured.  (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.inventory._make_delta(basis_tree.inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :param a_graph: A Graph object to determine the heads() of the
            rich-root data stream.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                kind = entry.kind
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source, graph=a_graph)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert.  We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch too.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        a_graph = None
        if self._converting_to_rich_root and len(revision_ids) > 100:
            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
                                                            revision_ids)
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache,
                                             a_graph=a_graph)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        #
        # nb this is only active for local-local fetches; other things using
        # streaming.
        ui.ui_factory.warn_cross_format_fetch(self.source._format,
            self.target._format)
        ui.ui_factory.warn_experimental_format_fetch(self)
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if its a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)

class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        pb = ui.ui_factory.nested_progress_bar()
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        pb.update('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        pb.update('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            pb.update('Copying content')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        pb.update('Deleting old repository content')
        self.repo_dir.transport.delete_tree('repository.backup')
        ui.ui_factory.note('repository converted')
        pb.finished()


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)

class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None, ancestors=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references, ancestors=ancestors)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        local_progress = None
        if progress_bar is None:
            local_progress = ui.ui_factory.nested_progress_bar()
            progress_bar = local_progress
        try:
            return self._check_file_version_parents(texts, progress_bar)
        finally:
            if local_progress:
                local_progress.finished()

    def _check_file_version_parents(self, texts, progress_bar):
        """See check_file_version_parents."""
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys

def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph

class StreamSink(object):
4189
"""An object that can insert a stream into a repository.
4191
This interface handles the complexity of reserialising inventories and
4192
revisions from different formats, and allows unidirectional insertion into
4193
stacked repositories without looking for the missing basis parents
4197
def __init__(self, target_repo):
4198
self.target_repo = target_repo
4200
def insert_stream(self, stream, src_format, resume_tokens):
4201
"""Insert a stream's content into the target repository.
4203
:param src_format: a bzr repository format.
4205
:return: a list of resume tokens and an iterable of keys additional
4206
items required before the insertion can be completed.
4208
self.target_repo.lock_write()
4211
self.target_repo.resume_write_group(resume_tokens)
4214
self.target_repo.start_write_group()
4217
# locked_insert_stream performs a commit|suspend.
4218
return self._locked_insert_stream(stream, src_format, is_resume)
4220
self.target_repo.abort_write_group(suppress_errors=True)
4223
self.target_repo.unlock()
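
    # Sketch of the calling protocol (for illustration; not part of the
    # original class): keep re-inserting until the sink reports nothing
    # missing, resuming the suspended write group with the returned tokens.
    # ``source`` is assumed to be a StreamSource-like object.
    def _demo_insert_until_complete(self, source, search, src_format):
        resume_tokens, missing_keys = self.insert_stream(
            source.get_stream(search), src_format, [])
        while missing_keys:
            stream = source.get_stream_for_missing_keys(missing_keys)
            resume_tokens, missing_keys = self.insert_stream(
                stream, src_format, resume_tokens)
        return resume_tokens
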
    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                ui.ui_factory.warn_cross_format_fetch(src_format,
                    self.target_repo._format)
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()
    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta, err:
                trace.mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)
    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv
    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)
    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for the
        # same problem.
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]
    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []
    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)
    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink. (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream
    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)
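
    # Example (illustration only; not in the original source): streaming
    # from a pack-0.92 repository (rich_root_data False) into a 2a
    # repository (rich_root_data True) is a rich-root upgrade, so
    # inventory_fetch_order() returns 'topological' and
    # _generate_root_texts() synthesises the root texts; copying between
    # two 2a repositories is not, and the format's normal fetch order is
    # used.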
    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))
    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventorydeltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
                                       delta_versus_null=delta_versus_null))
    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g. streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against. Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
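

# Illustrative helper (not part of the original module) mirroring the
# basis-selection rule in _stream_invs_as_deltas above: among the bases the
# remote side is known to have, the shortest candidate delta wins; with no
# usable basis the delta is taken against NULL_REVISION, i.e. a complete
# inventory is sent.
def _demo_choose_smallest_basis(candidate_deltas):
    # candidate_deltas maps usable parent ids to their inventory deltas.
    basis_id = _mod_revision.NULL_REVISION
    best_delta = None
    for parent_id, candidate in sorted(candidate_deltas.items()):
        if best_delta is None or len(candidate) < len(best_delta):
            best_delta = candidate
            basis_id = parent_id
    return basis_id, best_delta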


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered. Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present. When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present. When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
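

# Illustrative sketch (not part of the original module): using
# _iter_for_revno to find the revision a fixed number of steps back along
# the left-hand history from a known tip.
def _demo_revision_at_distance(repo, tip_revision_id, distance):
    partial_history = [tip_revision_id]
    _iter_for_revno(repo, partial_history, stop_index=distance)
    if len(partial_history) <= distance:
        # History is shorter than requested.
        return None
    return partial_history[distance]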


class RepositoryFormat7(RepositoryFormat):

    def open(self, a_bzrdir, _found=False):
        """See RepositoryFormat.open()."""
        return Repository(_format=self, a_bzrdir=a_bzrdir)

    def __init__(self):
        super(RepositoryFormat7, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirMetaFormat1()


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
__default_format = RepositoryFormat7()
RepositoryFormat.register_format(__default_format)
RepositoryFormat.set_default_format(__default_format)
_legacy_formats = [RepositoryFormat4(),
                   RepositoryFormat5(),
                   RepositoryFormat6()]

# TODO: jam 20060108 Create a new branch format, and as part of upgrade
#       make sure that ancestry.weave is deleted (it is never used, but
#       used to be created)


class RepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple repository formats at once.

    This is done by copying the test once for each transport and injecting
    the transport_server, transport_readonly_server, and bzrdir_format and
    repository_format classes into each copy. Each copy is also given a new
    id() to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        result = TestSuite()
        for repository_format, bzrdir_format in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.bzrdir_format = bzrdir_format
            new_test.repository_format = repository_format
            def make_new_test_id():
                new_id = "%s(%s)" % (new_test.id(),
                                     repository_format.__class__.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        return result
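

# Illustrative usage (not part of the original module): build a suite that
# runs one test against every legacy format, pairing each repository format
# with its matching bzrdir format. This assumes the legacy formats expose a
# _matchingbzrdir attribute, as RepositoryFormat7 above does; the server
# classes are whatever the test infrastructure provides.
def _demo_adapt_for_legacy_formats(test, transport_server,
                                   transport_readonly_server):
    formats = [(format, format._matchingbzrdir)
               for format in _legacy_formats]
    adapter = RepositoryTestProviderAdapter(
        transport_server, transport_readonly_server, formats)
    return adapter.adapt(test)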