WorkingTree.open(dir).
"""

# TODO: Give the workingtree sole responsibility for the working inventory;
# remove the variable and references to it from the branch. This may require
# updating the commit code so as to update the inventory within the working
# copy, and making sure there's only one WorkingTree for any directory on disk.
# At the moment they may alias the inventory and have old copies of it in
# memory. (Now done? -- mbp 20060309)

from binascii import hexlify
import collections
from copy import deepcopy
from cStringIO import StringIO
import errno
import os
import re
import stat
import warnings
from time import time

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bisect import bisect_left

from bzrlib import (
    branch,
    conflicts as _mod_conflicts,
    generate_ids,
    globbing,
    graph as _mod_graph,
    ignores,
    inventory,
    merge,
    revision as _mod_revision,
    trace,
    xml5,
    xml7,
    )
from bzrlib.workingtree_4 import (
    WorkingTreeFormat4,
    )
""")

from bzrlib import bzrdir, errors, osutils, urlutils
from bzrlib import symbol_versioning
from bzrlib.atomicfile import AtomicFile
from bzrlib.conflicts import Conflict, ConflictList, CONFLICT_SUFFIXES
from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.errors import (BzrCheckError,
                           MergeModifiedFormatError,
                           WeaveRevisionNotPresent,
                           )
from bzrlib.filters import filtered_input_file
from bzrlib.inventory import InventoryEntry, Inventory
from bzrlib.lock import LogicalLockResult
from bzrlib.lockable_files import LockableFiles, TransportLock
from bzrlib.lockdir import LockDir
from bzrlib.merge import merge_inner, transform_tree
import bzrlib.mutabletree
from bzrlib.mutabletree import needs_tree_write_lock
from bzrlib.osutils import (
    compact_date,
    file_kind,
    isdir,
    normpath,
    pathjoin,
    rand_chars,
    rename,
    splitpath,
    supports_executable,
    )
from bzrlib.progress import DummyProgress, ProgressPhase
from bzrlib.revision import CURRENT_REVISION, NULL_REVISION
from bzrlib.rio import RioReader, rio_file, Stanza
from bzrlib.symbol_versioning import (
    deprecated_method,
    deprecated_passed,
    zero_eight,
    DEPRECATED_PARAMETER,
    )
from bzrlib.textui import show_status
from bzrlib.trace import mutter, note
from bzrlib.transform import build_tree
from bzrlib.transport import get_transport
from bzrlib.transport.local import LocalTransport
import bzrlib.xml5
MERGE_MODIFIED_HEADER_1 = "BZR merge-modified list format 1"
# TODO: Modifying the conflict objects or their type is currently nearly
# impossible as there is no clear relationship between the working tree format
# and the conflict list file format.
CONFLICT_HEADER_1 = "BZR conflict list format 1"

ERROR_PATH_NOT_FOUND = 3    # WindowsError errno code, equivalent to ENOENT

# the regex here does the following:
# 1) remove any weird characters; we don't escape them but rather
#    just remove them
# 2) match leading '.'s to make it not hidden
_gen_file_id_re = re.compile(r'[^\w.]|(^\.*)')
_gen_id_suffix = None
_gen_id_serial = 0


def _next_id_suffix():
    """Create a new file id suffix that is reasonably unique.

    On the first call we combine the current time with 64 bits of randomness
    to give a highly probable globally unique number. Then each call in the
    same process adds 1 to a serial number we append to that unique value.
    """
    # XXX TODO: change bzrlib.add.smart_add to call workingtree.add() rather
    # than having to move the id randomness out of the inner loop like this.
    # XXX TODO: for the global randomness this uses we should add the thread-id
    # before the serial #.
    global _gen_id_suffix, _gen_id_serial
    if _gen_id_suffix is None:
        _gen_id_suffix = "-%s-%s-" % (compact_date(time()), rand_chars(16))
    _gen_id_serial += 1
    return _gen_id_suffix + str(_gen_id_serial)


def gen_file_id(name):
    """Return new file id for the basename 'name'.

    The uniqueness is supplied from _next_id_suffix.
    """
    # XXX TODO: squash the filename to lowercase.
    # XXX TODO: truncate the filename to something like 20 or 30 chars.
    # XXX TODO: consider what to do with ids that look like illegal filepaths
    # on platforms we support.
    return _gen_file_id_re.sub('', name) + _next_id_suffix()


def gen_root_id():
    """Return a new tree-root file id."""
    return gen_file_id('TREE_ROOT')
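
# Illustrative sketch (not part of bzrlib): what the id generators above
# produce.  The suffix embeds the current time, 16 random characters and a
# serial number, so the concrete ids vary from run to run; the helper name
# below is made up for demonstration.
def _example_gen_file_id():
    fid = gen_file_id('Hello World.txt')
    # _gen_file_id_re strips the space (and would strip leading dots), so fid
    # looks like 'HelloWorld.txt-<compact date>-<16 random chars>-<serial>'.
    root_id = gen_root_id()
    # Root ids use the same scheme applied to the name 'TREE_ROOT'.
    return fid, root_id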
class TreeEntry(object):
    """An entry that implements the minimum interface used by commands.

    This needs further inspection, it may be better to have
    InventoryEntries without ids - though that seems wrong. For now,
    this is a parallel hierarchy to InventoryEntry, and needs to become
    one of several things: decorates to that hierarchy, children of, or
    parents of it.
    """
    def __iter__(self):
        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):
                yield ie.file_id

    def all_file_ids(self):
        """See Tree.iter_all_file_ids"""
        return set(self.inventory)
    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)
    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        # at this point?
        try:
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, its a problem, if its not
            # its a ghost.
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)

    def _cleanup(self):
        self._flush_ignore_list_cache()
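
    # Illustrative sketch (not part of bzrlib): typical use of basis_tree().
    # 'some/tree' is a hypothetical path to an existing working tree.
    @staticmethod
    def _example_basis_tree():
        wt = WorkingTree.open('some/tree')
        wt.lock_read()
        try:
            basis = wt.basis_tree()
            # With no parents (a brand new tree) this is the empty revision
            # tree for NULL_REVISION; otherwise it is the tree of the
            # leftmost parent revision.
            return basis
        finally:
            wt.unlock()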
    @staticmethod
    @deprecated_method(zero_eight)
    def create(branch, directory):
        """Create a workingtree for branch at directory.

        If existing_directory already exists it must have a .bzr directory.
        If it does not exist, it will be created.

        This returns a new WorkingTree object for the new checkout.

        TODO FIXME RBC 20060124 when we have checkout formats in place this
        should accept an optional revisionid to checkout [and reject this if
        checking out into the same dir as a pre-checkout-aware branch format.]

        XXX: When BzrDir is present, these should be created through that
        interface instead.
        """
        warnings.warn('delete WorkingTree.create', stacklevel=3)
        transport = get_transport(directory)
        if branch.bzrdir.root_transport.base == transport.base:
            return branch.bzrdir.create_workingtree()
        # different directory,
        # create a branch reference
        # and now a working tree.
        raise NotImplementedError

    @staticmethod
    @deprecated_method(zero_eight)
    def create_standalone(directory):
        """Create a checkout and a branch and a repo at directory.

        Directory must exist and be empty.

        please use BzrDir.create_standalone_workingtree
        """
        return bzrdir.BzrDir.create_standalone_workingtree(directory)
    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If it's a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))
    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

    def get_file_with_stat(self, file_id, path=None, filtered=True,
        _fstat=os.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        my_file = self.get_file(file_id, path=path, filtered=filtered)
        try:
            return my_file.read()
        finally:
            my_file.close()

    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()
550
def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
551
"""See Tree.annotate_iter
553
This implementation will use the basis tree implementation if possible.
554
Lines not in the basis are attributed to CURRENT_REVISION
556
If there are pending merges, lines added by those merges will be
557
incorrectly attributed to CURRENT_REVISION (but after committing, the
558
attribution will be correct).
560
maybe_file_parent_keys = []
561
for parent_id in self.get_parent_ids():
563
parent_tree = self.revision_tree(parent_id)
564
except errors.NoSuchRevisionInTree:
565
parent_tree = self.branch.repository.revision_tree(parent_id)
566
parent_tree.lock_read()
568
if file_id not in parent_tree:
570
ie = parent_tree.inventory[file_id]
571
if ie.kind != 'file':
572
# Note: this is slightly unnecessary, because symlinks and
573
# directories have a "text" which is the empty text, and we
574
# know that won't mess up annotations. But it seems cleaner
576
parent_text_key = (file_id, ie.revision)
577
if parent_text_key not in maybe_file_parent_keys:
578
maybe_file_parent_keys.append(parent_text_key)
581
graph = _mod_graph.Graph(self.branch.repository.texts)
582
heads = graph.heads(maybe_file_parent_keys)
583
file_parent_keys = []
584
for key in maybe_file_parent_keys:
586
file_parent_keys.append(key)
588
# Now we have the parents of this content
589
annotator = self.branch.repository.texts.get_annotator()
590
text = self.get_file_text(file_id)
591
this_key =(file_id, default_revision)
592
annotator.add_special_text(this_key, file_parent_keys, text)
593
annotations = [(key[-1], line)
594
for key, line in annotator.annotate_flat(this_key)]
597
def _get_ancestors(self, default_revision):
598
ancestors = set([default_revision])
599
for parent_id in self.get_parent_ids():
600
ancestors.update(self.branch.repository.get_ancestry(
601
parent_id, topo_sorted=False))
    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_bytes = self._transport.get_bytes('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in osutils.split_lines(merges_bytes):
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
        return parents
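
    # Illustrative sketch (not bzrlib API): how the parents list above is
    # assembled.  The leftmost parent is the tree's last revision; every
    # further line of the 'pending-merges' control file contributes one
    # merged parent.  The revision ids here are made up.
    @staticmethod
    def _example_parse_pending_merges():
        last_rev = 'rev-A'
        merges_bytes = 'rev-B\nrev-C\n'     # contents of 'pending-merges'
        parents = [last_rev]
        for line in merges_bytes.splitlines():
            parents.append(line.rstrip('\n'))
        return parents                      # ['rev-A', 'rev-B', 'rev-C']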
    def get_root_id(self):
        """Return the id of this tree's root"""
        return self._inventory.root.file_id
630
477
def _get_store_filename(self, file_id):
631
478
## XXX: badly named; this is not in the store at all
632
479
return self.abspath(self.id2path(file_id))
    def clone(self, to_bzrdir, revision_id=None):
        """Duplicate this working tree into to_bzr, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see bzrdir.sprout()

        revision
            If not None, the cloned tree will have its last revision set to
            revision, and difference between the source trees last revision
            and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = to_bzrdir.create_workingtree()
        self.copy_content_into(result, revision_id)
        return result
    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
        else:
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            merge.transform_tree(tree, self)
            tree.set_parent_ids([revision_id])
515
def commit(self, message=None, revprops=None, *args, **kwargs):
516
# avoid circular imports
517
from bzrlib.commit import Commit
520
if not 'branch-nick' in revprops:
521
revprops['branch-nick'] = self.branch.nick
522
# args for wt.commit start at message from the Commit.commit method,
523
# but with branch a kwarg now, passing in args as is results in the
524
#message being used for the branch
525
args = (DEPRECATED_PARAMETER, message, ) + args
526
committed_id = Commit().commit( working_tree=self, revprops=revprops,
528
self._set_inventory(self.read_working_inventory())
665
531
def id2abspath(self, file_id):
666
532
return self.abspath(self.id2path(file_id))
668
534
def has_id(self, file_id):
669
535
# files that have been deleted are excluded
536
inv = self._inventory
671
537
if not inv.has_id(file_id):
673
539
path = inv.id2path(file_id)
674
return osutils.lexists(self.abspath(path))
540
return bzrlib.osutils.lexists(self.abspath(path))
676
542
def has_or_had_id(self, file_id):
677
543
if file_id == self.inventory.root.file_id:
681
547
__contains__ = has_id
683
549
    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        # XXX: this returns the on-disk size; it should probably return the
        # canonical size
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)
701
558
    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        if file_id is None:
            # For unversioned files on win32, we just assume they are not
            # executable
            return False
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat
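
    # Illustrative sketch (not bzrlib API): the stat-based executable check
    # used above, shown on a plain path.  Only meaningful on platforms whose
    # filesystem records an executable bit.
    @staticmethod
    def _example_is_executable(path):
        import os, stat
        mode = os.lstat(path).st_mode
        # A regular file with the owner-execute bit set counts as executable.
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)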
734
@needs_tree_write_lock
735
def _add(self, files, ids, kinds):
736
"""See MutableTree._add."""
574
def add(self, files, ids=None):
575
"""Make files versioned.
577
Note that the command line normally calls smart_add instead,
578
which can automatically recurse.
580
This adds the files to the inventory, so that they will be
581
recorded by the next commit.
584
List of paths to add, relative to the base of the tree.
587
If set, use these instead of automatically generated ids.
588
Must be the same length as the list of files, but may
589
contain None for ids that are to be autogenerated.
591
TODO: Perhaps have an option to add the ids even if the files do
594
TODO: Perhaps callback with the ids and paths as they're added.
737
596
# TODO: Re-adding a file that is removed in the working copy
738
597
# should probably put it back with the previous ID.
739
# the read and write working inventory should not occur in this
740
# function - they should be part of lock_write and unlock.
742
for f, file_id, kind in zip(files, ids, kinds):
598
if isinstance(files, basestring):
599
assert(ids is None or isinstance(ids, basestring))
605
ids = [None] * len(files)
607
assert(len(ids) == len(files))
609
inv = self.read_working_inventory()
610
for f,file_id in zip(files, ids):
611
if self.is_control_filename(f):
612
raise errors.ForbiddenControlFileError(filename=f)
617
raise BzrError("cannot add top-level %r" % f)
619
fullpath = normpath(self.abspath(f))
621
kind = file_kind(fullpath)
623
if e.errno == errno.ENOENT:
624
raise NoSuchFile(fullpath)
625
if not InventoryEntry.versionable_kind(kind):
626
raise errors.BadFileKindError(filename=f, kind=kind)
743
627
if file_id is None:
744
628
inv.add_path(f, kind=kind)
746
630
inv.add_path(f, kind=kind, file_id=file_id)
747
self._inventory_is_modified = True
749
@needs_tree_write_lock
750
def _gather_kinds(self, files, kinds):
751
"""See MutableTree._gather_kinds."""
752
for pos, f in enumerate(files):
753
if kinds[pos] is None:
754
fullpath = normpath(self.abspath(f))
756
kinds[pos] = file_kind(fullpath)
758
if e.errno == errno.ENOENT:
759
raise errors.NoSuchFile(fullpath)
632
self._write_inventory(inv)
761
634
    @needs_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as its not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)
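
    # Illustrative sketch (not part of bzrlib): add_parent_tree_id() is the
    # same as appending to the current parent list by hand.  'tree' is any
    # write-locked WorkingTree and 'rev-B' a hypothetical merged revision.
    @staticmethod
    def _example_add_parent(tree):
        parents = tree.get_parent_ids() + ['rev-B']
        tree.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1)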
777
@needs_tree_write_lock
778
def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
779
"""Add revision_id, tree tuple as a parent.
781
This is equivalent to retrieving the current list of parent trees
782
and setting the list to its value plus parent_tuple. See also
783
add_parent_tree_id - if you only have a parent id available it will be
784
simpler to use that api. If you have the parent already available, using
785
this api is preferred.
787
:param parent_tuple: The (revision id, tree) to add to the parent list.
788
If the revision_id is a ghost, pass None for the tree.
789
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
791
parent_ids = self.get_parent_ids() + [parent_tuple[0]]
792
if len(parent_ids) > 1:
793
# the leftmost may have already been a ghost, preserve that if it
795
allow_leftmost_as_ghost = True
796
self.set_parent_ids(parent_ids,
797
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
799
@needs_tree_write_lock
800
635
def add_pending_merge(self, *revision_ids):
801
636
# TODO: Perhaps should check at this point that the
802
637
# history of the revision is actually present?
803
parents = self.get_parent_ids()
638
p = self.pending_merges()
805
640
for rev_id in revision_ids:
806
if rev_id in parents:
808
parents.append(rev_id)
811
self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
813
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file.
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            return self._file_content_summary(path, stat_result)
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            target = osutils.readlink(abspath)
            return ('symlink', None, None, target)
        else:
            return (kind, None, None, None)

    def _file_content_summary(self, path, stat_result):
        size = stat_result.st_size
        executable = self._is_executable_from_path_and_stat(path, stat_result)
        # try for a stat cache lookup
        return ('file', size, executable, self._sha_from_stat(
            path, stat_result))
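
    # Illustrative sketch (not bzrlib API): the 4-tuples returned above are
    # (kind, size, executable, sha1-or-link-target), with None for slots that
    # do not apply; the sha1 slot is left None here because this sketch has
    # no stat cache.
    @staticmethod
    def _example_summarize(path):
        import errno, os, stat
        try:
            st = os.lstat(path)
        except OSError, e:
            if e.errno == errno.ENOENT:
                return ('missing', None, None, None)
            raise
        if stat.S_ISREG(st.st_mode):
            executable = bool(st.st_mode & stat.S_IEXEC)
            return ('file', st.st_size, executable, None)
        elif stat.S_ISDIR(st.st_mode):
            return ('directory', None, None, None)
        elif stat.S_ISLNK(st.st_mode):
            return ('symlink', None, None, os.readlink(path))
        return ('unknown', None, None, None)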
847
def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
848
"""Common ghost checking functionality from set_parent_*.
850
This checks that the left hand-parent exists if there are any
853
if len(revision_ids) > 0:
854
leftmost_id = revision_ids[0]
855
if (not allow_leftmost_as_ghost and not
856
self.branch.repository.has_revision(leftmost_id)):
857
raise errors.GhostRevisionUnusableHere(leftmost_id)
859
def _set_merges_from_parent_ids(self, parent_ids):
860
merges = parent_ids[1:]
861
self._transport.put_bytes('pending-merges', '\n'.join(merges),
862
mode=self.bzrdir._get_file_mode())
864
    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        which are proper heads.
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids
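
    # Illustrative sketch (not bzrlib API): the filtering above keeps the
    # first id unconditionally and drops any merged id that is an ancestor
    # of another parent.  A toy head filter over an explicit parent map,
    # using made-up revision names.
    @staticmethod
    def _example_filter_heads():
        parent_map = {'A': ('B',), 'B': (), 'C': ()}   # A descends from B

        def ancestors(rev):
            seen, todo = set(), list(parent_map[rev])
            while todo:
                r = todo.pop()
                if r not in seen:
                    seen.add(r)
                    todo.extend(parent_map[r])
            return seen

        revision_ids = ['A', 'B', 'C']
        heads = [r for r in revision_ids
                 if not any(r in ancestors(o) for o in revision_ids if o != r)]
        filtered = revision_ids[:1] + [r for r in revision_ids[1:] if r in heads]
        return filtered   # ['A', 'C']; 'B' is dropped as an ancestor of 'A'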
883
@needs_tree_write_lock
884
def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
885
"""Set the parent ids to revision_ids.
887
See also set_parent_trees. This api will try to retrieve the tree data
888
for each element of revision_ids from the trees repository. If you have
889
tree data already available, it is more efficient to use
890
set_parent_trees rather than set_parent_ids. set_parent_ids is however
891
an easier API to use.
893
:param revision_ids: The revision_ids to set as the parent ids of this
894
working tree. Any of these may be ghosts.
896
self._check_parents_for_ghosts(revision_ids,
897
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
898
for revision_id in revision_ids:
899
_mod_revision.check_not_reserved_id(revision_id)
901
revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)
903
if len(revision_ids) > 0:
904
self.set_last_revision(revision_ids[0])
906
self.set_last_revision(_mod_revision.NULL_REVISION)
908
self._set_merges_from_parent_ids(revision_ids)
910
@needs_tree_write_lock
911
def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
912
"""See MutableTree.set_parent_trees."""
913
parent_ids = [rev for (rev, tree) in parents_list]
914
for revision_id in parent_ids:
915
_mod_revision.check_not_reserved_id(revision_id)
917
self._check_parents_for_ghosts(parent_ids,
918
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
920
parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)
922
if len(parent_ids) == 0:
923
leftmost_parent_id = _mod_revision.NULL_REVISION
924
leftmost_parent_tree = None
926
leftmost_parent_id, leftmost_parent_tree = parents_list[0]
928
if self._change_last_revision(leftmost_parent_id):
929
if leftmost_parent_tree is None:
930
# If we don't have a tree, fall back to reading the
931
# parent tree from the repository.
932
self._cache_basis_inventory(leftmost_parent_id)
934
inv = leftmost_parent_tree.inventory
935
xml = self._create_basis_xml_from_inventory(
936
leftmost_parent_id, inv)
937
self._write_basis_inventory(xml)
938
self._set_merges_from_parent_ids(parent_ids)
940
@needs_tree_write_lock
656
merges_file = self._control_files.get_utf8('pending-merges')
660
for l in merges_file.readlines():
661
p.append(l.rstrip('\n'))
941
665
def set_pending_merges(self, rev_list):
942
parents = self.get_parent_ids()
943
leftmost = parents[:1]
944
new_parents = leftmost + rev_list
945
self.set_parent_ids(new_parents)
666
self._control_files.put_utf8('pending-merges', '\n'.join(rev_list))
947
    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
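
    # Illustrative sketch (not part of bzrlib's public API): the
    # merge-modified list is a rio file, one stanza per file id.  The ids and
    # hashes below are made up.
    @staticmethod
    def _example_merge_modified_stanzas():
        from bzrlib.rio import Stanza, rio_file
        stanzas = [Stanza(file_id=u'foo-id', hash='0' * 40),
                   Stanza(file_id=u'bar-id', hash='1' * 40)]
        # rio_file() prepends the header line and yields the serialised form
        # that set_merge_modified() writes to the 'merge-hashes' file.
        return rio_file(stanzas, MERGE_MODIFIED_HEADER_1).read()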
954
def _sha_from_stat(self, path, stat_result):
955
"""Get a sha digest from the tree's stat cache.
957
The default implementation assumes no stat cache is present.
959
:param path: The path.
960
:param stat_result: The stat result being looked up.
964
676
def _put_rio(self, filename, stanzas, header):
965
self._must_be_locked()
966
677
my_file = rio_file(stanzas, header)
967
self._transport.put_file(filename, my_file,
968
mode=self.bzrdir._get_file_mode())
970
@needs_write_lock # because merge pulls data into the branch.
971
def merge_from_branch(self, branch, to_revision=None, from_revision=None,
972
merge_type=None, force=False):
973
"""Merge from a branch into this working tree.
975
:param branch: The branch to merge from.
976
:param to_revision: If non-None, the merge will merge to to_revision,
977
but not beyond it. to_revision does not need to be in the history
978
of the branch when it is supplied. If None, to_revision defaults to
979
branch.last_revision().
981
from bzrlib.merge import Merger, Merge3Merger
982
merger = Merger(self.branch, this_tree=self)
983
# check that there are no local alterations
984
if not force and self.has_changes():
985
raise errors.UncommittedChanges(self)
986
if to_revision is None:
987
to_revision = _mod_revision.ensure_null(branch.last_revision())
988
merger.other_rev_id = to_revision
989
if _mod_revision.is_null(merger.other_rev_id):
990
raise errors.NoCommits(branch)
991
self.branch.fetch(branch, last_revision=merger.other_rev_id)
992
merger.other_basis = merger.other_rev_id
993
        merger.other_tree = self.branch.repository.revision_tree(
            merger.other_rev_id)
        merger.other_branch = branch
        if from_revision is None:
            merger.find_base()
        else:
            merger.set_base_revision(from_revision, branch)
1000
if merger.base_rev_id == merger.other_rev_id:
1001
raise errors.PointlessMerge
1002
merger.backup_files = False
1003
if merge_type is None:
1004
merger.merge_type = Merge3Merger
1006
merger.merge_type = merge_type
1007
merger.set_interesting_files(None)
1008
merger.show_base = False
1009
merger.reprocess = False
1010
conflicts = merger.do_merge()
1011
merger.set_pending()
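
    # Illustrative sketch (not part of bzrlib): merging another branch into
    # this tree.  'some/other/branch' is a hypothetical location.
    @staticmethod
    def _example_merge_from_branch(wt):
        from bzrlib.branch import Branch
        other = Branch.open('some/other/branch')
        # Merges up to the other branch's tip; raises UncommittedChanges if
        # the tree has local modifications and force is not set.
        conflicts = wt.merge_from_branch(other)
        return conflicts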
678
self._control_files.put(filename, my_file)
1014
680
@needs_read_lock
1015
681
def merge_modified(self):
1016
"""Return a dictionary of files modified by a merge.
1018
The list is initialized by WorkingTree.set_merge_modified, which is
1019
typically called after we make some automatic updates to the tree
1022
This returns a map of file_id->sha1, containing only files which are
1023
still in the working inventory and have that text hash.
1026
hashfile = self._transport.get('merge-hashes')
1027
except errors.NoSuchFile:
683
hashfile = self._control_files.get('merge-hashes')
1032
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
1033
raise errors.MergeModifiedFormatError()
1034
except StopIteration:
1035
raise errors.MergeModifiedFormatError()
1036
for s in RioReader(hashfile):
1037
# RioReader reads in Unicode, so convert file_ids back to utf8
1038
file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
1039
if file_id not in self.inventory:
1041
text_hash = s.get("hash")
1042
if text_hash == self.get_file_sha1(file_id):
1043
merge_hashes[file_id] = text_hash
1049
def mkdir(self, path, file_id=None):
1050
"""See MutableTree.mkdir()."""
1052
file_id = generate_ids.gen_file_id(os.path.basename(path))
1053
os.mkdir(self.abspath(path))
1054
self.add(path, file_id, 'directory')
688
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
689
raise MergeModifiedFormatError()
690
except StopIteration:
691
raise MergeModifiedFormatError()
692
for s in RioReader(hashfile):
693
file_id = s.get("file_id")
694
if file_id not in self.inventory:
697
if hash == self.get_file_sha1(file_id):
698
merge_hashes[file_id] = hash
1057
701
def get_symlink_target(self, file_id):
1058
abspath = self.id2abspath(file_id)
1059
target = osutils.readlink(abspath)
1063
def subsume(self, other_tree):
1064
def add_children(inventory, entry):
1065
for child_entry in entry.children.values():
1066
inventory._byid[child_entry.file_id] = child_entry
1067
if child_entry.kind == 'directory':
1068
add_children(inventory, child_entry)
1069
if other_tree.get_root_id() == self.get_root_id():
1070
raise errors.BadSubsumeSource(self, other_tree,
1071
'Trees have the same root')
1073
other_tree_path = self.relpath(other_tree.basedir)
1074
except errors.PathNotChild:
1075
raise errors.BadSubsumeSource(self, other_tree,
1076
'Tree is not contained by the other')
1077
new_root_parent = self.path2id(osutils.dirname(other_tree_path))
1078
if new_root_parent is None:
1079
raise errors.BadSubsumeSource(self, other_tree,
1080
'Parent directory is not versioned.')
1081
# We need to ensure that the result of a fetch will have a
1082
# versionedfile for the other_tree root, and only fetching into
1083
# RepositoryKnit2 guarantees that.
1084
if not self.branch.repository.supports_rich_root():
1085
raise errors.SubsumeTargetNeedsUpgrade(other_tree)
1086
other_tree.lock_tree_write()
1088
new_parents = other_tree.get_parent_ids()
1089
other_root = other_tree.inventory.root
1090
other_root.parent_id = new_root_parent
1091
other_root.name = osutils.basename(other_tree_path)
1092
self.inventory.add(other_root)
1093
add_children(self.inventory, other_root)
1094
self._write_inventory(self.inventory)
1095
# normally we don't want to fetch whole repositories, but i think
1096
# here we really do want to consolidate the whole thing.
1097
for parent_id in other_tree.get_parent_ids():
1098
self.branch.fetch(other_tree.branch, parent_id)
1099
self.add_parent_tree_id(parent_id)
1102
other_tree.bzrdir.retire_bzrdir()
1104
def _setup_directory_is_tree_reference(self):
1105
if self._branch.repository._format.supports_tree_reference:
1106
self._directory_is_tree_reference = \
1107
self._directory_may_be_tree_reference
1109
self._directory_is_tree_reference = \
1110
self._directory_is_never_tree_reference
1112
def _directory_is_never_tree_reference(self, relpath):
1115
def _directory_may_be_tree_reference(self, relpath):
1116
# as a special case, if a directory contains control files then
1117
# it's a tree reference, except that the root of the tree is not
1118
return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
1119
# TODO: We could ask all the control formats whether they
1120
# recognize this directory, but at the moment there's no cheap api
1121
# to do that. Since we probably can only nest bzr checkouts and
1122
# they always use this name it's ok for now. -- mbp 20060306
1124
# FIXME: There is an unhandled case here of a subdirectory
1125
# containing .bzr but not a branch; that will probably blow up
1126
# when you try to commit it. It might happen if there is a
1127
# checkout in a subdirectory. This can be avoided by not adding
1130
@needs_tree_write_lock
1131
def extract(self, file_id, format=None):
1132
"""Extract a subtree from this tree.
1134
A new branch will be created, relative to the path for this tree.
1138
segments = osutils.splitpath(path)
1139
transport = self.branch.bzrdir.root_transport
1140
for name in segments:
1141
transport = transport.clone(name)
1142
transport.ensure_base()
1145
sub_path = self.id2path(file_id)
1146
branch_transport = mkdirs(sub_path)
1148
format = self.bzrdir.cloning_metadir()
1149
branch_transport.ensure_base()
1150
branch_bzrdir = format.initialize_on_transport(branch_transport)
1152
repo = branch_bzrdir.find_repository()
1153
except errors.NoRepositoryPresent:
1154
repo = branch_bzrdir.create_repository()
1155
if not repo.supports_rich_root():
1156
raise errors.RootNotRich()
1157
new_branch = branch_bzrdir.create_branch()
1158
new_branch.pull(self.branch)
1159
for parent_id in self.get_parent_ids():
1160
new_branch.fetch(self.branch, parent_id)
1161
tree_transport = self.bzrdir.root_transport.clone(sub_path)
1162
if tree_transport.base != branch_transport.base:
1163
tree_bzrdir = format.initialize_on_transport(tree_transport)
1164
branch.BranchReferenceFormat().initialize(tree_bzrdir,
1165
target_branch=new_branch)
1167
tree_bzrdir = branch_bzrdir
1168
wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
1169
wt.set_parent_ids(self.get_parent_ids())
1170
my_inv = self.inventory
1171
child_inv = inventory.Inventory(root_id=None)
1172
new_root = my_inv[file_id]
1173
my_inv.remove_recursive_id(file_id)
1174
new_root.parent_id = None
1175
child_inv.add(new_root)
1176
self._write_inventory(my_inv)
1177
wt._write_inventory(child_inv)
1180
def _serialize(self, inventory, out_file):
1181
xml5.serializer_v5.write_inventory(self._inventory, out_file,
1184
def _deserialize(selt, in_file):
1185
return xml5.serializer_v5.read_inventory(in_file)
1188
"""Write the in memory inventory to disk."""
1189
# TODO: Maybe this should only write on dirty ?
1190
if self._control_files._lock_mode != 'w':
1191
raise errors.NotWriteLocked(self)
1193
self._serialize(self._inventory, sio)
1195
self._transport.put_file('inventory', sio,
1196
mode=self.bzrdir._get_file_mode())
1197
self._inventory_is_modified = False
1199
def _kind(self, relpath):
1200
return osutils.file_kind(self.abspath(relpath))
1202
def list_files(self, include_root=False, from_dir=None, recursive=True):
1203
"""List all files as (path, class, kind, id, entry).
702
return os.readlink(self.id2abspath(file_id))
704
def file_class(self, filename):
705
if self.path2id(filename):
707
elif self.is_ignored(filename):
712
def list_files(self):
713
"""Recursively list all files as (path, class, kind, id, entry).
1205
715
Lists, but does not descend into unversioned directories.
1206
717
This does not include files that have been deleted in this
1207
tree. Skips the control directory.
1209
:param include_root: if True, return an entry for the root
1210
:param from_dir: start from this directory or None for the root
1211
:param recursive: whether to recurse into subdirectories or not
720
Skips the control directory.
1213
# list_files is an iterator, so @needs_read_lock doesn't work properly
1214
# with it. So callers should be careful to always read_lock the tree.
1215
if not self.is_locked():
1216
raise errors.ObjectNotLocked(self)
1218
inv = self.inventory
1219
if from_dir is None and include_root is True:
1220
yield ('', 'V', 'directory', inv.root.file_id, inv.root)
722
inv = self._inventory
1221
723
# Convert these into local objects to save lookup times
1222
pathjoin = osutils.pathjoin
1223
file_kind = self._kind
724
pathjoin = bzrlib.osutils.pathjoin
725
file_kind = bzrlib.osutils.file_kind
1225
727
# transport.base ends in a slash, we want the piece
1226
728
# between the last two slashes
1309
803
except KeyError:
1310
804
yield fp[1:], c, fk, None, TreeEntry()
1313
807
if fk != 'directory':
1316
# But do this child first if recursing down
1318
new_children = os.listdir(fap)
1320
new_children = collections.deque(new_children)
1321
stack.append((f_ie.file_id, fp, fap, new_children))
1322
# Break out of inner loop,
1323
# so that we start outer loop with child
810
# But do this child first
811
new_children = os.listdir(fap)
813
new_children = collections.deque(new_children)
814
stack.append((f_ie.file_id, fp, fap, new_children))
815
# Break out of inner loop, so that we start outer loop with child
1326
818
# if we finished all children, pop it off the stack
1329
@needs_tree_write_lock
1330
def move(self, from_paths, to_dir=None, after=False):
823
def move(self, from_paths, to_name):
1331
824
"""Rename files.
1333
to_dir must exist in the inventory.
1335
If to_dir exists and is a directory, the files are moved into
1336
it, keeping their old names.
1338
Note that to_dir is only the last component of the new name;
826
to_name must exist in the inventory.
828
If to_name exists and is a directory, the files are moved into
829
it, keeping their old names.
831
Note that to_name is only the last component of the new name;
1339
832
this doesn't change the directory.
1341
For each entry in from_paths the move mode will be determined
1344
The first mode moves the file in the filesystem and updates the
1345
inventory. The second mode only updates the inventory without
1346
touching the file on the filesystem. This is the new mode introduced
1349
move uses the second mode if 'after == True' and the target is not
1350
versioned but present in the working tree.
1352
move uses the second mode if 'after == False' and the source is
1353
versioned but no longer in the working tree, and the target is not
1354
versioned but present in the working tree.
1356
move uses the first mode if 'after == False' and the source is
1357
versioned and present in the working tree, and the target is not
1358
versioned and not present in the working tree.
1360
Everything else results in an error.
1362
834
This returns a list of (from_path, to_path) pairs for each
1363
835
entry that is moved.
1368
# check for deprecated use of signature
1370
raise TypeError('You must supply a target directory')
1371
# check destination directory
1372
if isinstance(from_paths, basestring):
838
## TODO: Option to move IDs only
839
assert not isinstance(from_paths, basestring)
1374
840
inv = self.inventory
1375
to_abs = self.abspath(to_dir)
841
to_abs = self.abspath(to_name)
1376
842
if not isdir(to_abs):
1377
raise errors.BzrMoveFailedError('',to_dir,
1378
errors.NotADirectory(to_abs))
1379
if not self.has_filename(to_dir):
1380
raise errors.BzrMoveFailedError('',to_dir,
1381
errors.NotInWorkingDirectory(to_dir))
1382
to_dir_id = inv.path2id(to_dir)
1383
if to_dir_id is None:
1384
raise errors.BzrMoveFailedError('',to_dir,
1385
errors.NotVersionedError(path=str(to_dir)))
843
raise BzrError("destination %r is not a directory" % to_abs)
844
if not self.has_filename(to_name):
845
raise BzrError("destination %r not in working directory" % to_abs)
846
to_dir_id = inv.path2id(to_name)
847
if to_dir_id == None and to_name != '':
848
raise BzrError("destination %r is not a versioned directory" % to_name)
1387
849
to_dir_ie = inv[to_dir_id]
1388
if to_dir_ie.kind != 'directory':
1389
raise errors.BzrMoveFailedError('',to_dir,
1390
errors.NotADirectory(to_abs))
1392
# create rename entries and tuples
1393
for from_rel in from_paths:
1394
from_tail = splitpath(from_rel)[-1]
1395
from_id = inv.path2id(from_rel)
1397
raise errors.BzrMoveFailedError(from_rel,to_dir,
1398
errors.NotVersionedError(path=str(from_rel)))
1400
from_entry = inv[from_id]
1401
from_parent_id = from_entry.parent_id
1402
to_rel = pathjoin(to_dir, from_tail)
1403
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1405
from_tail=from_tail,
1406
from_parent_id=from_parent_id,
1407
to_rel=to_rel, to_tail=from_tail,
1408
to_parent_id=to_dir_id)
1409
rename_entries.append(rename_entry)
1410
rename_tuples.append((from_rel, to_rel))
1412
# determine which move mode to use. checks also for movability
1413
rename_entries = self._determine_mv_mode(rename_entries, after)
1415
original_modified = self._inventory_is_modified
850
if to_dir_ie.kind not in ('directory', 'root_directory'):
851
raise BzrError("destination %r is not a directory" % to_abs)
853
to_idpath = inv.get_idpath(to_dir_id)
856
if not self.has_filename(f):
857
raise BzrError("%r does not exist in working tree" % f)
858
f_id = inv.path2id(f)
860
raise BzrError("%r is not versioned" % f)
861
name_tail = splitpath(f)[-1]
862
dest_path = pathjoin(to_name, name_tail)
863
if self.has_filename(dest_path):
864
raise BzrError("destination %r already exists" % dest_path)
865
if f_id in to_idpath:
866
raise BzrError("can't move %r to a subdirectory of itself" % f)
868
# OK, so there's a race here, it's possible that someone will
869
# create a file in this interval and then the rename might be
870
# left half-done. But we should have caught most problems.
871
orig_inv = deepcopy(self.inventory)
1418
self._inventory_is_modified = True
1419
self._move(rename_entries)
874
name_tail = splitpath(f)[-1]
875
dest_path = pathjoin(to_name, name_tail)
876
result.append((f, dest_path))
877
inv.rename(inv.path2id(f), to_dir_id, name_tail)
879
rename(self.abspath(f), self.abspath(dest_path))
881
raise BzrError("failed to rename %r to %r: %s" %
882
(f, dest_path, e[1]),
883
["rename rolled back"])
1421
885
# restore the inventory on error
1422
self._inventory_is_modified = original_modified
886
self._set_inventory(orig_inv)
1424
888
self._write_inventory(inv)
1425
return rename_tuples
1427
def _determine_mv_mode(self, rename_entries, after=False):
1428
"""Determines for each from-to pair if both inventory and working tree
1429
or only the inventory has to be changed.
1431
Also does basic plausability tests.
1433
inv = self.inventory
1435
for rename_entry in rename_entries:
1436
# store to local variables for easier reference
1437
from_rel = rename_entry.from_rel
1438
from_id = rename_entry.from_id
1439
to_rel = rename_entry.to_rel
1440
to_id = inv.path2id(to_rel)
1441
only_change_inv = False
1443
# check the inventory for source and destination
1445
raise errors.BzrMoveFailedError(from_rel,to_rel,
1446
errors.NotVersionedError(path=str(from_rel)))
1447
if to_id is not None:
1448
raise errors.BzrMoveFailedError(from_rel,to_rel,
1449
errors.AlreadyVersionedError(path=str(to_rel)))
1451
# try to determine the mode for rename (only change inv or change
1452
# inv and file system)
1454
if not self.has_filename(to_rel):
1455
raise errors.BzrMoveFailedError(from_id,to_rel,
1456
errors.NoSuchFile(path=str(to_rel),
1457
extra="New file has not been created yet"))
1458
only_change_inv = True
1459
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1460
only_change_inv = True
1461
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1462
only_change_inv = False
1463
elif (not self.case_sensitive
1464
and from_rel.lower() == to_rel.lower()
1465
and self.has_filename(from_rel)):
1466
only_change_inv = False
1468
# something is wrong, so lets determine what exactly
1469
if not self.has_filename(from_rel) and \
1470
not self.has_filename(to_rel):
1471
raise errors.BzrRenameFailedError(from_rel,to_rel,
1472
errors.PathsDoNotExist(paths=(str(from_rel),
1475
raise errors.RenameFailedFilesExist(from_rel, to_rel)
1476
rename_entry.only_change_inv = only_change_inv
1477
return rename_entries
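
    # Illustrative sketch (not part of bzrlib): the two rename modes decided
    # above.  'wt' is a write-locked WorkingTree; the paths are made up.
    @staticmethod
    def _example_move_modes(wt):
        # Normal mode: the file still sits at its old path, so both the
        # inventory and the filesystem are updated.
        wt.move(['old.txt'], 'subdir')
        # after=True: 'already-moved.txt' was moved into subdir/ by hand
        # (plain 'mv'), so only the inventory is updated to record it.
        wt.move(['already-moved.txt'], 'subdir', after=True)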
1479
def _move(self, rename_entries):
1480
"""Moves a list of files.
1482
Depending on the value of the flag 'only_change_inv', the
1483
file will be moved on the file system or not.
1485
inv = self.inventory
1488
for entry in rename_entries:
1490
self._move_entry(entry)
1492
self._rollback_move(moved)
1496
def _rollback_move(self, moved):
1497
"""Try to rollback a previous move in case of an filesystem error."""
1498
inv = self.inventory
1501
self._move_entry(WorkingTree._RenameEntry(
1502
entry.to_rel, entry.from_id,
1503
entry.to_tail, entry.to_parent_id, entry.from_rel,
1504
entry.from_tail, entry.from_parent_id,
1505
entry.only_change_inv))
1506
except errors.BzrMoveFailedError, e:
1507
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1508
" The working tree is in an inconsistent state."
1509
" Please consider doing a 'bzr revert'."
1510
" Error message is: %s" % e)
1512
def _move_entry(self, entry):
1513
inv = self.inventory
1514
from_rel_abs = self.abspath(entry.from_rel)
1515
to_rel_abs = self.abspath(entry.to_rel)
1516
if from_rel_abs == to_rel_abs:
1517
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1518
"Source and target are identical.")
1520
if not entry.only_change_inv:
1522
osutils.rename(from_rel_abs, to_rel_abs)
1524
raise errors.BzrMoveFailedError(entry.from_rel,
1526
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1528
@needs_tree_write_lock
1529
def rename_one(self, from_rel, to_rel, after=False):
892
def rename_one(self, from_rel, to_rel):
1530
893
"""Rename one file.
1532
895
This can change the directory or the filename or both.
1534
rename_one has several 'modes' to work. First, it can rename a physical
1535
file and change the file_id. That is the normal mode. Second, it can
1536
only change the file_id without touching any physical file. This is
1537
the new mode introduced in version 0.15.
1539
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1540
versioned but present in the working tree.
1542
rename_one uses the second mode if 'after == False' and 'from_rel' is
1543
versioned but no longer in the working tree, and 'to_rel' is not
1544
versioned but present in the working tree.
1546
rename_one uses the first mode if 'after == False' and 'from_rel' is
1547
versioned and present in the working tree, and 'to_rel' is not
1548
versioned and not present in the working tree.
1550
Everything else results in an error.
1552
897
inv = self.inventory
1555
# create rename entries and tuples
1556
from_tail = splitpath(from_rel)[-1]
1557
from_id = inv.path2id(from_rel)
1559
# if file is missing in the inventory maybe it's in the basis_tree
1560
basis_tree = self.branch.basis_tree()
1561
from_id = basis_tree.path2id(from_rel)
1563
raise errors.BzrRenameFailedError(from_rel,to_rel,
1564
errors.NotVersionedError(path=str(from_rel)))
1565
# put entry back in the inventory so we can rename it
1566
from_entry = basis_tree.inventory[from_id].copy()
1569
from_entry = inv[from_id]
1570
from_parent_id = from_entry.parent_id
898
if not self.has_filename(from_rel):
899
raise BzrError("can't rename: old working file %r does not exist" % from_rel)
900
if self.has_filename(to_rel):
901
raise BzrError("can't rename: new working file %r already exists" % to_rel)
903
file_id = inv.path2id(from_rel)
905
raise BzrError("can't rename: old name %r is not versioned" % from_rel)
908
from_parent = entry.parent_id
909
from_name = entry.name
911
if inv.path2id(to_rel):
912
raise BzrError("can't rename: new name %r is already versioned" % to_rel)
1571
914
to_dir, to_tail = os.path.split(to_rel)
1572
915
to_dir_id = inv.path2id(to_dir)
1573
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1575
from_tail=from_tail,
1576
from_parent_id=from_parent_id,
1577
to_rel=to_rel, to_tail=to_tail,
1578
to_parent_id=to_dir_id)
1579
rename_entries.append(rename_entry)
1581
# determine which move mode to use. checks also for movability
1582
rename_entries = self._determine_mv_mode(rename_entries, after)
1584
# check if the target changed directory and if the target directory is
1586
if to_dir_id is None:
1587
raise errors.BzrMoveFailedError(from_rel,to_rel,
1588
errors.NotVersionedError(path=str(to_dir)))
1590
# all checks done. now we can continue with our actual work
1591
mutter('rename_one:\n'
1596
' to_dir_id {%s}\n',
1597
from_id, from_rel, to_rel, to_dir, to_dir_id)
1599
self._move(rename_entries)
916
if to_dir_id == None and to_dir != '':
917
raise BzrError("can't determine destination directory id for %r" % to_dir)
919
mutter("rename_one:")
920
mutter(" file_id {%s}" % file_id)
921
mutter(" from_rel %r" % from_rel)
922
mutter(" to_rel %r" % to_rel)
923
mutter(" to_dir %r" % to_dir)
924
mutter(" to_dir_id {%s}" % to_dir_id)
926
inv.rename(file_id, to_dir_id, to_tail)
928
from_abs = self.abspath(from_rel)
929
to_abs = self.abspath(to_rel)
931
rename(from_abs, to_abs)
933
inv.rename(file_id, from_parent, from_name)
934
raise BzrError("failed to rename %r to %r: %s"
935
% (from_abs, to_abs, e[1]),
936
["rename rolled back"])
1600
937
self._write_inventory(inv)
1602
class _RenameEntry(object):
1603
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1604
to_rel, to_tail, to_parent_id, only_change_inv=False):
1605
self.from_rel = from_rel
1606
self.from_id = from_id
1607
self.from_tail = from_tail
1608
self.from_parent_id = from_parent_id
1609
self.to_rel = to_rel
1610
self.to_tail = to_tail
1611
self.to_parent_id = to_parent_id
1612
self.only_change_inv = only_change_inv
1614
939
@needs_read_lock
1615
940
def unknowns(self):
1616
941
"""Return all unknown files.
1775
1099
Cached in the Tree object after the first call.
1777
ignoreset = getattr(self, '_ignoreset', None)
1778
if ignoreset is not None:
1101
if hasattr(self, '_ignorelist'):
1102
return self._ignorelist
1781
ignore_globs = set()
1782
ignore_globs.update(ignores.get_runtime_ignores())
1783
ignore_globs.update(ignores.get_user_ignores())
1784
1105
if self.has_filename(bzrlib.IGNORE_FILENAME):
1785
1106
f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
1787
ignore_globs.update(ignores.parse_ignore_file(f))
1790
self._ignoreset = ignore_globs
1793
def _flush_ignore_list_cache(self):
1794
"""Resets the cached ignore list to force a cache rebuild."""
1795
self._ignoreset = None
1796
self._ignoreglobster = None
1107
l.extend([line.rstrip("\n\r").decode('utf-8')
1108
for line in f.readlines()])
1109
self._ignorelist = l
1110
self._ignore_regex = self._combine_ignore_rules(l)
1113
def _get_ignore_rules_as_regex(self):
1114
"""Return a regex of the ignore rules and a mapping dict.
1116
:return: (ignore rules compiled regex, dictionary mapping rule group
1117
indices to original rule.)
1119
if getattr(self, '_ignorelist', None) is None:
1120
self.get_ignore_list()
1121
return self._ignore_regex
1798
1123
def is_ignored(self, filename):
1799
1124
r"""Check whether the filename matches an ignore pattern.
1801
1126
Patterns containing '/' or '\' need to match the whole path;
1802
others match against only the last component. Patterns starting
1803
with '!' are ignore exceptions. Exceptions take precedence
1804
over regular patterns and cause the filename to not be ignored.
1127
others match against only the last component.
1806
1129
If the file is ignored, returns the pattern which caused it to
1807
1130
be ignored, otherwise None. So this can simply be used as a
1808
1131
boolean if desired."""
1809
if getattr(self, '_ignoreglobster', None) is None:
1810
self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
1811
return self._ignoreglobster.match(filename)
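
    # Illustrative sketch (not part of bzrlib): is_ignored() returns the
    # matching pattern (useful for reporting) or None.  It assumes '*.pyc'
    # is in the ignore list and '!important.pyc' is an exception pattern;
    # both patterns are made up for the example.
    @staticmethod
    def _example_is_ignored(wt):
        assert wt.is_ignored('module.pyc') == '*.pyc'
        assert wt.is_ignored('important.pyc') is None   # exception wins
        assert wt.is_ignored('module.py') is None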
1133
# TODO: Use '**' to match directories, and other extended
1134
# globbing stuff from cvs/rsync.
1136
# XXX: fnmatch is actually not quite what we want: it's only
1137
# approximately the same as real Unix fnmatch, and doesn't
1138
# treat dotfiles correctly and allows * to match /.
1139
# Eventually it should be replaced with something more
1142
rules = self._get_ignore_rules_as_regex()
1143
for regex, mapping in rules:
1144
match = regex.match(filename)
1145
if match is not None:
1146
# one or more of the groups in mapping will have a non-None group
1148
groups = match.groups()
1149
rules = [mapping[group] for group in
1150
mapping if groups[group] is not None]
1813
1154
def kind(self, file_id):
1814
1155
return file_kind(self.id2abspath(file_id))
1816
def stored_kind(self, file_id):
1817
"""See Tree.stored_kind"""
1818
return self.inventory[file_id].kind
1820
def _comparison_data(self, entry, path):
1821
abspath = self.abspath(path)
1823
stat_value = os.lstat(abspath)
1825
if getattr(e, 'errno', None) == errno.ENOENT:
1832
mode = stat_value.st_mode
1833
kind = osutils.file_kind_from_stat_mode(mode)
1834
if not supports_executable():
1835
executable = entry is not None and entry.executable
1837
executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
1838
return kind, executable, stat_value
1840
def _file_size(self, entry, stat_value):
1841
return stat_value.st_size
1843
1158
def last_revision(self):
1844
"""Return the last revision of the branch for this tree.
1846
This format tree does not support a separate marker for last-revision
1847
compared to the branch.
1849
See MutableTree.last_revision
1159
"""Return the last revision id of this working tree.
1161
In early branch formats this was == the branch last_revision,
1162
but that cannot be relied upon - for working tree operations,
1163
always use tree.last_revision().
1851
return self._last_revision()
1854
def _last_revision(self):
1855
"""helper for get_parent_ids."""
1856
return _mod_revision.ensure_null(self.branch.last_revision())
1165
return self.branch.last_revision()
1858
1167
def is_locked(self):
1859
1168
return self._control_files.is_locked()
1861
def _must_be_locked(self):
1862
if not self.is_locked():
1863
raise errors.ObjectNotLocked(self)
1865
1170
def lock_read(self):
1866
"""Lock the tree for reading.
1868
This also locks the branch, and can be unlocked via self.unlock().
1870
:return: A bzrlib.lock.LogicalLockResult.
1872
if not self.is_locked():
1874
self.branch.lock_read()
1876
self._control_files.lock_read()
1877
return LogicalLockResult(self.unlock)
1879
self.branch.unlock()
1882
def lock_tree_write(self):
1883
"""See MutableTree.lock_tree_write, and WorkingTree.unlock.
1885
:return: A bzrlib.lock.LogicalLockResult.
1887
if not self.is_locked():
1889
self.branch.lock_read()
1891
self._control_files.lock_write()
1892
return LogicalLockResult(self.unlock)
1171
"""See Branch.lock_read, and WorkingTree.unlock."""
1172
self.branch.lock_read()
1174
return self._control_files.lock_read()
1894
1176
self.branch.unlock()
1897
1179
def lock_write(self):
1898
"""See MutableTree.lock_write, and WorkingTree.unlock.
1900
:return: A bzrlib.lock.LogicalLockResult.
1902
if not self.is_locked():
1180
"""See Branch.lock_write, and WorkingTree.unlock."""
1904
1181
self.branch.lock_write()
1906
self._control_files.lock_write()
1907
return LogicalLockResult(self.unlock)
1183
return self._control_files.lock_write()
1909
1185
self.branch.unlock()

    def _change_last_revision(self, new_revision):
        """Template method part of set_last_revision to perform the change.

        This is used to allow WorkingTree3 instances to not affect branch
        when their last revision is set.
        """
        if _mod_revision.is_null(new_revision):
            self.branch.set_revision_history([])
            return False
        try:
            self.branch.generate_revision_history(new_revision)
        except errors.NoSuchRevision:
            # not present in the repo - dont try to set it deeper than the tip
            self.branch.set_revision_history([new_revision])
        return True

    def _write_basis_inventory(self, xml):
        """Write the basis inventory XML to the basis-inventory file"""
        path = self._basis_inventory_name()
        sio = StringIO(xml)
        self._transport.put_file(path, sio,
            mode=self.bzrdir._get_file_mode())

    def _create_basis_xml_from_inventory(self, revision_id, inventory):
        """Create the text that will be saved in basis-inventory"""
        inventory.revision_id = revision_id
        return xml7.serializer_v7.write_inventory_to_string(inventory)

    def _cache_basis_inventory(self, new_revision):
        """Cache new_revision as the basis inventory."""
        # TODO: this should allow the ready-to-use inventory to be passed in,
        # as commit already has that ready-to-use [while the format is the
        # same, that is].
        try:
            # this double handles the inventory - unpack and repack -
            # but is easier to understand. We can/should put a conditional
            # in here based on whether the inventory is in the latest format
            # - perhaps we should repack all inventories on a repository
            # upgrade?
            # the fast path is to copy the raw xml from the repository. If the
            # xml contains 'revision_id="', then we assume the right
            # revision_id is set. We must check for this full string, because a
            # root node id can legitimately look like 'revision_id' but cannot
            # contain a '"'.
            xml = self.branch.repository._get_inventory_xml(new_revision)
            firstline = xml.split('\n', 1)[0]
            if (not 'revision_id="' in firstline or
                'format="7"' not in firstline):
                inv = self.branch.repository._serializer.read_inventory_from_string(
                    xml, new_revision)
                xml = self._create_basis_xml_from_inventory(new_revision, inv)
            self._write_basis_inventory(xml)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):
            pass

    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._transport.get_bytes(path)

    @needs_read_lock
    def read_working_inventory(self):
        """Read the working inventory.

        :raises errors.InventoryModified: read_working_inventory will fail
            when the current in memory inventory has been modified.
        """
        # conceptually this should be an implementation detail of the tree.
        # XXX: Deprecate this.
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        if self._inventory_is_modified:
            raise errors.InventoryModified(self)
        f = self._transport.get('inventory')
        try:
            result = self._deserialize(f)
        finally:
            f.close()
        self._set_inventory(result, dirty=False)
        return result

    @needs_tree_write_lock
    def remove(self, files, verbose=False, to_file=None, keep_files=True,
        force=False):
        """Remove nominated files from the working inventory.

        :files: File paths relative to the basedir.
        :keep_files: If true, the files will also be kept.
        :force: Delete files and directories, even if they are changed and
            even if the directories are not empty.
        """
        if isinstance(files, basestring):
            files = [files]

        inv_delta = []

        all_files = set() # specified and nested files
        unknown_nested_files=set()
        if to_file is None:
            to_file = sys.stdout

        files_to_backup = []

        def recurse_directory_to_add_files(directory):
            # Recurse directory and add all files
            # so we can check if they have changed.
            for parent_info, file_infos in self.walkdirs(directory):
                for relpath, basename, kind, lstat, fileid, kind in file_infos:
                    # Is it versioned or ignored?
                    if self.path2id(relpath):
                        # Add nested content for deletion.
                        all_files.add(relpath)
                    else:
                        # Files which are not versioned
                        # should be treated as unknown.
                        files_to_backup.append(relpath)

        for filename in files:
            # Get file name into canonical form.
            abspath = self.abspath(filename)
            filename = self.relpath(abspath)
            if len(filename) > 0:
                all_files.add(filename)
                recurse_directory_to_add_files(filename)

        files = list(all_files)

        if len(files) == 0:
            return # nothing to do

        # Sort needed to first handle directory content before the directory
        files.sort(reverse=True)

        # Bail out if we are going to delete files we shouldn't
        if not keep_files and not force:
            for (file_id, path, content_change, versioned, parent_id, name,
                 kind, executable) in self.iter_changes(self.basis_tree(),
                     include_unchanged=True, require_versioned=False,
                     want_unversioned=True, specific_files=files):
                if versioned[0] == False:
                    # The record is unknown or newly added
                    files_to_backup.append(path[1])
                elif (content_change and (kind[1] is not None) and
                        osutils.is_inside_any(files, path[1])):
                    # Versioned and changed, but not deleted, and still
                    # in one of the dirs to be deleted.
                    files_to_backup.append(path[1])

        def backup(file_to_backup):
            backup_name = self.bzrdir._available_backup_name(file_to_backup)
            osutils.rename(abs_path, self.abspath(backup_name))
            return "removed %s (but kept a copy: %s)" % (file_to_backup,
                                                         backup_name)

        # Build inv_delta and delete files where applicable,
        # do this before any modifications to inventory.
        for f in files:
            fid = self.path2id(f)
            message = None
            if not fid:
                message = "%s is not versioned." % (f,)
            else:
                if verbose:
                    # having removed it, it must be either ignored or unknown
                    if self.is_ignored(f):
                        new_status = 'I'
                    else:
                        new_status = '?'
                    # XXX: Really should be a more abstract reporter interface
                    kind_ch = osutils.kind_marker(self.kind(fid))
                    to_file.write(new_status + ' ' + f + kind_ch + '\n')
                inv_delta.append((f, None, fid, None))
                message = "removed %s" % (f,)

            if not keep_files:
                abs_path = self.abspath(f)
                if osutils.lexists(abs_path):
                    if (osutils.isdir(abs_path) and
                        len(os.listdir(abs_path)) > 0):
                        if force:
                            osutils.rmtree(abs_path)
                            message = "deleted %s" % (f,)
                        else:
                            message = backup(f)
                    else:
                        if f in files_to_backup:
                            message = backup(f)
                        else:
                            osutils.delete_any(abs_path)
                            message = "deleted %s" % (f,)
                elif message is not None:
                    # Only care if we haven't done anything yet.
                    message = "%s does not exist." % (f,)

            # Print only one message (if any) per file.
            if message is not None:
                note(message)
        self.apply_inventory_delta(inv_delta)
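
    # Illustrative usage sketch (not part of this class): unversioning a file
    # while keeping the working copy on disk, assuming `tree` is a
    # write-locked WorkingTree:
    #
    #   tree.remove(['doc/old.txt'], keep_files=True)
    #
    # or deleting it from disk as well, backing up modified or unknown files
    # rather than losing them:
    #
    #   tree.remove(['doc/old.txt'], keep_files=False, force=False)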

    @needs_tree_write_lock
    def revert(self, filenames=None, old_tree=None, backups=True,
               pb=None, report_changes=False):
        from bzrlib.conflicts import resolve
        if filenames == []:
            filenames = None
            symbol_versioning.warn('Using [] to revert all files is deprecated'
                ' as of bzr 0.91. Please use None (the default) instead.',
                DeprecationWarning, stacklevel=2)
        if old_tree is None:
            basis_tree = self.basis_tree()
            basis_tree.lock_read()
            old_tree = basis_tree
        else:
            basis_tree = None
        try:
            conflicts = transform.revert(self, old_tree, filenames, backups, pb,
                                         report_changes)
            if filenames is None and len(self.get_parent_ids()) > 1:
                parent_trees = []
                last_revision = self.last_revision()
                if last_revision != _mod_revision.NULL_REVISION:
                    if basis_tree is None:
                        basis_tree = self.basis_tree()
                        basis_tree.lock_read()
                    parent_trees.append((last_revision, basis_tree))
                self.set_parent_trees(parent_trees)
                resolve(self)
            else:
                resolve(self, filenames, ignore_misses=True, recursive=True)
        finally:
            if basis_tree is not None:
                basis_tree.unlock()
        return conflicts
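
    # Illustrative usage sketch (not part of this class): reverting a single
    # file against the basis tree, assuming `tree` is a WorkingTree:
    #
    #   conflicts = tree.revert(['foo.c'])
    #
    # Passing filenames=None (the default) reverts the whole tree and, when a
    # merge is pending, also resets the pending-merge parents.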

    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree can supply revision_trees for the basis revision only
        because there is only one cached inventory in the bzr directory.
        """
        if revision_id == self.last_revision():
            try:
                xml = self.read_basis_inventory()
            except errors.NoSuchFile:
                pass
            else:
                try:
                    inv = xml7.serializer_v7.read_inventory_from_string(xml)
                    # dont use the repository revision_tree api because we want
                    # to supply the inventory.
                    if inv.revision_id == revision_id:
                        return revisiontree.RevisionTree(self.branch.repository,
                            inv, revision_id)
                except errors.BadInventoryFormat:
                    pass
        # raise if there was no inventory, or if we read the wrong inventory.
        raise errors.NoSuchRevisionInTree(self, revision_id)

    # XXX: This method should be deprecated in favour of taking in a proper
    # new Inventory object.
    @needs_tree_write_lock
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import (Inventory,
                                      InventoryDirectory,
                                      InventoryFile,
                                      InventoryLink)
        inv = Inventory(self.get_root_id())
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            if name == "":
                continue
            if kind == 'directory':
                inv.add(InventoryDirectory(file_id, name, parent))
            elif kind == 'file':
                inv.add(InventoryFile(file_id, name, parent))
            elif kind == 'symlink':
                inv.add(InventoryLink(file_id, name, parent))
            else:
                raise errors.BzrError("unknown kind %r" % kind)
        self._write_inventory(inv)

    @needs_tree_write_lock
    def set_root_id(self, file_id):
        """Set the root id for this tree."""
        if file_id is None:
            raise ValueError(
                'WorkingTree.set_root_id with fileid=None')
        file_id = osutils.safe_file_id(file_id)
        self._set_root_id(file_id)

    def _set_root_id(self, file_id):
        """Set the root id for this tree, in a format specific manner.

        :param file_id: The file id to assign to the root. It must not be
            present in the current inventory or an error will occur. It must
            not be None, but rather a valid file id.
        """
        inv = self._inventory
        orig_root_id = inv.root.file_id
        # TODO: it might be nice to exit early if there was nothing
        # to do, saving us from triggering a sync on unlock.
        self._inventory_is_modified = True
        # we preserve the root inventory entry object, but
        # unlink it from the byid index
        del inv._byid[inv.root.file_id]
        inv.root.file_id = file_id
        # and link it into the index with the new changed id.
        inv._byid[inv.root.file_id] = inv.root
        # and finally update all children to reference the new id.
        # XXX: this should be safe to just look at the root.children
        # list, not the WHOLE INVENTORY.
        for fid in inv:
            entry = inv[fid]
            if entry.parent_id == orig_root_id:
                entry.parent_id = inv.root.file_id

    def unlock(self):
        """See Branch.unlock.

        WorkingTree locking just uses the Branch locking facilities.
        This is current because all working trees have an embedded branch
        within them. If, in the future, we were to make branch data shareable
        between multiple working trees, i.e. via shared storage, then we
        would probably want to lock both the local tree, and the branch.
        """
        raise NotImplementedError(self.unlock)

    _marker = object()

    def update(self, change_reporter=None, possible_transports=None,
               revision=None, old_tip=_marker, show_base=False):
        """Update a working tree along its branch.

        This will update the branch if it's bound too, which means we have
        multiple trees involved:

        - The new basis tree of the master.
        - The old basis tree of the branch.
        - The old basis tree of the working tree.
        - The current working tree state.

        Pathologically, all three may be different, and non-ancestors of each
        other. Conceptually we want to:

        - Preserve the wt.basis->wt.state changes
        - Transform the wt.basis to the new master basis.
        - Apply a merge of the old branch basis to get any 'local' changes from
          it into the tree.
        - Restore the wt.basis->wt.state changes.

        There isn't a single operation at the moment to do that, so we:

        - Merge current state -> basis tree of the master w.r.t. the old tree
          basis.
        - Do a 'normal' merge of the old branch basis if it is relevant.

        :param revision: The target revision to update to. Must be in the
            revision history.
        :param old_tip: If branch.update() has already been run, the value it
            returned (old tip of the branch or None). _marker is used
            otherwise.
        """
        if self.branch.get_bound_location() is not None:
            self.lock_write()
            update_branch = (old_tip is self._marker)
        else:
            self.lock_tree_write()
            update_branch = False
        try:
            if update_branch:
                old_tip = self.branch.update(possible_transports)
            else:
                if old_tip is self._marker:
                    old_tip = None
            return self._update_tree(old_tip, change_reporter, revision, show_base)
        finally:
            self.unlock()
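
    # Illustrative usage sketch (not part of this class): bringing a checkout
    # up to date with its (possibly bound) branch, assuming `tree` is a
    # WorkingTree:
    #
    #   nb_conflicts = tree.update()
    #   if nb_conflicts:
    #       print 'update left %d conflicts' % nb_conflicts
    #
    # The return value is the conflict count from the merges described in the
    # docstring above.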

    @needs_tree_write_lock
    def _update_tree(self, old_tip=None, change_reporter=None, revision=None,
                     show_base=False):
        """Update a tree to the master branch.

        :param old_tip: if supplied, the previous tip revision of the branch,
            before it was changed to the master branch's tip.
        """
        # here if old_tip is not None, it is the old tip of the branch before
        # it was updated from the master branch. This should become a pending
        # merge in the working tree to preserve the user's existing work. we
        # cant set that until we update the working tree's last revision to be
        # one from the new branch, because it will just get absorbed by the
        # parent de-duplication logic.
        #
        # We MUST save it even if an error occurs, because otherwise the user's
        # local work is unreferenced and will appear to have been lost.
        #
        nb_conflicts = 0
        try:
            last_rev = self.get_parent_ids()[0]
        except IndexError:
            last_rev = _mod_revision.NULL_REVISION
        if revision is None:
            revision = self.branch.last_revision()

        old_tip = old_tip or _mod_revision.NULL_REVISION

        if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
            # the branch we are bound to was updated
            # merge those changes in first
            base_tree = self.basis_tree()
            other_tree = self.branch.repository.revision_tree(old_tip)
            nb_conflicts = merge.merge_inner(self.branch, other_tree,
                                             base_tree, this_tree=self,
                                             change_reporter=change_reporter,
                                             show_base=show_base)
            if nb_conflicts:
                self.add_parent_tree((old_tip, other_tree))
                trace.note('Rerun update after fixing the conflicts.')
                return nb_conflicts

        if last_rev != _mod_revision.ensure_null(revision):
            # the working tree is up to date with the branch
            # we can merge the specified revision from master
            to_tree = self.branch.repository.revision_tree(revision)
            to_root_id = to_tree.get_root_id()

            basis = self.basis_tree()
            basis.lock_read()
            try:
                if (basis.inventory.root is None
                    or basis.inventory.root.file_id != to_root_id):
                    self.set_root_id(to_root_id)
                    self.flush()
            finally:
                basis.unlock()

            # determine the branch point
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                last_rev)
            base_tree = self.branch.repository.revision_tree(base_rev_id)

            nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
                                             this_tree=self,
                                             change_reporter=change_reporter,
                                             show_base=show_base)
            self.set_last_revision(revision)
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to to set the basis:
            parent_trees = [(revision, to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in ram already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if not _mod_revision.is_null(old_tip):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
        return nb_conflicts

    def _write_hashcache_if_dirty(self):
        """Write out the hashcache if it is dirty."""
        if self._hashcache.needs_write:
            try:
                self._hashcache.write()
            except OSError, e:
                if e.errno not in (errno.EPERM, errno.EACCES):
                    raise
                # TODO: jam 20061219 Should this be a warning? A single line
                #       warning might be sufficient to let the user know what
                #       is going on.
                mutter('Could not write hashcache for %s\nError: %s',
                       self._hashcache.cache_file_name(), e)

    @needs_tree_write_lock
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        self._set_inventory(inv, dirty=True)
        self.flush()

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)
1460
@needs_read_lock
2429
1461
def conflicts(self):
2430
conflicts = _mod_conflicts.ConflictList()
1462
conflicts = ConflictList()
2431
1463
for conflicted in self._iter_conflicts():
2446
1478
if text == False:
2448
1480
ctype = {True: 'text conflict', False: 'contents conflict'}[text]
2449
conflicts.append(_mod_conflicts.Conflict.factory(ctype,
1481
conflicts.append(Conflict.factory(ctype, path=conflicted,
2451
1482
file_id=self.path2id(conflicted)))
2452
1483
return conflicts
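
    # Illustrative usage sketch (not part of this class): listing current
    # conflicts under a read lock, assuming `tree` is a WorkingTree:
    #
    #   tree.lock_read()
    #   try:
    #       for conflict in tree.conflicts():
    #           print conflict.typestring, conflict.path
    #   finally:
    #       tree.unlock()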

    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, (lstat), file1_id,
                   file1_kind), ... ])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
        disk_top = self.abspath(prefix)
        if disk_top.endswith('/'):
            disk_top = disk_top[:-1]
        top_strip_len = len(disk_top) + 1
        inventory_iterator = self._walkdirs(prefix)
        disk_iterator = osutils.walkdirs(disk_top, prefix)
        try:
            current_disk = disk_iterator.next()
            disk_finished = False
        except OSError, e:
            if not (e.errno == errno.ENOENT or
                (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
                raise
            current_disk = None
            disk_finished = True
        try:
            current_inv = inventory_iterator.next()
            inv_finished = False
        except StopIteration:
            current_inv = None
            inv_finished = True
        while not inv_finished or not disk_finished:
            if current_disk:
                ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
                    cur_disk_dir_content) = current_disk
            else:
                ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
                    cur_disk_dir_content) = ((None, None), None)
            if not disk_finished:
                # strip out .bzr dirs
                if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
                    len(cur_disk_dir_content) > 0):
                    # osutils.walkdirs can be made nicer -
                    # yield the path-from-prefix rather than the pathjoined
                    # value.
                    bzrdir_loc = bisect_left(cur_disk_dir_content,
                        ('.bzr', '.bzr'))
                    if (bzrdir_loc < len(cur_disk_dir_content)
                        and self.bzrdir.is_control_filename(
                            cur_disk_dir_content[bzrdir_loc][0])):
                        # we dont yield the contents of, or, .bzr itself.
                        del cur_disk_dir_content[bzrdir_loc]
            if inv_finished:
                # everything is unknown
                direction = 1
            elif disk_finished:
                # everything is missing
                direction = -1
            else:
                direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
            if direction > 0:
                # disk is before inventory - unknown
                dirblock = [(relpath, basename, kind, stat, None, None) for
                    relpath, basename, kind, stat, top_path in
                    cur_disk_dir_content]
                yield (cur_disk_dir_relpath, None), dirblock
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
            elif direction < 0:
                # inventory is before disk - missing.
                dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
                    for relpath, basename, dkind, stat, fileid, kind in
                    current_inv[1]]
                yield (current_inv[0][0], current_inv[0][1]), dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
            else:
                # versioned present directory
                # merge the inventory and disk data together
                dirblock = []
                for relpath, subiterator in itertools.groupby(sorted(
                    current_inv[1] + cur_disk_dir_content,
                    key=operator.itemgetter(0)), operator.itemgetter(1)):
                    path_elements = list(subiterator)
                    if len(path_elements) == 2:
                        inv_row, disk_row = path_elements
                        # versioned, present file
                        dirblock.append((inv_row[0],
                            inv_row[1], disk_row[2],
                            disk_row[3], inv_row[4],
                            inv_row[5]))
                    elif len(path_elements[0]) == 5:
                        # unknown disk file
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], path_elements[0][2],
                            path_elements[0][3], None, None))
                    elif len(path_elements[0]) == 6:
                        # versioned, absent file.
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], 'unknown', None,
                            path_elements[0][4], path_elements[0][5]))
                    else:
                        raise NotImplementedError('unreachable code')
                yield current_inv[0], dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
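
    # Illustrative usage sketch (not part of this class): walking the tree and
    # separating unknown from versioned entries, assuming `tree` is a
    # read-locked WorkingTree:
    #
    #   for (dir_relpath, dir_file_id), dirblock in tree.walkdirs():
    #       for relpath, basename, kind, lstat, file_id, versioned_kind in dirblock:
    #           if file_id is None:
    #               print 'unknown:', relpath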

    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :prefix: is used as the directory to start with.
        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, None, file1_id,
                   file1_kind), ... ])
        """
        _directory = 'directory'
        # get the root in the inventory
        inv = self.inventory
        top_id = inv.path2id(prefix)
        if top_id is None:
            pending = []
        else:
            pending = [(prefix, '', _directory, None, top_id, None)]
        while pending:
            dirblock = []
            currentdir = pending.pop()
            # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
            top_id = currentdir[4]
            if currentdir[0]:
                relroot = currentdir[0] + '/'
            else:
                relroot = ""
            # FIXME: stash the node in pending
            entry = inv[top_id]
            if entry.kind == 'directory':
                for name, child in entry.sorted_children():
                    dirblock.append((relroot + name, name, child.kind, None,
                        child.file_id, child.kind
                        ))
            yield (currentdir[0], entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append(dir)

    @needs_tree_write_lock
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts. The corresponding .THIS .BASE and
        .OTHER files are deleted, as per 'resolve'.

        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
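
    # Illustrative usage sketch (not part of this class): auto-resolving text
    # conflicts after a merge, assuming `tree` is a write-locked WorkingTree:
    #
    #   un_resolved, resolved = tree.auto_resolve()
    #   print '%d resolved, %d remain' % (len(resolved), len(un_resolved))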

    def _check(self, references):
        """Check the tree for consistency.

        :param references: A dict with keys matching the items returned by
            self._get_check_refs(), and values from looking those keys up in
            the repository.
        """
        tree_basis = self.basis_tree()
        tree_basis.lock_read()
        try:
            repo_basis = references[('trees', self.last_revision())]
            if len(list(repo_basis.iter_changes(tree_basis))) > 0:
                raise errors.BzrCheckError(
                    "Mismatched basis inventory content.")
            self._validate()
        finally:
            tree_basis.unlock()

    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite, to give it a chance to detect
        corruption after actions have occurred. The default implementation is
        just a no-op.

        :return: None. An exception should be raised if there is an error.
        """
        return

    def _get_rules_searcher(self, default_searcher):
        """See Tree._get_rules_searcher."""
        if self._rules_searcher is None:
            self._rules_searcher = super(WorkingTree,
                self)._get_rules_searcher(default_searcher)
        return self._rules_searcher

    def get_shelf_manager(self):
        """Return the ShelfManager for this WorkingTree."""
        from bzrlib.shelf import ShelfManager
        return ShelfManager(self, self._transport)
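
    # Illustrative usage sketch (not part of this class; the precise
    # ShelfManager API lives in bzrlib.shelf): listing shelved changes,
    # assuming `tree` is a WorkingTree:
    #
    #   manager = tree.get_shelf_manager()
    #   for shelf_id in manager.active_shelves():
    #       print 'shelf', shelf_id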


class WorkingTree2(WorkingTree):
    """This is the Format 2 working tree.

    This was the first weave based working tree.
     - uses os locks for locking.
     - uses the branch last-revision.
    """

    def __init__(self, *args, **kwargs):
        super(WorkingTree2, self).__init__(*args, **kwargs)
        # WorkingTree2 has more of a constraint that self._inventory must
        # exist. Because this is an older format, we don't mind the overhead
        # caused by the extra computation here.
        #
        # Newer WorkingTree's should only have self._inventory set when they
        # have a read lock.
        if self._inventory is None:
            self.read_working_inventory()

    def _get_check_refs(self):
        """Return the references needed to perform a check of this tree."""
        return [('trees', self.last_revision())]

    def lock_tree_write(self):
        """See WorkingTree.lock_tree_write().

        In Format2 WorkingTrees we have a single lock for the branch and tree
        so lock_tree_write() degrades to lock_write().

        :return: An object with an unlock method which will release the lock
            obtained.
        """
        self.branch.lock_write()
        try:
            self._control_files.lock_write()
            return LogicalLockResult(self.unlock)
        except:
            self.branch.unlock()
            raise

    def unlock(self):
        # do non-implementation specific cleanup
        self._cleanup()

        # we share control files:
        if self._control_files._lock_count == 3:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()

        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()


class WorkingTree3(WorkingTree):
    """This is the Format 3 working tree.