# At the moment they may alias the inventory and have old copies of it in
# memory.  (Now done? -- mbp 20060309)

from cStringIO import StringIO
import os
import sys

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bisect import bisect_left
import collections
from copy import deepcopy
import errno
import stat

import bzrlib
from bzrlib import (
    branch,
    bzrdir,
    conflicts as _mod_conflicts,
    errors,
    generate_ids,
    merge,
    osutils,
    revision as _mod_revision,
    revisiontree,
    textui,
    transform,
    xml5,
    xml7,
    )
from bzrlib.transport import get_transport
import bzrlib.ui
from bzrlib.workingtree_4 import WorkingTreeFormat4
""")

from bzrlib import symbol_versioning
from bzrlib.atomicfile import AtomicFile
from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inventory import InventoryEntry, Inventory, ROOT_ID, TreeReference
from bzrlib.lockable_files import LockableFiles, TransportLock
from bzrlib.lockdir import LockDir
import bzrlib.mutabletree
from bzrlib.mutabletree import needs_tree_write_lock
from bzrlib.osutils import (
    compact_date,
    file_kind,
    isdir,
    normpath,
    pathjoin,
    rand_bytes,
    splitpath,
    supports_executable,
    )
from bzrlib.trace import mutter, note
from bzrlib.transport.local import LocalTransport
from bzrlib.progress import DummyProgress, ProgressPhase
from bzrlib.revision import NULL_REVISION, CURRENT_REVISION
from bzrlib.rio import RioReader, rio_file, Stanza
from bzrlib.symbol_versioning import (deprecated_passed,
        deprecated_method,
        deprecated_function,
        DEPRECATED_PARAMETER,
        zero_eight,
        zero_eleven,
        zero_thirteen,
        )


MERGE_MODIFIED_HEADER_1 = "BZR merge-modified list format 1"
CONFLICT_HEADER_1 = "BZR conflict list format 1"

ERROR_PATH_NOT_FOUND = 3    # WindowsError errno code, equivalent to ENOENT

@deprecated_function(zero_thirteen)
def gen_file_id(name):
    """Return new file id for the basename 'name'.

    Use bzrlib.generate_ids.gen_file_id() instead
    """
    return generate_ids.gen_file_id(name)


@deprecated_function(zero_thirteen)
def gen_root_id():
    """Return a new tree-root file id.

    This has been deprecated in favor of bzrlib.generate_ids.gen_root_id()
    """
    return generate_ids.gen_root_id()

class TreeEntry(object):
    """An entry that implements the minimum interface used by commands.

    This needs further inspection, it may be better to have
    InventoryEntries without ids - though that seems wrong. For now,
    """
    __contains__ = has_id

    def get_file_size(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        return os.path.getsize(self.id2abspath(file_id))

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        file_id = osutils.safe_file_id(file_id)
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        file_id = osutils.safe_file_id(file_id)
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            file_id = osutils.safe_file_id(file_id)
            return self._inventory[file_id].executable
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                file_id = osutils.safe_file_id(file_id)
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add.

        This adds the files to the inventory, so that they will be
        recorded by the next commit.

        :param files: List of paths to add, relative to the base of the tree.
        :param ids: If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.
        """
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.inventory
        for f, file_id, kind in zip(files, ids, kinds):
            assert kind is not None
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                file_id = osutils.safe_file_id(file_id)
                inv.add_path(f, kind=kind, file_id=file_id)
            self._inventory_is_modified = True

    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    @needs_tree_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it's not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available,
        using this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent
            list. If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
        self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
    @deprecated_method(zero_eleven)
    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.

        As of 0.11 this is deprecated. Please see WorkingTree.get_parent_ids()
        instead - which is available on all tree objects.
        """
        return self.get_parent_ids()[1:]
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._control_files.put_bytes('pending-merges', '\n'.join(merges))

    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        revision_ids = [osutils.safe_revision_id(r) for r in revision_ids]
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)
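
    # A rough usage sketch (hypothetical tree; assumes it was opened with
    # WorkingTree.open): replacing the parent list so that only the current
    # basis remains, discarding pending merges.
    #
    #   tree = WorkingTree.open('.')
    #   tree.lock_tree_write()
    #   try:
    #       tree.set_parent_ids(tree.get_parent_ids()[:1])
    #   finally:
    #       tree.unlock()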
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [osutils.safe_revision_id(rev) for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                                        leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)
    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._control_files.put(filename, my_file)
    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            merger = Merger(self.branch, this_tree=self, pb=pb)
            merger.pp = ProgressPhase("Merge phase", 5, pb)
            merger.pp.next_phase()
            # check that there are no uncommitted changes blocking the merge
            merger.check_basis(check_clean=True, require_commits=False)
            if to_revision is None:
                to_revision = _mod_revision.ensure_null(branch.last_revision())
            else:
                to_revision = osutils.safe_revision_id(to_revision)
            merger.other_rev_id = to_revision
            if _mod_revision.is_null(merger.other_rev_id):
                raise errors.NoCommits(branch)
            self.branch.fetch(branch, last_revision=merger.other_rev_id)
            merger.other_basis = merger.other_rev_id
            merger.other_tree = self.branch.repository.revision_tree(
                merger.other_rev_id)
            merger.other_branch = branch
            merger.pp.next_phase()
            merger.find_base()
            if merger.base_rev_id == merger.other_rev_id:
                raise errors.PointlessMerge
            merger.backup_files = False
            merger.merge_type = Merge3Merger
            merger.set_interesting_files(None)
            merger.show_base = False
            merger.reprocess = False
            conflicts = merger.do_merge()
            merger.set_pending()
        finally:
            pb.finished()
        return conflicts
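
    # A rough usage sketch (hypothetical paths): merging the tip of another
    # branch into this working tree and inspecting the returned conflict
    # count. Branch here is bzrlib.branch.Branch.
    #
    #   other = Branch.open('../other-branch')
    #   conflicts = tree.merge_from_branch(other)
    #   if conflicts:
    #       print 'merge produced conflicts; resolve before committing'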
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._control_files.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        merge_hashes = {}
        try:
            if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                raise errors.MergeModifiedFormatError()
        except StopIteration:
            raise errors.MergeModifiedFormatError()
        for s in RioReader(hashfile):
            # RioReader reads in Unicode, so convert file_ids back to utf8
            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
            if file_id not in self.inventory:
                continue
            text_hash = s.get("hash")
            if text_hash == self.get_file_sha1(file_id):
                merge_hashes[file_id] = text_hash
        return merge_hashes
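
    # For reference, the 'merge-hashes' control file read above is a rio
    # stanza file. A made-up example of its shape:
    #
    #   BZR merge-modified list format 1
    #   file_id: foo-20070101010101-abcdef-1
    #   hash: 0123456789abcdef0123456789abcdef01234567
    #
    # (the id and hash above are illustrative only.)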
    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id
    def get_symlink_target(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        return os.readlink(self.id2abspath(file_id))
    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                                          'Trees have the same root')
        try:
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
        try:
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        finally:
            other_tree.unlock()
        other_tree.bzrdir.retire_bzrdir()
    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        def mkdirs(path):
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()
            return transport

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
        if format is None:
            format = bzrdir.format_registry.make_bzrdir('dirstate-with-subtree')
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
        try:
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
            assert repo.supports_rich_root()
        else:
            if not repo.supports_rich_root():
                raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
        return wt
    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)

    def flush(self):
        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._control_files.put('inventory', sio)
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))
    def list_files(self, include_root=False):
        """Recursively list all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.

        Skips the control directory.
        """
        # list_files is an iterator, so @needs_read_lock doesn't work properly
        # with it. So callers should be careful to always read_lock the tree.
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)

        inv = self.inventory
        if include_root is True:
            yield ('', 'V', 'directory', inv.root.file_id, inv.root)
        # Convert these into local objects to save lookup times
        pathjoin = osutils.pathjoin
        file_kind = self._kind

        # transport.base ends in a slash, we want the piece
        # between the last two slashes
        transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]

        fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}

        # directory file_id, relative path, absolute path, reverse sorted children
        children = os.listdir(self.basedir)
        children.sort()
        # jam 20060527 The kernel sized tree seems equivalent whether we
        # use a deque and popleft to keep them sorted, or if we use a plain
        # list and just reverse() them.
        children = collections.deque(children)
        stack = [(inv.root.file_id, u'', self.basedir, children)]
        while stack:
            from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]

            while children:
                f = children.popleft()
                ## TODO: If we find a subdirectory with its own .bzr
                ## directory, then that is a separate tree and we
                ## should exclude it.

                # the bzrdir for this tree
                if transport_base_dir == f:
                    continue

                # we know that from_dir_relpath and from_dir_abspath never end in a slash
                # and 'f' doesn't begin with one, we can do a string op, rather
                # than the checks of pathjoin(), all relative paths will have an extra slash
                # at the beginning
                fp = from_dir_relpath + '/' + f

                # absolute path
                fap = from_dir_abspath + '/' + f

                f_ie = inv.get_child(from_dir_id, f)
                if f_ie:
                    c = 'V'
                elif self.is_ignored(fp[1:]):
                    c = 'I'
                else:
                    # we may not have found this file, because of a unicode issue
                    f_norm, can_access = osutils.normalized_filename(f)
                    if f == f_norm or not can_access:
                        # No change, so treat this file normally
                        c = '?'
                    else:
                        # this file can be accessed by a normalized path
                        # check again if it is versioned
                        # these lines are repeated here for performance
                        f = f_norm
                        fp = from_dir_relpath + '/' + f
                        fap = from_dir_abspath + '/' + f
                        f_ie = inv.get_child(from_dir_id, f)
                        if f_ie:
                            c = 'V'
                        elif self.is_ignored(fp[1:]):
                            c = 'I'
                        else:
                            c = '?'

                fk = file_kind(fap)

                if f_ie:
                    if f_ie.kind != fk:
                        raise errors.BzrCheckError(
                            "file %r entered as kind %r id %r, now of kind %r"
                            % (fap, f_ie.kind, f_ie.file_id, fk))

                # make a last minute entry
                if f_ie:
                    yield fp[1:], c, fk, f_ie.file_id, f_ie
                else:
                    try:
                        yield fp[1:], c, fk, None, fk_entries[fk]()
                    except KeyError:
                        yield fp[1:], c, fk, None, TreeEntry()

                if fk != 'directory':
                    continue

                if f_ie is None:
                    # don't descend unversioned directories
                    continue

                # But do this child first
                new_children = os.listdir(fap)
                new_children.sort()
                new_children = collections.deque(new_children)
                stack.append((f_ie.file_id, fp, fap, new_children))
                # Break out of inner loop,
                # so that we start outer loop with child
                break
            else:
                # if we finished all children, pop it off the stack
                stack.pop()
@needs_tree_write_lock
1110
def move(self, from_paths, to_dir=None, after=False, **kwargs):
712
# don't descend unversioned directories
715
for ff in descend(fp, f_ie.file_id, fap):
718
for f in descend(u'', inv.root.file_id, self.basedir):
722
def move(self, from_paths, to_name):
1111
723
"""Rename files.
1113
to_dir must exist in the inventory.
725
to_name must exist in the inventory.
1115
If to_dir exists and is a directory, the files are moved into
727
If to_name exists and is a directory, the files are moved into
1116
728
it, keeping their old names.
1118
Note that to_dir is only the last component of the new name;
730
Note that to_name is only the last component of the new name;
1119
731
this doesn't change the directory.
1121
For each entry in from_paths the move mode will be determined
1124
The first mode moves the file in the filesystem and updates the
1125
inventory. The second mode only updates the inventory without
1126
touching the file on the filesystem. This is the new mode introduced
1129
move uses the second mode if 'after == True' and the target is not
1130
versioned but present in the working tree.
1132
move uses the second mode if 'after == False' and the source is
1133
versioned but no longer in the working tree, and the target is not
1134
versioned but present in the working tree.
1136
move uses the first mode if 'after == False' and the source is
1137
versioned and present in the working tree, and the target is not
1138
versioned and not present in the working tree.
1140
Everything else results in an error.
1142
733
This returns a list of (from_path, to_path) pairs for each
1143
734
entry that is moved.
1148
# check for deprecated use of signature
1150
to_dir = kwargs.get('to_name', None)
1152
raise TypeError('You must supply a target directory')
1154
symbol_versioning.warn('The parameter to_name was deprecated'
1155
' in version 0.13. Use to_dir instead',
1158
# check destination directory
737
## TODO: Option to move IDs only
1159
738
assert not isinstance(from_paths, basestring)
1160
739
inv = self.inventory
1161
to_abs = self.abspath(to_dir)
740
to_abs = self.abspath(to_name)
1162
741
if not isdir(to_abs):
1163
raise errors.BzrMoveFailedError('',to_dir,
1164
errors.NotADirectory(to_abs))
1165
if not self.has_filename(to_dir):
1166
raise errors.BzrMoveFailedError('',to_dir,
1167
errors.NotInWorkingDirectory(to_dir))
1168
to_dir_id = inv.path2id(to_dir)
1169
if to_dir_id is None:
1170
raise errors.BzrMoveFailedError('',to_dir,
1171
errors.NotVersionedError(path=str(to_dir)))
742
raise BzrError("destination %r is not a directory" % to_abs)
743
if not self.has_filename(to_name):
744
raise BzrError("destination %r not in working directory" % to_abs)
745
to_dir_id = inv.path2id(to_name)
746
if to_dir_id == None and to_name != '':
747
raise BzrError("destination %r is not a versioned directory" % to_name)
1173
748
to_dir_ie = inv[to_dir_id]
1174
if to_dir_ie.kind != 'directory':
1175
raise errors.BzrMoveFailedError('',to_dir,
1176
errors.NotADirectory(to_abs))
1178
# create rename entries and tuples
1179
for from_rel in from_paths:
1180
from_tail = splitpath(from_rel)[-1]
1181
from_id = inv.path2id(from_rel)
1183
raise errors.BzrMoveFailedError(from_rel,to_dir,
1184
errors.NotVersionedError(path=str(from_rel)))
1186
from_entry = inv[from_id]
1187
from_parent_id = from_entry.parent_id
1188
to_rel = pathjoin(to_dir, from_tail)
1189
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1191
from_tail=from_tail,
1192
from_parent_id=from_parent_id,
1193
to_rel=to_rel, to_tail=from_tail,
1194
to_parent_id=to_dir_id)
1195
rename_entries.append(rename_entry)
1196
rename_tuples.append((from_rel, to_rel))
1198
# determine which move mode to use. checks also for movability
1199
rename_entries = self._determine_mv_mode(rename_entries, after)
1201
original_modified = self._inventory_is_modified
749
if to_dir_ie.kind not in ('directory', 'root_directory'):
750
raise BzrError("destination %r is not a directory" % to_abs)
752
to_idpath = inv.get_idpath(to_dir_id)
755
if not self.has_filename(f):
756
raise BzrError("%r does not exist in working tree" % f)
757
f_id = inv.path2id(f)
759
raise BzrError("%r is not versioned" % f)
760
name_tail = splitpath(f)[-1]
761
dest_path = appendpath(to_name, name_tail)
762
if self.has_filename(dest_path):
763
raise BzrError("destination %r already exists" % dest_path)
764
if f_id in to_idpath:
765
raise BzrError("can't move %r to a subdirectory of itself" % f)
767
# OK, so there's a race here, it's possible that someone will
768
# create a file in this interval and then the rename might be
769
# left half-done. But we should have caught most problems.
770
orig_inv = deepcopy(self.inventory)
1204
self._inventory_is_modified = True
1205
self._move(rename_entries)
773
name_tail = splitpath(f)[-1]
774
dest_path = appendpath(to_name, name_tail)
775
result.append((f, dest_path))
776
inv.rename(inv.path2id(f), to_dir_id, name_tail)
778
rename(self.abspath(f), self.abspath(dest_path))
780
raise BzrError("failed to rename %r to %r: %s" %
781
(f, dest_path, e[1]),
782
["rename rolled back"])
1207
784
# restore the inventory on error
1208
self._inventory_is_modified = original_modified
785
self._set_inventory(orig_inv)
1210
787
self._write_inventory(inv)
1211
return rename_tuples
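
    # A rough usage sketch of the two modes described above (hypothetical
    # paths):
    #
    #   tree.move(['a.txt'], 'subdir')              # moves the file on disk
    #                                               # and in the inventory
    #   os.rename('b.txt', 'subdir/b.txt')          # already moved on disk...
    #   tree.move(['b.txt'], 'subdir', after=True)  # ...so only the inventory
    #                                               # is updated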
    def _determine_mv_mode(self, rename_entries, after=False):
        """Determine for each from-to pair whether both the inventory and the
        working tree, or only the inventory, has to be changed.

        Also does basic plausibility tests.
        """
        inv = self.inventory

        for rename_entry in rename_entries:
            # store to local variables for easier reference
            from_rel = rename_entry.from_rel
            from_id = rename_entry.from_id
            to_rel = rename_entry.to_rel
            to_id = inv.path2id(to_rel)
            only_change_inv = False

            # check the inventory for source and destination
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel,to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            if to_id is not None:
                raise errors.BzrMoveFailedError(from_rel,to_rel,
                    errors.AlreadyVersionedError(path=str(to_rel)))

            # try to determine the mode for rename (only change inv or change
            # inv and file system)
            if after:
                if not self.has_filename(to_rel):
                    raise errors.BzrMoveFailedError(from_id,to_rel,
                        errors.NoSuchFile(path=str(to_rel),
                        extra="New file has not been created yet"))
                only_change_inv = True
            elif not self.has_filename(from_rel) and self.has_filename(to_rel):
                only_change_inv = True
            elif self.has_filename(from_rel) and not self.has_filename(to_rel):
                only_change_inv = False
            else:
                # something is wrong, so lets determine what exactly
                if not self.has_filename(from_rel) and \
                   not self.has_filename(to_rel):
                    raise errors.BzrRenameFailedError(from_rel,to_rel,
                        errors.PathsDoNotExist(paths=(str(from_rel),
                        str(to_rel))))
                else:
                    raise errors.RenameFailedFilesExist(from_rel, to_rel,
                        extra="(Use --after to update the Bazaar id)")
            rename_entry.only_change_inv = only_change_inv
        return rename_entries
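
    # Summary of the decision made above:
    #
    #   after   source on disk   target on disk   result
    #   True    (any)            yes              inventory only
    #   False   no               yes              inventory only
    #   False   yes              no               filesystem + inventory
    #   (anything else)                           error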
    def _move(self, rename_entries):
        """Moves a list of files.

        Depending on the value of the flag 'only_change_inv', the
        file will be moved on the file system or not.
        """
        inv = self.inventory
        moved = []

        for entry in rename_entries:
            try:
                self._move_entry(entry)
            except:
                self._rollback_move(moved)
                raise
            moved.append(entry)

    def _rollback_move(self, moved):
        """Try to rollback a previous move in case of a filesystem error."""
        inv = self.inventory
        for entry in moved:
            try:
                self._move_entry(WorkingTree._RenameEntry(
                    entry.to_rel, entry.from_id,
                    entry.to_tail, entry.to_parent_id, entry.from_rel,
                    entry.from_tail, entry.from_parent_id,
                    entry.only_change_inv))
            except errors.BzrMoveFailedError, e:
                raise errors.BzrMoveFailedError( '', '', "Rollback failed."
                        " The working tree is in an inconsistent state."
                        " Please consider doing a 'bzr revert'."
                        " Error message is: %s" % e)

    def _move_entry(self, entry):
        inv = self.inventory
        from_rel_abs = self.abspath(entry.from_rel)
        to_rel_abs = self.abspath(entry.to_rel)
        if from_rel_abs == to_rel_abs:
            raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
                "Source and target are identical.")

        if not entry.only_change_inv:
            try:
                osutils.rename(from_rel_abs, to_rel_abs)
            except OSError, e:
                raise errors.BzrMoveFailedError(entry.from_rel,
                    entry.to_rel, e[1])
        inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
    @needs_tree_write_lock
    def rename_one(self, from_rel, to_rel, after=False):
        """Rename one file.

        This can change the directory or the filename or both.

        rename_one has several 'modes' of operation. First, it can rename a
        physical file and change the file_id. That is the normal mode. Second,
        it can only change the file_id without touching any physical file.
        This is the new mode introduced in version 0.15.

        rename_one uses the second mode if 'after == True' and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the second mode if 'after == False' and 'from_rel' is
        versioned but no longer in the working tree, and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the first mode if 'after == False' and 'from_rel' is
        versioned and present in the working tree, and 'to_rel' is not
        versioned and not present in the working tree.

        Everything else results in an error.
        """
        inv = self.inventory
        rename_entries = []

        # create rename entries and tuples
        from_tail = splitpath(from_rel)[-1]
        from_id = inv.path2id(from_rel)
        if from_id is None:
            raise errors.BzrRenameFailedError(from_rel,to_rel,
                errors.NotVersionedError(path=str(from_rel)))
        from_entry = inv[from_id]
        from_parent_id = from_entry.parent_id
        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                     from_id=from_id,
                                     from_tail=from_tail,
                                     from_parent_id=from_parent_id,
                                     to_rel=to_rel, to_tail=to_tail,
                                     to_parent_id=to_dir_id)
        rename_entries.append(rename_entry)

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        # check if the target changed directory and if the target directory is
        # versioned
        if to_dir_id is None:
            raise errors.BzrMoveFailedError(from_rel,to_rel,
                errors.NotVersionedError(path=str(to_dir)))

        # all checks done. now we can continue with our actual work
        mutter('rename_one:\n'
               '  from_id   {%s}\n'
               '  from_rel: %r\n'
               '  to_rel: %r\n'
               '  to_dir %r\n'
               '  to_dir_id {%s}\n',
               from_id, from_rel, to_rel, to_dir, to_dir_id)

        self._move(rename_entries)
        self._write_inventory(inv)
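
    # A rough usage sketch (hypothetical paths), including the 'after' form
    # for a file that was already renamed on disk:
    #
    #   tree.rename_one('README', 'README.txt')
    #   os.rename('NEWS', 'ChangeLog')
    #   tree.rename_one('NEWS', 'ChangeLog', after=True)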
    class _RenameEntry(object):
        def __init__(self, from_rel, from_id, from_tail, from_parent_id,
                     to_rel, to_tail, to_parent_id, only_change_inv=False):
            self.from_rel = from_rel
            self.from_id = from_id
            self.from_tail = from_tail
            self.from_parent_id = from_parent_id
            self.to_rel = to_rel
            self.to_tail = to_tail
            self.to_parent_id = to_parent_id
            self.only_change_inv = only_change_inv
    @needs_read_lock
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
        return iter(
            [subp for subp in self.extras() if not self.is_ignored(subp)])

    @needs_tree_write_lock
    def unversion(self, file_ids):
        """Remove the file ids in file_ids from the current versioned set.

        When a file_id is unversioned, all of its children are automatically
        unversioned.

        :param file_ids: The file ids to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        for file_id in file_ids:
            file_id = osutils.safe_file_id(file_id)
            if self._inventory.has_id(file_id):
                self._inventory.remove_recursive_id(file_id)
            else:
                raise errors.NoSuchId(self, file_id)
        if len(file_ids):
            # in the future this should just set a dirty bit to wait for the
            # final unlock. However, until all methods of workingtree start
            # with the current in-memory inventory rather than triggering
            # a read, it is more complex - we need to teach read_inventory
            # to know when to read, and when to not read first... and possibly
            # to save first when the in memory one may be corrupted.
            # so for now, we just only write it if it is indeed dirty.
            self._write_inventory(self._inventory)
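
    # A rough usage sketch (hypothetical path): stop versioning a file
    # without deleting it from disk.
    #
    #   tree.lock_tree_write()
    #   try:
    #       tree.unversion([tree.path2id('generated.c')])
    #   finally:
    #       tree.unlock()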
    @deprecated_method(zero_eight)
    def iter_conflicts(self):
        """List all files in the tree that have text or content conflicts.
        DEPRECATED.  Use conflicts instead."""
        return self._iter_conflicts()

    def _iter_conflicts(self):
        conflicted = set()
        for info in self.list_files():
            path = info[0]
            stem = get_conflicted_stem(path)
            if stem is None:
            self.branch.unlock()

    def get_physical_lock_status(self):
        return self._control_files.get_physical_lock_status()

    def _basis_inventory_name(self):
        return 'basis-inventory-cache'

    def _reset_data(self):
        """Reset transient data that cannot be revalidated."""
        self._inventory_is_modified = False
        result = self._deserialize(self._control_files.get('inventory'))
        self._set_inventory(result, dirty=False)
    @needs_tree_write_lock
    def set_last_revision(self, new_revision):
        """Change the last revision in the working tree."""
        new_revision = osutils.safe_revision_id(new_revision)
        if self._change_last_revision(new_revision):
            self._cache_basis_inventory(new_revision)

    def _change_last_revision(self, new_revision):
        """Template method part of set_last_revision to perform the change.

        This is used to allow WorkingTree3 instances to not affect branch
        when their last revision is set.
        """
        if _mod_revision.is_null(new_revision):
            self.branch.set_revision_history([])
            return False
        try:
            self.branch.generate_revision_history(new_revision)
        except errors.NoSuchRevision:
            # not present in the repo - dont try to set it deeper than the tip
            self.branch.set_revision_history([new_revision])
        return True
    def _write_basis_inventory(self, xml):
        """Write the basis inventory XML to the basis-inventory file"""
        assert isinstance(xml, str), 'serialised xml must be bytestring.'
        path = self._basis_inventory_name()
        sio = StringIO(xml)
        self._control_files.put(path, sio)

    def _create_basis_xml_from_inventory(self, revision_id, inventory):
        """Create the text that will be saved in basis-inventory"""
        # TODO: jam 20070209 This should be redundant, as all callers should
        #       have already converted the revision_id to utf8
        inventory.revision_id = osutils.safe_revision_id(revision_id)
        return xml7.serializer_v7.write_inventory_to_string(inventory)
    def _cache_basis_inventory(self, new_revision):
        """Cache new_revision as the basis inventory."""
        # TODO: this should allow the ready-to-use inventory to be passed in,
        # as commit already has that ready-to-use [while the format is the
        # same, that is].
        try:
            # this double handles the inventory - unpack and repack -
            # but is easier to understand. We can/should put a conditional
            # in here based on whether the inventory is in the latest format
            # - perhaps we should repack all inventories on a repository
            # upgrade ?
            # the fast path is to copy the raw xml from the repository. If the
            # xml contains 'revision_id="', then we assume the right
            # revision_id is set. We must check for this full string, because a
            # root node id can legitimately look like 'revision_id' but cannot
            # contain a '"'.
            xml = self.branch.repository.get_inventory_xml(new_revision)
            firstline = xml.split('\n', 1)[0]
            if (not 'revision_id="' in firstline or
                'format="7"' not in firstline):
                inv = self.branch.repository.deserialise_inventory(
                    new_revision, xml)
                xml = self._create_basis_xml_from_inventory(new_revision, inv)
            self._write_basis_inventory(xml)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):
            pass

    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._control_files.get(path).read()
    @needs_read_lock
    def read_working_inventory(self):
        """Read the working inventory.

        :raises errors.InventoryModified: read_working_inventory will fail
            when the current in memory inventory has been modified.
        """
        # conceptually this should be an implementation detail of the tree.
        # XXX: Deprecate this.
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        if self._inventory_is_modified:
            raise errors.InventoryModified(self)
        result = self._deserialize(self._control_files.get('inventory'))
        self._set_inventory(result, dirty=False)
        return result
    @needs_tree_write_lock
    def remove(self, files, verbose=False, to_file=None, keep_files=True,
        force=False):
        """Remove nominated files from the working inventory.

        :files: File paths relative to the basedir.
        :keep_files: If true, the files will also be kept.
        :force: Delete files and directories, even if they are changed and
            even if the directories are not empty.
        """
        ## TODO: Normalize names
        ## TODO: Remove nested loops; better scalability
        if isinstance(files, basestring):
            files = [files]

        inv_delta = []

        new_files=set()
        unknown_files_in_directory=set()

        def recurse_directory_to_add_files(directory):
            # recurse directory and add all files
            # so we can check if they have changed.
            for parent_info, file_infos in\
                osutils.walkdirs(self.abspath(directory),
                    directory):
                for relpath, basename, kind, lstat, abspath in file_infos:
                    if self.path2id(relpath): #is it versioned?
                        new_files.add(relpath)
                    else:
                        unknown_files_in_directory.add(
                            (relpath, None, kind))

        for filename in files:
            # Get file name into canonical form.
            abspath = self.abspath(filename)
            filename = self.relpath(abspath)
            if len(filename) > 0:
                new_files.add(filename)
                if osutils.isdir(abspath):
                    recurse_directory_to_add_files(filename)
        files = [f for f in new_files]

        if len(files) == 0:
            return # nothing to do

        # Sort needed to first handle directory content before the directory
        files.sort(reverse=True)
        if not keep_files and not force:
            has_changed_files = len(unknown_files_in_directory) > 0
            if not has_changed_files:
                for (file_id, path, content_change, versioned, parent_id, name,
                     kind, executable) in self._iter_changes(self.basis_tree(),
                         include_unchanged=True, require_versioned=False,
                         want_unversioned=True, specific_files=files):
                    # check if it's unknown OR changed but not deleted:
                    if (versioned == (False, False)
                        or (content_change and kind[1] != None)):
                        has_changed_files = True
                        break

            if has_changed_files:
                # make delta to show ALL applicable changes in error message.
                tree_delta = self.changes_from(self.basis_tree(),
                    specific_files=files)
                for unknown_file in unknown_files_in_directory:
                    tree_delta.unversioned.extend((unknown_file,))
                raise errors.BzrRemoveChangedFilesError(tree_delta)

        # do this before any modifications
        for f in files:
            fid = self.path2id(f)
            message = None
            if not fid:
                message = "%s is not versioned." % (f,)
            else:
                if verbose:
                    # having removed it, it must be either ignored or unknown
                    if self.is_ignored(f):
                        new_status = 'I'
                    else:
                        new_status = '?'
                    textui.show_status(new_status, self.kind(fid), f,
                                       to_file=to_file)
                # unversion file
                inv_delta.append((f, None, fid, None))
                message = "removed %s" % (f,)

            if not keep_files:
                abs_path = self.abspath(f)
                if osutils.lexists(abs_path):
                    if (osutils.isdir(abs_path) and
                        len(os.listdir(abs_path)) > 0):
                        message = "%s is not an empty directory "\
                            "and won't be deleted." % (f,)
                    else:
                        osutils.delete_any(abs_path)
                        message = "deleted %s" % (f,)
                elif message is not None:
                    # only care if we haven't done anything yet.
                    message = "%s does not exist." % (f,)

            # print only one message (if any) per file.
            if message is not None:
                note(message)
        self.apply_inventory_delta(inv_delta)
    @needs_tree_write_lock
    def revert(self, filenames, old_tree=None, backups=True,
               pb=DummyProgress(), report_changes=False):
        from bzrlib.conflicts import resolve
        if old_tree is None:
            old_tree = self.basis_tree()
        conflicts = transform.revert(self, old_tree, filenames, backups, pb,
                                     report_changes)
        if not len(filenames):
            self.set_parent_ids(self.get_parent_ids()[:1])
            resolve(self)
        else:
            resolve(self, filenames, ignore_misses=True)
        return conflicts
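
    # A rough usage sketch (hypothetical path): reverting a single file
    # versus the whole tree.
    #
    #   tree.revert(['foo.c'])   # restore foo.c from the basis tree
    #   tree.revert([])          # revert everything and drop pending merges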
    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree can supply revision_trees for the basis revision only
        because there is only one cached inventory in the bzr directory.
        """
        if revision_id == self.last_revision():
            try:
                xml = self.read_basis_inventory()
            except errors.NoSuchFile:
                pass
            else:
                try:
                    inv = xml7.serializer_v7.read_inventory_from_string(xml)
                    # dont use the repository revision_tree api because we want
                    # to supply the inventory.
                    if inv.revision_id == revision_id:
                        return revisiontree.RevisionTree(self.branch.repository,
                            inv, revision_id)
                except errors.BadInventoryFormat:
                    pass
        # raise if there was no inventory, or if we read the wrong inventory.
        raise errors.NoSuchRevisionInTree(self, revision_id)

    # XXX: This method should be deprecated in favour of taking in a proper
    # new Inventory object.
    @needs_tree_write_lock
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import (Inventory,
                                      InventoryDirectory,
        between multiple working trees, i.e. via shared storage, then we
        would probably want to lock both the local tree, and the branch.
        """
        raise NotImplementedError(self.unlock)
    def update(self, change_reporter=None):
        """Update a working tree along its branch.

        This will update the branch if it's bound too, which means we have
        multiple trees involved:

        - The new basis tree of the master.
        - The old basis tree of the branch.
        - The old basis tree of the working tree.
        - The current working tree state.

        Pathologically, all three may be different, and non-ancestors of each
        other.  Conceptually we want to:

        - Preserve the wt.basis->wt.state changes
        - Transform the wt.basis to the new master basis.
        - Apply a merge of the old branch basis to get any 'local' changes from
          it into the tree.
        - Restore the wt.basis->wt.state changes.

        There isn't a single operation at the moment to do that, so we:
        - Merge current state -> basis tree of the master w.r.t. the old tree
          basis.
        - Do a 'normal' merge of the old branch basis if it is relevant.
        """
        if self.branch.get_master_branch() is not None:
            self.lock_write()
            update_branch = True
        else:
            self.lock_tree_write()
            update_branch = False
        try:
            if update_branch:
                old_tip = self.branch.update()
            else:
                old_tip = None
            return self._update_tree(old_tip, change_reporter)
        finally:
            self.unlock()
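
    # Rough sequence of update(): take the appropriate lock, let a bound
    # branch catch up with its master, then _update_tree() merges the new
    # basis into the working tree and records any displaced local work as a
    # pending merge.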
2030
@needs_tree_write_lock
2031
def _update_tree(self, old_tip=None, change_reporter=None):
2032
"""Update a tree to the master branch.
2034
:param old_tip: if supplied, the previous tip revision the branch,
2035
before it was changed to the master branch's tip.
2037
# here if old_tip is not None, it is the old tip of the branch before
2038
# it was updated from the master branch. This should become a pending
2039
# merge in the working tree to preserve the user existing work. we
2040
# cant set that until we update the working trees last revision to be
2041
# one from the new branch, because it will just get absorbed by the
2042
# parent de-duplication logic.
2044
# We MUST save it even if an error occurs, because otherwise the users
2045
# local work is unreferenced and will appear to have been lost.
2049
last_rev = self.get_parent_ids()[0]
2051
last_rev = _mod_revision.NULL_REVISION
2052
if last_rev != _mod_revision.ensure_null(self.branch.last_revision()):
2053
# merge tree state up to new branch tip.
2054
basis = self.basis_tree()
1222
Merge current state -> basis tree of the master w.r.t. the old tree basis.
1223
Do a 'normal' merge of the old branch basis if it is relevant.
1225
old_tip = self.branch.update()
1226
if old_tip is not None:
1227
self.add_pending_merge(old_tip)
1228
self.branch.lock_read()
1231
if self.last_revision() != self.branch.last_revision():
1232
# merge tree state up to new branch tip.
1233
basis = self.basis_tree()
            basis.lock_read()
            try:
                to_tree = self.branch.basis_tree()
                if basis.inventory.root is None:
                    self.set_root_id(to_tree.inventory.root.file_id)
                    self.flush()
                result += merge.merge_inner(
                                      self.branch,
                                      to_tree,
                                      basis,
                                      this_tree=self,
                                      change_reporter=change_reporter)
            finally:
                basis.unlock()
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to, to set the basis:
            parent_trees = [(self.branch.last_revision(), to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in RAM already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if (old_tip is not None and not _mod_revision.is_null(old_tip)):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
        else:
            # the working tree had the same last-revision as the master
            # branch did. We may still have pivot local work from the local
            # branch into old_tip:
            if (old_tip is not None and not _mod_revision.is_null(old_tip)):
                self.add_parent_tree_id(old_tip)
        if (old_tip is not None and not _mod_revision.is_null(old_tip)
            and old_tip != last_rev):
            # our last revision was not the prior branch last revision
            # and we have converted that last revision to a pending merge.
            # base is somewhere between the branch tip now
            # and the now pending merge

            # Since we just modified the working tree and inventory, flush out
            # the current state, before we modify it again.
            # TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
            #       requires it only because TreeTransform directly munges the
            #       inventory and calls tree._write_inventory(). Ultimately we
            #       should be able to remove this extra flush.
            self.flush()
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                old_tip)
            base_tree = self.branch.repository.revision_tree(base_rev_id)
            other_tree = self.branch.repository.revision_tree(old_tip)
            result += merge.merge_inner(
                                  self.branch,
                                  other_tree,
                                  base_tree,
                                  this_tree=self,
                                  change_reporter=change_reporter)
        return result
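
    # Illustrative sketch (not part of bzrlib itself): the pending-merge pivot
    # above is what callers see when updating a checkout whose local tip is
    # not yet in the master.  The path 'my-checkout' is an assumption made
    # only for this example.
    #
    #   tree = WorkingTree.open('my-checkout')
    #   tree.lock_write()
    #   try:
    #       conflict_count = tree.update()
    #       # the master tip is now the first parent; the old local tip, if
    #       # it was not already merged, reappears in get_parent_ids()[1:]
    #       # as a pending merge rather than being silently dropped.
    #       parents = tree.get_parent_ids()
    #   finally:
    #       tree.unlock()
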
    def _write_hashcache_if_dirty(self):
        """Write out the hashcache if it is dirty."""
        if self._hashcache.needs_write:
            try:
                self._hashcache.write()
            except OSError, e:
                if e.errno not in (errno.EPERM, errno.EACCES):
                    raise
                # TODO: jam 20061219 Should this be a warning? A single line
                #       warning might be sufficient to let the user know what
                #       is going on.
                mutter('Could not write hashcache for %s\nError: %s',
                       self._hashcache.cache_file_name(), e)

    @needs_tree_write_lock
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        self._set_inventory(inv, dirty=True)
        self.flush()

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    def conflicts(self):
        conflicts = _mod_conflicts.ConflictList()
        for conflicted in self._iter_conflicts():
            text = True
            try:
                if file_kind(self.abspath(conflicted)) != "file":
                    text = False
            except errors.NoSuchFile:
                text = False
            if text is True:
                for suffix in ('.THIS', '.OTHER'):
                    try:
                        kind = file_kind(self.abspath(conflicted+suffix))
                        if kind != "file":
                            text = False
                    except errors.NoSuchFile:
                        text = False
                    if text == False:
                        break
            ctype = {True: 'text conflict', False: 'contents conflict'}[text]
            conflicts.append(_mod_conflicts.Conflict.factory(ctype,
                             path=conflicted,
                             file_id=self.path2id(conflicted)))
        return conflicts
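
    # Illustrative sketch (not part of bzrlib itself): listing conflicts from
    # application code.  The path 'work' is an assumption for the example.
    #
    #   tree = WorkingTree.open('work')
    #   for conflict in tree.conflicts():
    #       # a 'text conflict' means the file and its .THIS/.OTHER variants
    #       # are all ordinary files; otherwise it is a 'contents conflict'.
    #       print conflict.typestring, conflict.path
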
    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        returns a generator which yields items in the form:
        ((current_directory_path, fileid),
         [(file1_path, file1_name, file1_kind, (lstat), file1_id,
           file1_kind), ... ])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
        disk_top = self.abspath(prefix)
        if disk_top.endswith('/'):
            disk_top = disk_top[:-1]
        top_strip_len = len(disk_top) + 1
        inventory_iterator = self._walkdirs(prefix)
        disk_iterator = osutils.walkdirs(disk_top, prefix)
        try:
            current_disk = disk_iterator.next()
            disk_finished = False
        except OSError, e:
            if not (e.errno == errno.ENOENT or
                (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
                raise
            current_disk = None
            disk_finished = True
        try:
            current_inv = inventory_iterator.next()
            inv_finished = False
        except StopIteration:
            current_inv = None
            inv_finished = True
        while not inv_finished or not disk_finished:
            if not disk_finished:
                # strip out .bzr dirs
                if current_disk[0][1][top_strip_len:] == '':
                    # osutils.walkdirs can be made nicer -
                    # yield the path-from-prefix rather than the pathjoined
                    # value.
                    bzrdir_loc = bisect_left(current_disk[1], ('.bzr', '.bzr'))
                    if current_disk[1][bzrdir_loc][0] == '.bzr':
                        # we don't yield the contents of .bzr, or .bzr itself.
                        del current_disk[1][bzrdir_loc]
            if inv_finished:
                # everything is unknown
                direction = 1
            elif disk_finished:
                # everything is missing
                direction = -1
            else:
                direction = cmp(current_inv[0][0], current_disk[0][0])
            if direction > 0:
                # disk is before inventory - unknown
                dirblock = [(relpath, basename, kind, stat, None, None) for
                    relpath, basename, kind, stat, top_path in current_disk[1]]
                yield (current_disk[0][0], None), dirblock
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
            elif direction < 0:
                # inventory is before disk - missing.
                dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
                    for relpath, basename, dkind, stat, fileid, kind in
                    current_inv[1]]
                yield (current_inv[0][0], current_inv[0][1]), dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
            else:
                # versioned present directory
                # merge the inventory and disk data together
                dirblock = []
                for relpath, subiterator in itertools.groupby(sorted(
                    current_inv[1] + current_disk[1], key=operator.itemgetter(0)), operator.itemgetter(1)):
                    path_elements = list(subiterator)
                    if len(path_elements) == 2:
                        inv_row, disk_row = path_elements
                        # versioned, present file
                        dirblock.append((inv_row[0],
                            inv_row[1], disk_row[2],
                            disk_row[3], inv_row[4],
                            inv_row[5]))
                    elif len(path_elements[0]) == 5:
                        # unknown disk file
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], path_elements[0][2],
                            path_elements[0][3], None, None))
                    elif len(path_elements[0]) == 6:
                        # versioned, absent file.
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], 'unknown', None,
                            path_elements[0][4], path_elements[0][5]))
                    else:
                        raise NotImplementedError('unreachable code')
                yield current_inv[0], dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
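
    # Illustrative sketch (not part of bzrlib itself): consuming walkdirs()
    # while holding a lock, as the docstring above requires.  The path 'work'
    # is an assumption for the example.
    #
    #   tree = WorkingTree.open('work')
    #   tree.lock_read()
    #   try:
    #       for (dir_relpath, dir_file_id), entries in tree.walkdirs():
    #           for relpath, name, kind, lstat, file_id, inv_kind in entries:
    #               if file_id is None:
    #                   print 'unknown:', relpath
    #               elif kind == 'unknown':
    #                   print 'missing:', relpath
    #   finally:
    #       tree.unlock()
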
    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :prefix: is used as the directory to start with.
        returns a generator which yields items in the form:
        ((current_directory_path, fileid),
         [(file1_path, file1_name, file1_kind, None, file1_id,
           file1_kind), ... ])
        """
        _directory = 'directory'
        # get the root in the inventory
        inv = self.inventory
        top_id = inv.path2id(prefix)
        if top_id is None:
            pending = []
        else:
            pending = [(prefix, '', _directory, None, top_id, None)]
        while pending:
            dirblock = []
            currentdir = pending.pop()
            # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
            top_id = currentdir[4]
            if currentdir[0]:
                relroot = currentdir[0] + '/'
            else:
                relroot = ''
            # FIXME: stash the node in pending
            entry = inv[top_id]
            for name, child in entry.sorted_children():
                dirblock.append((relroot + name, name, child.kind, None,
                    child.file_id, child.kind
                    ))
            yield (currentdir[0], entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append(dir)
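
    # Illustrative note (not part of bzrlib itself): unlike walkdirs() above,
    # _walkdirs() reports only what the inventory knows about, so each
    # per-file tuple carries None for the stat slot and the same kind in both
    # kind slots, e.g.:
    #
    #   for (dirpath, dir_id), block in tree._walkdirs():
    #       for relpath, name, kind, stat, file_id, inv_kind in block:
    #           assert stat is None and kind == inv_kind
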
    @needs_tree_write_lock
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts. The corresponding .THIS .BASE and
        .OTHER files are deleted, as per 'resolve'.

        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
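
    # Illustrative sketch (not part of bzrlib itself): driving auto_resolve()
    # from application code.  The path 'work' is an assumption for the
    # example.
    #
    #   tree = WorkingTree.open('work')
    #   tree.lock_tree_write()
    #   try:
    #       un_resolved, resolved = tree.auto_resolve()
    #       for conflict in un_resolved:
    #           print 'still conflicted:', conflict.path
    #   finally:
    #       tree.unlock()
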
    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite. To give it a chance to detect
        corruption after actions have occurred. The default implementation is
        a no-op.

        :return: None. An exception should be raised if there is an error.
        """


class WorkingTree2(WorkingTree):
    """This is the Format 2 working tree.

    This was the first weave-based working tree.
     - uses os locks for locking.
     - uses the branch last-revision.
    """

    def __init__(self, *args, **kwargs):
        super(WorkingTree2, self).__init__(*args, **kwargs)
        # WorkingTree2 has more of a constraint that self._inventory must
        # exist. Because this is an older format, we don't mind the overhead
        # caused by the extra computation here.

        # Newer WorkingTrees should only have self._inventory set when they
        # have a read lock.
        if self._inventory is None:
            self.read_working_inventory()

    def lock_tree_write(self):
        """See WorkingTree.lock_tree_write().

        In Format2 WorkingTrees we have a single lock for the branch and tree
        so lock_tree_write() degrades to lock_write().
        """
        self.branch.lock_write()
        try:
            return self._control_files.lock_write()
        except:
            self.branch.unlock()
            raise

    def unlock(self):
        # we share control files:
        if self._control_files._lock_count == 3:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()
        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()
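
    # Illustrative sketch (not part of bzrlib itself): because this format
    # shares one physical lock between the branch and the tree, locks must be
    # released in the reverse order they were taken, which unlock() above
    # does for the caller:
    #
    #   tree.lock_tree_write()   # takes branch.lock_write(), then the tree lock
    #   try:
    #       pass                 # ... modify the tree ...
    #   finally:
    #       tree.unlock()        # releases the tree lock, then branch.unlock()

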
class WorkingTree3(WorkingTree):