    __contains__ = has_id

    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None

    @needs_read_lock
    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)
    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat

    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.inventory
        for f, file_id, kind in zip(files, ids, kinds):
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                inv.add_path(f, kind=kind, file_id=file_id)
            self._inventory_is_modified = True
    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    @needs_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it's not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available,
        using this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
        self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file.
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            size = stat_result.st_size
            # try for a stat cache lookup
            executable = self._is_executable_from_path_and_stat(path, stat_result)
            return (kind, size, executable, self._sha_from_stat(
                path, stat_result))
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # tree reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            return ('symlink', None, None, os.readlink(abspath))
        else:
            return (kind, None, None, None)
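    # Illustrative sketch (not part of the original source): the 4-tuple
    # returned above is (kind, size, executable, sha1-or-link-target).  A
    # caller might unpack it like this; 'wt' is a hypothetical locked tree
    # and 'hello.txt' an example path:
    #
    #   kind, size, executable, sha1 = wt.path_content_summary('hello.txt')
    #   if kind == 'file' and sha1 is None:
    #       sha1 = wt.get_file_sha1(wt.path2id('hello.txt'))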
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)
    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self._control_files._file_mode)
    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        that are still 'heads', i.e. not ancestors of another requested
        revision.
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids
    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)
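    # Illustrative sketch (not part of the original source): a typical caller
    # records a merge by appending the merged revision to the existing
    # parents; 'wt' and 'merged_rev_id' are hypothetical example names.
    #
    #   wt.lock_tree_write()
    #   try:
    #       wt.set_parent_ids(wt.get_parent_ids() + [merged_rev_id])
    #   finally:
    #       wt.unlock()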
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                                        leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)
    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None
    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._transport.put_file(filename, my_file,
            mode=self._control_files._file_mode)
    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
        merge_type=None):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            merger = Merger(self.branch, this_tree=self, pb=pb)
            merger.pp = ProgressPhase("Merge phase", 5, pb)
            merger.pp.next_phase()
            # check that there are no local changes pending
            merger.check_basis(check_clean=True, require_commits=False)
            if to_revision is None:
                to_revision = _mod_revision.ensure_null(branch.last_revision())
            merger.other_rev_id = to_revision
            if _mod_revision.is_null(merger.other_rev_id):
                raise errors.NoCommits(branch)
            self.branch.fetch(branch, last_revision=merger.other_rev_id)
            merger.other_basis = merger.other_rev_id
            merger.other_tree = self.branch.repository.revision_tree(
                merger.other_rev_id)
            merger.other_branch = branch
            merger.pp.next_phase()
            if from_revision is None:
                merger.find_base()
            else:
                merger.set_base_revision(from_revision, branch)
            if merger.base_rev_id == merger.other_rev_id:
                raise errors.PointlessMerge
            merger.backup_files = False
            if merge_type is None:
                merger.merge_type = Merge3Merger
            else:
                merger.merge_type = merge_type
            merger.set_interesting_files(None)
            merger.show_base = False
            merger.reprocess = False
            conflicts = merger.do_merge()
            merger.set_pending()
        finally:
            pb.finished()
        return conflicts
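    # Illustrative sketch (not part of the original source): merging another
    # branch into a working tree and checking for conflicts.  'wt' and
    # 'other_branch' are hypothetical names used only for this example.
    #
    #   conflicts = wt.merge_from_branch(other_branch)
    #   if conflicts:
    #       # .THIS/.OTHER/.BASE files are left in the tree for resolution
    #       print "%d conflicts encountered" % conflicts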
    @needs_read_lock
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._transport.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        merge_hashes = {}
        try:
            if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                raise errors.MergeModifiedFormatError()
        except StopIteration:
            raise errors.MergeModifiedFormatError()
        for s in RioReader(hashfile):
            # RioReader reads in Unicode, so convert file_ids back to utf8
            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
            if file_id not in self.inventory:
                continue
            text_hash = s.get("hash")
            if text_hash == self.get_file_sha1(file_id):
                merge_hashes[file_id] = text_hash
        return merge_hashes
    @needs_write_lock
    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id
    def get_symlink_target(self, file_id):
        return os.readlink(self.id2abspath(file_id))
    @needs_tree_write_lock
    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                                          'Trees have the same root')
        try:
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
        try:
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        finally:
            other_tree.unlock()
        other_tree.bzrdir.retire_bzrdir()
    def _setup_directory_is_tree_reference(self):
        if self._branch.repository._format.supports_tree_reference:
            self._directory_is_tree_reference = \
                self._directory_may_be_tree_reference
        else:
            self._directory_is_tree_reference = \
                self._directory_is_never_tree_reference

    def _directory_is_never_tree_reference(self, relpath):
        return False

    def _directory_may_be_tree_reference(self, relpath):
        # as a special case, if a directory contains control files then
        # it's a tree reference, except that the root of the tree is not
        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
        # TODO: We could ask all the control formats whether they
        # recognize this directory, but at the moment there's no cheap api
        # to do that. Since we probably can only nest bzr checkouts and
        # they always use this name it's ok for now. -- mbp 20060306
        #
        # FIXME: There is an unhandled case here of a subdirectory
        # containing .bzr but not a branch; that will probably blow up
        # when you try to commit it. It might happen if there is a
        # checkout in a subdirectory. This can be avoided by not adding
        # it.
    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        def mkdirs(path):
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()
            return transport

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
        if format is None:
            format = self.bzrdir.cloning_metadir()
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
        try:
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
        if not repo.supports_rich_root():
            raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
        return wt
    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file,
            working=True)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)
    def flush(self):
        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._transport.put_file('inventory', sio,
            mode=self._control_files._file_mode)
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))
    def list_files(self, include_root=False):
        """Recursively list all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.

        This does not include files that have been deleted in this
        tree.

        Skips the control directory.
        """
        # list_files is an iterator, so @needs_read_lock doesn't work properly
        # with it. So callers should be careful to always read_lock the tree.
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)

        inv = self.inventory
        if include_root is True:
            yield ('', 'V', 'directory', inv.root.file_id, inv.root)
        # Convert these into local objects to save lookup times
        pathjoin = osutils.pathjoin
        file_kind = self._kind

        # transport.base ends in a slash, we want the piece
        # between the last two slashes
        transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]

        fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}

        # directory file_id, relative path, absolute path, reverse sorted children
        children = os.listdir(self.basedir)
        children.sort()
        # jam 20060527 The kernel sized tree seems equivalent whether we
        # use a deque and popleft to keep them sorted, or if we use a plain
        # list and just reverse() them.
        children = collections.deque(children)
        stack = [(inv.root.file_id, u'', self.basedir, children)]
        while stack:
            from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]

            while children:
                f = children.popleft()
                ## TODO: If we find a subdirectory with its own .bzr
                ## directory, then that is a separate tree and we
                ## should exclude it.

                # the bzrdir for this tree
                if transport_base_dir == f:
                    continue

                # we know that from_dir_relpath and from_dir_abspath never end in a slash
                # and 'f' doesn't begin with one, we can do a string op, rather
                # than the checks of pathjoin(), all relative paths will have an extra slash
                # at the beginning
                fp = from_dir_relpath + '/' + f

                # absolute path
                fap = from_dir_abspath + '/' + f

                f_ie = inv.get_child(from_dir_id, f)
                if f_ie:
                    c = 'V'
                elif self.is_ignored(fp[1:]):
                    c = 'I'
                else:
                    # we may not have found this file, because of a unicode issue
                    f_norm, can_access = osutils.normalized_filename(f)
                    if f == f_norm or not can_access:
                        # No change, so treat this file normally
                        c = '?'
                    else:
                        # this file can be accessed by a normalized path
                        # check again if it is versioned
                        # these lines are repeated here for performance
                        f = f_norm
                        fp = from_dir_relpath + '/' + f
                        fap = from_dir_abspath + '/' + f
                        f_ie = inv.get_child(from_dir_id, f)
                        if f_ie:
                            c = 'V'
                        elif self.is_ignored(fp[1:]):
                            c = 'I'
                        else:
                            c = '?'

                fk = file_kind(fap)

                # make a last minute entry
                if f_ie:
                    yield fp[1:], c, fk, f_ie.file_id, f_ie
                else:
                    try:
                        yield fp[1:], c, fk, None, fk_entries[fk]()
                    except KeyError:
                        yield fp[1:], c, fk, None, TreeEntry()
                    continue

                if fk != 'directory':
                    continue

                # But do this child first
                new_children = os.listdir(fap)
                new_children.sort()
                new_children = collections.deque(new_children)
                stack.append((f_ie.file_id, fp, fap, new_children))
                # Break out of inner loop,
                # so that we start outer loop with child
                break
            else:
                # if we finished all children, pop it off the stack
                stack.pop()
    @needs_tree_write_lock
    def move(self, from_paths, to_dir=None, after=False, **kwargs):
        """Rename files.

        to_dir must exist in the inventory.

        If to_dir exists and is a directory, the files are moved into
        it, keeping their old names.

        Note that to_dir is only the last component of the new name;
        this doesn't change the directory.

        For each entry in from_paths the move mode will be determined
        independently:

        The first mode moves the file in the filesystem and updates the
        inventory. The second mode only updates the inventory without
        touching the file on the filesystem. This is the new mode introduced
        in version 0.15.

        move uses the second mode if 'after == True' and the target is not
        versioned but present in the working tree.

        move uses the second mode if 'after == False' and the source is
        versioned but no longer in the working tree, and the target is not
        versioned but present in the working tree.

        move uses the first mode if 'after == False' and the source is
        versioned and present in the working tree, and the target is not
        versioned and not present in the working tree.

        Everything else results in an error.

        This returns a list of (from_path, to_path) pairs for each
        entry that is moved.  (See the illustrative sketch after this method.)
        """
        rename_entries = []
        rename_tuples = []

        # check for deprecated use of signature
        if to_dir is None:
            to_dir = kwargs.get('to_name', None)
            if to_dir is None:
                raise TypeError('You must supply a target directory')
            else:
                symbol_versioning.warn('The parameter to_name was deprecated'
                                       ' in version 0.13. Use to_dir instead',
                                       DeprecationWarning)

        # check destination directory
        if isinstance(from_paths, basestring):
            raise ValueError()
        inv = self.inventory
        to_abs = self.abspath(to_dir)
        if not isdir(to_abs):
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotADirectory(to_abs))
        if not self.has_filename(to_dir):
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotInWorkingDirectory(to_dir))
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id is None:
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotVersionedError(path=str(to_dir)))

        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind != 'directory':
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotADirectory(to_abs))

        # create rename entries and tuples
        for from_rel in from_paths:
            from_tail = splitpath(from_rel)[-1]
            from_id = inv.path2id(from_rel)
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel,to_dir,
                    errors.NotVersionedError(path=str(from_rel)))

            from_entry = inv[from_id]
            from_parent_id = from_entry.parent_id
            to_rel = pathjoin(to_dir, from_tail)
            rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                         from_id=from_id,
                                         from_tail=from_tail,
                                         from_parent_id=from_parent_id,
                                         to_rel=to_rel, to_tail=from_tail,
                                         to_parent_id=to_dir_id)
            rename_entries.append(rename_entry)
            rename_tuples.append((from_rel, to_rel))

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        original_modified = self._inventory_is_modified
        try:
            if len(from_paths):
                self._inventory_is_modified = True
            self._move(rename_entries)
        except:
            # restore the inventory on error
            self._inventory_is_modified = original_modified
            raise
        self._write_inventory(inv)
        return rename_tuples
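    # Illustrative sketch (not part of the original source): the two modes of
    # move().  'wt' is a hypothetical tree and the paths are examples only.
    #
    #   wt.move(['a.txt', 'b.txt'], 'subdir')       # moves files and inventory
    #   # If the files were already moved on disk (e.g. with plain 'mv'),
    #   # only the inventory needs updating:
    #   wt.move(['c.txt'], 'subdir', after=True)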
    def _determine_mv_mode(self, rename_entries, after=False):
        """Determines for each from-to pair if both inventory and working tree
        or only the inventory has to be changed.

        Also does basic plausibility tests.
        """
        inv = self.inventory

        for rename_entry in rename_entries:
            # store to local variables for easier reference
            from_rel = rename_entry.from_rel
            from_id = rename_entry.from_id
            to_rel = rename_entry.to_rel
            to_id = inv.path2id(to_rel)
            only_change_inv = False

            # check the inventory for source and destination
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel,to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            if to_id is not None:
                raise errors.BzrMoveFailedError(from_rel,to_rel,
                    errors.AlreadyVersionedError(path=str(to_rel)))

            # try to determine the mode for rename (only change inv or change
            # inv and file system)
            if after:
                if not self.has_filename(to_rel):
                    raise errors.BzrMoveFailedError(from_id,to_rel,
                        errors.NoSuchFile(path=str(to_rel),
                        extra="New file has not been created yet"))
                only_change_inv = True
            elif not self.has_filename(from_rel) and self.has_filename(to_rel):
                only_change_inv = True
            elif self.has_filename(from_rel) and not self.has_filename(to_rel):
                only_change_inv = False
            elif (not self.case_sensitive
                  and from_rel.lower() == to_rel.lower()
                  and self.has_filename(from_rel)):
                only_change_inv = False
            else:
                # something is wrong, so lets determine what exactly
                if not self.has_filename(from_rel) and \
                   not self.has_filename(to_rel):
                    raise errors.BzrRenameFailedError(from_rel,to_rel,
                        errors.PathsDoNotExist(paths=(str(from_rel),
                        str(to_rel))))
                else:
                    raise errors.RenameFailedFilesExist(from_rel, to_rel)
            rename_entry.only_change_inv = only_change_inv
        return rename_entries
    def _move(self, rename_entries):
        """Moves a list of files.

        Depending on the value of the flag 'only_change_inv', the
        file will be moved on the file system or not.
        """
        inv = self.inventory
        moved = []

        for entry in rename_entries:
            try:
                self._move_entry(entry)
            except:
                self._rollback_move(moved)
                raise
            moved.append(entry)

    def _rollback_move(self, moved):
        """Try to rollback a previous move in case of a filesystem error."""
        inv = self.inventory
        for entry in moved:
            try:
                self._move_entry(WorkingTree._RenameEntry(
                    entry.to_rel, entry.from_id,
                    entry.to_tail, entry.to_parent_id, entry.from_rel,
                    entry.from_tail, entry.from_parent_id,
                    entry.only_change_inv))
            except errors.BzrMoveFailedError, e:
                raise errors.BzrMoveFailedError( '', '', "Rollback failed."
                        " The working tree is in an inconsistent state."
                        " Please consider doing a 'bzr revert'."
                        " Error message is: %s" % e)
    def _move_entry(self, entry):
        inv = self.inventory
        from_rel_abs = self.abspath(entry.from_rel)
        to_rel_abs = self.abspath(entry.to_rel)
        if from_rel_abs == to_rel_abs:
            raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
                "Source and target are identical.")

        if not entry.only_change_inv:
            try:
                osutils.rename(from_rel_abs, to_rel_abs)
            except OSError, e:
                raise errors.BzrMoveFailedError(entry.from_rel,
                    entry.to_rel, e[1])
        inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
    @needs_tree_write_lock
    def rename_one(self, from_rel, to_rel, after=False):
        """Rename one file.

        This can change the directory or the filename or both.

        rename_one has several 'modes' to work. First, it can rename a physical
        file and change the file_id. That is the normal mode. Second, it can
        only change the file_id without touching any physical file. This is
        the new mode introduced in version 0.15.

        rename_one uses the second mode if 'after == True' and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the second mode if 'after == False' and 'from_rel' is
        versioned but no longer in the working tree, and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the first mode if 'after == False' and 'from_rel' is
        versioned and present in the working tree, and 'to_rel' is not
        versioned and not present in the working tree.

        Everything else results in an error.  (See the illustrative sketch
        after this method.)
        """
        inv = self.inventory
        rename_entries = []

        # create rename entries and tuples
        from_tail = splitpath(from_rel)[-1]
        from_id = inv.path2id(from_rel)
        if from_id is None:
            raise errors.BzrRenameFailedError(from_rel,to_rel,
                errors.NotVersionedError(path=str(from_rel)))
        from_entry = inv[from_id]
        from_parent_id = from_entry.parent_id
        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                     from_id=from_id,
                                     from_tail=from_tail,
                                     from_parent_id=from_parent_id,
                                     to_rel=to_rel, to_tail=to_tail,
                                     to_parent_id=to_dir_id)
        rename_entries.append(rename_entry)

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        # check if the target changed directory and if the target directory is
        # versioned
        if to_dir_id is None:
            raise errors.BzrMoveFailedError(from_rel,to_rel,
                errors.NotVersionedError(path=str(to_dir)))

        # all checks done. now we can continue with our actual work
        mutter('rename_one:\n'
               '  from_id   {%s}\n'
               '  from_rel: %r\n'
               '  to_rel: %r\n'
               '  to_dir %r\n'
               '  to_dir_id {%s}\n',
               from_id, from_rel, to_rel, to_dir, to_dir_id)

        self._move(rename_entries)
        self._write_inventory(inv)
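    # Illustrative sketch (not part of the original source): rename_one in its
    # two modes; 'wt' is a hypothetical tree and the paths are examples only.
    #
    #   wt.rename_one('README', 'docs/README')            # renames file + inventory
    #   wt.rename_one('old.cfg', 'new.cfg', after=True)    # file already renamed on disk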
    class _RenameEntry(object):
        def __init__(self, from_rel, from_id, from_tail, from_parent_id,
                     to_rel, to_tail, to_parent_id, only_change_inv=False):
            self.from_rel = from_rel
            self.from_id = from_id
            self.from_tail = from_tail
            self.from_parent_id = from_parent_id
            self.to_rel = to_rel
            self.to_tail = to_tail
            self.to_parent_id = to_parent_id
            self.only_change_inv = only_change_inv
    @needs_read_lock
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
        return iter(
            [subp for subp in self.extras() if not self.is_ignored(subp)])
    @needs_tree_write_lock
    def unversion(self, file_ids):
        """Remove the file ids in file_ids from the current versioned set.

        When a file_id is unversioned, all of its children are automatically
        unversioned.

        :param file_ids: The file ids to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        for file_id in file_ids:
            if self._inventory.has_id(file_id):
                self._inventory.remove_recursive_id(file_id)
            else:
                raise errors.NoSuchId(self, file_id)
        if len(file_ids):
            # in the future this should just set a dirty bit to wait for the
            # final unlock. However, until all methods of workingtree start
            # with the current in-memory inventory rather than triggering
            # a read, it is more complex - we need to teach read_inventory
            # to know when to read, and when to not read first... and possibly
            # to save first when the in memory one may be corrupted.
            # so for now, we just only write it if it is indeed dirty.
            self._write_inventory(self._inventory)
    def _iter_conflicts(self):
        conflicted = set()
        for info in self.list_files():
            path = info[0]
            stem = get_conflicted_stem(path)
            if stem is None:
                continue
            if stem not in conflicted:
                conflicted.add(stem)
                yield stem
    def _change_last_revision(self, new_revision):
        """Template method part of set_last_revision to perform the change.

        This is used to allow WorkingTree3 instances to not affect branch
        when their last revision is set.
        """
        if _mod_revision.is_null(new_revision):
            self.branch.set_revision_history([])
            return False
        try:
            self.branch.generate_revision_history(new_revision)
        except errors.NoSuchRevision:
            # not present in the repo - don't try to set it deeper than the tip
            self.branch.set_revision_history([new_revision])
        return True
    def _write_basis_inventory(self, xml):
        """Write the basis inventory XML to the basis-inventory file"""
        path = self._basis_inventory_name()
        sio = StringIO(xml)
        self._transport.put_file(path, sio,
            mode=self._control_files._file_mode)

    def _create_basis_xml_from_inventory(self, revision_id, inventory):
        """Create the text that will be saved in basis-inventory"""
        inventory.revision_id = revision_id
        return xml7.serializer_v7.write_inventory_to_string(inventory)
    def _cache_basis_inventory(self, new_revision):
        """Cache new_revision as the basis inventory."""
        # TODO: this should allow the ready-to-use inventory to be passed in,
        # as commit already has that ready-to-use [while the format is the
        # same, that is].
        try:
            # this double handles the inventory - unpack and repack -
            # but is easier to understand. We can/should put a conditional
            # in here based on whether the inventory is in the latest format
            # - perhaps we should repack all inventories on a repository
            # upgrade ?
            # the fast path is to copy the raw xml from the repository. If the
            # xml contains 'revision_id="', then we assume the right
            # revision_id is set. We must check for this full string, because a
            # root node id can legitimately look like 'revision_id' but cannot
            # contain a '"'.
            xml = self.branch.repository.get_inventory_xml(new_revision)
            firstline = xml.split('\n', 1)[0]
            if (not 'revision_id="' in firstline or
                'format="7"' not in firstline):
                inv = self.branch.repository.deserialise_inventory(
                    new_revision, xml)
                xml = self._create_basis_xml_from_inventory(new_revision, inv)
            self._write_basis_inventory(xml)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):
            pass
    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._transport.get_bytes(path)
    @needs_read_lock
    def read_working_inventory(self):
        """Read the working inventory.

        :raises errors.InventoryModified: read_working_inventory will fail
            when the current in memory inventory has been modified.
        """
        # conceptually this should be an implementation detail of the tree.
        # XXX: Deprecate this.
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        if self._inventory_is_modified:
            raise errors.InventoryModified(self)
        result = self._deserialize(self._transport.get('inventory'))
        self._set_inventory(result, dirty=False)
        return result
    @needs_tree_write_lock
    def remove(self, files, verbose=False, to_file=None, keep_files=True,
        force=False):
        """Remove nominated files from the working inventory.

        :files: File paths relative to the basedir.
        :keep_files: If true, the files will also be kept.
        :force: Delete files and directories, even if they are changed and
            even if the directories are not empty.
        """
        if isinstance(files, basestring):
            files = [files]

        inv_delta = []

        new_files = set()
        unknown_nested_files = set()

        def recurse_directory_to_add_files(directory):
            # Recurse directory and add all files
            # so we can check if they have changed.
            for parent_info, file_infos in\
                self.walkdirs(directory):
                for relpath, basename, kind, lstat, fileid, kind in file_infos:
                    # Is it versioned or ignored?
                    if self.path2id(relpath) or self.is_ignored(relpath):
                        # Add nested content for deletion.
                        new_files.add(relpath)
                    else:
                        # Files which are not versioned and not ignored
                        # should be treated as unknown.
                        unknown_nested_files.add((relpath, None, kind))

        for filename in files:
            # Get file name into canonical form.
            abspath = self.abspath(filename)
            filename = self.relpath(abspath)
            if len(filename) > 0:
                new_files.add(filename)
                recurse_directory_to_add_files(filename)

        files = list(new_files)

        if len(files) == 0:
            return # nothing to do

        # Sort needed to first handle directory content before the directory
        files.sort(reverse=True)

        # Bail out if we are going to delete files we shouldn't
        if not keep_files and not force:
            has_changed_files = len(unknown_nested_files) > 0
            if not has_changed_files:
                for (file_id, path, content_change, versioned, parent_id, name,
                     kind, executable) in self.iter_changes(self.basis_tree(),
                         include_unchanged=True, require_versioned=False,
                         want_unversioned=True, specific_files=files):
                    if versioned == (False, False):
                        # The record is unknown ...
                        if not self.is_ignored(path[1]):
                            # ... but not ignored
                            has_changed_files = True
                            break
                    elif content_change and (kind[1] is not None):
                        # Versioned and changed, but not deleted
                        has_changed_files = True
                        break

            if has_changed_files:
                # Make delta show ALL applicable changes in error message.
                tree_delta = self.changes_from(self.basis_tree(),
                    require_versioned=False, want_unversioned=True,
                    specific_files=files)
                for unknown_file in unknown_nested_files:
                    if unknown_file not in tree_delta.unversioned:
                        tree_delta.unversioned.extend((unknown_file,))
                raise errors.BzrRemoveChangedFilesError(tree_delta)

        # Build inv_delta and delete files where applicable,
        # do this before any modifications to inventory.
        for f in files:
            fid = self.path2id(f)
            message = None
            if not fid:
                message = "%s is not versioned." % (f,)
            else:
                if verbose:
                    # having removed it, it must be either ignored or unknown
                    if self.is_ignored(f):
                        new_status = 'I'
                    else:
                        new_status = '?'
                    textui.show_status(new_status, self.kind(fid), f,
                                       to_file=to_file)
                # Unversion file
                inv_delta.append((f, None, fid, None))
                message = "removed %s" % (f,)

            if not keep_files:
                abs_path = self.abspath(f)
                if osutils.lexists(abs_path):
                    if (osutils.isdir(abs_path) and
                        len(os.listdir(abs_path)) > 0):
                        if force:
                            osutils.rmtree(abs_path)
                            message = "deleted %s" % (f,)
                        else:
                            message = "%s is not an empty directory "\
                                "and won't be deleted." % (f,)
                    else:
                        osutils.delete_any(abs_path)
                        message = "deleted %s" % (f,)
                elif message is not None:
                    # Only care if we haven't done anything yet.
                    message = "%s does not exist." % (f,)

            # Print only one message (if any) per file.
            if message is not None:
                note(message)
        self.apply_inventory_delta(inv_delta)
    @needs_tree_write_lock
    def revert(self, filenames=None, old_tree=None, backups=True,
               pb=DummyProgress(), report_changes=False):
        from bzrlib.conflicts import resolve
        if filenames == []:
            filenames = None
            symbol_versioning.warn('Using [] to revert all files is deprecated'
                ' as of bzr 0.91. Please use None (the default) instead.',
                DeprecationWarning, stacklevel=2)
        if old_tree is None:
            basis_tree = self.basis_tree()
            basis_tree.lock_read()
            old_tree = basis_tree
        else:
            basis_tree = None
        try:
            conflicts = transform.revert(self, old_tree, filenames, backups, pb,
                                         report_changes)
            if filenames is None and len(self.get_parent_ids()) > 1:
                parent_trees = []
                last_revision = self.last_revision()
                if last_revision != NULL_REVISION:
                    if basis_tree is None:
                        basis_tree = self.basis_tree()
                        basis_tree.lock_read()
                    parent_trees.append((last_revision, basis_tree))
                self.set_parent_trees(parent_trees)
                resolve(self)
            else:
                resolve(self, filenames, ignore_misses=True, recursive=True)
        finally:
            if basis_tree is not None:
                basis_tree.unlock()
        return conflicts
    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree can supply revision_trees for the basis revision only
        because there is only one cached inventory in the bzr directory.
        """
        if revision_id == self.last_revision():
            try:
                xml = self.read_basis_inventory()
            except errors.NoSuchFile:
                pass
            else:
                try:
                    inv = xml7.serializer_v7.read_inventory_from_string(xml)
                    # don't use the repository revision_tree api because we want
                    # to supply the inventory.
                    if inv.revision_id == revision_id:
                        return revisiontree.RevisionTree(self.branch.repository,
                            inv, revision_id)
                except errors.BadInventoryFormat:
                    pass
        # raise if there was no inventory, or if we read the wrong inventory.
        raise errors.NoSuchRevisionInTree(self, revision_id)
1255
# XXX: This method should be deprecated in favour of taking in a proper
2020
1256
# new Inventory object.
2021
@needs_tree_write_lock
2022
1258
def set_inventory(self, new_inventory_list):
2023
1259
from bzrlib.inventory import (Inventory,
2024
1260
InventoryDirectory,
        between multiple working trees, i.e. via shared storage, then we
        would probably want to lock both the local tree, and the branch.
        """
        raise NotImplementedError(self.unlock)
    def update(self, change_reporter=None, possible_transports=None):
        """Update a working tree along its branch.

        This will update the branch if it's bound too, which means we have
        multiple trees involved:

        - The new basis tree of the master.
        - The old basis tree of the branch.
        - The old basis tree of the working tree.
        - The current working tree state.

        Pathologically, all three may be different, and non-ancestors of each
        other.  Conceptually we want to:

        - Preserve the wt.basis->wt.state changes
        - Transform the wt.basis to the new master basis.
        - Apply a merge of the old branch basis to get any 'local' changes from
          it into the tree.
        - Restore the wt.basis->wt.state changes.

        There isn't a single operation at the moment to do that, so we:
        - Merge current state -> basis tree of the master w.r.t. the old tree
          basis.
        - Do a 'normal' merge of the old branch basis if it is relevant.
        """
        if self.branch.get_bound_location() is not None:
            self.lock_write()
            update_branch = True
        else:
            self.lock_tree_write()
            update_branch = False
        try:
            if update_branch:
                old_tip = self.branch.update(possible_transports)
            else:
                old_tip = None
            return self._update_tree(old_tip, change_reporter)
        finally:
            self.unlock()
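    # Illustrative sketch (not part of the original source): updating a bound
    # working tree ('wt' is a hypothetical name) and reporting whether the
    # merge of local changes produced conflicts.
    #
    #   conflicts = wt.update()
    #   if conflicts:
    #       print "update produced %d conflicts" % conflicts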
    @needs_tree_write_lock
    def _update_tree(self, old_tip=None, change_reporter=None):
        """Update a tree to the master branch.

        :param old_tip: if supplied, the previous tip revision the branch,
            before it was changed to the master branch's tip.
        """
        # here if old_tip is not None, it is the old tip of the branch before
        # it was updated from the master branch. This should become a pending
        # merge in the working tree to preserve the user existing work. we
        # can't set that until we update the working trees last revision to be
        # one from the new branch, because it will just get absorbed by the
        # parent de-duplication logic.
        #
        # We MUST save it even if an error occurs, because otherwise the users
        # local work is unreferenced and will appear to have been lost.
        #
        result = 0
        try:
            last_rev = self.get_parent_ids()[0]
        except IndexError:
            last_rev = _mod_revision.NULL_REVISION
        if last_rev != _mod_revision.ensure_null(self.branch.last_revision()):
            # merge tree state up to new branch tip.
            basis = self.basis_tree()
            basis.lock_read()
            try:
                to_tree = self.branch.basis_tree()
                if basis.inventory.root is None:
                    self.set_root_id(to_tree.get_root_id())
                    self.flush()
                result += merge.merge_inner(
                                      self.branch,
                                      to_tree,
                                      basis,
                                      this_tree=self,
                                      change_reporter=change_reporter)
            finally:
                basis.unlock()
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to to set the basis:
            parent_trees = [(self.branch.last_revision(), to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in ram already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if (old_tip is not None and not _mod_revision.is_null(old_tip)):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
        else:
            # the working tree had the same last-revision as the master
            # branch did. We may still have pivot local work from the local
            # branch into old_tip:
            if (old_tip is not None and not _mod_revision.is_null(old_tip)):
                self.add_parent_tree_id(old_tip)
        if (old_tip is not None and not _mod_revision.is_null(old_tip)
            and old_tip != last_rev):
            # our last revision was not the prior branch last revision
            # and we have converted that last revision to a pending merge.
            # base is somewhere between the branch tip now
            # and the now pending merge

            # Since we just modified the working tree and inventory, flush out
            # the current state, before we modify it again.
            # TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
            #       requires it only because TreeTransform directly munges the
            #       inventory and calls tree._write_inventory(). Ultimately we
            #       should be able to remove this extra flush.
            self.flush()
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                old_tip)
            base_tree = self.branch.repository.revision_tree(base_rev_id)
            other_tree = self.branch.repository.revision_tree(old_tip)
            result += merge.merge_inner(
                                  self.branch,
                                  other_tree,
                                  base_tree,
                                  this_tree=self,
                                  change_reporter=change_reporter)
        return result
def _write_hashcache_if_dirty(self):
2221
"""Write out the hashcache if it is dirty."""
2222
if self._hashcache.needs_write:
2224
self._hashcache.write()
2226
if e.errno not in (errno.EPERM, errno.EACCES):
2228
# TODO: jam 20061219 Should this be a warning? A single line
2229
# warning might be sufficient to let the user know what
2231
mutter('Could not write hashcache for %s\nError: %s',
2232
self._hashcache.cache_file_name(), e)

    @needs_tree_write_lock
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        self._set_inventory(inv, dirty=True)
        self.flush()

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    @needs_read_lock
    def conflicts(self):
        conflicts = _mod_conflicts.ConflictList()
        for conflicted in self._iter_conflicts():
            text = True
            try:
                if file_kind(self.abspath(conflicted)) != "file":
                    text = False
            except errors.NoSuchFile:
                text = False
            if text is True:
                for suffix in ('.THIS', '.OTHER'):
                    try:
                        kind = file_kind(self.abspath(conflicted+suffix))
                        if kind != "file":
                            text = False
                    except errors.NoSuchFile:
                        text = False
                    if text == False:
                        break
            ctype = {True: 'text conflict', False: 'contents conflict'}[text]
            conflicts.append(_mod_conflicts.Conflict.factory(ctype,
                             path=conflicted,
                             file_id=self.path2id(conflicted)))
        return conflicts
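
    # Minimal usage sketch (hypothetical 'wt' working tree object, not from
    # the original source): classify the current conflicts of a locked tree.
    #
    #   wt.lock_read()
    #   try:
    #       for conflict in wt.conflicts():
    #           # 'text conflict' paths still contain <<<<<<< markers;
    #           # 'contents conflict' covers non-file or missing paths.
    #           print conflict.typestring, conflict.path
    #   finally:
    #       wt.unlock()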

    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, (lstat), file1_id,
                   file1_kind), ... ])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
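        # Example of consuming this generator (hypothetical names, not part
        # of the original source).  Each dirblock row is
        # (path, name, disk_kind, lstat, file_id, versioned_kind): a None
        # file_id marks an unversioned path, while a versioned path that is
        # gone from disk shows up with disk_kind 'unknown'.
        #
        #   for (dir_path, dir_file_id), dirblock in tree.walkdirs():
        #       for path, name, kind, lstat, file_id, v_kind in dirblock:
        #           if file_id is None:
        #               print 'unknown: %s' % path
        #           elif kind == 'unknown':
        #               print 'missing: %s' % path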
        disk_top = self.abspath(prefix)
        if disk_top.endswith('/'):
            disk_top = disk_top[:-1]
        top_strip_len = len(disk_top) + 1
        inventory_iterator = self._walkdirs(prefix)
        disk_iterator = osutils.walkdirs(disk_top, prefix)
        try:
            current_disk = disk_iterator.next()
            disk_finished = False
        except OSError, e:
            if not (e.errno == errno.ENOENT or
                (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
                raise
            current_disk = None
            disk_finished = True
        try:
            current_inv = inventory_iterator.next()
            inv_finished = False
        except StopIteration:
            current_inv = None
            inv_finished = True
        while not inv_finished or not disk_finished:
            if current_disk:
                ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
                    cur_disk_dir_content) = current_disk
            else:
                ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
                    cur_disk_dir_content) = ((None, None), None)
            if not disk_finished:
                # strip out .bzr dirs
                if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
                    len(cur_disk_dir_content) > 0):
                    # osutils.walkdirs can be made nicer -
                    # yield the path-from-prefix rather than the pathjoined
                    # value.
                    bzrdir_loc = bisect_left(cur_disk_dir_content,
                        ('.bzr', '.bzr'))
                    if cur_disk_dir_content[bzrdir_loc][0] == '.bzr':
                        # we dont yield the contents of, or, .bzr itself.
                        del cur_disk_dir_content[bzrdir_loc]
            if inv_finished:
                # everything is unknown
                direction = 1
            elif disk_finished:
                # everything is missing
                direction = -1
            else:
                direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
            if direction > 0:
                # disk is before inventory - unknown
                dirblock = [(relpath, basename, kind, stat, None, None) for
                    relpath, basename, kind, stat, top_path in
                    cur_disk_dir_content]
                yield (cur_disk_dir_relpath, None), dirblock
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
            elif direction < 0:
                # inventory is before disk - missing.
                dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
                    for relpath, basename, dkind, stat, fileid, kind in
                    current_inv[1]]
                yield (current_inv[0][0], current_inv[0][1]), dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
            else:
                # versioned present directory
                # merge the inventory and disk data together
                dirblock = []
                for relpath, subiterator in itertools.groupby(sorted(
                    current_inv[1] + cur_disk_dir_content,
                    key=operator.itemgetter(0)), operator.itemgetter(1)):
                    path_elements = list(subiterator)
                    if len(path_elements) == 2:
                        inv_row, disk_row = path_elements
                        # versioned, present file
                        dirblock.append((inv_row[0],
                            inv_row[1], disk_row[2],
                            disk_row[3], inv_row[4],
                            inv_row[5]))
                    elif len(path_elements[0]) == 5:
                        # unversioned file
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], path_elements[0][2],
                            path_elements[0][3], None, None))
                    elif len(path_elements[0]) == 6:
                        # versioned, absent file.
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], 'unknown', None,
                            path_elements[0][4], path_elements[0][5]))
                    else:
                        raise NotImplementedError('unreachable code')
                yield current_inv[0], dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True

    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :prefix: is used as the directory to start with.
        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, None, file1_id,
                   file1_kind), ... ])
        """
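        # Unlike walkdirs(), this inventory-only walk never stats the disk:
        # the stat field of every row is None and kinds come straight from
        # the inventory.  Hypothetical sketch (names are examples only):
        #
        #   for (dir_path, dir_file_id), dirblock in tree._walkdirs('src'):
        #       for path, name, kind, stat, file_id, v_kind in dirblock:
        #           print path, v_kind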
        _directory = 'directory'
        # get the root in the inventory
        inv = self.inventory
        top_id = inv.path2id(prefix)
        if top_id is None:
            pending = []
        else:
            pending = [(prefix, '', _directory, None, top_id, None)]
        while pending:
            dirblock = []
            currentdir = pending.pop()
            # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
            top_id = currentdir[4]
            if currentdir[0]:
                relroot = currentdir[0] + '/'
            else:
                relroot = ""
            # FIXME: stash the node in pending
            entry = inv[top_id]
            if entry.kind == 'directory':
                for name, child in entry.sorted_children():
                    dirblock.append((relroot + name, name, child.kind, None,
                        child.file_id, child.kind
                        ))
            yield (currentdir[0], entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append(dir)

    @needs_tree_write_lock
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts. The corresponding .THIS .BASE and
        .OTHER files are deleted, as per 'resolve'.

        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
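
    # Hedged usage sketch (hypothetical 'wt' working tree, not from the
    # original source): resolve whatever no longer carries conflict markers.
    #
    #   wt.lock_tree_write()
    #   try:
    #       un_resolved, resolved = wt.auto_resolve()
    #       print '%d resolved, %d still conflicted' % (len(resolved),
    #                                                   len(un_resolved))
    #   finally:
    #       wt.unlock()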

    def _check(self):
        tree_basis = self.basis_tree()
        tree_basis.lock_read()
        try:
            repo_basis = self.branch.repository.revision_tree(
                self.last_revision())
            if len(list(repo_basis.iter_changes(tree_basis))) > 0:
                raise errors.BzrCheckError(
                    "Mismatched basis inventory content.")
        finally:
            tree_basis.unlock()

    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite, to give it a chance to detect
        corruption after actions have occurred. The default implementation is
        a no-op.

        :return: None. An exception should be raised if there is an error.
        """

    def _get_rules_searcher(self, default_searcher):
        """See Tree._get_rules_searcher."""
        if self._rules_searcher is None:
            self._rules_searcher = super(WorkingTree,
                self)._get_rules_searcher(default_searcher)
        return self._rules_searcher


class WorkingTree2(WorkingTree):
    """This is the Format 2 working tree.

    This was the first weave based working tree.
     - uses os locks for locking.
     - uses the branch last-revision.
    """

    def __init__(self, *args, **kwargs):
        super(WorkingTree2, self).__init__(*args, **kwargs)
        # WorkingTree2 has more of a constraint that self._inventory must
        # exist. Because this is an older format, we don't mind the overhead
        # caused by the extra computation here.
        #
        # Newer WorkingTree formats only set self._inventory when it is
        # actually needed.
        if self._inventory is None:
            self.read_working_inventory()

    def lock_tree_write(self):
        """See WorkingTree.lock_tree_write().

        In Format2 WorkingTrees we have a single lock for the branch and tree
        so lock_tree_write() degrades to lock_write().
        """
        self.branch.lock_write()
        try:
            return self._control_files.lock_write()
        except:
            self.branch.unlock()
            raise

    def unlock(self):
        # do non-implementation specific cleanup
        # we share control files:
        if self._control_files._lock_count == 3:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()
        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()


class WorkingTree3(WorkingTree):
    """This is the Format 3 working tree.