    __contains__ = has_id

    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
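    # A minimal standalone sketch of the stat-based executable test used
    # above, assuming a POSIX filesystem; the helper name `is_exec` is
    # illustrative, not bzrlib API:
    #
    #   import os, stat
    #
    #   def is_exec(path):
    #       mode = os.lstat(path).st_mode
    #       # a regular file with the owner execute bit set
    #       return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)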
558
def add(self, files, ids=None):
559
"""Make files versioned.
561
Note that the command line normally calls smart_add instead,
562
which can automatically recurse.
564
This adds the files to the inventory, so that they will be
565
recorded by the next commit.
568
List of paths to add, relative to the base of the tree.
571
If set, use these instead of automatically generated ids.
572
Must be the same length as the list of files, but may
573
contain None for ids that are to be autogenerated.
575
TODO: Perhaps have an option to add the ids even if the files do
578
TODO: Perhaps callback with the ids and paths as they're added.
618
_is_executable_from_path_and_stat = \
619
_is_executable_from_path_and_stat_from_stat
621
@needs_tree_write_lock
622
def _add(self, files, ids, kinds):
623
"""See MutableTree._add."""
580
624
# TODO: Re-adding a file that is removed in the working copy
581
625
# should probably put it back with the previous ID.
582
if isinstance(files, basestring):
583
assert(ids is None or isinstance(ids, basestring))
589
ids = [None] * len(files)
591
assert(len(ids) == len(files))
593
inv = self.read_working_inventory()
594
for f,file_id in zip(files, ids):
595
if self.is_control_filename(f):
596
raise BzrError("cannot add control file %s" % quotefn(f))
601
raise BzrError("cannot add top-level %r" % f)
603
fullpath = normpath(self.abspath(f))
606
kind = file_kind(fullpath)
608
if e.errno == errno.ENOENT:
609
raise NoSuchFile(fullpath)
610
# maybe something better?
611
raise BzrError('cannot add: not a regular file, symlink or directory: %s' % quotefn(f))
613
if not InventoryEntry.versionable_kind(kind):
614
raise BzrError('cannot add: not a versionable file ('
615
'i.e. regular file, symlink or directory): %s' % quotefn(f))
626
# the read and write working inventory should not occur in this
627
# function - they should be part of lock_write and unlock.
629
for f, file_id, kind in zip(files, ids, kinds):
617
630
if file_id is None:
618
631
inv.add_path(f, kind=kind)
620
633
inv.add_path(f, kind=kind, file_id=file_id)
634
self._inventory_is_modified = True
622
self._write_inventory(inv)
    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    @needs_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it's not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available, using
        this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
        self.set_parent_ids(parents, allow_leftmost_as_ghost=True)

    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.
        """
        try:
            merges_file = self._control_files.get_utf8('pending-merges')
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            return []
        p = []
        for l in merges_file.readlines():
            p.append(l.rstrip('\n'))
        return p

    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file.
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            size = stat_result.st_size
            # try for a stat cache lookup
            executable = self._is_executable_from_path_and_stat(path, stat_result)
            return (kind, size, executable, self._sha_from_stat(
                path, stat_result))
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # tree reference?
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            return ('symlink', None, None, os.readlink(abspath))
        else:
            return (kind, None, None, None)
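    # Hedged sketch of the (kind, size, executable, extra) summary shape that
    # path_content_summary returns, built from a single lstat; `summarize` is
    # an illustrative stand-alone helper, not part of WorkingTree:
    #
    #   import errno, os, stat
    #
    #   def summarize(abspath):
    #       try:
    #           st = os.lstat(abspath)
    #       except OSError, e:
    #           if e.errno == errno.ENOENT:
    #               return ('missing', None, None, None)
    #           raise
    #       mode = st.st_mode
    #       if stat.S_ISREG(mode):
    #           return ('file', st.st_size, bool(mode & stat.S_IEXEC), None)
    #       elif stat.S_ISDIR(mode):
    #           return ('directory', None, None, None)
    #       elif stat.S_ISLNK(mode):
    #           return ('symlink', None, None, os.readlink(abspath))
    #       return ('unknown', None, None, None)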
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self._control_files._file_mode)

    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        which are not ancestors of another requested revision.
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids
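    # Toy illustration of the head filtering above, using a hand-rolled
    # ancestry map in place of branch.repository.get_graph(); all names and
    # revision ids here are made up:
    #
    #   ancestry = {'rev-a': [], 'rev-b': ['rev-a'], 'rev-c': ['rev-b']}
    #
    #   def ancestors(rev):
    #       seen, todo = set(), list(ancestry[rev])
    #       while todo:
    #           r = todo.pop()
    #           if r not in seen:
    #               seen.add(r)
    #               todo.extend(ancestry[r])
    #       return seen
    #
    #   def filter_heads(revision_ids):
    #       heads = [r for r in revision_ids if not any(
    #           r in ancestors(other) for other in revision_ids if other != r)]
    #       # always keep the leftmost parent, then only merged 'heads'
    #       return revision_ids[:1] + [r for r in revision_ids[1:] if r in heads]
    #
    #   # filter_heads(['rev-a', 'rev-b', 'rev-c']) -> ['rev-a', 'rev-c']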
    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)

    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                    leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)

    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
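    # Hedged usage sketch: set_merge_modified() stores one rio stanza per
    # file, and merge_modified() later reads them back as a {file_id: sha1}
    # map, keeping only entries whose working text still has that sha1. The
    # id and hash below are made up:
    #
    #   wt.set_merge_modified(
    #       {'foo-id': 'f572d396fae9206628714fb2ce00f72e94f2258f'})
    #   wt.merge_modified()   # -> {'foo-id': 'f572d...'} while unchanged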
    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._transport.put_file(filename, my_file,
            mode=self._control_files._file_mode)
853
@needs_write_lock # because merge pulls data into the branch.
854
def merge_from_branch(self, branch, to_revision=None, from_revision=None,
856
"""Merge from a branch into this working tree.
858
:param branch: The branch to merge from.
859
:param to_revision: If non-None, the merge will merge to to_revision,
860
but not beyond it. to_revision does not need to be in the history
861
of the branch when it is supplied. If None, to_revision defaults to
862
branch.last_revision().
864
from bzrlib.merge import Merger, Merge3Merger
865
pb = bzrlib.ui.ui_factory.nested_progress_bar()
867
merger = Merger(self.branch, this_tree=self, pb=pb)
868
merger.pp = ProgressPhase("Merge phase", 5, pb)
869
merger.pp.next_phase()
870
# check that there are no local alterations
872
merger.check_basis(check_clean=True, require_commits=False)
873
if to_revision is None:
874
to_revision = _mod_revision.ensure_null(branch.last_revision())
875
merger.other_rev_id = to_revision
876
if _mod_revision.is_null(merger.other_rev_id):
877
raise errors.NoCommits(branch)
878
self.branch.fetch(branch, last_revision=merger.other_rev_id)
879
merger.other_basis = merger.other_rev_id
880
merger.other_tree = self.branch.repository.revision_tree(
882
merger.other_branch = branch
883
merger.pp.next_phase()
884
if from_revision is None:
887
merger.set_base_revision(from_revision, branch)
888
if merger.base_rev_id == merger.other_rev_id:
889
raise errors.PointlessMerge
890
merger.backup_files = False
891
if merge_type is None:
892
merger.merge_type = Merge3Merger
894
merger.merge_type = merge_type
895
merger.set_interesting_files(None)
896
merger.show_base = False
897
merger.reprocess = False
898
conflicts = merger.do_merge()
673
905
def merge_modified(self):
906
"""Return a dictionary of files modified by a merge.
908
The list is initialized by WorkingTree.set_merge_modified, which is
909
typically called after we make some automatic updates to the tree
912
This returns a map of file_id->sha1, containing only files which are
913
still in the working inventory and have that text hash.
675
hashfile = self._control_files.get('merge-hashes')
916
hashfile = self._transport.get('merge-hashes')
917
except errors.NoSuchFile:
680
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
681
raise MergeModifiedFormatError()
682
except StopIteration:
683
raise MergeModifiedFormatError()
684
for s in RioReader(hashfile):
685
file_id = s.get("file_id")
686
if file_id not in self.inventory:
689
if hash == self.get_file_sha1(file_id):
690
merge_hashes[file_id] = hash
922
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
923
raise errors.MergeModifiedFormatError()
924
except StopIteration:
925
raise errors.MergeModifiedFormatError()
926
for s in RioReader(hashfile):
927
# RioReader reads in Unicode, so convert file_ids back to utf8
928
file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
929
if file_id not in self.inventory:
931
text_hash = s.get("hash")
932
if text_hash == self.get_file_sha1(file_id):
933
merge_hashes[file_id] = text_hash
939
def mkdir(self, path, file_id=None):
940
"""See MutableTree.mkdir()."""
942
file_id = generate_ids.gen_file_id(os.path.basename(path))
943
os.mkdir(self.abspath(path))
944
self.add(path, file_id, 'directory')
693
947
def get_symlink_target(self, file_id):
694
948
return os.readlink(self.id2abspath(file_id))
696
def file_class(self, filename):
697
if self.path2id(filename):
699
elif self.is_ignored(filename):
704
def list_files(self):
951
def subsume(self, other_tree):
952
def add_children(inventory, entry):
953
for child_entry in entry.children.values():
954
inventory._byid[child_entry.file_id] = child_entry
955
if child_entry.kind == 'directory':
956
add_children(inventory, child_entry)
957
if other_tree.get_root_id() == self.get_root_id():
958
raise errors.BadSubsumeSource(self, other_tree,
959
'Trees have the same root')
961
other_tree_path = self.relpath(other_tree.basedir)
962
except errors.PathNotChild:
963
raise errors.BadSubsumeSource(self, other_tree,
964
'Tree is not contained by the other')
965
new_root_parent = self.path2id(osutils.dirname(other_tree_path))
966
if new_root_parent is None:
967
raise errors.BadSubsumeSource(self, other_tree,
968
'Parent directory is not versioned.')
969
# We need to ensure that the result of a fetch will have a
970
# versionedfile for the other_tree root, and only fetching into
971
# RepositoryKnit2 guarantees that.
972
if not self.branch.repository.supports_rich_root():
973
raise errors.SubsumeTargetNeedsUpgrade(other_tree)
974
other_tree.lock_tree_write()
976
new_parents = other_tree.get_parent_ids()
977
other_root = other_tree.inventory.root
978
other_root.parent_id = new_root_parent
979
other_root.name = osutils.basename(other_tree_path)
980
self.inventory.add(other_root)
981
add_children(self.inventory, other_root)
982
self._write_inventory(self.inventory)
983
# normally we don't want to fetch whole repositories, but i think
984
# here we really do want to consolidate the whole thing.
985
for parent_id in other_tree.get_parent_ids():
986
self.branch.fetch(other_tree.branch, parent_id)
987
self.add_parent_tree_id(parent_id)
990
other_tree.bzrdir.retire_bzrdir()
992
def _setup_directory_is_tree_reference(self):
993
if self._branch.repository._format.supports_tree_reference:
994
self._directory_is_tree_reference = \
995
self._directory_may_be_tree_reference
997
self._directory_is_tree_reference = \
998
self._directory_is_never_tree_reference
1000
def _directory_is_never_tree_reference(self, relpath):
1003
def _directory_may_be_tree_reference(self, relpath):
1004
# as a special case, if a directory contains control files then
1005
# it's a tree reference, except that the root of the tree is not
1006
return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
1007
# TODO: We could ask all the control formats whether they
1008
# recognize this directory, but at the moment there's no cheap api
1009
# to do that. Since we probably can only nest bzr checkouts and
1010
# they always use this name it's ok for now. -- mbp 20060306
1012
# FIXME: There is an unhandled case here of a subdirectory
1013
# containing .bzr but not a branch; that will probably blow up
1014
# when you try to commit it. It might happen if there is a
1015
# checkout in a subdirectory. This can be avoided by not adding
1018
@needs_tree_write_lock
1019
def extract(self, file_id, format=None):
1020
"""Extract a subtree from this tree.
1022
A new branch will be created, relative to the path for this tree.
1026
segments = osutils.splitpath(path)
1027
transport = self.branch.bzrdir.root_transport
1028
for name in segments:
1029
transport = transport.clone(name)
1030
transport.ensure_base()
1033
sub_path = self.id2path(file_id)
1034
branch_transport = mkdirs(sub_path)
1036
format = self.bzrdir.cloning_metadir()
1037
branch_transport.ensure_base()
1038
branch_bzrdir = format.initialize_on_transport(branch_transport)
1040
repo = branch_bzrdir.find_repository()
1041
except errors.NoRepositoryPresent:
1042
repo = branch_bzrdir.create_repository()
1043
if not repo.supports_rich_root():
1044
raise errors.RootNotRich()
1045
new_branch = branch_bzrdir.create_branch()
1046
new_branch.pull(self.branch)
1047
for parent_id in self.get_parent_ids():
1048
new_branch.fetch(self.branch, parent_id)
1049
tree_transport = self.bzrdir.root_transport.clone(sub_path)
1050
if tree_transport.base != branch_transport.base:
1051
tree_bzrdir = format.initialize_on_transport(tree_transport)
1052
branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
1054
tree_bzrdir = branch_bzrdir
1055
wt = tree_bzrdir.create_workingtree(NULL_REVISION)
1056
wt.set_parent_ids(self.get_parent_ids())
1057
my_inv = self.inventory
1058
child_inv = Inventory(root_id=None)
1059
new_root = my_inv[file_id]
1060
my_inv.remove_recursive_id(file_id)
1061
new_root.parent_id = None
1062
child_inv.add(new_root)
1063
self._write_inventory(my_inv)
1064
wt._write_inventory(child_inv)
1067
def _serialize(self, inventory, out_file):
1068
xml5.serializer_v5.write_inventory(self._inventory, out_file,
1071
def _deserialize(selt, in_file):
1072
return xml5.serializer_v5.read_inventory(in_file)
1075
"""Write the in memory inventory to disk."""
1076
# TODO: Maybe this should only write on dirty ?
1077
if self._control_files._lock_mode != 'w':
1078
raise errors.NotWriteLocked(self)
1080
self._serialize(self._inventory, sio)
1082
self._transport.put_file('inventory', sio,
1083
mode=self._control_files._file_mode)
1084
self._inventory_is_modified = False
1086
def _kind(self, relpath):
1087
return osutils.file_kind(self.abspath(relpath))
1089
def list_files(self, include_root=False):
705
1090
"""Recursively list all files as (path, class, kind, id, entry).
707
1092
Lists, but does not descend into unversioned directories.
786
1190
new_children.sort()
787
1191
new_children = collections.deque(new_children)
788
1192
stack.append((f_ie.file_id, fp, fap, new_children))
789
# Break out of inner loop, so that we start outer loop with child
1193
# Break out of inner loop,
1194
# so that we start outer loop with child
792
1197
# if we finished all children, pop it off the stack
797
def move(self, from_paths, to_name):
1200
@needs_tree_write_lock
1201
def move(self, from_paths, to_dir=None, after=False, **kwargs):
798
1202
"""Rename files.
800
to_name must exist in the inventory.
1204
to_dir must exist in the inventory.
802
If to_name exists and is a directory, the files are moved into
1206
If to_dir exists and is a directory, the files are moved into
803
1207
it, keeping their old names.
805
Note that to_name is only the last component of the new name;
1209
Note that to_dir is only the last component of the new name;
806
1210
this doesn't change the directory.
1212
For each entry in from_paths the move mode will be determined
1215
The first mode moves the file in the filesystem and updates the
1216
inventory. The second mode only updates the inventory without
1217
touching the file on the filesystem. This is the new mode introduced
1220
move uses the second mode if 'after == True' and the target is not
1221
versioned but present in the working tree.
1223
move uses the second mode if 'after == False' and the source is
1224
versioned but no longer in the working tree, and the target is not
1225
versioned but present in the working tree.
1227
move uses the first mode if 'after == False' and the source is
1228
versioned and present in the working tree, and the target is not
1229
versioned and not present in the working tree.
1231
Everything else results in an error.
808
1233
This returns a list of (from_path, to_path) pairs for each
809
1234
entry that is moved.
812
## TODO: Option to move IDs only
813
assert not isinstance(from_paths, basestring)
1239
# check for deprecated use of signature
1241
to_dir = kwargs.get('to_name', None)
1243
raise TypeError('You must supply a target directory')
1245
symbol_versioning.warn('The parameter to_name was deprecated'
1246
' in version 0.13. Use to_dir instead',
1249
# check destination directory
1250
if isinstance(from_paths, basestring):
814
1252
inv = self.inventory
815
to_abs = self.abspath(to_name)
1253
to_abs = self.abspath(to_dir)
816
1254
if not isdir(to_abs):
817
raise BzrError("destination %r is not a directory" % to_abs)
818
if not self.has_filename(to_name):
819
raise BzrError("destination %r not in working directory" % to_abs)
820
to_dir_id = inv.path2id(to_name)
821
if to_dir_id == None and to_name != '':
822
raise BzrError("destination %r is not a versioned directory" % to_name)
1255
raise errors.BzrMoveFailedError('',to_dir,
1256
errors.NotADirectory(to_abs))
1257
if not self.has_filename(to_dir):
1258
raise errors.BzrMoveFailedError('',to_dir,
1259
errors.NotInWorkingDirectory(to_dir))
1260
to_dir_id = inv.path2id(to_dir)
1261
if to_dir_id is None:
1262
raise errors.BzrMoveFailedError('',to_dir,
1263
errors.NotVersionedError(path=str(to_dir)))
823
1265
to_dir_ie = inv[to_dir_id]
824
if to_dir_ie.kind not in ('directory', 'root_directory'):
825
raise BzrError("destination %r is not a directory" % to_abs)
827
to_idpath = inv.get_idpath(to_dir_id)
830
if not self.has_filename(f):
831
raise BzrError("%r does not exist in working tree" % f)
832
f_id = inv.path2id(f)
834
raise BzrError("%r is not versioned" % f)
835
name_tail = splitpath(f)[-1]
836
dest_path = pathjoin(to_name, name_tail)
837
if self.has_filename(dest_path):
838
raise BzrError("destination %r already exists" % dest_path)
839
if f_id in to_idpath:
840
raise BzrError("can't move %r to a subdirectory of itself" % f)
842
# OK, so there's a race here, it's possible that someone will
843
# create a file in this interval and then the rename might be
844
# left half-done. But we should have caught most problems.
845
orig_inv = deepcopy(self.inventory)
1266
if to_dir_ie.kind != 'directory':
1267
raise errors.BzrMoveFailedError('',to_dir,
1268
errors.NotADirectory(to_abs))
1270
# create rename entries and tuples
1271
for from_rel in from_paths:
1272
from_tail = splitpath(from_rel)[-1]
1273
from_id = inv.path2id(from_rel)
1275
raise errors.BzrMoveFailedError(from_rel,to_dir,
1276
errors.NotVersionedError(path=str(from_rel)))
1278
from_entry = inv[from_id]
1279
from_parent_id = from_entry.parent_id
1280
to_rel = pathjoin(to_dir, from_tail)
1281
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1283
from_tail=from_tail,
1284
from_parent_id=from_parent_id,
1285
to_rel=to_rel, to_tail=from_tail,
1286
to_parent_id=to_dir_id)
1287
rename_entries.append(rename_entry)
1288
rename_tuples.append((from_rel, to_rel))
1290
# determine which move mode to use. checks also for movability
1291
rename_entries = self._determine_mv_mode(rename_entries, after)
1293
original_modified = self._inventory_is_modified
848
name_tail = splitpath(f)[-1]
849
dest_path = pathjoin(to_name, name_tail)
850
result.append((f, dest_path))
851
inv.rename(inv.path2id(f), to_dir_id, name_tail)
853
rename(self.abspath(f), self.abspath(dest_path))
855
raise BzrError("failed to rename %r to %r: %s" %
856
(f, dest_path, e[1]),
857
["rename rolled back"])
1296
self._inventory_is_modified = True
1297
self._move(rename_entries)
859
1299
# restore the inventory on error
860
self._set_inventory(orig_inv)
1300
self._inventory_is_modified = original_modified
862
1302
self._write_inventory(inv)
866
def rename_one(self, from_rel, to_rel):
1303
return rename_tuples
    def _determine_mv_mode(self, rename_entries, after=False):
        """Determines for each from-to pair whether both the inventory and the
        working tree, or only the inventory, has to be changed.

        Also does basic plausibility tests.
        """
        inv = self.inventory

        for rename_entry in rename_entries:
            # store to local variables for easier reference
            from_rel = rename_entry.from_rel
            from_id = rename_entry.from_id
            to_rel = rename_entry.to_rel
            to_id = inv.path2id(to_rel)
            only_change_inv = False

            # check the inventory for source and destination
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel, to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            if to_id is not None:
                raise errors.BzrMoveFailedError(from_rel, to_rel,
                    errors.AlreadyVersionedError(path=str(to_rel)))

            # try to determine the mode for rename (only change inv or change
            # inv and file system)
            if after:
                if not self.has_filename(to_rel):
                    raise errors.BzrMoveFailedError(from_id, to_rel,
                        errors.NoSuchFile(path=str(to_rel),
                        extra="New file has not been created yet"))
                only_change_inv = True
            elif not self.has_filename(from_rel) and self.has_filename(to_rel):
                only_change_inv = True
            elif self.has_filename(from_rel) and not self.has_filename(to_rel):
                only_change_inv = False
            elif (not self.case_sensitive
                  and from_rel.lower() == to_rel.lower()
                  and self.has_filename(from_rel)):
                only_change_inv = False
            else:
                # something is wrong, so let's determine what exactly
                if not self.has_filename(from_rel) and \
                   not self.has_filename(to_rel):
                    raise errors.BzrRenameFailedError(from_rel, to_rel,
                        errors.PathsDoNotExist(paths=(str(from_rel),
                        str(to_rel))))
                else:
                    raise errors.RenameFailedFilesExist(from_rel, to_rel)
            rename_entry.only_change_inv = only_change_inv
        return rename_entries
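    # Condensed sketch of the decision implemented above: given whether the
    # source and target currently exist in the working tree and the value of
    # `after`, pick a move mode or fail. `decide_mode` is illustrative only;
    # the versioning checks happen separately, as in the code above.
    #
    #   def decide_mode(source_exists, target_exists, after):
    #       if after:
    #           if not target_exists:
    #               raise ValueError('new file has not been created yet')
    #           return 'inventory only'
    #       if not source_exists and target_exists:
    #           return 'inventory only'
    #       if source_exists and not target_exists:
    #           return 'move file and inventory'
    #       if not source_exists and not target_exists:
    #           raise ValueError('neither source nor target exists')
    #       raise ValueError('both source and target already exist')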
1357
def _move(self, rename_entries):
1358
"""Moves a list of files.
1360
Depending on the value of the flag 'only_change_inv', the
1361
file will be moved on the file system or not.
1363
inv = self.inventory
1366
for entry in rename_entries:
1368
self._move_entry(entry)
1370
self._rollback_move(moved)
1374
def _rollback_move(self, moved):
1375
"""Try to rollback a previous move in case of an filesystem error."""
1376
inv = self.inventory
1379
self._move_entry(_RenameEntry(entry.to_rel, entry.from_id,
1380
entry.to_tail, entry.to_parent_id, entry.from_rel,
1381
entry.from_tail, entry.from_parent_id,
1382
entry.only_change_inv))
1383
except errors.BzrMoveFailedError, e:
1384
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1385
" The working tree is in an inconsistent state."
1386
" Please consider doing a 'bzr revert'."
1387
" Error message is: %s" % e)
1389
def _move_entry(self, entry):
1390
inv = self.inventory
1391
from_rel_abs = self.abspath(entry.from_rel)
1392
to_rel_abs = self.abspath(entry.to_rel)
1393
if from_rel_abs == to_rel_abs:
1394
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1395
"Source and target are identical.")
1397
if not entry.only_change_inv:
1399
osutils.rename(from_rel_abs, to_rel_abs)
1401
raise errors.BzrMoveFailedError(entry.from_rel,
1403
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1405
@needs_tree_write_lock
1406
def rename_one(self, from_rel, to_rel, after=False):
867
1407
"""Rename one file.
869
1409
This can change the directory or the filename or both.
1411
rename_one has several 'modes' to work. First, it can rename a physical
1412
file and change the file_id. That is the normal mode. Second, it can
1413
only change the file_id without touching any physical file. This is
1414
the new mode introduced in version 0.15.
1416
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1417
versioned but present in the working tree.
1419
rename_one uses the second mode if 'after == False' and 'from_rel' is
1420
versioned but no longer in the working tree, and 'to_rel' is not
1421
versioned but present in the working tree.
1423
rename_one uses the first mode if 'after == False' and 'from_rel' is
1424
versioned and present in the working tree, and 'to_rel' is not
1425
versioned and not present in the working tree.
1427
Everything else results in an error.
871
1429
inv = self.inventory
872
if not self.has_filename(from_rel):
873
raise BzrError("can't rename: old working file %r does not exist" % from_rel)
874
if self.has_filename(to_rel):
875
raise BzrError("can't rename: new working file %r already exists" % to_rel)
877
file_id = inv.path2id(from_rel)
879
raise BzrError("can't rename: old name %r is not versioned" % from_rel)
882
from_parent = entry.parent_id
883
from_name = entry.name
885
if inv.path2id(to_rel):
886
raise BzrError("can't rename: new name %r is already versioned" % to_rel)
1432
# create rename entries and tuples
1433
from_tail = splitpath(from_rel)[-1]
1434
from_id = inv.path2id(from_rel)
1436
raise errors.BzrRenameFailedError(from_rel,to_rel,
1437
errors.NotVersionedError(path=str(from_rel)))
1438
from_entry = inv[from_id]
1439
from_parent_id = from_entry.parent_id
888
1440
to_dir, to_tail = os.path.split(to_rel)
889
1441
to_dir_id = inv.path2id(to_dir)
890
if to_dir_id == None and to_dir != '':
891
raise BzrError("can't determine destination directory id for %r" % to_dir)
893
mutter("rename_one:")
894
mutter(" file_id {%s}" % file_id)
895
mutter(" from_rel %r" % from_rel)
896
mutter(" to_rel %r" % to_rel)
897
mutter(" to_dir %r" % to_dir)
898
mutter(" to_dir_id {%s}" % to_dir_id)
900
inv.rename(file_id, to_dir_id, to_tail)
902
from_abs = self.abspath(from_rel)
903
to_abs = self.abspath(to_rel)
905
rename(from_abs, to_abs)
907
inv.rename(file_id, from_parent, from_name)
908
raise BzrError("failed to rename %r to %r: %s"
909
% (from_abs, to_abs, e[1]),
910
["rename rolled back"])
1442
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1444
from_tail=from_tail,
1445
from_parent_id=from_parent_id,
1446
to_rel=to_rel, to_tail=to_tail,
1447
to_parent_id=to_dir_id)
1448
rename_entries.append(rename_entry)
1450
# determine which move mode to use. checks also for movability
1451
rename_entries = self._determine_mv_mode(rename_entries, after)
1453
# check if the target changed directory and if the target directory is
1455
if to_dir_id is None:
1456
raise errors.BzrMoveFailedError(from_rel,to_rel,
1457
errors.NotVersionedError(path=str(to_dir)))
1459
# all checks done. now we can continue with our actual work
1460
mutter('rename_one:\n'
1465
' to_dir_id {%s}\n',
1466
from_id, from_rel, to_rel, to_dir, to_dir_id)
1468
self._move(rename_entries)
911
1469
self._write_inventory(inv)
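    # Hedged usage sketch of the two rename_one modes described above, for a
    # tree `wt` (paths are illustrative):
    #
    #   wt.rename_one('a.txt', 'b.txt')              # moves the file and its
    #                                                 # inventory entry together
    #   os.rename(wt.abspath('c.txt'), wt.abspath('d.txt'))
    #   wt.rename_one('c.txt', 'd.txt', after=True)  # file already moved, so
    #                                                 # only the inventory changes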
1471
class _RenameEntry(object):
1472
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1473
to_rel, to_tail, to_parent_id, only_change_inv=False):
1474
self.from_rel = from_rel
1475
self.from_id = from_id
1476
self.from_tail = from_tail
1477
self.from_parent_id = from_parent_id
1478
self.to_rel = to_rel
1479
self.to_tail = to_tail
1480
self.to_parent_id = to_parent_id
1481
self.only_change_inv = only_change_inv
913
1483
@needs_read_lock
914
1484
def unknowns(self):
915
1485
"""Return all unknown files.
917
1487
These are files in the working directory that are not versioned or
918
1488
control files or ignored.
920
>>> from bzrlib.bzrdir import ScratchDir
921
>>> d = ScratchDir(files=['foo', 'foo~'])
922
>>> b = d.open_branch()
923
>>> tree = d.open_workingtree()
924
>>> map(str, tree.unknowns())
927
>>> list(b.unknowns())
929
>>> tree.remove('foo')
930
>>> list(b.unknowns())
933
for subp in self.extras():
934
if not self.is_ignored(subp):
937
@deprecated_method(zero_eight)
938
def iter_conflicts(self):
939
"""List all files in the tree that have text or content conflicts.
940
DEPRECATED. Use conflicts instead."""
941
return self._iter_conflicts()
1490
# force the extras method to be fully executed before returning, to
1491
# prevent race conditions with the lock
1493
[subp for subp in self.extras() if not self.is_ignored(subp)])
1495
@needs_tree_write_lock
1496
def unversion(self, file_ids):
1497
"""Remove the file ids in file_ids from the current versioned set.
1499
When a file_id is unversioned, all of its children are automatically
1502
:param file_ids: The file ids to stop versioning.
1503
:raises: NoSuchId if any fileid is not currently versioned.
1505
for file_id in file_ids:
1506
if self._inventory.has_id(file_id):
1507
self._inventory.remove_recursive_id(file_id)
1509
raise errors.NoSuchId(self, file_id)
1511
# in the future this should just set a dirty bit to wait for the
1512
# final unlock. However, until all methods of workingtree start
1513
# with the current in -memory inventory rather than triggering
1514
# a read, it is more complex - we need to teach read_inventory
1515
# to know when to read, and when to not read first... and possibly
1516
# to save first when the in memory one may be corrupted.
1517
# so for now, we just only write it if it is indeed dirty.
1519
self._write_inventory(self._inventory)
943
1521
def _iter_conflicts(self):
944
1522
conflicted = set()
945
1523
for info in self.list_files():
1213
1827
# root node id can legitimately look like 'revision_id' but cannot
1214
1828
# contain a '"'.
1215
1829
xml = self.branch.repository.get_inventory_xml(new_revision)
1216
if not 'revision_id="' in xml.split('\n', 1)[0]:
1830
firstline = xml.split('\n', 1)[0]
1831
if (not 'revision_id="' in firstline or
1832
'format="7"' not in firstline):
1217
1833
inv = self.branch.repository.deserialise_inventory(
1218
1834
new_revision, xml)
1219
inv.revision_id = new_revision
1220
xml = bzrlib.xml5.serializer_v5.write_inventory_to_string(inv)
1221
assert isinstance(xml, str), 'serialised xml must be bytestring.'
1222
path = self._basis_inventory_name()
1224
self._control_files.put(path, sio)
1225
except WeaveRevisionNotPresent:
1835
xml = self._create_basis_xml_from_inventory(new_revision, inv)
1836
self._write_basis_inventory(xml)
1837
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1228
1840
def read_basis_inventory(self):
1229
1841
"""Read the cached basis inventory."""
1230
1842
path = self._basis_inventory_name()
1231
return self._control_files.get(path).read()
1843
return self._transport.get_bytes(path)
1233
1845
@needs_read_lock
1234
1846
def read_working_inventory(self):
1235
"""Read the working inventory."""
1847
"""Read the working inventory.
1849
:raises errors.InventoryModified: read_working_inventory will fail
1850
when the current in memory inventory has been modified.
1852
# conceptually this should be an implementation detail of the tree.
1853
# XXX: Deprecate this.
1236
1854
# ElementTree does its own conversion from UTF-8, so open in
1238
result = bzrlib.xml5.serializer_v5.read_inventory(
1239
self._control_files.get('inventory'))
1240
self._set_inventory(result)
1856
if self._inventory_is_modified:
1857
raise errors.InventoryModified(self)
1858
result = self._deserialize(self._transport.get('inventory'))
1859
self._set_inventory(result, dirty=False)
1244
def remove(self, files, verbose=False, to_file=None):
1245
"""Remove nominated files from the working inventory..
1247
This does not remove their text. This does not run on XXX on what? RBC
1249
TODO: Refuse to remove modified files unless --force is given?
1251
TODO: Do something useful with directories.
1253
TODO: Should this remove the text or not? Tough call; not
1254
removing may be useful and the user can just use use rm, and
1255
is the opposite of add. Removing it is consistent with most
1256
other tools. Maybe an option.
1862
@needs_tree_write_lock
1863
def remove(self, files, verbose=False, to_file=None, keep_files=True,
1865
"""Remove nominated files from the working inventory.
1867
:files: File paths relative to the basedir.
1868
:keep_files: If true, the files will also be kept.
1869
:force: Delete files and directories, even if they are changed and
1870
even if the directories are not empty.
1258
## TODO: Normalize names
1259
## TODO: Remove nested loops; better scalability
1260
1872
if isinstance(files, basestring):
1261
1873
files = [files]
1263
inv = self.inventory
1265
# do this before any modifications
1878
unknown_nested_files=set()
1880
def recurse_directory_to_add_files(directory):
1881
# Recurse directory and add all files
1882
# so we can check if they have changed.
1883
for parent_info, file_infos in\
1884
self.walkdirs(directory):
1885
for relpath, basename, kind, lstat, fileid, kind in file_infos:
1886
# Is it versioned or ignored?
1887
if self.path2id(relpath) or self.is_ignored(relpath):
1888
# Add nested content for deletion.
1889
new_files.add(relpath)
1891
# Files which are not versioned and not ignored
1892
# should be treated as unknown.
1893
unknown_nested_files.add((relpath, None, kind))
1895
for filename in files:
1896
# Get file name into canonical form.
1897
abspath = self.abspath(filename)
1898
filename = self.relpath(abspath)
1899
if len(filename) > 0:
1900
new_files.add(filename)
1901
recurse_directory_to_add_files(filename)
1903
files = list(new_files)
1906
return # nothing to do
1908
# Sort needed to first handle directory content before the directory
1909
files.sort(reverse=True)
1911
# Bail out if we are going to delete files we shouldn't
1912
if not keep_files and not force:
1913
has_changed_files = len(unknown_nested_files) > 0
1914
if not has_changed_files:
1915
for (file_id, path, content_change, versioned, parent_id, name,
1916
kind, executable) in self.iter_changes(self.basis_tree(),
1917
include_unchanged=True, require_versioned=False,
1918
want_unversioned=True, specific_files=files):
1919
if versioned == (False, False):
1920
# The record is unknown ...
1921
if not self.is_ignored(path[1]):
1922
# ... but not ignored
1923
has_changed_files = True
1925
elif content_change and (kind[1] is not None):
1926
# Versioned and changed, but not deleted
1927
has_changed_files = True
1930
if has_changed_files:
1931
# Make delta show ALL applicable changes in error message.
1932
tree_delta = self.changes_from(self.basis_tree(),
1933
require_versioned=False, want_unversioned=True,
1934
specific_files=files)
1935
for unknown_file in unknown_nested_files:
1936
if unknown_file not in tree_delta.unversioned:
1937
tree_delta.unversioned.extend((unknown_file,))
1938
raise errors.BzrRemoveChangedFilesError(tree_delta)
1940
# Build inv_delta and delete files where applicable,
1941
# do this before any modifications to inventory.
1266
1942
for f in files:
1267
fid = inv.path2id(f)
1943
fid = self.path2id(f)
1269
# TODO: Perhaps make this just a warning, and continue?
1270
# This tends to happen when
1271
raise NotVersionedError(path=f)
1272
mutter("remove inventory entry %s {%s}", quotefn(f), fid)
1274
# having removed it, it must be either ignored or unknown
1275
if self.is_ignored(f):
1279
show_status(new_status, inv[fid].kind, quotefn(f), to_file=to_file)
1282
self._write_inventory(inv)
1285
def revert(self, filenames, old_tree=None, backups=True,
1286
pb=DummyProgress()):
1287
from transform import revert
1288
from conflicts import resolve
1946
message = "%s is not versioned." % (f,)
1949
# having removed it, it must be either ignored or unknown
1950
if self.is_ignored(f):
1954
textui.show_status(new_status, self.kind(fid), f,
1957
inv_delta.append((f, None, fid, None))
1958
message = "removed %s" % (f,)
1961
abs_path = self.abspath(f)
1962
if osutils.lexists(abs_path):
1963
if (osutils.isdir(abs_path) and
1964
len(os.listdir(abs_path)) > 0):
1966
osutils.rmtree(abs_path)
1968
message = "%s is not an empty directory "\
1969
"and won't be deleted." % (f,)
1971
osutils.delete_any(abs_path)
1972
message = "deleted %s" % (f,)
1973
elif message is not None:
1974
# Only care if we haven't done anything yet.
1975
message = "%s does not exist." % (f,)
1977
# Print only one message (if any) per file.
1978
if message is not None:
1980
self.apply_inventory_delta(inv_delta)
1982
@needs_tree_write_lock
1983
def revert(self, filenames=None, old_tree=None, backups=True,
1984
pb=DummyProgress(), report_changes=False):
1985
from bzrlib.conflicts import resolve
1988
symbol_versioning.warn('Using [] to revert all files is deprecated'
1989
' as of bzr 0.91. Please use None (the default) instead.',
1990
DeprecationWarning, stacklevel=2)
1289
1991
if old_tree is None:
1290
old_tree = self.basis_tree()
1291
conflicts = revert(self, old_tree, filenames, backups, pb)
1292
if not len(filenames):
1293
self.set_pending_merges([])
1992
basis_tree = self.basis_tree()
1993
basis_tree.lock_read()
1994
old_tree = basis_tree
1296
resolve(self, filenames, ignore_misses=True)
1998
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
2000
if filenames is None and len(self.get_parent_ids()) > 1:
2002
last_revision = self.last_revision()
2003
if last_revision != NULL_REVISION:
2004
if basis_tree is None:
2005
basis_tree = self.basis_tree()
2006
basis_tree.lock_read()
2007
parent_trees.append((last_revision, basis_tree))
2008
self.set_parent_trees(parent_trees)
2011
resolve(self, filenames, ignore_misses=True, recursive=True)
2013
if basis_tree is not None:
1297
2015
return conflicts
2017
def revision_tree(self, revision_id):
2018
"""See Tree.revision_tree.
2020
WorkingTree can supply revision_trees for the basis revision only
2021
because there is only one cached inventory in the bzr directory.
2023
if revision_id == self.last_revision():
2025
xml = self.read_basis_inventory()
2026
except errors.NoSuchFile:
2030
inv = xml7.serializer_v7.read_inventory_from_string(xml)
2031
# dont use the repository revision_tree api because we want
2032
# to supply the inventory.
2033
if inv.revision_id == revision_id:
2034
return revisiontree.RevisionTree(self.branch.repository,
2036
except errors.BadInventoryFormat:
2038
# raise if there was no inventory, or if we read the wrong inventory.
2039
raise errors.NoSuchRevisionInTree(self, revision_id)
1299
2041
# XXX: This method should be deprecated in favour of taking in a proper
1300
2042
# new Inventory object.
2043
@needs_tree_write_lock
1302
2044
def set_inventory(self, new_inventory_list):
1303
2045
from bzrlib.inventory import (Inventory,
1304
2046
InventoryDirectory,
1344
2108
between multiple working trees, i.e. via shared storage, then we
1345
2109
would probably want to lock both the local tree, and the branch.
1347
# FIXME: We want to write out the hashcache only when the last lock on
1348
# this working copy is released. Peeking at the lock count is a bit
1349
# of a nasty hack; probably it's better to have a transaction object,
1350
# which can do some finalization when it's either successfully or
1351
# unsuccessfully completed. (Denys's original patch did that.)
1352
# RBC 20060206 hooking into transaction will couple lock and transaction
1353
# wrongly. Hooking into unlock on the control files object is fine though.
1355
# TODO: split this per format so there is no ugly if block
1356
if self._hashcache.needs_write and (
1357
# dedicated lock files
1358
self._control_files._lock_count==1 or
1360
(self._control_files is self.branch.control_files and
1361
self._control_files._lock_count==3)):
1362
self._hashcache.write()
1363
# reverse order of locking.
1365
return self._control_files.unlock()
1367
self.branch.unlock()
2111
raise NotImplementedError(self.unlock)
2113
def update(self, change_reporter=None, possible_transports=None):
1371
2114
"""Update a working tree along its branch.
1373
This will update the branch if its bound too, which means we have multiple trees involved:
1374
The new basis tree of the master.
1375
The old basis tree of the branch.
1376
The old basis tree of the working tree.
1377
The current working tree state.
1378
pathologically all three may be different, and non ancestors of each other.
1379
Conceptually we want to:
1380
Preserve the wt.basis->wt.state changes
1381
Transform the wt.basis to the new master basis.
1382
Apply a merge of the old branch basis to get any 'local' changes from it into the tree.
1383
Restore the wt.basis->wt.state changes.
2116
This will update the branch if it is bound too, which means we have
2117
multiple trees involved:
2119
- The new basis tree of the master.
2120
- The old basis tree of the branch.
2121
- The old basis tree of the working tree.
2122
- The current working tree state.
2124
Pathologically, all three may be different, and non-ancestors of each
2125
other. Conceptually we want to:
2127
- Preserve the wt.basis->wt.state changes
2128
- Transform the wt.basis to the new master basis.
2129
- Apply a merge of the old branch basis to get any 'local' changes from
2131
- Restore the wt.basis->wt.state changes.
1385
2133
There isn't a single operation at the moment to do that, so we:
1386
Merge current state -> basis tree of the master w.r.t. the old tree basis.
1387
Do a 'normal' merge of the old branch basis if it is relevant.
1389
old_tip = self.branch.update()
1390
if old_tip is not None:
1391
self.add_pending_merge(old_tip)
1392
self.branch.lock_read()
1395
if self.last_revision() != self.branch.last_revision():
1396
# merge tree state up to new branch tip.
1397
basis = self.basis_tree()
2134
- Merge current state -> basis tree of the master w.r.t. the old tree
2136
- Do a 'normal' merge of the old branch basis if it is relevant.
2138
if self.branch.get_bound_location() is not None:
2140
update_branch = True
2142
self.lock_tree_write()
2143
update_branch = False
2146
old_tip = self.branch.update(possible_transports)
2149
return self._update_tree(old_tip, change_reporter)
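    # Rough shape of the update entry point above, as a hedged summary: take
    # lock_write() when the branch is bound (the master may be updated too),
    # lock_tree_write() otherwise, then hand off to _update_tree():
    #
    #   if wt.branch.get_bound_location() is not None:
    #       wt.lock_write()
    #       old_tip = wt.branch.update()
    #   else:
    #       wt.lock_tree_write()
    #       old_tip = None
    #   try:
    #       wt._update_tree(old_tip)
    #   finally:
    #       wt.unlock()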
2153
@needs_tree_write_lock
2154
def _update_tree(self, old_tip=None, change_reporter=None):
2155
"""Update a tree to the master branch.
2157
:param old_tip: if supplied, the previous tip revision the branch,
2158
before it was changed to the master branch's tip.
2160
# here if old_tip is not None, it is the old tip of the branch before
2161
# it was updated from the master branch. This should become a pending
2162
# merge in the working tree to preserve the user existing work. we
2163
# cant set that until we update the working trees last revision to be
2164
# one from the new branch, because it will just get absorbed by the
2165
# parent de-duplication logic.
2167
# We MUST save it even if an error occurs, because otherwise the users
2168
# local work is unreferenced and will appear to have been lost.
2172
last_rev = self.get_parent_ids()[0]
2174
last_rev = _mod_revision.NULL_REVISION
2175
if last_rev != _mod_revision.ensure_null(self.branch.last_revision()):
2176
# merge tree state up to new branch tip.
2177
basis = self.basis_tree()
1398
2180
to_tree = self.branch.basis_tree()
1399
result += merge_inner(self.branch,
2181
if basis.inventory.root is None:
2182
self.set_root_id(to_tree.get_root_id())
2184
result += merge.merge_inner(
1403
self.set_last_revision(self.branch.last_revision())
1404
if old_tip and old_tip != self.last_revision():
1405
# our last revision was not the prior branch last revision
1406
# and we have converted that last revision to a pending merge.
1407
# base is somewhere between the branch tip now
1408
# and the now pending merge
1409
from bzrlib.revision import common_ancestor
1411
base_rev_id = common_ancestor(self.branch.last_revision(),
1413
self.branch.repository)
1414
except errors.NoCommonAncestor:
1416
base_tree = self.branch.repository.revision_tree(base_rev_id)
1417
other_tree = self.branch.repository.revision_tree(old_tip)
1418
result += merge_inner(self.branch,
1424
self.branch.unlock()
2189
change_reporter=change_reporter)
2192
# TODO - dedup parents list with things merged by pull ?
2193
# reuse the tree we've updated to to set the basis:
2194
parent_trees = [(self.branch.last_revision(), to_tree)]
2195
merges = self.get_parent_ids()[1:]
2196
# Ideally we ask the tree for the trees here, that way the working
2197
# tree can decide whether to give us the entire tree or give us a
2198
# lazy initialised tree. dirstate for instance will have the trees
2199
# in ram already, whereas a last-revision + basis-inventory tree
2200
# will not, but also does not need them when setting parents.
2201
for parent in merges:
2202
parent_trees.append(
2203
(parent, self.branch.repository.revision_tree(parent)))
2204
if (old_tip is not None and not _mod_revision.is_null(old_tip)):
2205
parent_trees.append(
2206
(old_tip, self.branch.repository.revision_tree(old_tip)))
2207
self.set_parent_trees(parent_trees)
2208
last_rev = parent_trees[0][0]
2210
# the working tree had the same last-revision as the master
2211
# branch did. We may still have pivot local work from the local
2212
# branch into old_tip:
2213
if (old_tip is not None and not _mod_revision.is_null(old_tip)):
2214
self.add_parent_tree_id(old_tip)
2215
if (old_tip is not None and not _mod_revision.is_null(old_tip)
2216
and old_tip != last_rev):
2217
# our last revision was not the prior branch last revision
2218
# and we have converted that last revision to a pending merge.
2219
# base is somewhere between the branch tip now
2220
# and the now pending merge
2222
# Since we just modified the working tree and inventory, flush out
2223
# the current state, before we modify it again.
2224
# TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
2225
# requires it only because TreeTransform directly munges the
2226
# inventory and calls tree._write_inventory(). Ultimately we
2227
# should be able to remove this extra flush.
2229
graph = self.branch.repository.get_graph()
2230
base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
2232
base_tree = self.branch.repository.revision_tree(base_rev_id)
2233
other_tree = self.branch.repository.revision_tree(old_tip)
2234
result += merge.merge_inner(
2239
change_reporter=change_reporter)
2242
def _write_hashcache_if_dirty(self):
2243
"""Write out the hashcache if it is dirty."""
2244
if self._hashcache.needs_write:
2246
self._hashcache.write()
2248
if e.errno not in (errno.EPERM, errno.EACCES):
2250
# TODO: jam 20061219 Should this be a warning? A single line
2251
# warning might be sufficient to let the user know what
2253
mutter('Could not write hashcache for %s\nError: %s',
2254
self._hashcache.cache_file_name(), e)
2256
@needs_tree_write_lock
1427
2257
def _write_inventory(self, inv):
1428
2258
"""Write inventory as the current inventory."""
1430
bzrlib.xml5.serializer_v5.write_inventory(inv, sio)
1432
self._control_files.put('inventory', sio)
1433
self._set_inventory(inv)
1434
mutter('wrote working inventory')
2259
self._set_inventory(inv, dirty=True)
1436
2262
def set_conflicts(self, arg):
1437
raise UnsupportedOperation(self.set_conflicts, self)
2263
raise errors.UnsupportedOperation(self.set_conflicts, self)
2265
def add_conflicts(self, arg):
2266
raise errors.UnsupportedOperation(self.add_conflicts, self)
1439
2268
@needs_read_lock
1440
2269
def conflicts(self):
1441
conflicts = ConflictList()
2270
conflicts = _mod_conflicts.ConflictList()
1442
2271
for conflicted in self._iter_conflicts():
1457
2286
if text == False:
1459
2288
ctype = {True: 'text conflict', False: 'contents conflict'}[text]
1460
conflicts.append(Conflict.factory(ctype, path=conflicted,
2289
conflicts.append(_mod_conflicts.Conflict.factory(ctype,
1461
2291
file_id=self.path2id(conflicted)))
1462
2292
return conflicts
2294
def walkdirs(self, prefix=""):
2295
"""Walk the directories of this tree.
2297
returns a generator which yields items in the form:
2298
((current_directory_path, fileid),
2299
[(file1_path, file1_name, file1_kind, (lstat), file1_id,
2302
This API returns a generator, which is only valid during the current
2303
tree transaction - within a single lock_read or lock_write duration.
2305
If the tree is not locked, it may cause an error to be raised,
2306
depending on the tree implementation.
2308
disk_top = self.abspath(prefix)
2309
if disk_top.endswith('/'):
2310
disk_top = disk_top[:-1]
2311
top_strip_len = len(disk_top) + 1
2312
inventory_iterator = self._walkdirs(prefix)
2313
disk_iterator = osutils.walkdirs(disk_top, prefix)
2315
current_disk = disk_iterator.next()
2316
disk_finished = False
2318
if not (e.errno == errno.ENOENT or
2319
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
2322
disk_finished = True
2324
current_inv = inventory_iterator.next()
2325
inv_finished = False
2326
except StopIteration:
2329
while not inv_finished or not disk_finished:
2331
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2332
cur_disk_dir_content) = current_disk
2334
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2335
cur_disk_dir_content) = ((None, None), None)
2336
if not disk_finished:
2337
# strip out .bzr dirs
2338
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
2339
len(cur_disk_dir_content) > 0):
2340
# osutils.walkdirs can be made nicer -
2341
# yield the path-from-prefix rather than the pathjoined
2343
bzrdir_loc = bisect_left(cur_disk_dir_content,
2345
if (bzrdir_loc < len(cur_disk_dir_content)
2346
and cur_disk_dir_content[bzrdir_loc][0] == '.bzr'):
2347
# we dont yield the contents of, or, .bzr itself.
2348
del cur_disk_dir_content[bzrdir_loc]
2350
# everything is unknown
2353
# everything is missing
2356
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
2358
# disk is before inventory - unknown
2359
dirblock = [(relpath, basename, kind, stat, None, None) for
2360
relpath, basename, kind, stat, top_path in
2361
cur_disk_dir_content]
2362
yield (cur_disk_dir_relpath, None), dirblock
2364
current_disk = disk_iterator.next()
2365
except StopIteration:
2366
disk_finished = True
2368
# inventory is before disk - missing.
2369
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
2370
for relpath, basename, dkind, stat, fileid, kind in
2372
yield (current_inv[0][0], current_inv[0][1]), dirblock
2374
current_inv = inventory_iterator.next()
2375
except StopIteration:
2378
# versioned present directory
2379
# merge the inventory and disk data together
2381
for relpath, subiterator in itertools.groupby(sorted(
2382
current_inv[1] + cur_disk_dir_content,
2383
key=operator.itemgetter(0)), operator.itemgetter(1)):
2384
path_elements = list(subiterator)
2385
if len(path_elements) == 2:
2386
inv_row, disk_row = path_elements
2387
# versioned, present file
2388
dirblock.append((inv_row[0],
2389
inv_row[1], disk_row[2],
2390
disk_row[3], inv_row[4],
2392
elif len(path_elements[0]) == 5:
2394
dirblock.append((path_elements[0][0],
2395
path_elements[0][1], path_elements[0][2],
2396
path_elements[0][3], None, None))
2397
elif len(path_elements[0]) == 6:
2398
# versioned, absent file.
2399
dirblock.append((path_elements[0][0],
2400
path_elements[0][1], 'unknown', None,
2401
path_elements[0][4], path_elements[0][5]))
2403
raise NotImplementedError('unreachable code')
2404
yield current_inv[0], dirblock
2406
current_inv = inventory_iterator.next()
2407
except StopIteration:
2410
current_disk = disk_iterator.next()
2411
except StopIteration:
2412
disk_finished = True
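    # Toy sketch of the inventory/disk merge performed above: pair two sorted
    # listings by path so each entry can be classified. Inventory rows here
    # are 3-tuples and disk rows 2-tuples purely for illustration; the real
    # rows carry more fields, as described in the docstring.
    #
    #   import itertools, operator
    #
    #   inv_rows = [('a', 'a-id', 'file'), ('b', 'b-id', 'file')]  # versioned
    #   disk_rows = [('a', 'file'), ('c', 'file')]                 # on disk
    #
    #   merged = sorted(inv_rows + disk_rows, key=operator.itemgetter(0))
    #   for path, rows in itertools.groupby(merged, operator.itemgetter(0)):
    #       rows = list(rows)
    #       if len(rows) == 2:
    #           print path, 'versioned and present'
    #       elif len(rows[0]) == 3:
    #           print path, 'versioned but missing on disk'
    #       else:
    #           print path, 'unknown (present only on disk)'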
2414
def _walkdirs(self, prefix=""):
2415
"""Walk the directories of this tree.
2417
:prefix: is used as the directory to start with.
2418
returns a generator which yields items in the form:
2419
((current_directory_path, fileid),
2420
[(file1_path, file1_name, file1_kind, None, file1_id,
2423
_directory = 'directory'
2424
# get the root in the inventory
2425
inv = self.inventory
2426
top_id = inv.path2id(prefix)
2430
pending = [(prefix, '', _directory, None, top_id, None)]
2433
currentdir = pending.pop()
2434
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
2435
top_id = currentdir[4]
2437
relroot = currentdir[0] + '/'
2440
# FIXME: stash the node in pending
2442
if entry.kind == 'directory':
2443
for name, child in entry.sorted_children():
2444
dirblock.append((relroot + name, name, child.kind, None,
2445
child.file_id, child.kind
2447
yield (currentdir[0], entry.file_id), dirblock
2448
# push the user specified dirs from dirblock
2449
for dir in reversed(dirblock):
2450
if dir[2] == _directory:
2453
@needs_tree_write_lock
2454
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts. The corresponding .THIS .BASE and
        .OTHER files are deleted, as per 'resolve'.

        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
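    # Small sketch of the conflict-marker test auto_resolve relies on: a file
    # still counts as conflicted if any line starts with a run of seven '<',
    # '=' or '>' characters. The helper name is illustrative:
    #
    #   import re
    #
    #   conflict_re = re.compile('^(<{7}|={7}|>{7})')
    #
    #   def still_conflicted(path):
    #       f = open(path, 'rb')
    #       try:
    #           return any(conflict_re.search(line) for line in f)
    #       finally:
    #           f.close()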
2487
tree_basis = self.basis_tree()
2488
tree_basis.lock_read()
2490
repo_basis = self.branch.repository.revision_tree(
2491
self.last_revision())
2492
if len(list(repo_basis.iter_changes(tree_basis))) > 0:
2493
raise errors.BzrCheckError(
2494
"Mismatched basis inventory content.")
2499
def _validate(self):
2500
"""Validate internal structures.
2502
This is meant mostly for the test suite. To give it a chance to detect
2503
corruption after actions have occurred. The default implementation is a
2506
:return: None. An exception should be raised if there is an error.
2511
def _get_rules_searcher(self, default_searcher):
2512
"""See Tree._get_rules_searcher."""
2513
if self._rules_searcher is None:
2514
self._rules_searcher = super(WorkingTree,
2515
self)._get_rules_searcher(default_searcher)
2516
return self._rules_searcher
2518
def get_shelf_manager(self):
2519
"""Return the ShelfManager for this WorkingTree."""
2520
from bzrlib.shelf import ShelfManager
2521
return ShelfManager(self, self._transport)
2524
class WorkingTree2(WorkingTree):
2525
"""This is the Format 2 working tree.
2527
This was the first weave based working tree.
2528
- uses os locks for locking.
2529
- uses the branch last-revision.
2532
def __init__(self, *args, **kwargs):
2533
super(WorkingTree2, self).__init__(*args, **kwargs)
2534
# WorkingTree2 has more of a constraint that self._inventory must
2535
# exist. Because this is an older format, we don't mind the overhead
2536
# caused by the extra computation here.
2538
# Newer WorkingTree's should only have self._inventory set when they
2540
if self._inventory is None:
2541
self.read_working_inventory()
2543
def lock_tree_write(self):
2544
"""See WorkingTree.lock_tree_write().
2546
In Format2 WorkingTrees we have a single lock for the branch and tree
2547
so lock_tree_write() degrades to lock_write().
2549
self.branch.lock_write()
2551
return self._control_files.lock_write()
2553
self.branch.unlock()
2557
# do non-implementation specific cleanup
2560
# we share control files:
2561
if self._control_files._lock_count == 3:
2562
# _inventory_is_modified is always False during a read lock.
2563
if self._inventory_is_modified:
2565
self._write_hashcache_if_dirty()
2567
# reverse order of locking.
2569
return self._control_files.unlock()
2571
self.branch.unlock()
1465
2574
class WorkingTree3(WorkingTree):
1466
2575
"""This is the Format 3 working tree.