    __contains__ = has_id

    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime
    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat
    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.inventory
        for f, file_id, kind in zip(files, ids, kinds):
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                inv.add_path(f, kind=kind, file_id=file_id)
            self._inventory_is_modified = True
    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    @needs_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as its not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)
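    # Illustrative usage (a hypothetical sketch, not from the original source):
    # `tree` is a write-locked WorkingTree and `merged_rev_id` is a revision
    # present in tree.branch.repository.
    #
    #   tree.add_parent_tree_id(merged_rev_id)
    #   assert merged_rev_id in tree.get_parent_ids()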
    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available, using
        this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
        self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            size = stat_result.st_size
            # try for a stat cache lookup
            executable = self._is_executable_from_path_and_stat(path, stat_result)
            return (kind, size, executable, self._sha_from_stat(
                path, stat_result))
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # tree reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            return ('symlink', None, None, os.readlink(abspath))
        else:
            return (kind, None, None, None)
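    # Illustrative usage (a hypothetical sketch, not from the original source):
    # the summary is a 4-tuple whose last element depends on the kind - a sha1
    # (or None) for files, None for directories and tree references, and the
    # link target for symlinks.
    #
    #   kind, size, executable, link_or_sha1 = tree.path_content_summary('foo')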
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)
    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self._control_files._file_mode)
    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        which are not ancestors of another requested revision.
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids
    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)
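    # Illustrative usage (a hypothetical sketch, not from the original source):
    # make the branch tip the first parent and record one merge parent by id.
    # A ghost is only accepted in the leftmost slot if allow_leftmost_as_ghost
    # is passed.
    #
    #   tree.set_parent_ids([tree.branch.last_revision(), merged_rev_id])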
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                    leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)

    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._transport.put_file(filename, my_file,
            mode=self._control_files._file_mode)
    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
                          merge_type=None):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            merger = Merger(self.branch, this_tree=self, pb=pb)
            merger.pp = ProgressPhase("Merge phase", 5, pb)
            merger.pp.next_phase()
            # check that there are no local alterations
            merger.check_basis(check_clean=True, require_commits=False)
            if to_revision is None:
                to_revision = _mod_revision.ensure_null(branch.last_revision())
            merger.other_rev_id = to_revision
            if _mod_revision.is_null(merger.other_rev_id):
                raise errors.NoCommits(branch)
            self.branch.fetch(branch, last_revision=merger.other_rev_id)
            merger.other_basis = merger.other_rev_id
            merger.other_tree = self.branch.repository.revision_tree(
                merger.other_rev_id)
            merger.other_branch = branch
            merger.pp.next_phase()
            if from_revision is None:
                merger.find_base()
            else:
                merger.set_base_revision(from_revision, branch)
            if merger.base_rev_id == merger.other_rev_id:
                raise errors.PointlessMerge
            merger.backup_files = False
            if merge_type is None:
                merger.merge_type = Merge3Merger
            else:
                merger.merge_type = merge_type
            merger.set_interesting_files(None)
            merger.show_base = False
            merger.reprocess = False
            conflicts = merger.do_merge()
            merger.set_pending()
        finally:
            pb.finished()
        return conflicts
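    # Illustrative usage (a hypothetical sketch, not from the original source):
    # merge the tip of `other_branch` into this working tree; the return value
    # reflects how many conflicts the merge left behind.
    #
    #   conflicts = tree.merge_from_branch(other_branch)
    #   if conflicts:
    #       print 'resolve conflicts before committing'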
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._transport.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        merge_hashes = {}
        try:
            if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                raise errors.MergeModifiedFormatError()
        except StopIteration:
            raise errors.MergeModifiedFormatError()
        for s in RioReader(hashfile):
            # RioReader reads in Unicode, so convert file_ids back to utf8
            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
            if file_id not in self.inventory:
                continue
            text_hash = s.get("hash")
            if text_hash == self.get_file_sha1(file_id):
                merge_hashes[file_id] = text_hash
        return merge_hashes

    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id

    def get_symlink_target(self, file_id):
        return os.readlink(self.id2abspath(file_id))
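    # Illustrative usage (a hypothetical sketch, not from the original source):
    # mkdir creates the directory on disk and versions it in one step,
    # returning the file id that was used.
    #
    #   doc_dir_id = tree.mkdir('docs')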
    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                                          'Trees have the same root')
        try:
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
        try:
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        finally:
            other_tree.unlock()
        other_tree.bzrdir.retire_bzrdir()
    def _setup_directory_is_tree_reference(self):
        if self._branch.repository._format.supports_tree_reference:
            self._directory_is_tree_reference = \
                self._directory_may_be_tree_reference
        else:
            self._directory_is_tree_reference = \
                self._directory_is_never_tree_reference

    def _directory_is_never_tree_reference(self, relpath):
        return False

    def _directory_may_be_tree_reference(self, relpath):
        # as a special case, if a directory contains control files then
        # it's a tree reference, except that the root of the tree is not
        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
        # TODO: We could ask all the control formats whether they
        # recognize this directory, but at the moment there's no cheap api
        # to do that.  Since we probably can only nest bzr checkouts and
        # they always use this name it's ok for now.  -- mbp 20060306
        #
        # FIXME: There is an unhandled case here of a subdirectory
        # containing .bzr but not a branch; that will probably blow up
        # when you try to commit it.  It might happen if there is a
        # checkout in a subdirectory.  This can be avoided by not adding
        # it.
    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        def mkdirs(path):
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()
            return transport

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
        if format is None:
            format = self.bzrdir.cloning_metadir()
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
        try:
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
        if not repo.supports_rich_root():
            raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
        return wt
    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file,
            working=True)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)

    def flush(self):
        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._transport.put_file('inventory', sio,
            mode=self._control_files._file_mode)
        self._inventory_is_modified = False
1075
def _kind(self, relpath):
1076
return osutils.file_kind(self.abspath(relpath))
1078
def list_files(self, include_root=False):
696
def file_class(self, filename):
697
if self.path2id(filename):
699
elif self.is_ignored(filename):
704
def list_files(self):
1079
705
"""Recursively list all files as (path, class, kind, id, entry).
1081
707
Lists, but does not descend into unversioned directories.
1179
786
new_children.sort()
1180
787
new_children = collections.deque(new_children)
1181
788
stack.append((f_ie.file_id, fp, fap, new_children))
1182
# Break out of inner loop,
1183
# so that we start outer loop with child
789
# Break out of inner loop, so that we start outer loop with child
1186
792
# if we finished all children, pop it off the stack
1189
@needs_tree_write_lock
1190
def move(self, from_paths, to_dir=None, after=False, **kwargs):
797
def move(self, from_paths, to_name):
1191
798
"""Rename files.
1193
to_dir must exist in the inventory.
800
to_name must exist in the inventory.
1195
If to_dir exists and is a directory, the files are moved into
802
If to_name exists and is a directory, the files are moved into
1196
803
it, keeping their old names.
1198
Note that to_dir is only the last component of the new name;
805
Note that to_name is only the last component of the new name;
1199
806
this doesn't change the directory.
1201
For each entry in from_paths the move mode will be determined
1204
The first mode moves the file in the filesystem and updates the
1205
inventory. The second mode only updates the inventory without
1206
touching the file on the filesystem. This is the new mode introduced
1209
move uses the second mode if 'after == True' and the target is not
1210
versioned but present in the working tree.
1212
move uses the second mode if 'after == False' and the source is
1213
versioned but no longer in the working tree, and the target is not
1214
versioned but present in the working tree.
1216
move uses the first mode if 'after == False' and the source is
1217
versioned and present in the working tree, and the target is not
1218
versioned and not present in the working tree.
1220
Everything else results in an error.
1222
808
This returns a list of (from_path, to_path) pairs for each
1223
809
entry that is moved.
1228
# check for deprecated use of signature
1230
to_dir = kwargs.get('to_name', None)
1232
raise TypeError('You must supply a target directory')
1234
symbol_versioning.warn('The parameter to_name was deprecated'
1235
' in version 0.13. Use to_dir instead',
1238
# check destination directory
1239
if isinstance(from_paths, basestring):
812
## TODO: Option to move IDs only
813
assert not isinstance(from_paths, basestring)
1241
814
inv = self.inventory
1242
to_abs = self.abspath(to_dir)
815
to_abs = self.abspath(to_name)
1243
816
if not isdir(to_abs):
1244
raise errors.BzrMoveFailedError('',to_dir,
1245
errors.NotADirectory(to_abs))
1246
if not self.has_filename(to_dir):
1247
raise errors.BzrMoveFailedError('',to_dir,
1248
errors.NotInWorkingDirectory(to_dir))
1249
to_dir_id = inv.path2id(to_dir)
1250
if to_dir_id is None:
1251
raise errors.BzrMoveFailedError('',to_dir,
1252
errors.NotVersionedError(path=str(to_dir)))
817
raise BzrError("destination %r is not a directory" % to_abs)
818
if not self.has_filename(to_name):
819
raise BzrError("destination %r not in working directory" % to_abs)
820
to_dir_id = inv.path2id(to_name)
821
if to_dir_id == None and to_name != '':
822
raise BzrError("destination %r is not a versioned directory" % to_name)
1254
823
to_dir_ie = inv[to_dir_id]
1255
if to_dir_ie.kind != 'directory':
1256
raise errors.BzrMoveFailedError('',to_dir,
1257
errors.NotADirectory(to_abs))
1259
# create rename entries and tuples
1260
for from_rel in from_paths:
1261
from_tail = splitpath(from_rel)[-1]
1262
from_id = inv.path2id(from_rel)
1264
raise errors.BzrMoveFailedError(from_rel,to_dir,
1265
errors.NotVersionedError(path=str(from_rel)))
1267
from_entry = inv[from_id]
1268
from_parent_id = from_entry.parent_id
1269
to_rel = pathjoin(to_dir, from_tail)
1270
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1272
from_tail=from_tail,
1273
from_parent_id=from_parent_id,
1274
to_rel=to_rel, to_tail=from_tail,
1275
to_parent_id=to_dir_id)
1276
rename_entries.append(rename_entry)
1277
rename_tuples.append((from_rel, to_rel))
1279
# determine which move mode to use. checks also for movability
1280
rename_entries = self._determine_mv_mode(rename_entries, after)
1282
original_modified = self._inventory_is_modified
824
if to_dir_ie.kind not in ('directory', 'root_directory'):
825
raise BzrError("destination %r is not a directory" % to_abs)
827
to_idpath = inv.get_idpath(to_dir_id)
830
if not self.has_filename(f):
831
raise BzrError("%r does not exist in working tree" % f)
832
f_id = inv.path2id(f)
834
raise BzrError("%r is not versioned" % f)
835
name_tail = splitpath(f)[-1]
836
dest_path = pathjoin(to_name, name_tail)
837
if self.has_filename(dest_path):
838
raise BzrError("destination %r already exists" % dest_path)
839
if f_id in to_idpath:
840
raise BzrError("can't move %r to a subdirectory of itself" % f)
842
# OK, so there's a race here, it's possible that someone will
843
# create a file in this interval and then the rename might be
844
# left half-done. But we should have caught most problems.
845
orig_inv = deepcopy(self.inventory)
1285
self._inventory_is_modified = True
1286
self._move(rename_entries)
848
name_tail = splitpath(f)[-1]
849
dest_path = pathjoin(to_name, name_tail)
850
result.append((f, dest_path))
851
inv.rename(inv.path2id(f), to_dir_id, name_tail)
853
rename(self.abspath(f), self.abspath(dest_path))
855
raise BzrError("failed to rename %r to %r: %s" %
856
(f, dest_path, e[1]),
857
["rename rolled back"])
1288
859
# restore the inventory on error
1289
self._inventory_is_modified = original_modified
860
self._set_inventory(orig_inv)
1291
862
self._write_inventory(inv)
1292
return rename_tuples
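    # Illustrative usage (a hypothetical sketch, not from the original source):
    # with after=False the files are renamed on disk and in the inventory;
    # with after=True only the inventory is updated, for files that were
    # already moved outside of bzr.
    #
    #   tree.move(['a.txt', 'b.txt'], 'subdir')
    #   tree.move(['c.txt'], 'subdir', after=True)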
1294
def _determine_mv_mode(self, rename_entries, after=False):
1295
"""Determines for each from-to pair if both inventory and working tree
1296
or only the inventory has to be changed.
1298
Also does basic plausability tests.
1300
inv = self.inventory
1302
for rename_entry in rename_entries:
1303
# store to local variables for easier reference
1304
from_rel = rename_entry.from_rel
1305
from_id = rename_entry.from_id
1306
to_rel = rename_entry.to_rel
1307
to_id = inv.path2id(to_rel)
1308
only_change_inv = False
1310
# check the inventory for source and destination
1312
raise errors.BzrMoveFailedError(from_rel,to_rel,
1313
errors.NotVersionedError(path=str(from_rel)))
1314
if to_id is not None:
1315
raise errors.BzrMoveFailedError(from_rel,to_rel,
1316
errors.AlreadyVersionedError(path=str(to_rel)))
1318
# try to determine the mode for rename (only change inv or change
1319
# inv and file system)
1321
if not self.has_filename(to_rel):
1322
raise errors.BzrMoveFailedError(from_id,to_rel,
1323
errors.NoSuchFile(path=str(to_rel),
1324
extra="New file has not been created yet"))
1325
only_change_inv = True
1326
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1327
only_change_inv = True
1328
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1329
only_change_inv = False
1330
elif (not self.case_sensitive
1331
and from_rel.lower() == to_rel.lower()
1332
and self.has_filename(from_rel)):
1333
only_change_inv = False
1335
# something is wrong, so lets determine what exactly
1336
if not self.has_filename(from_rel) and \
1337
not self.has_filename(to_rel):
1338
raise errors.BzrRenameFailedError(from_rel,to_rel,
1339
errors.PathsDoNotExist(paths=(str(from_rel),
1342
raise errors.RenameFailedFilesExist(from_rel, to_rel)
1343
rename_entry.only_change_inv = only_change_inv
1344
return rename_entries
1346
def _move(self, rename_entries):
1347
"""Moves a list of files.
1349
Depending on the value of the flag 'only_change_inv', the
1350
file will be moved on the file system or not.
1352
inv = self.inventory
1355
for entry in rename_entries:
1357
self._move_entry(entry)
1359
self._rollback_move(moved)
1363
def _rollback_move(self, moved):
1364
"""Try to rollback a previous move in case of an filesystem error."""
1365
inv = self.inventory
1368
self._move_entry(_RenameEntry(entry.to_rel, entry.from_id,
1369
entry.to_tail, entry.to_parent_id, entry.from_rel,
1370
entry.from_tail, entry.from_parent_id,
1371
entry.only_change_inv))
1372
except errors.BzrMoveFailedError, e:
1373
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1374
" The working tree is in an inconsistent state."
1375
" Please consider doing a 'bzr revert'."
1376
" Error message is: %s" % e)
1378
def _move_entry(self, entry):
1379
inv = self.inventory
1380
from_rel_abs = self.abspath(entry.from_rel)
1381
to_rel_abs = self.abspath(entry.to_rel)
1382
if from_rel_abs == to_rel_abs:
1383
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1384
"Source and target are identical.")
1386
if not entry.only_change_inv:
1388
osutils.rename(from_rel_abs, to_rel_abs)
1390
raise errors.BzrMoveFailedError(entry.from_rel,
1392
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1394
@needs_tree_write_lock
1395
def rename_one(self, from_rel, to_rel, after=False):
866
def rename_one(self, from_rel, to_rel):
1396
867
"""Rename one file.
1398
869
This can change the directory or the filename or both.
1400
rename_one has several 'modes' to work. First, it can rename a physical
1401
file and change the file_id. That is the normal mode. Second, it can
1402
only change the file_id without touching any physical file. This is
1403
the new mode introduced in version 0.15.
1405
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1406
versioned but present in the working tree.
1408
rename_one uses the second mode if 'after == False' and 'from_rel' is
1409
versioned but no longer in the working tree, and 'to_rel' is not
1410
versioned but present in the working tree.
1412
rename_one uses the first mode if 'after == False' and 'from_rel' is
1413
versioned and present in the working tree, and 'to_rel' is not
1414
versioned and not present in the working tree.
1416
Everything else results in an error.
1418
871
inv = self.inventory
1421
# create rename entries and tuples
1422
from_tail = splitpath(from_rel)[-1]
1423
from_id = inv.path2id(from_rel)
1425
raise errors.BzrRenameFailedError(from_rel,to_rel,
1426
errors.NotVersionedError(path=str(from_rel)))
1427
from_entry = inv[from_id]
1428
from_parent_id = from_entry.parent_id
872
if not self.has_filename(from_rel):
873
raise BzrError("can't rename: old working file %r does not exist" % from_rel)
874
if self.has_filename(to_rel):
875
raise BzrError("can't rename: new working file %r already exists" % to_rel)
877
file_id = inv.path2id(from_rel)
879
raise BzrError("can't rename: old name %r is not versioned" % from_rel)
882
from_parent = entry.parent_id
883
from_name = entry.name
885
if inv.path2id(to_rel):
886
raise BzrError("can't rename: new name %r is already versioned" % to_rel)
1429
888
to_dir, to_tail = os.path.split(to_rel)
1430
889
to_dir_id = inv.path2id(to_dir)
1431
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1433
from_tail=from_tail,
1434
from_parent_id=from_parent_id,
1435
to_rel=to_rel, to_tail=to_tail,
1436
to_parent_id=to_dir_id)
1437
rename_entries.append(rename_entry)
1439
# determine which move mode to use. checks also for movability
1440
rename_entries = self._determine_mv_mode(rename_entries, after)
1442
# check if the target changed directory and if the target directory is
1444
if to_dir_id is None:
1445
raise errors.BzrMoveFailedError(from_rel,to_rel,
1446
errors.NotVersionedError(path=str(to_dir)))
1448
# all checks done. now we can continue with our actual work
1449
mutter('rename_one:\n'
1454
' to_dir_id {%s}\n',
1455
from_id, from_rel, to_rel, to_dir, to_dir_id)
1457
self._move(rename_entries)
890
if to_dir_id == None and to_dir != '':
891
raise BzrError("can't determine destination directory id for %r" % to_dir)
893
mutter("rename_one:")
894
mutter(" file_id {%s}" % file_id)
895
mutter(" from_rel %r" % from_rel)
896
mutter(" to_rel %r" % to_rel)
897
mutter(" to_dir %r" % to_dir)
898
mutter(" to_dir_id {%s}" % to_dir_id)
900
inv.rename(file_id, to_dir_id, to_tail)
902
from_abs = self.abspath(from_rel)
903
to_abs = self.abspath(to_rel)
905
rename(from_abs, to_abs)
907
inv.rename(file_id, from_parent, from_name)
908
raise BzrError("failed to rename %r to %r: %s"
909
% (from_abs, to_abs, e[1]),
910
["rename rolled back"])
1458
911
self._write_inventory(inv)
1460
class _RenameEntry(object):
1461
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1462
to_rel, to_tail, to_parent_id, only_change_inv=False):
1463
self.from_rel = from_rel
1464
self.from_id = from_id
1465
self.from_tail = from_tail
1466
self.from_parent_id = from_parent_id
1467
self.to_rel = to_rel
1468
self.to_tail = to_tail
1469
self.to_parent_id = to_parent_id
1470
self.only_change_inv = only_change_inv
1472
913
    @needs_read_lock
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
        return iter(
            [subp for subp in self.extras() if not self.is_ignored(subp)])

    @needs_tree_write_lock
    def unversion(self, file_ids):
        """Remove the file ids in file_ids from the current versioned set.

        When a file_id is unversioned, all of its children are automatically
        unversioned.

        :param file_ids: The file ids to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        for file_id in file_ids:
            if self._inventory.has_id(file_id):
                self._inventory.remove_recursive_id(file_id)
            else:
                raise errors.NoSuchId(self, file_id)
        if len(file_ids):
            # in the future this should just set a dirty bit to wait for the
            # final unlock. However, until all methods of workingtree start
            # with the current in -memory inventory rather than triggering
            # a read, it is more complex - we need to teach read_inventory
            # to know when to read, and when to not read first... and possibly
            # to save first when the in memory one may be corrupted.
            # so for now, we just only write it if it is indeed dirty.
            self._write_inventory(self._inventory)
    def _iter_conflicts(self):
        conflicted = set()
        for info in self.list_files():
            # root node id can legitimately look like 'revision_id' but cannot
            # contain a '"'.
            xml = self.branch.repository.get_inventory_xml(new_revision)
            firstline = xml.split('\n', 1)[0]
            if (not 'revision_id="' in firstline or
                'format="7"' not in firstline):
                inv = self.branch.repository.deserialise_inventory(
                    new_revision, xml)
                xml = self._create_basis_xml_from_inventory(new_revision, inv)
            self._write_basis_inventory(xml)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):
            pass

    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._transport.get_bytes(path)

    @needs_read_lock
    def read_working_inventory(self):
        """Read the working inventory.

        :raises errors.InventoryModified: read_working_inventory will fail
            when the current in memory inventory has been modified.
        """
        # conceptually this should be an implementation detail of the tree.
        # XXX: Deprecate this.
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        if self._inventory_is_modified:
            raise errors.InventoryModified(self)
        result = self._deserialize(self._transport.get('inventory'))
        self._set_inventory(result, dirty=False)
        return result
1851
@needs_tree_write_lock
1852
def remove(self, files, verbose=False, to_file=None, keep_files=True,
1854
"""Remove nominated files from the working inventory.
1856
:files: File paths relative to the basedir.
1857
:keep_files: If true, the files will also be kept.
1858
:force: Delete files and directories, even if they are changed and
1859
even if the directories are not empty.
1244
def remove(self, files, verbose=False, to_file=None):
1245
"""Remove nominated files from the working inventory..
1247
This does not remove their text. This does not run on XXX on what? RBC
1249
TODO: Refuse to remove modified files unless --force is given?
1251
TODO: Do something useful with directories.
1253
TODO: Should this remove the text or not? Tough call; not
1254
removing may be useful and the user can just use use rm, and
1255
is the opposite of add. Removing it is consistent with most
1256
other tools. Maybe an option.
1258
## TODO: Normalize names
1259
## TODO: Remove nested loops; better scalability
1861
1260
if isinstance(files, basestring):
1862
1261
files = [files]
1867
unknown_nested_files=set()
1869
def recurse_directory_to_add_files(directory):
1870
# Recurse directory and add all files
1871
# so we can check if they have changed.
1872
for parent_info, file_infos in\
1873
self.walkdirs(directory):
1874
for relpath, basename, kind, lstat, fileid, kind in file_infos:
1875
# Is it versioned or ignored?
1876
if self.path2id(relpath) or self.is_ignored(relpath):
1877
# Add nested content for deletion.
1878
new_files.add(relpath)
1880
# Files which are not versioned and not ignored
1881
# should be treated as unknown.
1882
unknown_nested_files.add((relpath, None, kind))
1884
for filename in files:
1885
# Get file name into canonical form.
1886
abspath = self.abspath(filename)
1887
filename = self.relpath(abspath)
1888
if len(filename) > 0:
1889
new_files.add(filename)
1890
recurse_directory_to_add_files(filename)
1892
files = list(new_files)
1895
return # nothing to do
1897
# Sort needed to first handle directory content before the directory
1898
files.sort(reverse=True)
1900
# Bail out if we are going to delete files we shouldn't
1901
if not keep_files and not force:
1902
has_changed_files = len(unknown_nested_files) > 0
1903
if not has_changed_files:
1904
for (file_id, path, content_change, versioned, parent_id, name,
1905
kind, executable) in self.iter_changes(self.basis_tree(),
1906
include_unchanged=True, require_versioned=False,
1907
want_unversioned=True, specific_files=files):
1908
if versioned == (False, False):
1909
# The record is unknown ...
1910
if not self.is_ignored(path[1]):
1911
# ... but not ignored
1912
has_changed_files = True
1914
elif content_change and (kind[1] is not None):
1915
# Versioned and changed, but not deleted
1916
has_changed_files = True
1919
if has_changed_files:
1920
# Make delta show ALL applicable changes in error message.
1921
tree_delta = self.changes_from(self.basis_tree(),
1922
require_versioned=False, want_unversioned=True,
1923
specific_files=files)
1924
for unknown_file in unknown_nested_files:
1925
if unknown_file not in tree_delta.unversioned:
1926
tree_delta.unversioned.extend((unknown_file,))
1927
raise errors.BzrRemoveChangedFilesError(tree_delta)
1929
# Build inv_delta and delete files where applicaple,
1930
# do this before any modifications to inventory.
1263
inv = self.inventory
1265
# do this before any modifications
1931
1266
for f in files:
1932
fid = self.path2id(f)
1267
fid = inv.path2id(f)
1935
message = "%s is not versioned." % (f,)
1938
# having removed it, it must be either ignored or unknown
1939
if self.is_ignored(f):
1943
textui.show_status(new_status, self.kind(fid), f,
1946
inv_delta.append((f, None, fid, None))
1947
message = "removed %s" % (f,)
1950
abs_path = self.abspath(f)
1951
if osutils.lexists(abs_path):
1952
if (osutils.isdir(abs_path) and
1953
len(os.listdir(abs_path)) > 0):
1955
osutils.rmtree(abs_path)
1957
message = "%s is not an empty directory "\
1958
"and won't be deleted." % (f,)
1960
osutils.delete_any(abs_path)
1961
message = "deleted %s" % (f,)
1962
elif message is not None:
1963
# Only care if we haven't done anything yet.
1964
message = "%s does not exist." % (f,)
1966
# Print only one message (if any) per file.
1967
if message is not None:
1969
self.apply_inventory_delta(inv_delta)
1971
@needs_tree_write_lock
1972
def revert(self, filenames=None, old_tree=None, backups=True,
1973
pb=DummyProgress(), report_changes=False):
1974
from bzrlib.conflicts import resolve
1977
symbol_versioning.warn('Using [] to revert all files is deprecated'
1978
' as of bzr 0.91. Please use None (the default) instead.',
1979
DeprecationWarning, stacklevel=2)
1269
# TODO: Perhaps make this just a warning, and continue?
1270
# This tends to happen when
1271
raise NotVersionedError(path=f)
1272
mutter("remove inventory entry %s {%s}", quotefn(f), fid)
1274
# having remove it, it must be either ignored or unknown
1275
if self.is_ignored(f):
1279
show_status(new_status, inv[fid].kind, quotefn(f), to_file=to_file)
1282
self._write_inventory(inv)
1285
def revert(self, filenames, old_tree=None, backups=True,
1286
pb=DummyProgress()):
1287
from transform import revert
1288
from conflicts import resolve
1980
1289
if old_tree is None:
1981
basis_tree = self.basis_tree()
1982
basis_tree.lock_read()
1983
old_tree = basis_tree
1290
old_tree = self.basis_tree()
1291
conflicts = revert(self, old_tree, filenames, backups, pb)
1292
if not len(filenames):
1293
self.set_pending_merges([])
1987
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
1989
if filenames is None and len(self.get_parent_ids()) > 1:
1991
last_revision = self.last_revision()
1992
if last_revision != NULL_REVISION:
1993
if basis_tree is None:
1994
basis_tree = self.basis_tree()
1995
basis_tree.lock_read()
1996
parent_trees.append((last_revision, basis_tree))
1997
self.set_parent_trees(parent_trees)
2000
resolve(self, filenames, ignore_misses=True, recursive=True)
2002
if basis_tree is not None:
1296
resolve(self, filenames, ignore_misses=True)
2004
1297
return conflicts
2006
def revision_tree(self, revision_id):
2007
"""See Tree.revision_tree.
2009
WorkingTree can supply revision_trees for the basis revision only
2010
because there is only one cached inventory in the bzr directory.
2012
if revision_id == self.last_revision():
2014
xml = self.read_basis_inventory()
2015
except errors.NoSuchFile:
2019
inv = xml7.serializer_v7.read_inventory_from_string(xml)
2020
# dont use the repository revision_tree api because we want
2021
# to supply the inventory.
2022
if inv.revision_id == revision_id:
2023
return revisiontree.RevisionTree(self.branch.repository,
2025
except errors.BadInventoryFormat:
2027
# raise if there was no inventory, or if we read the wrong inventory.
2028
raise errors.NoSuchRevisionInTree(self, revision_id)
2030
1299
# XXX: This method should be deprecated in favour of taking in a proper
2031
1300
# new Inventory object.
2032
@needs_tree_write_lock
2033
1302
def set_inventory(self, new_inventory_list):
2034
1303
from bzrlib.inventory import (Inventory,
2035
1304
InventoryDirectory,
2097
1344
between multiple working trees, i.e. via shared storage, then we
2098
1345
would probably want to lock both the local tree, and the branch.
2100
raise NotImplementedError(self.unlock)
1347
# FIXME: We want to write out the hashcache only when the last lock on
1348
# this working copy is released. Peeking at the lock count is a bit
1349
# of a nasty hack; probably it's better to have a transaction object,
1350
# which can do some finalization when it's either successfully or
1351
# unsuccessfully completed. (Denys's original patch did that.)
1352
# RBC 20060206 hooking into transaction will couple lock and transaction
1353
# wrongly. Hooking into unlock on the control files object is fine though.
1355
# TODO: split this per format so there is no ugly if block
1356
if self._hashcache.needs_write and (
1357
# dedicated lock files
1358
self._control_files._lock_count==1 or
1360
(self._control_files is self.branch.control_files and
1361
self._control_files._lock_count==3)):
1362
self._hashcache.write()
1363
# reverse order of locking.
1365
return self._control_files.unlock()
1367
self.branch.unlock()
2102
def update(self, change_reporter=None, possible_transports=None):
2103
1371
"""Update a working tree along its branch.
2105
This will update the branch if its bound too, which means we have
2106
multiple trees involved:
2108
- The new basis tree of the master.
2109
- The old basis tree of the branch.
2110
- The old basis tree of the working tree.
2111
- The current working tree state.
2113
Pathologically, all three may be different, and non-ancestors of each
2114
other. Conceptually we want to:
2116
- Preserve the wt.basis->wt.state changes
2117
- Transform the wt.basis to the new master basis.
2118
- Apply a merge of the old branch basis to get any 'local' changes from
2120
- Restore the wt.basis->wt.state changes.
1373
This will update the branch if its bound too, which means we have multiple trees involved:
1374
The new basis tree of the master.
1375
The old basis tree of the branch.
1376
The old basis tree of the working tree.
1377
The current working tree state.
1378
pathologically all three may be different, and non ancestors of each other.
1379
Conceptually we want to:
1380
Preserve the wt.basis->wt.state changes
1381
Transform the wt.basis to the new master basis.
1382
Apply a merge of the old branch basis to get any 'local' changes from it into the tree.
1383
Restore the wt.basis->wt.state changes.
2122
1385
There isn't a single operation at the moment to do that, so we:
2123
- Merge current state -> basis tree of the master w.r.t. the old tree
2125
- Do a 'normal' merge of the old branch basis if it is relevant.
2127
if self.branch.get_bound_location() is not None:
2129
update_branch = True
2131
self.lock_tree_write()
2132
update_branch = False
2135
old_tip = self.branch.update(possible_transports)
2138
return self._update_tree(old_tip, change_reporter)
2142
@needs_tree_write_lock
2143
def _update_tree(self, old_tip=None, change_reporter=None):
2144
"""Update a tree to the master branch.
2146
:param old_tip: if supplied, the previous tip revision the branch,
2147
before it was changed to the master branch's tip.
2149
# here if old_tip is not None, it is the old tip of the branch before
2150
# it was updated from the master branch. This should become a pending
2151
# merge in the working tree to preserve the user existing work. we
2152
# cant set that until we update the working trees last revision to be
2153
# one from the new branch, because it will just get absorbed by the
2154
# parent de-duplication logic.
2156
# We MUST save it even if an error occurs, because otherwise the users
2157
# local work is unreferenced and will appear to have been lost.
2161
last_rev = self.get_parent_ids()[0]
2163
last_rev = _mod_revision.NULL_REVISION
2164
if last_rev != _mod_revision.ensure_null(self.branch.last_revision()):
2165
# merge tree state up to new branch tip.
2166
basis = self.basis_tree()
1386
Merge current state -> basis tree of the master w.r.t. the old tree basis.
1387
Do a 'normal' merge of the old branch basis if it is relevant.
1389
old_tip = self.branch.update()
1390
if old_tip is not None:
1391
self.add_pending_merge(old_tip)
1392
self.branch.lock_read()
1395
if self.last_revision() != self.branch.last_revision():
1396
# merge tree state up to new branch tip.
1397
basis = self.basis_tree()
2169
1398
to_tree = self.branch.basis_tree()
2170
if basis.inventory.root is None:
2171
self.set_root_id(to_tree.get_root_id())
2173
result += merge.merge_inner(
1399
result += merge_inner(self.branch,
2178
change_reporter=change_reporter)
2181
# TODO - dedup parents list with things merged by pull ?
2182
# reuse the tree we've updated to to set the basis:
2183
parent_trees = [(self.branch.last_revision(), to_tree)]
2184
merges = self.get_parent_ids()[1:]
2185
# Ideally we ask the tree for the trees here, that way the working
2186
# tree can decide whether to give us teh entire tree or give us a
2187
# lazy initialised tree. dirstate for instance will have the trees
2188
# in ram already, whereas a last-revision + basis-inventory tree
2189
# will not, but also does not need them when setting parents.
2190
for parent in merges:
2191
parent_trees.append(
2192
(parent, self.branch.repository.revision_tree(parent)))
2193
if (old_tip is not None and not _mod_revision.is_null(old_tip)):
2194
parent_trees.append(
2195
(old_tip, self.branch.repository.revision_tree(old_tip)))
2196
self.set_parent_trees(parent_trees)
2197
last_rev = parent_trees[0][0]
2199
# the working tree had the same last-revision as the master
2200
# branch did. We may still have pivot local work from the local
2201
# branch into old_tip:
2202
if (old_tip is not None and not _mod_revision.is_null(old_tip)):
2203
self.add_parent_tree_id(old_tip)
2204
if (old_tip is not None and not _mod_revision.is_null(old_tip)
2205
and old_tip != last_rev):
2206
# our last revision was not the prior branch last revision
2207
# and we have converted that last revision to a pending merge.
2208
# base is somewhere between the branch tip now
2209
# and the now pending merge
2211
# Since we just modified the working tree and inventory, flush out
2212
# the current state, before we modify it again.
2213
# TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
2214
# requires it only because TreeTransform directly munges the
2215
# inventory and calls tree._write_inventory(). Ultimately we
2216
# should be able to remove this extra flush.
2218
graph = self.branch.repository.get_graph()
2219
base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
2221
base_tree = self.branch.repository.revision_tree(base_rev_id)
2222
other_tree = self.branch.repository.revision_tree(old_tip)
2223
result += merge.merge_inner(
2228
change_reporter=change_reporter)
2231
def _write_hashcache_if_dirty(self):
2232
"""Write out the hashcache if it is dirty."""
2233
if self._hashcache.needs_write:
2235
self._hashcache.write()
2237
if e.errno not in (errno.EPERM, errno.EACCES):
2239
# TODO: jam 20061219 Should this be a warning? A single line
2240
# warning might be sufficient to let the user know what
2242
mutter('Could not write hashcache for %s\nError: %s',
2243
self._hashcache.cache_file_name(), e)
2245
@needs_tree_write_lock
1403
self.set_last_revision(self.branch.last_revision())
1404
if old_tip and old_tip != self.last_revision():
1405
# our last revision was not the prior branch last revision
1406
# and we have converted that last revision to a pending merge.
1407
# base is somewhere between the branch tip now
1408
# and the now pending merge
1409
from bzrlib.revision import common_ancestor
1411
base_rev_id = common_ancestor(self.branch.last_revision(),
1413
self.branch.repository)
1414
except errors.NoCommonAncestor:
1416
base_tree = self.branch.repository.revision_tree(base_rev_id)
1417
other_tree = self.branch.repository.revision_tree(old_tip)
1418
result += merge_inner(self.branch,
1424
self.branch.unlock()
2246
1427
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        self._set_inventory(inv, dirty=True)
        self.flush()

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    @needs_read_lock
    def conflicts(self):
        conflicts = _mod_conflicts.ConflictList()
        for conflicted in self._iter_conflicts():
            if text == False:
                break
            ctype = {True: 'text conflict', False: 'contents conflict'}[text]
            conflicts.append(_mod_conflicts.Conflict.factory(ctype,
                             path=conflicted,
                             file_id=self.path2id(conflicted)))
        return conflicts
2283
def walkdirs(self, prefix=""):
2284
"""Walk the directories of this tree.
2286
returns a generator which yields items in the form:
2287
((curren_directory_path, fileid),
2288
[(file1_path, file1_name, file1_kind, (lstat), file1_id,
2291
This API returns a generator, which is only valid during the current
2292
tree transaction - within a single lock_read or lock_write duration.
2294
If the tree is not locked, it may cause an error to be raised,
2295
depending on the tree implementation.
2297
disk_top = self.abspath(prefix)
2298
if disk_top.endswith('/'):
2299
disk_top = disk_top[:-1]
2300
top_strip_len = len(disk_top) + 1
2301
inventory_iterator = self._walkdirs(prefix)
2302
disk_iterator = osutils.walkdirs(disk_top, prefix)
2304
current_disk = disk_iterator.next()
2305
disk_finished = False
2307
if not (e.errno == errno.ENOENT or
2308
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
2311
disk_finished = True
2313
current_inv = inventory_iterator.next()
2314
inv_finished = False
2315
except StopIteration:
2318
while not inv_finished or not disk_finished:
2320
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2321
cur_disk_dir_content) = current_disk
2323
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2324
cur_disk_dir_content) = ((None, None), None)
2325
if not disk_finished:
2326
# strip out .bzr dirs
2327
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
2328
len(cur_disk_dir_content) > 0):
2329
# osutils.walkdirs can be made nicer -
2330
# yield the path-from-prefix rather than the pathjoined
2332
bzrdir_loc = bisect_left(cur_disk_dir_content,
2334
if (bzrdir_loc < len(cur_disk_dir_content)
2335
and cur_disk_dir_content[bzrdir_loc][0] == '.bzr'):
2336
# we dont yield the contents of, or, .bzr itself.
2337
del cur_disk_dir_content[bzrdir_loc]
2339
# everything is unknown
2342
# everything is missing
2345
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
2347
# disk is before inventory - unknown
2348
dirblock = [(relpath, basename, kind, stat, None, None) for
2349
relpath, basename, kind, stat, top_path in
2350
cur_disk_dir_content]
2351
yield (cur_disk_dir_relpath, None), dirblock
2353
current_disk = disk_iterator.next()
2354
except StopIteration:
2355
disk_finished = True
2357
# inventory is before disk - missing.
2358
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
2359
for relpath, basename, dkind, stat, fileid, kind in
2361
yield (current_inv[0][0], current_inv[0][1]), dirblock
2363
current_inv = inventory_iterator.next()
2364
except StopIteration:
2367
# versioned present directory
2368
# merge the inventory and disk data together
2370
for relpath, subiterator in itertools.groupby(sorted(
2371
current_inv[1] + cur_disk_dir_content,
2372
key=operator.itemgetter(0)), operator.itemgetter(1)):
2373
path_elements = list(subiterator)
2374
if len(path_elements) == 2:
2375
inv_row, disk_row = path_elements
2376
# versioned, present file
2377
dirblock.append((inv_row[0],
2378
inv_row[1], disk_row[2],
2379
disk_row[3], inv_row[4],
2381
elif len(path_elements[0]) == 5:
2383
dirblock.append((path_elements[0][0],
2384
path_elements[0][1], path_elements[0][2],
2385
path_elements[0][3], None, None))
2386
elif len(path_elements[0]) == 6:
2387
# versioned, absent file.
2388
dirblock.append((path_elements[0][0],
2389
path_elements[0][1], 'unknown', None,
2390
path_elements[0][4], path_elements[0][5]))
2392
raise NotImplementedError('unreachable code')
2393
yield current_inv[0], dirblock
2395
current_inv = inventory_iterator.next()
2396
except StopIteration:
2399
current_disk = disk_iterator.next()
2400
except StopIteration:
2401
disk_finished = True
2403
def _walkdirs(self, prefix=""):
2404
"""Walk the directories of this tree.
2406
:prefix: is used as the directrory to start with.
2407
returns a generator which yields items in the form:
2408
((curren_directory_path, fileid),
2409
[(file1_path, file1_name, file1_kind, None, file1_id,
2412
_directory = 'directory'
2413
# get the root in the inventory
2414
inv = self.inventory
2415
top_id = inv.path2id(prefix)
2419
pending = [(prefix, '', _directory, None, top_id, None)]
2422
currentdir = pending.pop()
2423
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
2424
top_id = currentdir[4]
2426
relroot = currentdir[0] + '/'
2429
# FIXME: stash the node in pending
2431
if entry.kind == 'directory':
2432
for name, child in entry.sorted_children():
2433
dirblock.append((relroot + name, name, child.kind, None,
2434
child.file_id, child.kind
2436
yield (currentdir[0], entry.file_id), dirblock
2437
# push the user specified dirs from dirblock
2438
for dir in reversed(dirblock):
2439
if dir[2] == _directory:
2442
@needs_tree_write_lock
2443
def auto_resolve(self):
2444
"""Automatically resolve text conflicts according to contents.
2446
Only text conflicts are auto_resolvable. Files with no conflict markers
2447
are considered 'resolved', because bzr always puts conflict markers
2448
into files that have text conflicts. The corresponding .THIS .BASE and
2449
.OTHER files are deleted, as per 'resolve'.
2450
:return: a tuple of ConflictLists: (un_resolved, resolved).
2452
un_resolved = _mod_conflicts.ConflictList()
2453
resolved = _mod_conflicts.ConflictList()
2454
conflict_re = re.compile('^(<{7}|={7}|>{7})')
2455
for conflict in self.conflicts():
2456
if (conflict.typestring != 'text conflict' or
2457
self.kind(conflict.file_id) != 'file'):
2458
un_resolved.append(conflict)
2460
my_file = open(self.id2abspath(conflict.file_id), 'rb')
2462
for line in my_file:
2463
if conflict_re.search(line):
2464
un_resolved.append(conflict)
2467
resolved.append(conflict)
2470
resolved.remove_files(self)
2471
self.set_conflicts(un_resolved)
2472
return un_resolved, resolved
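    # Illustrative usage (a hypothetical sketch, not from the original source):
    # files whose conflict markers are gone are treated as resolved and removed
    # from the on-disk conflict list; the rest are returned as un_resolved.
    #
    #   un_resolved, resolved = tree.auto_resolve()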
2476
tree_basis = self.basis_tree()
2477
tree_basis.lock_read()
2479
repo_basis = self.branch.repository.revision_tree(
2480
self.last_revision())
2481
if len(list(repo_basis.iter_changes(tree_basis))) > 0:
2482
raise errors.BzrCheckError(
2483
"Mismatched basis inventory content.")
2488
def _validate(self):
2489
"""Validate internal structures.
2491
This is meant mostly for the test suite. To give it a chance to detect
2492
corruption after actions have occurred. The default implementation is a
2495
:return: None. An exception should be raised if there is an error.
2500
def _get_rules_searcher(self, default_searcher):
2501
"""See Tree._get_rules_searcher."""
2502
if self._rules_searcher is None:
2503
self._rules_searcher = super(WorkingTree,
2504
self)._get_rules_searcher(default_searcher)
2505
return self._rules_searcher
2508
class WorkingTree2(WorkingTree):
2509
"""This is the Format 2 working tree.
2511
This was the first weave based working tree.
2512
- uses os locks for locking.
2513
- uses the branch last-revision.
2516
def __init__(self, *args, **kwargs):
2517
super(WorkingTree2, self).__init__(*args, **kwargs)
2518
# WorkingTree2 has more of a constraint that self._inventory must
2519
# exist. Because this is an older format, we don't mind the overhead
2520
# caused by the extra computation here.
2522
# Newer WorkingTree's should only have self._inventory set when they
2524
if self._inventory is None:
2525
self.read_working_inventory()
2527
def lock_tree_write(self):
2528
"""See WorkingTree.lock_tree_write().
2530
In Format2 WorkingTrees we have a single lock for the branch and tree
2531
so lock_tree_write() degrades to lock_write().
2533
self.branch.lock_write()
2535
return self._control_files.lock_write()
2537
self.branch.unlock()
2541
# do non-implementation specific cleanup
2544
# we share control files:
2545
if self._control_files._lock_count == 3:
2546
# _inventory_is_modified is always False during a read lock.
2547
if self._inventory_is_modified:
2549
self._write_hashcache_if_dirty()
2551
# reverse order of locking.
2553
return self._control_files.unlock()
2555
self.branch.unlock()
2558
1465
class WorkingTree3(WorkingTree):
2559
1466
"""This is the Format 3 working tree.