        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):
                yield ie.file_id

    def all_file_ids(self):
        """See Tree.iter_all_file_ids"""
        return set(self.inventory)
    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)
    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        # at this point ?
        try:
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, its a problem, if its not
            # its a ghost.
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)

    def _cleanup(self):
        self._flush_ignore_list_cache()
    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If its a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))
    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

    def get_file_with_stat(self, file_id, path=None, filtered=True,
                           _fstat=os.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        my_file = self.get_file(file_id, path=path, filtered=filtered)
        try:
            return my_file.read()
        finally:
            my_file.close()

    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()
    def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
        """See Tree.annotate_iter

        This implementation will use the basis tree implementation if possible.
        Lines not in the basis are attributed to CURRENT_REVISION

        If there are pending merges, lines added by those merges will be
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        maybe_file_parent_keys = []
        for parent_id in self.get_parent_ids():
            try:
                parent_tree = self.revision_tree(parent_id)
            except errors.NoSuchRevisionInTree:
                parent_tree = self.branch.repository.revision_tree(parent_id)
            parent_tree.lock_read()
            try:
                if file_id not in parent_tree:
                    continue
                ie = parent_tree.inventory[file_id]
                if ie.kind != 'file':
                    # Note: this is slightly unnecessary, because symlinks and
                    # directories have a "text" which is the empty text, and we
                    # know that won't mess up annotations. But it seems cleaner
                    continue
                parent_text_key = (file_id, ie.revision)
                if parent_text_key not in maybe_file_parent_keys:
                    maybe_file_parent_keys.append(parent_text_key)
            finally:
                parent_tree.unlock()
        graph = _mod_graph.Graph(self.branch.repository.texts)
        heads = graph.heads(maybe_file_parent_keys)
        file_parent_keys = []
        for key in maybe_file_parent_keys:
            if key in heads:
                file_parent_keys.append(key)

        # Now we have the parents of this content
        annotator = self.branch.repository.texts.get_annotator()
        text = self.get_file_text(file_id)
        this_key = (file_id, default_revision)
        annotator.add_special_text(this_key, file_parent_keys, text)
        annotations = [(key[-1], line)
                       for key, line in annotator.annotate_flat(this_key)]
        return annotations
    def _get_ancestors(self, default_revision):
        ancestors = set([default_revision])
        for parent_id in self.get_parent_ids():
            ancestors.update(self.branch.repository.get_ancestry(
                             parent_id, topo_sorted=False))
        return ancestors

    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_bytes = self._transport.get_bytes('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in osutils.split_lines(merges_bytes):
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
        return parents

    def get_root_id(self):
        """Return the id of this trees root"""
        return self._inventory.root.file_id
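
    # Illustrative sketch (not part of the original module): get_parent_ids()
    # combines the branch's last revision with any ids recorded in the
    # 'pending-merges' file, so callers normally read it under a lock.  The
    # checkout path below is hypothetical.
    #
    #   from bzrlib import workingtree
    #   wt = workingtree.WorkingTree.open('/path/to/checkout')
    #   wt.lock_read()
    #   try:
    #       parents = wt.get_parent_ids()   # [basis_revision_id, merged_id, ...]
    #       root_id = wt.get_root_id()
    #   finally:
    #       wt.unlock()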
    def _get_store_filename(self, file_id):
        ## XXX: badly named; this is not in the store at all
        return self.abspath(self.id2path(file_id))
    def clone(self, to_bzrdir, revision_id=None):
        """Duplicate this working tree into to_bzr, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see bzrdir.sprout()

        revision
            If not None, the cloned tree will have its last revision set to
            revision, and difference between the source trees last revision
            and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = to_bzrdir.create_workingtree()
        self.copy_content_into(result, revision_id)
        return result

    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
        else:
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            merge.transform_tree(tree, self)
            tree.set_parent_ids([revision_id])

    def id2abspath(self, file_id):
        return self.abspath(self.id2path(file_id))
    def has_id(self, file_id):
        # files that have been deleted are excluded
        inv = self.inventory
        if not inv.has_id(file_id):
            return False
        path = inv.id2path(file_id)
        return osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    __contains__ = has_id
    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        # XXX: this returns the on-disk size; it should probably return the
        # canonical size
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime
    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        if file_id is None:
            # For unversioned files on win32, we just assume they are not
            # executable
            return False
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat
    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.inventory
        for f, file_id, kind in zip(files, ids, kinds):
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                inv.add_path(f, kind=kind, file_id=file_id)
            self._inventory_is_modified = True

    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    @needs_tree_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as its not the first parent to be added,
            or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available, using
        this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        updated = False
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
            updated = True
        if updated:
            self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
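
    # Illustrative sketch (not from the original source): recording an extra
    # parent on an open WorkingTree `wt` before committing a merge.  The
    # revision id shown is hypothetical.
    #
    #   wt.lock_write()
    #   try:
    #       wt.add_parent_tree_id('merged-revision-id',
    #                             allow_leftmost_as_ghost=False)
    #   finally:
    #       wt.unlock()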
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file.
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            return self._file_content_summary(path, stat_result)
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            target = osutils.readlink(abspath)
            return ('symlink', None, None, target)
        else:
            return (kind, None, None, None)

    def _file_content_summary(self, path, stat_result):
        size = stat_result.st_size
        executable = self._is_executable_from_path_and_stat(path, stat_result)
        # try for a stat cache lookup
        return ('file', size, executable, self._sha_from_stat(
            path, stat_result))
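
    # Illustrative sketch (not from the original source): the summary is a
    # 4-tuple of (kind, size, executable, sha1-or-link-target), and a missing
    # path yields ('missing', None, None, None).  'hello.txt' is hypothetical.
    #
    #   kind, size, executable, sha1 = wt.path_content_summary('hello.txt')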
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self.bzrdir._get_file_mode())
    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        which are
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids
    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                                        leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)

    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._transport.put_file(filename, my_file,
            mode=self.bzrdir._get_file_mode())
    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
                          merge_type=None, force=False):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        merger = Merger(self.branch, this_tree=self)
        # check that there are no local alterations
        if not force and self.has_changes():
            raise errors.UncommittedChanges(self)
        if to_revision is None:
            to_revision = _mod_revision.ensure_null(branch.last_revision())
        merger.other_rev_id = to_revision
        if _mod_revision.is_null(merger.other_rev_id):
            raise errors.NoCommits(branch)
        self.branch.fetch(branch, last_revision=merger.other_rev_id)
        merger.other_basis = merger.other_rev_id
        merger.other_tree = self.branch.repository.revision_tree(
            merger.other_rev_id)
        merger.other_branch = branch
        if from_revision is None:
            merger.find_base()
        else:
            merger.set_base_revision(from_revision, branch)
        if merger.base_rev_id == merger.other_rev_id:
            raise errors.PointlessMerge
        merger.backup_files = False
        if merge_type is None:
            merger.merge_type = Merge3Merger
        else:
            merger.merge_type = merge_type
        merger.set_interesting_files(None)
        merger.show_base = False
        merger.reprocess = False
        conflicts = merger.do_merge()
        merger.set_pending()
        return conflicts
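
    # Illustrative sketch (not from the original source): merging another
    # branch's tip into an open WorkingTree `wt` and leaving it as a pending
    # merge.  The branch URL is hypothetical.
    #
    #   from bzrlib import branch as _branch_mod
    #   other = _branch_mod.Branch.open('http://example.com/other-branch')
    #   conflicts = wt.merge_from_branch(other)
    #   if conflicts:
    #       print 'merge produced %d conflicts' % conflicts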
    @needs_read_lock
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._transport.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        try:
            merge_hashes = {}
            try:
                if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                    raise errors.MergeModifiedFormatError()
            except StopIteration:
                raise errors.MergeModifiedFormatError()
            for s in RioReader(hashfile):
                # RioReader reads in Unicode, so convert file_ids back to utf8
                file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
                if file_id not in self.inventory:
                    continue
                text_hash = s.get("hash")
                if text_hash == self.get_file_sha1(file_id):
                    merge_hashes[file_id] = text_hash
            return merge_hashes
        finally:
            hashfile.close()
    @needs_write_lock
    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id

    def get_symlink_target(self, file_id):
        abspath = self.id2abspath(file_id)
        target = osutils.readlink(abspath)
        return target
    @needs_tree_write_lock
    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                                          'Trees have the same root')
        try:
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
        try:
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        finally:
            other_tree.unlock()
        other_tree.bzrdir.retire_bzrdir()
    def _setup_directory_is_tree_reference(self):
        if self._branch.repository._format.supports_tree_reference:
            self._directory_is_tree_reference = \
                self._directory_may_be_tree_reference
        else:
            self._directory_is_tree_reference = \
                self._directory_is_never_tree_reference

    def _directory_is_never_tree_reference(self, relpath):
        return False

    def _directory_may_be_tree_reference(self, relpath):
        # as a special case, if a directory contains control files then
        # it's a tree reference, except that the root of the tree is not
        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
        # TODO: We could ask all the control formats whether they
        # recognize this directory, but at the moment there's no cheap api
        # to do that. Since we probably can only nest bzr checkouts and
        # they always use this name it's ok for now. -- mbp 20060306
        #
        # FIXME: There is an unhandled case here of a subdirectory
        # containing .bzr but not a branch; that will probably blow up
        # when you try to commit it. It might happen if there is a
        # checkout in a subdirectory. This can be avoided by not adding
        # it.
    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        self.flush()
        def mkdirs(path):
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()
            return transport

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
        if format is None:
            format = self.bzrdir.cloning_metadir()
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
        try:
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
        if not repo.supports_rich_root():
            raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir,
                target_branch=new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = inventory.Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
        return wt
    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file,
            working=True)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)

    def flush(self):
        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._transport.put_file('inventory', sio,
            mode=self.bzrdir._get_file_mode())
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))
    def list_files(self, include_root=False, from_dir=None, recursive=True):
        """List all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.
        This does not include files that have been deleted in this
        tree. Skips the control directory.

        :param include_root: if True, return an entry for the root
        :param from_dir: start from this directory or None for the root
        :param recursive: whether to recurse into subdirectories or not
        """
        # list_files is an iterator, so @needs_read_lock doesn't work properly
        # with it. So callers should be careful to always read_lock the tree.
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)

        inv = self.inventory
        if from_dir is None and include_root is True:
            yield ('', 'V', 'directory', inv.root.file_id, inv.root)
        # Convert these into local objects to save lookup times
        pathjoin = osutils.pathjoin
        file_kind = self._kind

        # transport.base ends in a slash, we want the piece
        # between the last two slashes
        transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]

        fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}

        # directory file_id, relative path, absolute path, reverse sorted children
        if from_dir is not None:
            from_dir_id = inv.path2id(from_dir)
            if from_dir_id is None:
                # Directory not versioned
                return
            from_dir_abspath = pathjoin(self.basedir, from_dir)
        else:
            from_dir_id = inv.root.file_id
            from_dir_abspath = self.basedir
        children = os.listdir(from_dir_abspath)
        children.sort()
        # jam 20060527 The kernel sized tree seems equivalent whether we
        # use a deque and popleft to keep them sorted, or if we use a plain
        # list and just reverse() them.
        children = collections.deque(children)
        stack = [(from_dir_id, u'', from_dir_abspath, children)]
        while stack:
            from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]

            while children:
                f = children.popleft()
                ## TODO: If we find a subdirectory with its own .bzr
                ## directory, then that is a separate tree and we
                ## should exclude it.

                # the bzrdir for this tree
                if transport_base_dir == f:
                    continue

                # we know that from_dir_relpath and from_dir_abspath never end in a slash
                # and 'f' doesn't begin with one, we can do a string op, rather
                # than the checks of pathjoin(), all relative paths will have an extra slash
                # at the beginning
                fp = from_dir_relpath + '/' + f

                # absolute path
                fap = from_dir_abspath + '/' + f

                dir_ie = inv[from_dir_id]
                if dir_ie.kind == 'directory':
                    f_ie = dir_ie.children.get(f)
                else:
                    f_ie = None
                if f_ie:
                    c = 'V'
                elif self.is_ignored(fp[1:]):
                    c = 'I'
                else:
                    # we may not have found this file, because of a unicode
                    # issue, or because the directory was actually a symlink.
                    f_norm, can_access = osutils.normalized_filename(f)
                    if f == f_norm or not can_access:
                        # No change, so treat this file normally
                        c = '?'
                    else:
                        # this file can be accessed by a normalized path
                        # check again if it is versioned
                        # these lines are repeated here for performance
                        f = f_norm
                        fp = from_dir_relpath + '/' + f
                        fap = from_dir_abspath + '/' + f
                        f_ie = inv.get_child(from_dir_id, f)
                        if f_ie:
                            c = 'V'
                        elif self.is_ignored(fp[1:]):
                            c = 'I'
                        else:
                            c = '?'

                fk = file_kind(fap)

                # make a last minute entry
                if f_ie:
                    yield fp[1:], c, fk, f_ie.file_id, f_ie
                else:
                    try:
                        yield fp[1:], c, fk, None, fk_entries[fk]()
                    except KeyError:
                        yield fp[1:], c, fk, None, TreeEntry()
                    continue

                if fk != 'directory':
                    continue

                # But do this child first if recursing down
                if recursive:
                    new_children = os.listdir(fap)
                    new_children.sort()
                    new_children = collections.deque(new_children)
                    stack.append((f_ie.file_id, fp, fap, new_children))
                    # Break out of inner loop,
                    # so that we start outer loop with child
                    break
            else:
                # if we finished all children, pop it off the stack
                stack.pop()
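
    # Illustrative sketch (not from the original source): list_files() is a
    # generator and does not take the lock itself, so callers lock first.
    #
    #   wt.lock_read()
    #   try:
    #       for path, klass, kind, file_id, entry in wt.list_files():
    #           print path, klass, kind
    #   finally:
    #       wt.unlock()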
@needs_tree_write_lock
1330
def move(self, from_paths, to_dir=None, after=False):
1333
to_dir must exist in the inventory.
1335
If to_dir exists and is a directory, the files are moved into
1336
it, keeping their old names.
1338
Note that to_dir is only the last component of the new name;
1339
this doesn't change the directory.
1341
For each entry in from_paths the move mode will be determined
1344
The first mode moves the file in the filesystem and updates the
1345
inventory. The second mode only updates the inventory without
1346
touching the file on the filesystem. This is the new mode introduced
1349
move uses the second mode if 'after == True' and the target is not
1350
versioned but present in the working tree.
1352
move uses the second mode if 'after == False' and the source is
1353
versioned but no longer in the working tree, and the target is not
1354
versioned but present in the working tree.
1356
move uses the first mode if 'after == False' and the source is
1357
versioned and present in the working tree, and the target is not
1358
versioned and not present in the working tree.
1360
Everything else results in an error.
1362
This returns a list of (from_path, to_path) pairs for each
1363
entry that is moved.
1368
# check for deprecated use of signature
1370
raise TypeError('You must supply a target directory')
1371
# check destination directory
1372
if isinstance(from_paths, basestring):
1374
inv = self.inventory
1375
to_abs = self.abspath(to_dir)
1376
if not isdir(to_abs):
1377
raise errors.BzrMoveFailedError('',to_dir,
1378
errors.NotADirectory(to_abs))
1379
if not self.has_filename(to_dir):
1380
raise errors.BzrMoveFailedError('',to_dir,
1381
errors.NotInWorkingDirectory(to_dir))
1382
to_dir_id = inv.path2id(to_dir)
1383
if to_dir_id is None:
1384
raise errors.BzrMoveFailedError('',to_dir,
1385
errors.NotVersionedError(path=str(to_dir)))
1387
to_dir_ie = inv[to_dir_id]
1388
if to_dir_ie.kind != 'directory':
1389
raise errors.BzrMoveFailedError('',to_dir,
1390
errors.NotADirectory(to_abs))
1392
# create rename entries and tuples
1393
for from_rel in from_paths:
1394
from_tail = splitpath(from_rel)[-1]
1395
from_id = inv.path2id(from_rel)
1397
raise errors.BzrMoveFailedError(from_rel,to_dir,
1398
errors.NotVersionedError(path=str(from_rel)))
1400
from_entry = inv[from_id]
1401
from_parent_id = from_entry.parent_id
1402
to_rel = pathjoin(to_dir, from_tail)
1403
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1405
from_tail=from_tail,
1406
from_parent_id=from_parent_id,
1407
to_rel=to_rel, to_tail=from_tail,
1408
to_parent_id=to_dir_id)
1409
rename_entries.append(rename_entry)
1410
rename_tuples.append((from_rel, to_rel))
1412
# determine which move mode to use. checks also for movability
1413
rename_entries = self._determine_mv_mode(rename_entries, after)
1415
original_modified = self._inventory_is_modified
1418
self._inventory_is_modified = True
1419
self._move(rename_entries)
1421
# restore the inventory on error
1422
self._inventory_is_modified = original_modified
1424
self._write_inventory(inv)
1425
return rename_tuples
1427
def _determine_mv_mode(self, rename_entries, after=False):
1428
"""Determines for each from-to pair if both inventory and working tree
1429
or only the inventory has to be changed.
1431
Also does basic plausability tests.
1433
inv = self.inventory
1435
for rename_entry in rename_entries:
1436
# store to local variables for easier reference
1437
from_rel = rename_entry.from_rel
1438
from_id = rename_entry.from_id
1439
to_rel = rename_entry.to_rel
1440
to_id = inv.path2id(to_rel)
1441
only_change_inv = False
1443
# check the inventory for source and destination
1445
raise errors.BzrMoveFailedError(from_rel,to_rel,
1446
errors.NotVersionedError(path=str(from_rel)))
1447
if to_id is not None:
1448
raise errors.BzrMoveFailedError(from_rel,to_rel,
1449
errors.AlreadyVersionedError(path=str(to_rel)))
1451
# try to determine the mode for rename (only change inv or change
1452
# inv and file system)
1454
if not self.has_filename(to_rel):
1455
raise errors.BzrMoveFailedError(from_id,to_rel,
1456
errors.NoSuchFile(path=str(to_rel),
1457
extra="New file has not been created yet"))
1458
only_change_inv = True
1459
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1460
only_change_inv = True
1461
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1462
only_change_inv = False
1463
elif (not self.case_sensitive
1464
and from_rel.lower() == to_rel.lower()
1465
and self.has_filename(from_rel)):
1466
only_change_inv = False
1468
# something is wrong, so lets determine what exactly
1469
if not self.has_filename(from_rel) and \
1470
not self.has_filename(to_rel):
1471
raise errors.BzrRenameFailedError(from_rel,to_rel,
1472
errors.PathsDoNotExist(paths=(str(from_rel),
1475
raise errors.RenameFailedFilesExist(from_rel, to_rel)
1476
rename_entry.only_change_inv = only_change_inv
1477
return rename_entries
1479
def _move(self, rename_entries):
1480
"""Moves a list of files.
1482
Depending on the value of the flag 'only_change_inv', the
1483
file will be moved on the file system or not.
1485
inv = self.inventory
1488
for entry in rename_entries:
1490
self._move_entry(entry)
1492
self._rollback_move(moved)
1496
def _rollback_move(self, moved):
1497
"""Try to rollback a previous move in case of an filesystem error."""
1498
inv = self.inventory
1501
self._move_entry(WorkingTree._RenameEntry(
1502
entry.to_rel, entry.from_id,
1503
entry.to_tail, entry.to_parent_id, entry.from_rel,
1504
entry.from_tail, entry.from_parent_id,
1505
entry.only_change_inv))
1506
except errors.BzrMoveFailedError, e:
1507
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1508
" The working tree is in an inconsistent state."
1509
" Please consider doing a 'bzr revert'."
1510
" Error message is: %s" % e)
1512
def _move_entry(self, entry):
1513
inv = self.inventory
1514
from_rel_abs = self.abspath(entry.from_rel)
1515
to_rel_abs = self.abspath(entry.to_rel)
1516
if from_rel_abs == to_rel_abs:
1517
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1518
"Source and target are identical.")
1520
if not entry.only_change_inv:
1522
osutils.rename(from_rel_abs, to_rel_abs)
1524
raise errors.BzrMoveFailedError(entry.from_rel,
1526
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1528
@needs_tree_write_lock
1529
def rename_one(self, from_rel, to_rel, after=False):
1532
This can change the directory or the filename or both.
1534
rename_one has several 'modes' to work. First, it can rename a physical
1535
file and change the file_id. That is the normal mode. Second, it can
1536
only change the file_id without touching any physical file. This is
1537
the new mode introduced in version 0.15.
1539
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1540
versioned but present in the working tree.
1542
rename_one uses the second mode if 'after == False' and 'from_rel' is
1543
versioned but no longer in the working tree, and 'to_rel' is not
1544
versioned but present in the working tree.
1546
rename_one uses the first mode if 'after == False' and 'from_rel' is
1547
versioned and present in the working tree, and 'to_rel' is not
1548
versioned and not present in the working tree.
1550
Everything else results in an error.
1552
inv = self.inventory
1555
# create rename entries and tuples
1556
from_tail = splitpath(from_rel)[-1]
1557
from_id = inv.path2id(from_rel)
1559
# if file is missing in the inventory maybe it's in the basis_tree
1560
basis_tree = self.branch.basis_tree()
1561
from_id = basis_tree.path2id(from_rel)
1563
raise errors.BzrRenameFailedError(from_rel,to_rel,
1564
errors.NotVersionedError(path=str(from_rel)))
1565
# put entry back in the inventory so we can rename it
1566
from_entry = basis_tree.inventory[from_id].copy()
1569
from_entry = inv[from_id]
1570
from_parent_id = from_entry.parent_id
1571
to_dir, to_tail = os.path.split(to_rel)
1572
to_dir_id = inv.path2id(to_dir)
1573
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1575
from_tail=from_tail,
1576
from_parent_id=from_parent_id,
1577
to_rel=to_rel, to_tail=to_tail,
1578
to_parent_id=to_dir_id)
1579
rename_entries.append(rename_entry)
1581
# determine which move mode to use. checks also for movability
1582
rename_entries = self._determine_mv_mode(rename_entries, after)
1584
# check if the target changed directory and if the target directory is
1586
if to_dir_id is None:
1587
raise errors.BzrMoveFailedError(from_rel,to_rel,
1588
errors.NotVersionedError(path=str(to_dir)))
1590
# all checks done. now we can continue with our actual work
1591
mutter('rename_one:\n'
1596
' to_dir_id {%s}\n',
1597
from_id, from_rel, to_rel, to_dir, to_dir_id)
1599
self._move(rename_entries)
1600
self._write_inventory(inv)
1602
class _RenameEntry(object):
1603
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1604
to_rel, to_tail, to_parent_id, only_change_inv=False):
1605
self.from_rel = from_rel
1606
self.from_id = from_id
1607
self.from_tail = from_tail
1608
self.from_parent_id = from_parent_id
1609
self.to_rel = to_rel
1610
self.to_tail = to_tail
1611
self.to_parent_id = to_parent_id
1612
self.only_change_inv = only_change_inv
def unknowns(self):
1616
"""Return all unknown files.
1618
These are files in the working directory that are not versioned or
1619
control files or ignored.
1621
# force the extras method to be fully executed before returning, to
1622
# prevent race conditions with the lock
1624
[subp for subp in self.extras() if not self.is_ignored(subp)])
1626
@needs_tree_write_lock
1627
def unversion(self, file_ids):
1628
"""Remove the file ids in file_ids from the current versioned set.
1630
When a file_id is unversioned, all of its children are automatically
1633
:param file_ids: The file ids to stop versioning.
1634
:raises: NoSuchId if any fileid is not currently versioned.
1636
for file_id in file_ids:
1637
if file_id not in self._inventory:
1638
raise errors.NoSuchId(self, file_id)
1639
for file_id in file_ids:
1640
if self._inventory.has_id(file_id):
1641
self._inventory.remove_recursive_id(file_id)
1643
# in the future this should just set a dirty bit to wait for the
1644
# final unlock. However, until all methods of workingtree start
1645
# with the current in -memory inventory rather than triggering
1646
# a read, it is more complex - we need to teach read_inventory
1647
# to know when to read, and when to not read first... and possibly
1648
# to save first when the in memory one may be corrupted.
1649
# so for now, we just only write it if it is indeed dirty.
1651
self._write_inventory(self._inventory)
1653
def _iter_conflicts(self):
1655
for info in self.list_files():
1657
stem = get_conflicted_stem(path)
1660
if stem not in conflicted:
1661
conflicted.add(stem)
1665
def pull(self, source, overwrite=False, stop_revision=None,
1666
change_reporter=None, possible_transports=None, local=False):
1669
old_revision_info = self.branch.last_revision_info()
1670
basis_tree = self.basis_tree()
1671
count = self.branch.pull(source, overwrite, stop_revision,
1672
possible_transports=possible_transports,
1674
new_revision_info = self.branch.last_revision_info()
1675
if new_revision_info != old_revision_info:
1676
repository = self.branch.repository
1677
basis_tree.lock_read()
1679
new_basis_tree = self.branch.basis_tree()
1686
change_reporter=change_reporter)
1687
basis_root_id = basis_tree.get_root_id()
1688
new_root_id = new_basis_tree.get_root_id()
1689
if basis_root_id != new_root_id:
1690
self.set_root_id(new_root_id)
1693
# TODO - dedup parents list with things merged by pull ?
1694
# reuse the revisiontree we merged against to set the new
1696
parent_trees = [(self.branch.last_revision(), new_basis_tree)]
1697
# we have to pull the merge trees out again, because
1698
# merge_inner has set the ids. - this corner is not yet
1699
# layered well enough to prevent double handling.
1700
# XXX TODO: Fix the double handling: telling the tree about
1701
# the already known parent data is wasteful.
1702
merges = self.get_parent_ids()[1:]
1703
parent_trees.extend([
1704
(parent, repository.revision_tree(parent)) for
1706
self.set_parent_trees(parent_trees)
1712
def put_file_bytes_non_atomic(self, file_id, bytes):
1713
"""See MutableTree.put_file_bytes_non_atomic."""
1714
stream = file(self.id2abspath(file_id), 'wb')
1719
# TODO: update the hashcache here ?
    def extras(self):
        """Yield all unversioned files in this WorkingTree.

        If there are any unversioned directories then only the directory is
        returned, not all its children. But if there are unversioned files
        under a versioned subdirectory, they are returned.

        Currently returned depth-first, sorted by name within directories.
        This is the same order used by 'osutils.walkdirs'.
        """
        ## TODO: Work from given directory downwards
        for path, dir_entry in self.inventory.directories():
            # mutter("search for unknowns in %r", path)
            dirabs = self.abspath(path)
            if not isdir(dirabs):
                # e.g. directory deleted
                continue

            fl = []
            for subf in os.listdir(dirabs):
                if self.bzrdir.is_control_filename(subf):
                    continue
                if subf not in dir_entry.children:
                    try:
                        (subf_norm,
                         can_access) = osutils.normalized_filename(subf)
                    except UnicodeDecodeError:
                        path_os_enc = path.encode(osutils._fs_enc)
                        relpath = path_os_enc + '/' + subf
                        raise errors.BadFilenameEncoding(relpath,
                                                         osutils._fs_enc)
                    if subf_norm != subf and can_access:
                        if subf_norm not in dir_entry.children:
                            fl.append(subf_norm)
                    else:
                        fl.append(subf)

            fl.sort()
            for subf in fl:
                subp = pathjoin(path, subf)
                yield subp

    def ignored_files(self):
        """Yield list of PATH, IGNORE_PATTERN"""
        for subp in self.extras():
            pat = self.is_ignored(subp)
            if pat is not None:
                yield subp, pat

    def get_ignore_list(self):
        """Return list of ignore patterns.

        Cached in the Tree object after the first call.
        """
        ignoreset = getattr(self, '_ignoreset', None)
        if ignoreset is not None:
            return ignoreset

        ignore_globs = set()
        ignore_globs.update(ignores.get_runtime_ignores())
        ignore_globs.update(ignores.get_user_ignores())
        if self.has_filename(bzrlib.IGNORE_FILENAME):
            f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
            try:
                ignore_globs.update(ignores.parse_ignore_file(f))
            finally:
                f.close()
        self._ignoreset = ignore_globs
        return ignore_globs

    def _flush_ignore_list_cache(self):
        """Resets the cached ignore list to force a cache rebuild."""
        self._ignoreset = None
        self._ignoreglobster = None

    def is_ignored(self, filename):
        r"""Check whether the filename matches an ignore pattern.

        Patterns containing '/' or '\' need to match the whole path;
        others match against only the last component. Patterns starting
        with '!' are ignore exceptions. Exceptions take precedence
        over regular patterns and cause the filename to not be ignored.

        If the file is ignored, returns the pattern which caused it to
        be ignored, otherwise None. So this can simply be used as a
        boolean if desired."""
        if getattr(self, '_ignoreglobster', None) is None:
            self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
        return self._ignoreglobster.match(filename)
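
    # Illustrative sketch (not from the original source): is_ignored() returns
    # the matching pattern (truthy) or None, so it doubles as a boolean test.
    # The filename and pattern below are hypothetical.
    #
    #   pattern = wt.is_ignored('build/output.o')   # e.g. 'build/*' or None
    #   if pattern:
    #       print 'ignored by', pattern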
1811
def kind(self, file_id):
1812
return file_kind(self.id2abspath(file_id))
1814
def stored_kind(self, file_id):
1815
"""See Tree.stored_kind"""
1816
return self.inventory[file_id].kind
1818
def _comparison_data(self, entry, path):
1819
abspath = self.abspath(path)
1821
stat_value = os.lstat(abspath)
1823
if getattr(e, 'errno', None) == errno.ENOENT:
1830
mode = stat_value.st_mode
1831
kind = osutils.file_kind_from_stat_mode(mode)
1832
if not supports_executable():
1833
executable = entry is not None and entry.executable
1835
executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
1836
return kind, executable, stat_value
1838
def _file_size(self, entry, stat_value):
1839
return stat_value.st_size
1841
def last_revision(self):
1842
"""Return the last revision of the branch for this tree.
1844
This format tree does not support a separate marker for last-revision
1845
compared to the branch.
1847
See MutableTree.last_revision
1849
return self._last_revision()
1852
def _last_revision(self):
1853
"""helper for get_parent_ids."""
1854
return _mod_revision.ensure_null(self.branch.last_revision())
1856
def is_locked(self):
1857
return self._control_files.is_locked()
1859
def _must_be_locked(self):
1860
if not self.is_locked():
1861
raise errors.ObjectNotLocked(self)
1863
def lock_read(self):
1864
"""Lock the tree for reading.
1866
This also locks the branch, and can be unlocked via self.unlock().
1868
:return: A bzrlib.lock.LogicalLockResult.
1870
if not self.is_locked():
1872
self.branch.lock_read()
1874
self._control_files.lock_read()
1875
return LogicalLockResult(self.unlock)
1877
self.branch.unlock()
1880
def lock_tree_write(self):
1881
"""See MutableTree.lock_tree_write, and WorkingTree.unlock.
1883
:return: A bzrlib.lock.LogicalLockResult.
1885
if not self.is_locked():
1887
self.branch.lock_read()
1889
self._control_files.lock_write()
1890
return LogicalLockResult(self.unlock)
1892
self.branch.unlock()
1895
def lock_write(self):
1896
"""See MutableTree.lock_write, and WorkingTree.unlock.
1898
:return: A bzrlib.lock.LogicalLockResult.
1900
if not self.is_locked():
1902
self.branch.lock_write()
1904
self._control_files.lock_write()
1905
return LogicalLockResult(self.unlock)
1907
self.branch.unlock()
1910
def get_physical_lock_status(self):
1911
return self._control_files.get_physical_lock_status()
1913
def _basis_inventory_name(self):
1914
return 'basis-inventory-cache'
1916
def _reset_data(self):
1917
"""Reset transient data that cannot be revalidated."""
1918
self._inventory_is_modified = False
1919
f = self._transport.get('inventory')
1921
result = self._deserialize(f)
1924
self._set_inventory(result, dirty=False)
1926
@needs_tree_write_lock
1927
def set_last_revision(self, new_revision):
1928
"""Change the last revision in the working tree."""
1929
if self._change_last_revision(new_revision):
1930
self._cache_basis_inventory(new_revision)
1932
def _change_last_revision(self, new_revision):
1933
"""Template method part of set_last_revision to perform the change.
1935
This is used to allow WorkingTree3 instances to not affect branch
1936
when their last revision is set.
1938
if _mod_revision.is_null(new_revision):
1939
self.branch.set_revision_history([])
1942
self.branch.generate_revision_history(new_revision)
1943
except errors.NoSuchRevision:
1944
# not present in the repo - dont try to set it deeper than the tip
1945
self.branch.set_revision_history([new_revision])
1948
def _write_basis_inventory(self, xml):
1949
"""Write the basis inventory XML to the basis-inventory file"""
1950
path = self._basis_inventory_name()
1952
self._transport.put_file(path, sio,
1953
mode=self.bzrdir._get_file_mode())
1955
def _create_basis_xml_from_inventory(self, revision_id, inventory):
1956
"""Create the text that will be saved in basis-inventory"""
1957
inventory.revision_id = revision_id
1958
return xml7.serializer_v7.write_inventory_to_string(inventory)
1960
def _cache_basis_inventory(self, new_revision):
1961
"""Cache new_revision as the basis inventory."""
1962
# TODO: this should allow the ready-to-use inventory to be passed in,
1963
# as commit already has that ready-to-use [while the format is the
1966
# this double handles the inventory - unpack and repack -
1967
# but is easier to understand. We can/should put a conditional
1968
# in here based on whether the inventory is in the latest format
1969
# - perhaps we should repack all inventories on a repository
1971
# the fast path is to copy the raw xml from the repository. If the
1972
# xml contains 'revision_id="', then we assume the right
1973
# revision_id is set. We must check for this full string, because a
1974
# root node id can legitimately look like 'revision_id' but cannot
1976
xml = self.branch.repository._get_inventory_xml(new_revision)
1977
firstline = xml.split('\n', 1)[0]
1978
if (not 'revision_id="' in firstline or
1979
'format="7"' not in firstline):
1980
inv = self.branch.repository._serializer.read_inventory_from_string(
1982
xml = self._create_basis_xml_from_inventory(new_revision, inv)
1983
self._write_basis_inventory(xml)
1984
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1987
def read_basis_inventory(self):
1988
"""Read the cached basis inventory."""
1989
path = self._basis_inventory_name()
1990
return self._transport.get_bytes(path)
1993
def read_working_inventory(self):
1994
"""Read the working inventory.
1996
:raises errors.InventoryModified: read_working_inventory will fail
1997
when the current in memory inventory has been modified.
1999
# conceptually this should be an implementation detail of the tree.
2000
# XXX: Deprecate this.
2001
# ElementTree does its own conversion from UTF-8, so open in
2003
if self._inventory_is_modified:
2004
raise errors.InventoryModified(self)
2005
f = self._transport.get('inventory')
2007
result = self._deserialize(f)
2010
self._set_inventory(result, dirty=False)
2013
@needs_tree_write_lock
2014
def remove(self, files, verbose=False, to_file=None, keep_files=True,
2016
"""Remove nominated files from the working inventory.
2018
:files: File paths relative to the basedir.
2019
:keep_files: If true, the files will also be kept.
2020
:force: Delete files and directories, even if they are changed and
2021
even if the directories are not empty.
2023
if isinstance(files, basestring):
2028
all_files = set() # specified and nested files
2029
unknown_nested_files=set()
2031
to_file = sys.stdout
2033
files_to_backup = []
2035
def recurse_directory_to_add_files(directory):
2036
# Recurse directory and add all files
2037
# so we can check if they have changed.
2038
for parent_info, file_infos in self.walkdirs(directory):
2039
for relpath, basename, kind, lstat, fileid, kind in file_infos:
2040
# Is it versioned or ignored?
2041
if self.path2id(relpath):
2042
# Add nested content for deletion.
2043
all_files.add(relpath)
2045
# Files which are not versioned
2046
# should be treated as unknown.
2047
files_to_backup.append(relpath)
2049
for filename in files:
2050
# Get file name into canonical form.
2051
abspath = self.abspath(filename)
2052
filename = self.relpath(abspath)
2053
if len(filename) > 0:
2054
all_files.add(filename)
2055
recurse_directory_to_add_files(filename)
2057
files = list(all_files)
2060
return # nothing to do
2062
# Sort needed to first handle directory content before the directory
2063
files.sort(reverse=True)
2065
# Bail out if we are going to delete files we shouldn't
2066
if not keep_files and not force:
2067
for (file_id, path, content_change, versioned, parent_id, name,
2068
kind, executable) in self.iter_changes(self.basis_tree(),
2069
include_unchanged=True, require_versioned=False,
2070
want_unversioned=True, specific_files=files):
2071
if versioned[0] == False:
2072
# The record is unknown or newly added
2073
files_to_backup.append(path[1])
2074
elif (content_change and (kind[1] is not None) and
2075
osutils.is_inside_any(files, path[1])):
2076
# Versioned and changed, but not deleted, and still
2077
# in one of the dirs to be deleted.
2078
files_to_backup.append(path[1])
2080
def backup(file_to_backup):
2081
backup_name = self.bzrdir.generate_backup_name(file_to_backup)
2082
osutils.rename(abs_path, self.abspath(backup_name))
2083
return "removed %s (but kept a copy: %s)" % (file_to_backup, backup_name)
2085
# Build inv_delta and delete files where applicable,
2086
# do this before any modifications to inventory.
2088
fid = self.path2id(f)
2091
message = "%s is not versioned." % (f,)
2094
# having removed it, it must be either ignored or unknown
2095
if self.is_ignored(f):
2099
# XXX: Really should be a more abstract reporter interface
2100
kind_ch = osutils.kind_marker(self.kind(fid))
2101
to_file.write(new_status + ' ' + f + kind_ch + '\n')
2103
inv_delta.append((f, None, fid, None))
2104
message = "removed %s" % (f,)
2107
abs_path = self.abspath(f)
2108
if osutils.lexists(abs_path):
2109
if (osutils.isdir(abs_path) and
2110
len(os.listdir(abs_path)) > 0):
2112
osutils.rmtree(abs_path)
2113
message = "deleted %s" % (f,)
2117
if f in files_to_backup:
2120
osutils.delete_any(abs_path)
2121
message = "deleted %s" % (f,)
2122
elif message is not None:
2123
# Only care if we haven't done anything yet.
2124
message = "%s does not exist." % (f,)
2126
# Print only one message (if any) per file.
2127
if message is not None:
2129
self.apply_inventory_delta(inv_delta)
2131
@needs_tree_write_lock
2132
def revert(self, filenames=None, old_tree=None, backups=True,
2133
pb=None, report_changes=False):
2134
from bzrlib.conflicts import resolve
2137
symbol_versioning.warn('Using [] to revert all files is deprecated'
2138
' as of bzr 0.91. Please use None (the default) instead.',
2139
DeprecationWarning, stacklevel=2)
2140
if old_tree is None:
2141
basis_tree = self.basis_tree()
2142
basis_tree.lock_read()
2143
old_tree = basis_tree
2147
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
2149
if filenames is None and len(self.get_parent_ids()) > 1:
2151
last_revision = self.last_revision()
2152
if last_revision != _mod_revision.NULL_REVISION:
2153
if basis_tree is None:
2154
basis_tree = self.basis_tree()
2155
basis_tree.lock_read()
2156
parent_trees.append((last_revision, basis_tree))
2157
self.set_parent_trees(parent_trees)
2160
resolve(self, filenames, ignore_misses=True, recursive=True)
2162
if basis_tree is not None:
2166
    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree can supply revision_trees for the basis revision only
        because there is only one cached inventory in the bzr directory.
        """
        if revision_id == self.last_revision():
            try:
                xml = self.read_basis_inventory()
            except errors.NoSuchFile:
                pass
            else:
                try:
                    inv = xml7.serializer_v7.read_inventory_from_string(xml)
                    # don't use the repository revision_tree api because we
                    # want to supply the inventory.
                    if inv.revision_id == revision_id:
                        return revisiontree.RevisionTree(self.branch.repository,
                            inv, revision_id)
                except errors.BadInventoryFormat:
                    pass
        # raise if there was no inventory, or if we read the wrong inventory.
        raise errors.NoSuchRevisionInTree(self, revision_id)
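
    # Illustrative sketch (hypothetical caller code): revision_tree() only
    # answers for the basis revision; anything else raises
    # NoSuchRevisionInTree and callers typically fall back to the repository.
    #
    #   try:
    #       tree = wt.revision_tree(wt.last_revision())
    #   except errors.NoSuchRevisionInTree:
    #       tree = wt.branch.repository.revision_tree(wt.last_revision())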
2190
    # XXX: This method should be deprecated in favour of taking in a proper
    # new Inventory object.
    @needs_tree_write_lock
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import (Inventory,
                                      InventoryDirectory,
                                      InventoryFile,
                                      InventoryLink)
        inv = Inventory(self.get_root_id())
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            # fixme, there should be a factory function inv.add_??
            if kind == 'directory':
                inv.add(InventoryDirectory(file_id, name, parent))
            elif kind == 'file':
                inv.add(InventoryFile(file_id, name, parent))
            elif kind == 'symlink':
                inv.add(InventoryLink(file_id, name, parent))
            else:
                raise errors.BzrError("unknown kind %r" % kind)
        self._write_inventory(inv)
2214
    @needs_tree_write_lock
    def set_root_id(self, file_id):
        """Set the root id for this tree."""
        if file_id is None:
            raise ValueError(
                'WorkingTree.set_root_id with fileid=None')
        file_id = osutils.safe_file_id(file_id)
        self._set_root_id(file_id)

    def _set_root_id(self, file_id):
        """Set the root id for this tree, in a format specific manner.

        :param file_id: The file id to assign to the root. It must not be
            present in the current inventory or an error will occur. It must
            not be None, but rather a valid file id.
        """
        inv = self._inventory
        orig_root_id = inv.root.file_id
        # TODO: it might be nice to exit early if there was nothing
        # to do, saving us from triggering a sync on unlock.
        self._inventory_is_modified = True
        # we preserve the root inventory entry object, but
        # unlink it from the byid index
        del inv._byid[inv.root.file_id]
        inv.root.file_id = file_id
        # and link it into the index with the new changed id.
        inv._byid[inv.root.file_id] = inv.root
        # and finally update all children to reference the new id.
        # XXX: this should be safe to just look at the root.children
        # list, not the WHOLE INVENTORY.
        for fid in inv:
            entry = inv[fid]
            if entry.parent_id == orig_root_id:
                entry.parent_id = inv.root.file_id
2251
"""See Branch.unlock.
2253
WorkingTree locking just uses the Branch locking facilities.
2254
This is current because all working trees have an embedded branch
2255
within them. IF in the future, we were to make branch data shareable
2256
between multiple working trees, i.e. via shared storage, then we
2257
would probably want to lock both the local tree, and the branch.
2259
raise NotImplementedError(self.unlock)
2263
    def update(self, change_reporter=None, possible_transports=None,
               revision=None, old_tip=_marker):
        """Update a working tree along its branch.

        This will update the branch if it is bound to a master, which means
        we have multiple trees involved:

        - The new basis tree of the master.
        - The old basis tree of the branch.
        - The old basis tree of the working tree.
        - The current working tree state.

        Pathologically, all of these may be different, and non-ancestors of
        each other. Conceptually we want to:

        - Preserve the wt.basis->wt.state changes
        - Transform the wt.basis to the new master basis.
        - Apply a merge of the old branch basis to get any 'local' changes
          from it into the tree.
        - Restore the wt.basis->wt.state changes.

        There isn't a single operation at the moment to do that, so we:

        - Merge current state -> basis tree of the master w.r.t. the old tree
          basis.
        - Do a 'normal' merge of the old branch basis if it is relevant.

        :param revision: The target revision to update to. Must be in the
            revision history of the branch.
        :param old_tip: If branch.update() has already been run, the value it
            returned (old tip of the branch or None). _marker is used
            otherwise.
        """
        if self.branch.get_bound_location() is not None:
            self.lock_write()
            update_branch = (old_tip is self._marker)
        else:
            self.lock_tree_write()
            update_branch = False
        try:
            if update_branch:
                old_tip = self.branch.update(possible_transports)
            else:
                if old_tip is self._marker:
                    old_tip = None
            return self._update_tree(old_tip, change_reporter, revision)
        finally:
            self.unlock()
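
    # Illustrative sketch (hypothetical checkout path): updating a bound
    # working tree.  In this codebase the result of _update_tree() is
    # propagated, so a truthy return value indicates conflicts were created.
    #
    #   wt = workingtree.WorkingTree.open('checkout')
    #   nb_conflicts = wt.update()
    #   if nb_conflicts:
    #       print "fix conflicts, then rerun update"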
2311
    @needs_tree_write_lock
    def _update_tree(self, old_tip=None, change_reporter=None, revision=None):
        """Update a tree to the master branch.

        :param old_tip: if supplied, the previous tip revision of the branch,
            before it was changed to the master branch's tip.
        """
        # here if old_tip is not None, it is the old tip of the branch before
        # it was updated from the master branch. This should become a pending
        # merge in the working tree to preserve the user's existing work. We
        # can't set that until we update the working tree's last revision to
        # be one from the new branch, because it will just get absorbed by
        # the parent de-duplication logic.
        #
        # We MUST save it even if an error occurs, because otherwise the
        # user's local work is unreferenced and will appear to have been lost.
        try:
            last_rev = self.get_parent_ids()[0]
        except IndexError:
            last_rev = _mod_revision.NULL_REVISION
        if revision is None:
            revision = self.branch.last_revision()

        old_tip = old_tip or _mod_revision.NULL_REVISION

        if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
            # the branch we are bound to was updated
            # merge those changes in first
            base_tree = self.basis_tree()
            other_tree = self.branch.repository.revision_tree(old_tip)
            nb_conflicts = merge.merge_inner(self.branch, other_tree,
                                             base_tree, this_tree=self,
                                             change_reporter=change_reporter)
            if nb_conflicts:
                self.add_parent_tree((old_tip, other_tree))
                trace.note('Rerun update after fixing the conflicts.')
                return nb_conflicts

        if last_rev != _mod_revision.ensure_null(revision):
            # the working tree is up to date with the branch
            # we can merge the specified revision from master
            to_tree = self.branch.repository.revision_tree(revision)
            to_root_id = to_tree.get_root_id()

            basis = self.basis_tree()
            if (basis.inventory.root is None
                    or basis.inventory.root.file_id != to_root_id):
                self.set_root_id(to_root_id)

            # determine the branch point
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                last_rev)
            base_tree = self.branch.repository.revision_tree(base_rev_id)

            nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
                                             this_tree=self,
                                             change_reporter=change_reporter)
            self.set_last_revision(revision)
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to to set the basis:
            parent_trees = [(revision, to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in ram already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if not _mod_revision.is_null(old_tip):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
2396
    def _write_hashcache_if_dirty(self):
        """Write out the hashcache if it is dirty."""
        if self._hashcache.needs_write:
            try:
                self._hashcache.write()
            except OSError, e:
                if e.errno not in (errno.EPERM, errno.EACCES):
                    raise
                # TODO: jam 20061219 Should this be a warning? A single line
                #       warning might be sufficient to let the user know what
                #       is going on.
                mutter('Could not write hashcache for %s\nError: %s',
                       self._hashcache.cache_file_name(), e)

    @needs_tree_write_lock
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        self._set_inventory(inv, dirty=True)

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    def conflicts(self):
        conflicts = _mod_conflicts.ConflictList()
        for conflicted in self._iter_conflicts():
            text = True
            try:
                if file_kind(self.abspath(conflicted)) != "file":
                    text = False
            except errors.NoSuchFile:
                text = False
            if text:
                for suffix in ('.THIS', '.OTHER'):
                    try:
                        kind = file_kind(self.abspath(conflicted+suffix))
                        if kind != "file":
                            text = False
                    except errors.NoSuchFile:
                        text = False
            ctype = {True: 'text conflict', False: 'contents conflict'}[text]
            conflicts.append(_mod_conflicts.Conflict.factory(ctype,
                             file_id=self.path2id(conflicted)))
        return conflicts
2448
    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        Returns a generator which yields items in the form:
        ((current_directory_path, fileid),
         [(file1_path, file1_name, file1_kind, (lstat), file1_id,
           file1_kind), ...])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
2462
disk_top = self.abspath(prefix)
2463
if disk_top.endswith('/'):
2464
disk_top = disk_top[:-1]
2465
top_strip_len = len(disk_top) + 1
2466
inventory_iterator = self._walkdirs(prefix)
2467
disk_iterator = osutils.walkdirs(disk_top, prefix)
2469
current_disk = disk_iterator.next()
2470
disk_finished = False
2472
if not (e.errno == errno.ENOENT or
2473
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
2476
disk_finished = True
2478
current_inv = inventory_iterator.next()
2479
inv_finished = False
2480
except StopIteration:
2483
while not inv_finished or not disk_finished:
2485
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2486
cur_disk_dir_content) = current_disk
2488
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2489
cur_disk_dir_content) = ((None, None), None)
2490
if not disk_finished:
2491
# strip out .bzr dirs
2492
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
2493
len(cur_disk_dir_content) > 0):
2494
# osutils.walkdirs can be made nicer -
2495
# yield the path-from-prefix rather than the pathjoined
2497
bzrdir_loc = bisect_left(cur_disk_dir_content,
2499
if (bzrdir_loc < len(cur_disk_dir_content)
2500
and self.bzrdir.is_control_filename(
2501
cur_disk_dir_content[bzrdir_loc][0])):
2502
# we dont yield the contents of, or, .bzr itself.
2503
del cur_disk_dir_content[bzrdir_loc]
2505
# everything is unknown
2508
# everything is missing
2511
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
2513
# disk is before inventory - unknown
2514
dirblock = [(relpath, basename, kind, stat, None, None) for
2515
relpath, basename, kind, stat, top_path in
2516
cur_disk_dir_content]
2517
yield (cur_disk_dir_relpath, None), dirblock
2519
current_disk = disk_iterator.next()
2520
except StopIteration:
2521
disk_finished = True
2523
# inventory is before disk - missing.
2524
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
2525
for relpath, basename, dkind, stat, fileid, kind in
2527
yield (current_inv[0][0], current_inv[0][1]), dirblock
2529
current_inv = inventory_iterator.next()
2530
except StopIteration:
2533
# versioned present directory
2534
# merge the inventory and disk data together
2536
for relpath, subiterator in itertools.groupby(sorted(
2537
current_inv[1] + cur_disk_dir_content,
2538
key=operator.itemgetter(0)), operator.itemgetter(1)):
2539
path_elements = list(subiterator)
2540
if len(path_elements) == 2:
2541
inv_row, disk_row = path_elements
2542
# versioned, present file
2543
dirblock.append((inv_row[0],
2544
inv_row[1], disk_row[2],
2545
disk_row[3], inv_row[4],
2547
elif len(path_elements[0]) == 5:
2549
dirblock.append((path_elements[0][0],
2550
path_elements[0][1], path_elements[0][2],
2551
path_elements[0][3], None, None))
2552
elif len(path_elements[0]) == 6:
2553
# versioned, absent file.
2554
dirblock.append((path_elements[0][0],
2555
path_elements[0][1], 'unknown', None,
2556
path_elements[0][4], path_elements[0][5]))
2558
raise NotImplementedError('unreachable code')
2559
yield current_inv[0], dirblock
2561
current_inv = inventory_iterator.next()
2562
except StopIteration:
2565
current_disk = disk_iterator.next()
2566
except StopIteration:
2567
disk_finished = True
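
    # Illustrative sketch: consuming walkdirs() under a read lock (the
    # generator is only valid for the duration of the lock).  The tree object
    # 'wt' is assumed to be an open WorkingTree.
    #
    #   wt.lock_read()
    #   try:
    #       for (dir_relpath, dir_file_id), dirblock in wt.walkdirs():
    #           for relpath, name, kind, stat, file_id, v_kind in dirblock:
    #               print relpath, kind, file_id
    #   finally:
    #       wt.unlock()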
2569
    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :prefix: is used as the directory to start with.
        Returns a generator which yields items in the form:
        ((current_directory_path, fileid),
         [(file1_path, file1_name, file1_kind, None, file1_id,
           file1_kind), ...])
        """
        _directory = 'directory'
        # get the root in the inventory
        inv = self.inventory
        top_id = inv.path2id(prefix)
        pending = [(prefix, '', _directory, None, top_id, None)]
        while pending:
            dirblock = []
            currentdir = pending.pop()
            # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
            top_id = currentdir[4]
            relroot = currentdir[0] + '/'
            # FIXME: stash the node in pending
            entry = inv[top_id]
            if entry.kind == 'directory':
                for name, child in entry.sorted_children():
                    dirblock.append((relroot + name, name, child.kind, None,
                        child.file_id, child.kind))
            yield (currentdir[0], entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append(dir)
2608
    @needs_tree_write_lock
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts. The corresponding .THIS, .BASE and
        .OTHER files are deleted, as per 'resolve'.

        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
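
    # Illustrative sketch: auto_resolve() returns a pair of ConflictLists;
    # only text conflicts whose markers are gone end up in 'resolved'.
    #
    #   un_resolved, resolved = wt.auto_resolve()
    #   for conflict in un_resolved:
    #       print conflict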
2641
    def _check(self, references):
        """Check the tree for consistency.

        :param references: A dict with keys matching the items returned by
            self._get_check_refs(), and values from looking those keys up in
            the repository.
        """
        tree_basis = self.basis_tree()
        tree_basis.lock_read()
        try:
            repo_basis = references[('trees', self.last_revision())]
            if len(list(repo_basis.iter_changes(tree_basis))) > 0:
                raise errors.BzrCheckError(
                    "Mismatched basis inventory content.")
        finally:
            tree_basis.unlock()

    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite, to give it a chance to detect
        corruption after actions have occurred. The default implementation is
        a no-op.

        :return: None. An exception should be raised if there is an error.
        """

    def _get_rules_searcher(self, default_searcher):
        """See Tree._get_rules_searcher."""
        if self._rules_searcher is None:
            self._rules_searcher = super(WorkingTree,
                self)._get_rules_searcher(default_searcher)
        return self._rules_searcher

    def get_shelf_manager(self):
        """Return the ShelfManager for this WorkingTree."""
        from bzrlib.shelf import ShelfManager
        return ShelfManager(self, self._transport)
2683
class WorkingTree2(WorkingTree):
2684
"""This is the Format 2 working tree.
2686
This was the first weave based working tree.
2687
- uses os locks for locking.
2688
- uses the branch last-revision.
2691
def __init__(self, *args, **kwargs):
2692
super(WorkingTree2, self).__init__(*args, **kwargs)
2693
# WorkingTree2 has more of a constraint that self._inventory must
2694
# exist. Because this is an older format, we don't mind the overhead
2695
# caused by the extra computation here.
2697
# Newer WorkingTree's should only have self._inventory set when they
2699
if self._inventory is None:
2700
self.read_working_inventory()
2702
def _get_check_refs(self):
2703
"""Return the references needed to perform a check of this tree."""
2704
return [('trees', self.last_revision())]
2706
def lock_tree_write(self):
2707
"""See WorkingTree.lock_tree_write().
2709
In Format2 WorkingTrees we have a single lock for the branch and tree
2710
so lock_tree_write() degrades to lock_write().
2712
:return: An object with an unlock method which will release the lock
2715
self.branch.lock_write()
2717
self._control_files.lock_write()
2720
self.branch.unlock()
2724
# do non-implementation specific cleanup
2727
# we share control files:
2728
if self._control_files._lock_count == 3:
2729
# _inventory_is_modified is always False during a read lock.
2730
if self._inventory_is_modified:
2732
self._write_hashcache_if_dirty()
2734
# reverse order of locking.
2736
return self._control_files.unlock()
2738
self.branch.unlock()
2741
class WorkingTree3(WorkingTree):
2742
"""This is the Format 3 working tree.
2744
This differs from the base WorkingTree by:
2745
- having its own file lock
2746
- having its own last-revision property.
2748
This is new in bzr 0.8
2752
def _last_revision(self):
2753
"""See Mutable.last_revision."""
2755
return self._transport.get_bytes('last-revision')
2756
except errors.NoSuchFile:
2757
return _mod_revision.NULL_REVISION
2759
def _change_last_revision(self, revision_id):
2760
"""See WorkingTree._change_last_revision."""
2761
if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
2763
self._transport.delete('last-revision')
2764
except errors.NoSuchFile:
2768
self._transport.put_bytes('last-revision', revision_id,
2769
mode=self.bzrdir._get_file_mode())
2772
def _get_check_refs(self):
2773
"""Return the references needed to perform a check of this tree."""
2774
return [('trees', self.last_revision())]
2776
@needs_tree_write_lock
2777
def set_conflicts(self, conflicts):
2778
self._put_rio('conflicts', conflicts.to_stanzas(),
2781
@needs_tree_write_lock
2782
def add_conflicts(self, new_conflicts):
2783
conflict_set = set(self.conflicts())
2784
conflict_set.update(set(list(new_conflicts)))
2785
self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
2786
key=_mod_conflicts.Conflict.sort_key)))
2789
def conflicts(self):
2791
confile = self._transport.get('conflicts')
2792
except errors.NoSuchFile:
2793
return _mod_conflicts.ConflictList()
2796
if confile.next() != CONFLICT_HEADER_1 + '\n':
2797
raise errors.ConflictFormatError()
2798
except StopIteration:
2799
raise errors.ConflictFormatError()
2800
return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
2805
# do non-implementation specific cleanup
2807
if self._control_files._lock_count == 1:
2808
# _inventory_is_modified is always False during a read lock.
2809
if self._inventory_is_modified:
2811
self._write_hashcache_if_dirty()
2812
# reverse order of locking.
2814
return self._control_files.unlock()
2816
self.branch.unlock()
2819
def get_conflicted_stem(path):
2820
for suffix in _mod_conflicts.CONFLICT_SUFFIXES:
2821
if path.endswith(suffix):
2822
return path[:-len(suffix)]
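
# Illustrative sketch: get_conflicted_stem() strips a conflict suffix such as
# '.THIS', '.BASE' or '.OTHER' from a path, returning None when no suffix
# matches.  The file name below is hypothetical.
#
#   get_conflicted_stem('README.THIS')   # -> 'README'
#   get_conflicted_stem('README')        # -> None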
2825
class WorkingTreeFormat(object):
    """An encapsulation of the initialization and open routines for a format.

    Formats provide three things:
     * An initialization routine,
     * a format string,
     * an open routine.

    Formats are placed in a dict by their format string for reference
    during workingtree opening. It's not required that these be instances;
    they can be classes themselves with class methods - it simply depends on
    whether state is needed for a given format or not.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every time regardless.
    """
2843
_default_format = None
2844
"""The default format used for new trees."""
2847
"""The known formats."""
2849
requires_rich_root = False
2851
upgrade_recommended = False
2854
def find_format(klass, a_bzrdir):
2855
"""Return the format for the working tree object in a_bzrdir."""
2857
transport = a_bzrdir.get_workingtree_transport(None)
2858
format_string = transport.get_bytes("format")
2859
return klass._formats[format_string]
2860
except errors.NoSuchFile:
2861
raise errors.NoWorkingTree(base=transport.base)
2863
raise errors.UnknownFormatError(format=format_string,
2864
kind="working tree")
2866
def __eq__(self, other):
2867
return self.__class__ is other.__class__
2869
def __ne__(self, other):
2870
return not (self == other)
2873
def get_default_format(klass):
2874
"""Return the current default format."""
2875
return klass._default_format
2877
def get_format_string(self):
2878
"""Return the ASCII format string that identifies this format."""
2879
raise NotImplementedError(self.get_format_string)
2881
def get_format_description(self):
2882
"""Return the short description for this format."""
2883
raise NotImplementedError(self.get_format_description)
2885
def is_supported(self):
2886
"""Is this format supported?
2888
Supported formats can be initialized and opened.
2889
Unsupported formats may not support initialization or committing or
2890
some other features depending on the reason for not being supported.
2894
def supports_content_filtering(self):
2895
"""True if this format supports content filtering."""
2898
def supports_views(self):
2899
"""True if this format supports stored views."""
2903
def register_format(klass, format):
2904
klass._formats[format.get_format_string()] = format
2907
def set_default_format(klass, format):
2908
klass._default_format = format
2911
def unregister_format(klass, format):
2912
del klass._formats[format.get_format_string()]
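
    # Illustrative sketch: how a new tree format would typically be hooked in.
    # The class name and format string below are invented for the example.
    #
    #   class WorkingTreeFormatX(WorkingTreeFormat):
    #       def get_format_string(self):
    #           return "Example working tree format X"
    #       def get_format_description(self):
    #           return "Working tree format X"
    #
    #   WorkingTreeFormat.register_format(WorkingTreeFormatX())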
2915
class WorkingTreeFormat2(WorkingTreeFormat):
2916
"""The second working tree format.
2918
This format modified the hash cache from the format 1 hash cache.
2921
upgrade_recommended = True
2923
def get_format_description(self):
2924
"""See WorkingTreeFormat.get_format_description()."""
2925
return "Working tree format 2"
2927
def _stub_initialize_on_transport(self, transport, file_mode):
2928
"""Workaround: create control files for a remote working tree.
2930
This ensures that it can later be updated and dealt with locally,
2931
since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
2932
no working tree. (See bug #43064).
2935
inv = inventory.Inventory()
2936
xml5.serializer_v5.write_inventory(inv, sio, working=True)
2938
transport.put_file('inventory', sio, file_mode)
2939
transport.put_bytes('pending-merges', '', file_mode)
2941
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
2942
accelerator_tree=None, hardlink=False):
2943
"""See WorkingTreeFormat.initialize()."""
2944
if not isinstance(a_bzrdir.transport, LocalTransport):
2945
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2946
if from_branch is not None:
2947
branch = from_branch
2949
branch = a_bzrdir.open_branch()
2950
if revision_id is None:
2951
revision_id = _mod_revision.ensure_null(branch.last_revision())
2954
branch.generate_revision_history(revision_id)
2957
inv = inventory.Inventory()
2958
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
2964
basis_tree = branch.repository.revision_tree(revision_id)
2965
if basis_tree.inventory.root is not None:
2966
wt.set_root_id(basis_tree.get_root_id())
2967
# set the parent list and cache the basis tree.
2968
if _mod_revision.is_null(revision_id):
2971
parent_trees = [(revision_id, basis_tree)]
2972
wt.set_parent_trees(parent_trees)
2973
transform.build_tree(basis_tree, wt)
2977
super(WorkingTreeFormat2, self).__init__()
2978
self._matchingbzrdir = bzrdir.BzrDirFormat6()
2980
def open(self, a_bzrdir, _found=False):
2981
"""Return the WorkingTree object for a_bzrdir
2983
_found is a private parameter, do not use it. It is used to indicate
2984
if format probing has already been done.
2987
# we are being called directly and must probe.
2988
raise NotImplementedError
2989
if not isinstance(a_bzrdir.transport, LocalTransport):
2990
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2991
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
2997
class WorkingTreeFormat3(WorkingTreeFormat):
2998
"""The second working tree format updated to record a format marker.
3001
- exists within a metadir controlling .bzr
3002
- includes an explicit version marker for the workingtree control
3003
files, separate from the BzrDir format
3004
- modifies the hash cache format
3006
- uses a LockDir to guard access for writes.
3009
upgrade_recommended = True
3011
def get_format_string(self):
3012
"""See WorkingTreeFormat.get_format_string()."""
3013
return "Bazaar-NG Working Tree format 3"
3015
def get_format_description(self):
3016
"""See WorkingTreeFormat.get_format_description()."""
3017
return "Working tree format 3"
3019
_lock_file_name = 'lock'
3020
_lock_class = LockDir
3022
_tree_class = WorkingTree3
3024
def __get_matchingbzrdir(self):
3025
return bzrdir.BzrDirMetaFormat1()
3027
_matchingbzrdir = property(__get_matchingbzrdir)
3029
def _open_control_files(self, a_bzrdir):
3030
transport = a_bzrdir.get_workingtree_transport(None)
3031
return LockableFiles(transport, self._lock_file_name,
3034
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
3035
accelerator_tree=None, hardlink=False):
3036
"""See WorkingTreeFormat.initialize().
3038
:param revision_id: if supplied, create a working tree at a different
3039
revision than the branch is at.
3040
:param accelerator_tree: A tree which can be used for retrieving file
3041
contents more quickly than the revision tree, i.e. a workingtree.
3042
The revision tree will be used for cases where accelerator_tree's
3043
content is different.
3044
:param hardlink: If true, hard-link files from accelerator_tree,
3047
if not isinstance(a_bzrdir.transport, LocalTransport):
3048
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3049
transport = a_bzrdir.get_workingtree_transport(self)
3050
control_files = self._open_control_files(a_bzrdir)
3051
control_files.create_lock()
3052
control_files.lock_write()
3053
transport.put_bytes('format', self.get_format_string(),
3054
mode=a_bzrdir._get_file_mode())
3055
if from_branch is not None:
3056
branch = from_branch
3058
branch = a_bzrdir.open_branch()
3059
if revision_id is None:
3060
revision_id = _mod_revision.ensure_null(branch.last_revision())
3061
# WorkingTree3 can handle an inventory which has a unique root id.
3062
# as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
3063
# those trees. And because there isn't a format bump inbetween, we
3064
# are maintaining compatibility with older clients.
3065
# inv = Inventory(root_id=gen_root_id())
3066
inv = self._initial_inventory()
3067
wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3073
_control_files=control_files)
3074
wt.lock_tree_write()
3076
basis_tree = branch.repository.revision_tree(revision_id)
3077
# only set an explicit root id if there is one to set.
3078
if basis_tree.inventory.root is not None:
3079
wt.set_root_id(basis_tree.get_root_id())
3080
if revision_id == _mod_revision.NULL_REVISION:
3081
wt.set_parent_trees([])
else:
3083
wt.set_parent_trees([(revision_id, basis_tree)])
3084
transform.build_tree(basis_tree, wt)
3086
# Unlock in this order so that the unlock-triggers-flush in
3087
# WorkingTree is given a chance to fire.
3088
control_files.unlock()
3092
def _initial_inventory(self):
3093
return inventory.Inventory()
3096
super(WorkingTreeFormat3, self).__init__()
3098
def open(self, a_bzrdir, _found=False):
3099
"""Return the WorkingTree object for a_bzrdir
3101
_found is a private parameter, do not use it. It is used to indicate
3102
if format probing has already been done.
3105
# we are being called directly and must probe.
3106
raise NotImplementedError
3107
if not isinstance(a_bzrdir.transport, LocalTransport):
3108
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3109
wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
3112
def _open(self, a_bzrdir, control_files):
3113
"""Open the tree itself.
3115
:param a_bzrdir: the dir for the tree.
3116
:param control_files: the control files for the tree.
3118
return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3122
_control_files=control_files)
3125
    def __str__(self):
        return self.get_format_string()


__default_format = WorkingTreeFormat6()
WorkingTreeFormat.register_format(__default_format)
WorkingTreeFormat.register_format(WorkingTreeFormat5())
WorkingTreeFormat.register_format(WorkingTreeFormat4())
WorkingTreeFormat.register_format(WorkingTreeFormat3())
WorkingTreeFormat.set_default_format(__default_format)
# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
_legacy_formats = [WorkingTreeFormat2(),
                   ]