        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):
                yield ie.file_id

    def all_file_ids(self):
        """See Tree.iter_all_file_ids"""
        return set(self.inventory)
    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)
    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        # at this point ?
        try:
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, its a problem, if its not
            # its a ghost.
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)

    def _cleanup(self):
        self._flush_ignore_list_cache()
    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If its a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))
    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

    def get_file_with_stat(self, file_id, path=None, filtered=True,
                           _fstat=os.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        my_file = self.get_file(file_id, path=path, filtered=filtered)
        try:
            return my_file.read()
        finally:
            my_file.close()
    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()
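    # Editor's illustrative sketch (not part of the original source): typical
    # client-side use of the file-access methods above. The relative path used
    # here is a hypothetical placeholder.
    def _example_read_versioned_file(self, relpath='README'):
        # Look up the file id for a versioned path and read its (possibly
        # content-filtered) text while holding a read lock.
        self.lock_read()
        try:
            file_id = self.path2id(relpath)
            return self.get_file_text(file_id)
        finally:
            self.unlock()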
    def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
        """See Tree.annotate_iter

        This implementation will use the basis tree implementation if possible.
        Lines not in the basis are attributed to CURRENT_REVISION

        If there are pending merges, lines added by those merges will be
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        maybe_file_parent_keys = []
        for parent_id in self.get_parent_ids():
            try:
                parent_tree = self.revision_tree(parent_id)
            except errors.NoSuchRevisionInTree:
                parent_tree = self.branch.repository.revision_tree(parent_id)
            parent_tree.lock_read()
            try:
                if file_id not in parent_tree:
                    continue
                ie = parent_tree.inventory[file_id]
                if ie.kind != 'file':
                    # Note: this is slightly unnecessary, because symlinks and
                    # directories have a "text" which is the empty text, and we
                    # know that won't mess up annotations. But it seems cleaner
                    continue
                parent_text_key = (file_id, ie.revision)
                if parent_text_key not in maybe_file_parent_keys:
                    maybe_file_parent_keys.append(parent_text_key)
            finally:
                parent_tree.unlock()
        graph = _mod_graph.Graph(self.branch.repository.texts)
        heads = graph.heads(maybe_file_parent_keys)
        file_parent_keys = []
        for key in maybe_file_parent_keys:
            if key in heads:
                file_parent_keys.append(key)

        # Now we have the parents of this content
        annotator = self.branch.repository.texts.get_annotator()
        text = self.get_file_text(file_id)
        this_key = (file_id, default_revision)
        annotator.add_special_text(this_key, file_parent_keys, text)
        annotations = [(key[-1], line)
                       for key, line in annotator.annotate_flat(this_key)]
        return annotations
    def _get_ancestors(self, default_revision):
        ancestors = set([default_revision])
        for parent_id in self.get_parent_ids():
            ancestors.update(self.branch.repository.get_ancestry(
                             parent_id, topo_sorted=False))
        return ancestors

    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_bytes = self._transport.get_bytes('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in osutils.split_lines(merges_bytes):
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
        return parents
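    # Editor's illustrative sketch (not part of the original source): the
    # parent-ids list returned above is the left-most (basis) revision followed
    # by the ids read from the 'pending-merges' file, so a tree with
    # uncommitted merges reports more than one parent.
    def _example_show_parents(self):
        parents = self.get_parent_ids()
        basis = parents[0] if parents else _mod_revision.NULL_REVISION
        pending_merges = parents[1:]
        return basis, pending_merges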
    def get_root_id(self):
        """Return the id of this trees root"""
        return self._inventory.root.file_id

    def _get_store_filename(self, file_id):
        ## XXX: badly named; this is not in the store at all
        return self.abspath(self.id2path(file_id))
634
def clone(self, to_bzrdir, revision_id=None):
635
"""Duplicate this working tree into to_bzr, including all state.
637
Specifically modified files are kept as modified, but
638
ignored and unknown files are discarded.
640
If you want to make a new line of development, see bzrdir.sprout()
643
If not None, the cloned tree will have its last revision set to
644
revision, and difference between the source trees last revision
645
and this one merged in.
647
# assumes the target bzr dir format is compatible.
648
result = to_bzrdir.create_workingtree()
649
self.copy_content_into(result, revision_id)
653
def copy_content_into(self, tree, revision_id=None):
654
"""Copy the current content and user files of this tree into tree."""
655
tree.set_root_id(self.get_root_id())
656
if revision_id is None:
657
merge.transform_tree(tree, self)
659
# TODO now merge from tree.last_revision to revision (to preserve
660
# user local changes)
661
merge.transform_tree(tree, self)
662
tree.set_parent_ids([revision_id])
664
def id2abspath(self, file_id):
665
return self.abspath(self.id2path(file_id))
    def has_id(self, file_id):
        # files that have been deleted are excluded
        inv = self.inventory
        if not inv.has_id(file_id):
            return False
        path = inv.id2path(file_id)
        return osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    __contains__ = has_id
    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        # XXX: this returns the on-disk size; it should probably return the
        # canonical size
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None
    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime
705
def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
706
file_id = self.path2id(path)
708
# For unversioned files on win32, we just assume they are not
711
return self._inventory[file_id].executable
713
def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
714
mode = stat_result.st_mode
715
return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
717
if not supports_executable():
718
def is_executable(self, file_id, path=None):
719
return self._inventory[file_id].executable
721
_is_executable_from_path_and_stat = \
722
_is_executable_from_path_and_stat_from_basis
724
def is_executable(self, file_id, path=None):
726
path = self.id2path(file_id)
727
mode = os.lstat(self.abspath(path)).st_mode
728
return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
730
_is_executable_from_path_and_stat = \
731
_is_executable_from_path_and_stat_from_stat
733
@needs_tree_write_lock
734
def _add(self, files, ids, kinds):
735
"""See MutableTree._add."""
736
# TODO: Re-adding a file that is removed in the working copy
737
# should probably put it back with the previous ID.
738
# the read and write working inventory should not occur in this
739
# function - they should be part of lock_write and unlock.
741
for f, file_id, kind in zip(files, ids, kinds):
743
inv.add_path(f, kind=kind)
745
inv.add_path(f, kind=kind, file_id=file_id)
746
self._inventory_is_modified = True
748
@needs_tree_write_lock
749
def _gather_kinds(self, files, kinds):
750
"""See MutableTree._gather_kinds."""
751
for pos, f in enumerate(files):
752
if kinds[pos] is None:
753
fullpath = normpath(self.abspath(f))
755
kinds[pos] = file_kind(fullpath)
757
if e.errno == errno.ENOENT:
758
raise errors.NoSuchFile(fullpath)
761
def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
762
"""Add revision_id as a parent.
764
This is equivalent to retrieving the current list of parent ids
765
and setting the list to its value plus revision_id.
767
:param revision_id: The revision id to add to the parent list. It may
768
be a ghost revision as long as its not the first parent to be added,
769
or the allow_leftmost_as_ghost parameter is set True.
770
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
772
parents = self.get_parent_ids() + [revision_id]
773
self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
774
or allow_leftmost_as_ghost)
776
@needs_tree_write_lock
777
def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
778
"""Add revision_id, tree tuple as a parent.
780
This is equivalent to retrieving the current list of parent trees
781
and setting the list to its value plus parent_tuple. See also
782
add_parent_tree_id - if you only have a parent id available it will be
783
simpler to use that api. If you have the parent already available, using
784
this api is preferred.
786
:param parent_tuple: The (revision id, tree) to add to the parent list.
787
If the revision_id is a ghost, pass None for the tree.
788
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
790
parent_ids = self.get_parent_ids() + [parent_tuple[0]]
791
if len(parent_ids) > 1:
792
# the leftmost may have already been a ghost, preserve that if it
794
allow_leftmost_as_ghost = True
795
self.set_parent_ids(parent_ids,
796
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
798
@needs_tree_write_lock
799
def add_pending_merge(self, *revision_ids):
800
# TODO: Perhaps should check at this point that the
801
# history of the revision is actually present?
802
parents = self.get_parent_ids()
804
for rev_id in revision_ids:
805
if rev_id in parents:
807
parents.append(rev_id)
810
self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
812
def path_content_summary(self, path, _lstat=os.lstat,
813
_mapper=osutils.file_kind_from_stat_mode):
814
"""See Tree.path_content_summary."""
815
abspath = self.abspath(path)
817
stat_result = _lstat(abspath)
819
if getattr(e, 'errno', None) == errno.ENOENT:
821
return ('missing', None, None, None)
822
# propagate other errors
824
kind = _mapper(stat_result.st_mode)
826
return self._file_content_summary(path, stat_result)
827
elif kind == 'directory':
828
# perhaps it looks like a plain directory, but it's really a
830
if self._directory_is_tree_reference(path):
831
kind = 'tree-reference'
832
return kind, None, None, None
833
elif kind == 'symlink':
834
target = osutils.readlink(abspath)
835
return ('symlink', None, None, target)
837
return (kind, None, None, None)
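    # Editor's illustrative sketch (not part of the original source): the
    # 4-tuple returned by path_content_summary() above is
    # (kind, size, executable, sha1_or_link_target); fields that do not apply
    # to the kind are None. 'relpath' is a hypothetical placeholder.
    def _example_describe_path(self, relpath):
        kind, size, executable, link_or_sha1 = self.path_content_summary(relpath)
        return '%s: kind=%s size=%s executable=%s' % (
            relpath, kind, size, executable)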
839
def _file_content_summary(self, path, stat_result):
840
size = stat_result.st_size
841
executable = self._is_executable_from_path_and_stat(path, stat_result)
842
# try for a stat cache lookup
843
return ('file', size, executable, self._sha_from_stat(
846
def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
847
"""Common ghost checking functionality from set_parent_*.
849
This checks that the left hand-parent exists if there are any
852
if len(revision_ids) > 0:
853
leftmost_id = revision_ids[0]
854
if (not allow_leftmost_as_ghost and not
855
self.branch.repository.has_revision(leftmost_id)):
856
raise errors.GhostRevisionUnusableHere(leftmost_id)
858
def _set_merges_from_parent_ids(self, parent_ids):
859
merges = parent_ids[1:]
860
self._transport.put_bytes('pending-merges', '\n'.join(merges),
861
mode=self.bzrdir._get_file_mode())
863
def _filter_parent_ids_by_ancestry(self, revision_ids):
864
"""Check that all merged revisions are proper 'heads'.
866
This will always return the first revision_id, and any merged revisions
869
if len(revision_ids) == 0:
871
graph = self.branch.repository.get_graph()
872
heads = graph.heads(revision_ids)
873
new_revision_ids = revision_ids[:1]
874
for revision_id in revision_ids[1:]:
875
if revision_id in heads and revision_id not in new_revision_ids:
876
new_revision_ids.append(revision_id)
877
if new_revision_ids != revision_ids:
878
trace.mutter('requested to set revision_ids = %s,'
879
' but filtered to %s', revision_ids, new_revision_ids)
880
return new_revision_ids
882
@needs_tree_write_lock
883
def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
884
"""Set the parent ids to revision_ids.
886
See also set_parent_trees. This api will try to retrieve the tree data
887
for each element of revision_ids from the trees repository. If you have
888
tree data already available, it is more efficient to use
889
set_parent_trees rather than set_parent_ids. set_parent_ids is however
890
an easier API to use.
892
:param revision_ids: The revision_ids to set as the parent ids of this
893
working tree. Any of these may be ghosts.
895
self._check_parents_for_ghosts(revision_ids,
896
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
897
for revision_id in revision_ids:
898
_mod_revision.check_not_reserved_id(revision_id)
900
revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)
902
if len(revision_ids) > 0:
903
self.set_last_revision(revision_ids[0])
905
self.set_last_revision(_mod_revision.NULL_REVISION)
907
self._set_merges_from_parent_ids(revision_ids)
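    # Editor's illustrative sketch (not part of the original source): recording
    # an extra parent before commit, e.g. after fetching a revision that should
    # be merged. The revision id passed in is a hypothetical placeholder.
    def _example_record_merge_parent(self, merged_revision_id):
        self.lock_tree_write()
        try:
            # keeps the existing basis and appends the merged revision, which
            # ends up in the 'pending-merges' file via set_parent_ids()
            self.add_parent_tree_id(merged_revision_id)
        finally:
            self.unlock()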
909
@needs_tree_write_lock
910
def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
911
"""See MutableTree.set_parent_trees."""
912
parent_ids = [rev for (rev, tree) in parents_list]
913
for revision_id in parent_ids:
914
_mod_revision.check_not_reserved_id(revision_id)
916
self._check_parents_for_ghosts(parent_ids,
917
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
919
parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)
921
if len(parent_ids) == 0:
922
leftmost_parent_id = _mod_revision.NULL_REVISION
923
leftmost_parent_tree = None
925
leftmost_parent_id, leftmost_parent_tree = parents_list[0]
927
if self._change_last_revision(leftmost_parent_id):
928
if leftmost_parent_tree is None:
929
# If we don't have a tree, fall back to reading the
930
# parent tree from the repository.
931
self._cache_basis_inventory(leftmost_parent_id)
933
inv = leftmost_parent_tree.inventory
934
xml = self._create_basis_xml_from_inventory(
935
leftmost_parent_id, inv)
936
self._write_basis_inventory(xml)
937
self._set_merges_from_parent_ids(parent_ids)
939
@needs_tree_write_lock
940
def set_pending_merges(self, rev_list):
941
parents = self.get_parent_ids()
942
leftmost = parents[:1]
943
new_parents = leftmost + rev_list
944
self.set_parent_ids(new_parents)
946
@needs_tree_write_lock
947
def set_merge_modified(self, modified_hashes):
949
for file_id, hash in modified_hashes.iteritems():
950
yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
951
self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
953
def _sha_from_stat(self, path, stat_result):
954
"""Get a sha digest from the tree's stat cache.
956
The default implementation assumes no stat cache is present.
958
:param path: The path.
959
:param stat_result: The stat result being looked up.
963
def _put_rio(self, filename, stanzas, header):
964
self._must_be_locked()
965
my_file = rio_file(stanzas, header)
966
self._transport.put_file(filename, my_file,
967
mode=self.bzrdir._get_file_mode())
969
@needs_write_lock # because merge pulls data into the branch.
970
def merge_from_branch(self, branch, to_revision=None, from_revision=None,
971
merge_type=None, force=False):
972
"""Merge from a branch into this working tree.
974
:param branch: The branch to merge from.
975
:param to_revision: If non-None, the merge will merge to to_revision,
976
but not beyond it. to_revision does not need to be in the history
977
of the branch when it is supplied. If None, to_revision defaults to
978
branch.last_revision().
980
from bzrlib.merge import Merger, Merge3Merger
981
merger = Merger(self.branch, this_tree=self)
982
# check that there are no local alterations
983
if not force and self.has_changes():
984
raise errors.UncommittedChanges(self)
985
if to_revision is None:
986
to_revision = _mod_revision.ensure_null(branch.last_revision())
987
merger.other_rev_id = to_revision
988
if _mod_revision.is_null(merger.other_rev_id):
989
raise errors.NoCommits(branch)
990
self.branch.fetch(branch, last_revision=merger.other_rev_id)
991
merger.other_basis = merger.other_rev_id
992
merger.other_tree = self.branch.repository.revision_tree(
994
merger.other_branch = branch
995
if from_revision is None:
998
merger.set_base_revision(from_revision, branch)
999
if merger.base_rev_id == merger.other_rev_id:
1000
raise errors.PointlessMerge
1001
merger.backup_files = False
1002
if merge_type is None:
1003
merger.merge_type = Merge3Merger
1005
merger.merge_type = merge_type
1006
merger.set_interesting_files(None)
1007
merger.show_base = False
1008
merger.reprocess = False
1009
conflicts = merger.do_merge()
1010
merger.set_pending()
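    # Editor's illustrative sketch (not part of the original source): merging
    # another branch into this working tree with merge_from_branch() above.
    # 'other_url' is a hypothetical location; PointlessMerge is raised when
    # there is nothing new to merge.
    def _example_merge_other_branch(self, other_url):
        from bzrlib import branch as _branch_mod
        other = _branch_mod.Branch.open(other_url)
        try:
            conflicts = self.merge_from_branch(other)
        except errors.PointlessMerge:
            return 0
        return conflicts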
1014
def merge_modified(self):
1015
"""Return a dictionary of files modified by a merge.
1017
The list is initialized by WorkingTree.set_merge_modified, which is
1018
typically called after we make some automatic updates to the tree
1021
This returns a map of file_id->sha1, containing only files which are
1022
still in the working inventory and have that text hash.
1025
hashfile = self._transport.get('merge-hashes')
1026
except errors.NoSuchFile:
1031
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
1032
raise errors.MergeModifiedFormatError()
1033
except StopIteration:
1034
raise errors.MergeModifiedFormatError()
1035
for s in RioReader(hashfile):
1036
# RioReader reads in Unicode, so convert file_ids back to utf8
1037
file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
1038
if file_id not in self.inventory:
1040
text_hash = s.get("hash")
1041
if text_hash == self.get_file_sha1(file_id):
1042
merge_hashes[file_id] = text_hash
1048
def mkdir(self, path, file_id=None):
1049
"""See MutableTree.mkdir()."""
1051
file_id = generate_ids.gen_file_id(os.path.basename(path))
1052
os.mkdir(self.abspath(path))
1053
self.add(path, file_id, 'directory')
1056
def get_symlink_target(self, file_id):
1057
abspath = self.id2abspath(file_id)
1058
target = osutils.readlink(abspath)
1062
def subsume(self, other_tree):
1063
def add_children(inventory, entry):
1064
for child_entry in entry.children.values():
1065
inventory._byid[child_entry.file_id] = child_entry
1066
if child_entry.kind == 'directory':
1067
add_children(inventory, child_entry)
1068
if other_tree.get_root_id() == self.get_root_id():
1069
raise errors.BadSubsumeSource(self, other_tree,
1070
'Trees have the same root')
1072
other_tree_path = self.relpath(other_tree.basedir)
1073
except errors.PathNotChild:
1074
raise errors.BadSubsumeSource(self, other_tree,
1075
'Tree is not contained by the other')
1076
new_root_parent = self.path2id(osutils.dirname(other_tree_path))
1077
if new_root_parent is None:
1078
raise errors.BadSubsumeSource(self, other_tree,
1079
'Parent directory is not versioned.')
1080
# We need to ensure that the result of a fetch will have a
1081
# versionedfile for the other_tree root, and only fetching into
1082
# RepositoryKnit2 guarantees that.
1083
if not self.branch.repository.supports_rich_root():
1084
raise errors.SubsumeTargetNeedsUpgrade(other_tree)
1085
other_tree.lock_tree_write()
1087
new_parents = other_tree.get_parent_ids()
1088
other_root = other_tree.inventory.root
1089
other_root.parent_id = new_root_parent
1090
other_root.name = osutils.basename(other_tree_path)
1091
self.inventory.add(other_root)
1092
add_children(self.inventory, other_root)
1093
self._write_inventory(self.inventory)
1094
# normally we don't want to fetch whole repositories, but i think
1095
# here we really do want to consolidate the whole thing.
1096
for parent_id in other_tree.get_parent_ids():
1097
self.branch.fetch(other_tree.branch, parent_id)
1098
self.add_parent_tree_id(parent_id)
1101
other_tree.bzrdir.retire_bzrdir()
1103
def _setup_directory_is_tree_reference(self):
1104
if self._branch.repository._format.supports_tree_reference:
1105
self._directory_is_tree_reference = \
1106
self._directory_may_be_tree_reference
1108
self._directory_is_tree_reference = \
1109
self._directory_is_never_tree_reference
1111
def _directory_is_never_tree_reference(self, relpath):
1114
def _directory_may_be_tree_reference(self, relpath):
1115
# as a special case, if a directory contains control files then
1116
# it's a tree reference, except that the root of the tree is not
1117
return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
1118
# TODO: We could ask all the control formats whether they
1119
# recognize this directory, but at the moment there's no cheap api
1120
# to do that. Since we probably can only nest bzr checkouts and
1121
# they always use this name it's ok for now. -- mbp 20060306
1123
# FIXME: There is an unhandled case here of a subdirectory
1124
# containing .bzr but not a branch; that will probably blow up
1125
# when you try to commit it. It might happen if there is a
1126
# checkout in a subdirectory. This can be avoided by not adding
1129
@needs_tree_write_lock
1130
def extract(self, file_id, format=None):
1131
"""Extract a subtree from this tree.
1133
A new branch will be created, relative to the path for this tree.
1137
segments = osutils.splitpath(path)
1138
transport = self.branch.bzrdir.root_transport
1139
for name in segments:
1140
transport = transport.clone(name)
1141
transport.ensure_base()
1144
sub_path = self.id2path(file_id)
1145
branch_transport = mkdirs(sub_path)
1147
format = self.bzrdir.cloning_metadir()
1148
branch_transport.ensure_base()
1149
branch_bzrdir = format.initialize_on_transport(branch_transport)
1151
repo = branch_bzrdir.find_repository()
1152
except errors.NoRepositoryPresent:
1153
repo = branch_bzrdir.create_repository()
1154
if not repo.supports_rich_root():
1155
raise errors.RootNotRich()
1156
new_branch = branch_bzrdir.create_branch()
1157
new_branch.pull(self.branch)
1158
for parent_id in self.get_parent_ids():
1159
new_branch.fetch(self.branch, parent_id)
1160
tree_transport = self.bzrdir.root_transport.clone(sub_path)
1161
if tree_transport.base != branch_transport.base:
1162
tree_bzrdir = format.initialize_on_transport(tree_transport)
1163
branch.BranchReferenceFormat().initialize(tree_bzrdir,
1164
target_branch=new_branch)
1166
tree_bzrdir = branch_bzrdir
1167
wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
1168
wt.set_parent_ids(self.get_parent_ids())
1169
my_inv = self.inventory
1170
child_inv = inventory.Inventory(root_id=None)
1171
new_root = my_inv[file_id]
1172
my_inv.remove_recursive_id(file_id)
1173
new_root.parent_id = None
1174
child_inv.add(new_root)
1175
self._write_inventory(my_inv)
1176
wt._write_inventory(child_inv)
1179
def _serialize(self, inventory, out_file):
1180
xml5.serializer_v5.write_inventory(self._inventory, out_file,
1183
    def _deserialize(self, in_file):
1184
return xml5.serializer_v5.read_inventory(in_file)
1187
"""Write the in memory inventory to disk."""
1188
# TODO: Maybe this should only write on dirty ?
1189
if self._control_files._lock_mode != 'w':
1190
raise errors.NotWriteLocked(self)
1192
self._serialize(self._inventory, sio)
1194
self._transport.put_file('inventory', sio,
1195
mode=self.bzrdir._get_file_mode())
1196
self._inventory_is_modified = False
1198
def _kind(self, relpath):
1199
return osutils.file_kind(self.abspath(relpath))
1201
def list_files(self, include_root=False, from_dir=None, recursive=True):
1202
"""List all files as (path, class, kind, id, entry).
131
1204
Lists, but does not descend into unversioned directories.
133
1205
This does not include files that have been deleted in this
1206
tree. Skips the control directory.
136
Skips the control directory.
1208
:param include_root: if True, return an entry for the root
1209
:param from_dir: start from this directory or None for the root
1210
:param recursive: whether to recurse into subdirectories or not
138
from osutils import appendpath, file_kind
141
inv = self._inventory
143
def descend(from_dir_relpath, from_dir_id, dp):
1212
# list_files is an iterator, so @needs_read_lock doesn't work properly
1213
# with it. So callers should be careful to always read_lock the tree.
1214
if not self.is_locked():
1215
raise errors.ObjectNotLocked(self)
1217
inv = self.inventory
1218
if from_dir is None and include_root is True:
1219
yield ('', 'V', 'directory', inv.root.file_id, inv.root)
1220
# Convert these into local objects to save lookup times
1221
pathjoin = osutils.pathjoin
1222
file_kind = self._kind
1224
# transport.base ends in a slash, we want the piece
1225
# between the last two slashes
1226
transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]
1228
fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
1230
# directory file_id, relative path, absolute path, reverse sorted children
1231
if from_dir is not None:
1232
from_dir_id = inv.path2id(from_dir)
1233
if from_dir_id is None:
1234
# Directory not versioned
1236
from_dir_abspath = pathjoin(self.basedir, from_dir)
1238
from_dir_id = inv.root.file_id
1239
from_dir_abspath = self.basedir
1240
children = os.listdir(from_dir_abspath)
1242
# jam 20060527 The kernel sized tree seems equivalent whether we
1243
# use a deque and popleft to keep them sorted, or if we use a plain
1244
# list and just reverse() them.
1245
children = collections.deque(children)
1246
stack = [(from_dir_id, u'', from_dir_abspath, children)]
1248
from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
1251
f = children.popleft()
                ## TODO: If we find a subdirectory with its own .bzr
                ## directory, then that is a separate tree and we
                ## should exclude it.

                # the bzrdir for this tree
                if transport_base_dir == f:
                    continue
                # we know that from_dir_relpath and from_dir_abspath never end in a slash
                # and 'f' doesn't begin with one, we can do a string op, rather
                # than the checks of pathjoin(), all relative paths will have an extra slash
                # at the beginning
                fp = from_dir_relpath + '/' + f

                # absolute path
                fap = from_dir_abspath + '/' + f

                dir_ie = inv[from_dir_id]
                if dir_ie.kind == 'directory':
                    f_ie = dir_ie.children.get(f)
                elif self.is_ignored(fp[1:]):
1279
# we may not have found this file, because of a unicode
1280
# issue, or because the directory was actually a symlink.
1281
f_norm, can_access = osutils.normalized_filename(f)
1282
if f == f_norm or not can_access:
1283
# No change, so treat this file normally
1286
# this file can be accessed by a normalized path
1287
# check again if it is versioned
1288
# these lines are repeated here for performance
1290
fp = from_dir_relpath + '/' + f
1291
fap = from_dir_abspath + '/' + f
1292
f_ie = inv.get_child(from_dir_id, f)
1295
elif self.is_ignored(fp[1:]):
                fk = file_kind(fap)

                # make a last minute entry
                if f_ie:
                    yield fp[1:], c, fk, f_ie.file_id, f_ie
                else:
                    try:
                        yield fp[1:], c, fk, None, fk_entries[fk]()
                    except KeyError:
                        yield fp[1:], c, fk, None, TreeEntry()
                    continue
                if fk != 'directory':
                    continue

1315
# But do this child first if recursing down
1317
new_children = os.listdir(fap)
1319
new_children = collections.deque(new_children)
1320
stack.append((f_ie.file_id, fp, fap, new_children))
1321
# Break out of inner loop,
1322
# so that we start outer loop with child
1325
# if we finished all children, pop it off the stack
1328
@needs_tree_write_lock
1329
def move(self, from_paths, to_dir=None, after=False):
1332
to_dir must exist in the inventory.
1334
If to_dir exists and is a directory, the files are moved into
1335
it, keeping their old names.
1337
Note that to_dir is only the last component of the new name;
1338
this doesn't change the directory.
1340
For each entry in from_paths the move mode will be determined
1343
The first mode moves the file in the filesystem and updates the
1344
inventory. The second mode only updates the inventory without
1345
touching the file on the filesystem. This is the new mode introduced
1348
move uses the second mode if 'after == True' and the target is not
1349
versioned but present in the working tree.
1351
move uses the second mode if 'after == False' and the source is
1352
versioned but no longer in the working tree, and the target is not
1353
versioned but present in the working tree.
1355
move uses the first mode if 'after == False' and the source is
1356
versioned and present in the working tree, and the target is not
1357
versioned and not present in the working tree.
1359
Everything else results in an error.
1361
This returns a list of (from_path, to_path) pairs for each
1362
entry that is moved.
1367
# check for deprecated use of signature
1369
raise TypeError('You must supply a target directory')
1370
# check destination directory
1371
if isinstance(from_paths, basestring):
1373
inv = self.inventory
1374
to_abs = self.abspath(to_dir)
1375
if not isdir(to_abs):
1376
raise errors.BzrMoveFailedError('',to_dir,
1377
errors.NotADirectory(to_abs))
1378
if not self.has_filename(to_dir):
1379
raise errors.BzrMoveFailedError('',to_dir,
1380
errors.NotInWorkingDirectory(to_dir))
1381
to_dir_id = inv.path2id(to_dir)
1382
if to_dir_id is None:
1383
raise errors.BzrMoveFailedError('',to_dir,
1384
errors.NotVersionedError(path=str(to_dir)))
1386
to_dir_ie = inv[to_dir_id]
1387
if to_dir_ie.kind != 'directory':
1388
raise errors.BzrMoveFailedError('',to_dir,
1389
errors.NotADirectory(to_abs))
1391
# create rename entries and tuples
1392
for from_rel in from_paths:
1393
from_tail = splitpath(from_rel)[-1]
1394
from_id = inv.path2id(from_rel)
1396
raise errors.BzrMoveFailedError(from_rel,to_dir,
1397
errors.NotVersionedError(path=str(from_rel)))
1399
from_entry = inv[from_id]
1400
from_parent_id = from_entry.parent_id
1401
to_rel = pathjoin(to_dir, from_tail)
1402
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1404
from_tail=from_tail,
1405
from_parent_id=from_parent_id,
1406
to_rel=to_rel, to_tail=from_tail,
1407
to_parent_id=to_dir_id)
1408
rename_entries.append(rename_entry)
1409
rename_tuples.append((from_rel, to_rel))
1411
# determine which move mode to use. checks also for movability
1412
rename_entries = self._determine_mv_mode(rename_entries, after)
1414
original_modified = self._inventory_is_modified
1417
self._inventory_is_modified = True
1418
self._move(rename_entries)
1420
# restore the inventory on error
1421
self._inventory_is_modified = original_modified
1423
self._write_inventory(inv)
1424
return rename_tuples
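    # Editor's illustrative sketch (not part of the original source): the two
    # move() modes described in the docstring above. Paths are hypothetical
    # placeholders.
    def _example_moves(self):
        # normal mode: rename on disk and update the inventory
        self.move(['a.txt'], 'subdir')
        # after=True: the file was already renamed on disk (e.g. by an external
        # tool), so only the inventory is updated
        self.move(['b.txt'], 'subdir', after=True)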
1426
def _determine_mv_mode(self, rename_entries, after=False):
1427
"""Determines for each from-to pair if both inventory and working tree
1428
or only the inventory has to be changed.
1430
Also does basic plausability tests.
1432
inv = self.inventory
1434
for rename_entry in rename_entries:
1435
# store to local variables for easier reference
1436
from_rel = rename_entry.from_rel
1437
from_id = rename_entry.from_id
1438
to_rel = rename_entry.to_rel
1439
to_id = inv.path2id(to_rel)
1440
only_change_inv = False
1442
# check the inventory for source and destination
1444
raise errors.BzrMoveFailedError(from_rel,to_rel,
1445
errors.NotVersionedError(path=str(from_rel)))
1446
if to_id is not None:
1447
raise errors.BzrMoveFailedError(from_rel,to_rel,
1448
errors.AlreadyVersionedError(path=str(to_rel)))
1450
# try to determine the mode for rename (only change inv or change
1451
# inv and file system)
1453
if not self.has_filename(to_rel):
1454
raise errors.BzrMoveFailedError(from_id,to_rel,
1455
errors.NoSuchFile(path=str(to_rel),
1456
extra="New file has not been created yet"))
1457
only_change_inv = True
1458
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1459
only_change_inv = True
1460
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1461
only_change_inv = False
1462
elif (not self.case_sensitive
1463
and from_rel.lower() == to_rel.lower()
1464
and self.has_filename(from_rel)):
1465
only_change_inv = False
1467
# something is wrong, so lets determine what exactly
1468
if not self.has_filename(from_rel) and \
1469
not self.has_filename(to_rel):
1470
raise errors.BzrRenameFailedError(from_rel,to_rel,
1471
errors.PathsDoNotExist(paths=(str(from_rel),
1474
raise errors.RenameFailedFilesExist(from_rel, to_rel)
1475
rename_entry.only_change_inv = only_change_inv
1476
return rename_entries
1478
def _move(self, rename_entries):
1479
"""Moves a list of files.
1481
Depending on the value of the flag 'only_change_inv', the
1482
file will be moved on the file system or not.
1484
inv = self.inventory
1487
for entry in rename_entries:
1489
self._move_entry(entry)
1491
self._rollback_move(moved)
1495
def _rollback_move(self, moved):
1496
"""Try to rollback a previous move in case of an filesystem error."""
1497
inv = self.inventory
1500
self._move_entry(WorkingTree._RenameEntry(
1501
entry.to_rel, entry.from_id,
1502
entry.to_tail, entry.to_parent_id, entry.from_rel,
1503
entry.from_tail, entry.from_parent_id,
1504
entry.only_change_inv))
1505
except errors.BzrMoveFailedError, e:
1506
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1507
" The working tree is in an inconsistent state."
1508
" Please consider doing a 'bzr revert'."
1509
" Error message is: %s" % e)
1511
def _move_entry(self, entry):
1512
inv = self.inventory
1513
from_rel_abs = self.abspath(entry.from_rel)
1514
to_rel_abs = self.abspath(entry.to_rel)
1515
if from_rel_abs == to_rel_abs:
1516
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1517
"Source and target are identical.")
1519
if not entry.only_change_inv:
1521
osutils.rename(from_rel_abs, to_rel_abs)
1523
raise errors.BzrMoveFailedError(entry.from_rel,
1525
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1527
@needs_tree_write_lock
1528
def rename_one(self, from_rel, to_rel, after=False):
1531
This can change the directory or the filename or both.
1533
rename_one has several 'modes' to work. First, it can rename a physical
1534
file and change the file_id. That is the normal mode. Second, it can
1535
only change the file_id without touching any physical file. This is
1536
the new mode introduced in version 0.15.
1538
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1539
versioned but present in the working tree.
1541
rename_one uses the second mode if 'after == False' and 'from_rel' is
1542
versioned but no longer in the working tree, and 'to_rel' is not
1543
versioned but present in the working tree.
1545
rename_one uses the first mode if 'after == False' and 'from_rel' is
1546
versioned and present in the working tree, and 'to_rel' is not
1547
versioned and not present in the working tree.
1549
Everything else results in an error.
1551
inv = self.inventory
1554
# create rename entries and tuples
1555
from_tail = splitpath(from_rel)[-1]
1556
from_id = inv.path2id(from_rel)
1558
# if file is missing in the inventory maybe it's in the basis_tree
1559
basis_tree = self.branch.basis_tree()
1560
from_id = basis_tree.path2id(from_rel)
1562
raise errors.BzrRenameFailedError(from_rel,to_rel,
1563
errors.NotVersionedError(path=str(from_rel)))
1564
# put entry back in the inventory so we can rename it
1565
from_entry = basis_tree.inventory[from_id].copy()
1568
from_entry = inv[from_id]
1569
from_parent_id = from_entry.parent_id
1570
to_dir, to_tail = os.path.split(to_rel)
1571
to_dir_id = inv.path2id(to_dir)
1572
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1574
from_tail=from_tail,
1575
from_parent_id=from_parent_id,
1576
to_rel=to_rel, to_tail=to_tail,
1577
to_parent_id=to_dir_id)
1578
rename_entries.append(rename_entry)
1580
# determine which move mode to use. checks also for movability
1581
rename_entries = self._determine_mv_mode(rename_entries, after)
1583
# check if the target changed directory and if the target directory is
1585
if to_dir_id is None:
1586
raise errors.BzrMoveFailedError(from_rel,to_rel,
1587
errors.NotVersionedError(path=str(to_dir)))
1589
# all checks done. now we can continue with our actual work
1590
mutter('rename_one:\n'
1595
' to_dir_id {%s}\n',
1596
from_id, from_rel, to_rel, to_dir, to_dir_id)
1598
self._move(rename_entries)
1599
self._write_inventory(inv)
1601
class _RenameEntry(object):
1602
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1603
to_rel, to_tail, to_parent_id, only_change_inv=False):
1604
self.from_rel = from_rel
1605
self.from_id = from_id
1606
self.from_tail = from_tail
1607
self.from_parent_id = from_parent_id
1608
self.to_rel = to_rel
1609
self.to_tail = to_tail
1610
self.to_parent_id = to_parent_id
1611
self.only_change_inv = only_change_inv
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
        return iter(
            [subp for subp in self.extras() if not self.is_ignored(subp)])
1625
@needs_tree_write_lock
1626
def unversion(self, file_ids):
1627
"""Remove the file ids in file_ids from the current versioned set.
1629
When a file_id is unversioned, all of its children are automatically
1632
:param file_ids: The file ids to stop versioning.
1633
:raises: NoSuchId if any fileid is not currently versioned.
1635
for file_id in file_ids:
1636
if file_id not in self._inventory:
1637
raise errors.NoSuchId(self, file_id)
1638
for file_id in file_ids:
1639
if self._inventory.has_id(file_id):
1640
self._inventory.remove_recursive_id(file_id)
1642
# in the future this should just set a dirty bit to wait for the
1643
# final unlock. However, until all methods of workingtree start
1644
# with the current in -memory inventory rather than triggering
1645
# a read, it is more complex - we need to teach read_inventory
1646
# to know when to read, and when to not read first... and possibly
1647
# to save first when the in memory one may be corrupted.
1648
# so for now, we just only write it if it is indeed dirty.
1650
self._write_inventory(self._inventory)
1652
def _iter_conflicts(self):
1654
for info in self.list_files():
1656
stem = get_conflicted_stem(path)
1659
if stem not in conflicted:
1660
conflicted.add(stem)
1664
def pull(self, source, overwrite=False, stop_revision=None,
1665
change_reporter=None, possible_transports=None, local=False):
1668
old_revision_info = self.branch.last_revision_info()
1669
basis_tree = self.basis_tree()
1670
count = self.branch.pull(source, overwrite, stop_revision,
1671
possible_transports=possible_transports,
1673
new_revision_info = self.branch.last_revision_info()
1674
if new_revision_info != old_revision_info:
1675
repository = self.branch.repository
1676
basis_tree.lock_read()
1678
new_basis_tree = self.branch.basis_tree()
1685
change_reporter=change_reporter)
1686
basis_root_id = basis_tree.get_root_id()
1687
new_root_id = new_basis_tree.get_root_id()
1688
if basis_root_id != new_root_id:
1689
self.set_root_id(new_root_id)
1692
# TODO - dedup parents list with things merged by pull ?
1693
# reuse the revisiontree we merged against to set the new
1695
parent_trees = [(self.branch.last_revision(), new_basis_tree)]
1696
# we have to pull the merge trees out again, because
1697
# merge_inner has set the ids. - this corner is not yet
1698
# layered well enough to prevent double handling.
1699
# XXX TODO: Fix the double handling: telling the tree about
1700
# the already known parent data is wasteful.
1701
merges = self.get_parent_ids()[1:]
1702
parent_trees.extend([
1703
(parent, repository.revision_tree(parent)) for
1705
self.set_parent_trees(parent_trees)
1711
def put_file_bytes_non_atomic(self, file_id, bytes):
1712
"""See MutableTree.put_file_bytes_non_atomic."""
1713
stream = file(self.id2abspath(file_id), 'wb')
1718
# TODO: update the hashcache here ?
    def extras(self):
        """Yield all unversioned files in this WorkingTree.

        If there are any unversioned directories then only the directory is
        returned, not all its children. But if there are unversioned files
        under a versioned subdirectory, they are returned.

        Currently returned depth-first, sorted by name within directories.
        This is the same order used by 'osutils.walkdirs'.
        """
        ## TODO: Work from given directory downwards
        for path, dir_entry in self.inventory.directories():
            # mutter("search for unknowns in %r", path)
            dirabs = self.abspath(path)
            if not isdir(dirabs):
                # e.g. directory deleted
                continue

            fl = []
            for subf in os.listdir(dirabs):
                if self.bzrdir.is_control_filename(subf):
                    continue
                if subf not in dir_entry.children:
                    try:
                        (subf_norm,
                         can_access) = osutils.normalized_filename(subf)
                    except UnicodeDecodeError:
                        path_os_enc = path.encode(osutils._fs_enc)
                        relpath = path_os_enc + '/' + subf
                        raise errors.BadFilenameEncoding(relpath,
                                                         osutils._fs_enc)
                    if subf_norm != subf and can_access:
                        if subf_norm not in dir_entry.children:
                            fl.append(subf_norm)
                    else:
                        fl.append(subf)

            fl.sort()
            for subf in fl:
                subp = pathjoin(path, subf)
                yield subp
    def ignored_files(self):
        """Yield list of PATH, IGNORE_PATTERN"""
        for subp in self.extras():
            pat = self.is_ignored(subp)
            if pat is not None:
                yield subp, pat

    def get_ignore_list(self):
        """Return list of ignore patterns.

        Cached in the Tree object after the first call.
        """
        ignoreset = getattr(self, '_ignoreset', None)
        if ignoreset is not None:
            return ignoreset

        ignore_globs = set()
        ignore_globs.update(ignores.get_runtime_ignores())
        ignore_globs.update(ignores.get_user_ignores())
        if self.has_filename(bzrlib.IGNORE_FILENAME):
            f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
            try:
                ignore_globs.update(ignores.parse_ignore_file(f))
            finally:
                f.close()
        self._ignoreset = ignore_globs
        return ignore_globs
1790
def _flush_ignore_list_cache(self):
1791
"""Resets the cached ignore list to force a cache rebuild."""
1792
self._ignoreset = None
1793
self._ignoreglobster = None
    def is_ignored(self, filename):
        r"""Check whether the filename matches an ignore pattern.

        Patterns containing '/' or '\' need to match the whole path;
        others match against only the last component. Patterns starting
        with '!' are ignore exceptions. Exceptions take precedence
        over regular patterns and cause the filename to not be ignored.

        If the file is ignored, returns the pattern which caused it to
        be ignored, otherwise None. So this can simply be used as a
        boolean if desired."""
        if getattr(self, '_ignoreglobster', None) is None:
            self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
        return self._ignoreglobster.match(filename)
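    # Editor's illustrative sketch (not part of the original source):
    # is_ignored() returns the matching pattern (usable as a boolean) or None.
    # The relative path used here is a hypothetical placeholder.
    def _example_check_ignored(self, relpath='build/output.o'):
        pattern = self.is_ignored(relpath)
        if pattern is not None:
            return 'ignored by pattern %r' % pattern
        return 'not ignored'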
1810
def kind(self, file_id):
1811
return file_kind(self.id2abspath(file_id))
1813
def stored_kind(self, file_id):
1814
"""See Tree.stored_kind"""
1815
return self.inventory[file_id].kind
1817
def _comparison_data(self, entry, path):
1818
abspath = self.abspath(path)
1820
stat_value = os.lstat(abspath)
1822
if getattr(e, 'errno', None) == errno.ENOENT:
1829
mode = stat_value.st_mode
1830
kind = osutils.file_kind_from_stat_mode(mode)
1831
if not supports_executable():
1832
executable = entry is not None and entry.executable
1834
executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
1835
return kind, executable, stat_value
1837
def _file_size(self, entry, stat_value):
1838
return stat_value.st_size
1840
def last_revision(self):
1841
"""Return the last revision of the branch for this tree.
1843
This format tree does not support a separate marker for last-revision
1844
compared to the branch.
1846
See MutableTree.last_revision
1848
return self._last_revision()
1851
def _last_revision(self):
1852
"""helper for get_parent_ids."""
1853
return _mod_revision.ensure_null(self.branch.last_revision())
1855
def is_locked(self):
1856
return self._control_files.is_locked()
1858
def _must_be_locked(self):
1859
if not self.is_locked():
1860
raise errors.ObjectNotLocked(self)
1862
def lock_read(self):
1863
"""Lock the tree for reading.
1865
This also locks the branch, and can be unlocked via self.unlock().
1867
:return: A bzrlib.lock.LogicalLockResult.
1869
if not self.is_locked():
1871
self.branch.lock_read()
1873
self._control_files.lock_read()
1874
return LogicalLockResult(self.unlock)
1876
self.branch.unlock()
1879
def lock_tree_write(self):
1880
"""See MutableTree.lock_tree_write, and WorkingTree.unlock.
1882
:return: A bzrlib.lock.LogicalLockResult.
1884
if not self.is_locked():
1886
self.branch.lock_read()
1888
self._control_files.lock_write()
1889
return LogicalLockResult(self.unlock)
1891
self.branch.unlock()
1894
def lock_write(self):
1895
"""See MutableTree.lock_write, and WorkingTree.unlock.
1897
:return: A bzrlib.lock.LogicalLockResult.
1899
if not self.is_locked():
1901
self.branch.lock_write()
1903
self._control_files.lock_write()
1904
return LogicalLockResult(self.unlock)
1906
self.branch.unlock()
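    # Editor's illustrative sketch (not part of the original source): the usual
    # lock/unlock discipline for the locking methods above - every lock_read(),
    # lock_tree_write() or lock_write() call must be paired with unlock().
    def _example_locked_operation(self):
        self.lock_write()
        try:
            return self.get_parent_ids()
        finally:
            self.unlock()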
1909
def get_physical_lock_status(self):
1910
return self._control_files.get_physical_lock_status()
1912
def _basis_inventory_name(self):
1913
return 'basis-inventory-cache'
1915
def _reset_data(self):
1916
"""Reset transient data that cannot be revalidated."""
1917
self._inventory_is_modified = False
1918
f = self._transport.get('inventory')
1920
result = self._deserialize(f)
1923
self._set_inventory(result, dirty=False)
1925
@needs_tree_write_lock
1926
def set_last_revision(self, new_revision):
1927
"""Change the last revision in the working tree."""
1928
if self._change_last_revision(new_revision):
1929
self._cache_basis_inventory(new_revision)
1931
def _change_last_revision(self, new_revision):
1932
"""Template method part of set_last_revision to perform the change.
1934
This is used to allow WorkingTree3 instances to not affect branch
1935
when their last revision is set.
1937
if _mod_revision.is_null(new_revision):
1938
self.branch.set_revision_history([])
1941
self.branch.generate_revision_history(new_revision)
1942
except errors.NoSuchRevision:
1943
# not present in the repo - dont try to set it deeper than the tip
1944
self.branch.set_revision_history([new_revision])
1947
def _write_basis_inventory(self, xml):
1948
"""Write the basis inventory XML to the basis-inventory file"""
1949
path = self._basis_inventory_name()
1951
self._transport.put_file(path, sio,
1952
mode=self.bzrdir._get_file_mode())
1954
def _create_basis_xml_from_inventory(self, revision_id, inventory):
1955
"""Create the text that will be saved in basis-inventory"""
1956
inventory.revision_id = revision_id
1957
return xml7.serializer_v7.write_inventory_to_string(inventory)
1959
def _cache_basis_inventory(self, new_revision):
1960
"""Cache new_revision as the basis inventory."""
1961
# TODO: this should allow the ready-to-use inventory to be passed in,
1962
# as commit already has that ready-to-use [while the format is the
1965
# this double handles the inventory - unpack and repack -
1966
# but is easier to understand. We can/should put a conditional
1967
# in here based on whether the inventory is in the latest format
1968
# - perhaps we should repack all inventories on a repository
1970
# the fast path is to copy the raw xml from the repository. If the
1971
# xml contains 'revision_id="', then we assume the right
1972
# revision_id is set. We must check for this full string, because a
1973
# root node id can legitimately look like 'revision_id' but cannot
1975
xml = self.branch.repository._get_inventory_xml(new_revision)
1976
firstline = xml.split('\n', 1)[0]
1977
if (not 'revision_id="' in firstline or
1978
'format="7"' not in firstline):
1979
inv = self.branch.repository._serializer.read_inventory_from_string(
1981
xml = self._create_basis_xml_from_inventory(new_revision, inv)
1982
self._write_basis_inventory(xml)
1983
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1986
def read_basis_inventory(self):
1987
"""Read the cached basis inventory."""
1988
path = self._basis_inventory_name()
1989
return self._transport.get_bytes(path)
1992
def read_working_inventory(self):
1993
"""Read the working inventory.
1995
:raises errors.InventoryModified: read_working_inventory will fail
1996
when the current in memory inventory has been modified.
1998
# conceptually this should be an implementation detail of the tree.
1999
# XXX: Deprecate this.
2000
# ElementTree does its own conversion from UTF-8, so open in
2002
if self._inventory_is_modified:
2003
raise errors.InventoryModified(self)
2004
f = self._transport.get('inventory')
2006
result = self._deserialize(f)
2009
self._set_inventory(result, dirty=False)
2012
@needs_tree_write_lock
2013
def remove(self, files, verbose=False, to_file=None, keep_files=True,
2015
"""Remove nominated files from the working inventory.
2017
:files: File paths relative to the basedir.
2018
:keep_files: If true, the files will also be kept.
2019
:force: Delete files and directories, even if they are changed and
2020
even if the directories are not empty.
2022
if isinstance(files, basestring):
2027
all_files = set() # specified and nested files
2028
unknown_nested_files=set()
2030
to_file = sys.stdout
2032
files_to_backup = []
2034
def recurse_directory_to_add_files(directory):
2035
# Recurse directory and add all files
2036
# so we can check if they have changed.
2037
for parent_info, file_infos in self.walkdirs(directory):
2038
for relpath, basename, kind, lstat, fileid, kind in file_infos:
2039
# Is it versioned or ignored?
2040
if self.path2id(relpath):
2041
# Add nested content for deletion.
2042
all_files.add(relpath)
2044
# Files which are not versioned
2045
# should be treated as unknown.
2046
files_to_backup.append(relpath)
2048
for filename in files:
2049
# Get file name into canonical form.
2050
abspath = self.abspath(filename)
2051
filename = self.relpath(abspath)
2052
if len(filename) > 0:
2053
all_files.add(filename)
2054
recurse_directory_to_add_files(filename)
2056
files = list(all_files)
2059
return # nothing to do
2061
# Sort needed to first handle directory content before the directory
2062
files.sort(reverse=True)
2064
# Bail out if we are going to delete files we shouldn't
2065
if not keep_files and not force:
2066
for (file_id, path, content_change, versioned, parent_id, name,
2067
kind, executable) in self.iter_changes(self.basis_tree(),
2068
include_unchanged=True, require_versioned=False,
2069
want_unversioned=True, specific_files=files):
2070
if versioned[0] == False:
2071
# The record is unknown or newly added
2072
files_to_backup.append(path[1])
2073
elif (content_change and (kind[1] is not None) and
2074
osutils.is_inside_any(files, path[1])):
2075
# Versioned and changed, but not deleted, and still
2076
# in one of the dirs to be deleted.
2077
files_to_backup.append(path[1])
2079
def backup(file_to_backup):
2080
backup_name = self.bzrdir.generate_backup_name(file_to_backup)
2081
osutils.rename(abs_path, self.abspath(backup_name))
2082
return "removed %s (but kept a copy: %s)" % (file_to_backup, backup_name)
2084
# Build inv_delta and delete files where applicable,
2085
# do this before any modifications to inventory.
2087
fid = self.path2id(f)
2090
message = "%s is not versioned." % (f,)
2093
# having removed it, it must be either ignored or unknown
2094
if self.is_ignored(f):
2098
# XXX: Really should be a more abstract reporter interface
2099
kind_ch = osutils.kind_marker(self.kind(fid))
2100
to_file.write(new_status + ' ' + f + kind_ch + '\n')
2102
inv_delta.append((f, None, fid, None))
2103
message = "removed %s" % (f,)
2106
abs_path = self.abspath(f)
2107
if osutils.lexists(abs_path):
2108
if (osutils.isdir(abs_path) and
2109
len(os.listdir(abs_path)) > 0):
2111
osutils.rmtree(abs_path)
2112
message = "deleted %s" % (f,)
2116
if f in files_to_backup:
2119
osutils.delete_any(abs_path)
2120
message = "deleted %s" % (f,)
2121
elif message is not None:
2122
# Only care if we haven't done anything yet.
2123
message = "%s does not exist." % (f,)
2125
# Print only one message (if any) per file.
2126
if message is not None:
2128
self.apply_inventory_delta(inv_delta)
2130
@needs_tree_write_lock
2131
def revert(self, filenames=None, old_tree=None, backups=True,
2132
pb=None, report_changes=False):
2133
from bzrlib.conflicts import resolve
2136
symbol_versioning.warn('Using [] to revert all files is deprecated'
2137
' as of bzr 0.91. Please use None (the default) instead.',
2138
DeprecationWarning, stacklevel=2)
2139
if old_tree is None:
2140
basis_tree = self.basis_tree()
2141
basis_tree.lock_read()
2142
old_tree = basis_tree
2146
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
2148
if filenames is None and len(self.get_parent_ids()) > 1:
2150
last_revision = self.last_revision()
2151
if last_revision != _mod_revision.NULL_REVISION:
2152
if basis_tree is None:
2153
basis_tree = self.basis_tree()
2154
basis_tree.lock_read()
2155
parent_trees.append((last_revision, basis_tree))
2156
self.set_parent_trees(parent_trees)
2159
resolve(self, filenames, ignore_misses=True, recursive=True)
2161
if basis_tree is not None:
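# Illustrative usage sketch for revert() (hypothetical path, not module
# code): filenames=None reverts the whole tree against old_tree (the basis
# tree by default), while a list restricts the revert; the deprecated []
# form warns and is treated like None, as handled above.
wt = WorkingTree.open('.')
wt.revert()                            # whole tree against the basis tree
wt.revert(['README'], backups=False)   # a single path, without backup copies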
2165
def revision_tree(self, revision_id):
2166
"""See Tree.revision_tree.
2168
WorkingTree can supply revision_trees for the basis revision only
2169
because there is only one cached inventory in the bzr directory.
2171
if revision_id == self.last_revision():
2173
xml = self.read_basis_inventory()
2174
except errors.NoSuchFile:
2178
inv = xml7.serializer_v7.read_inventory_from_string(xml)
2179
# don't use the repository revision_tree api because we want
2180
# to supply the inventory.
2181
if inv.revision_id == revision_id:
2182
return revisiontree.RevisionTree(self.branch.repository,
2184
except errors.BadInventoryFormat:
2186
# raise if there was no inventory, or if we read the wrong inventory.
2187
raise errors.NoSuchRevisionInTree(self, revision_id)
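# Illustrative fallback sketch (a hypothetical helper, not module code):
# since revision_tree() above only answers for the cached basis inventory,
# callers typically fall back to the branch's repository for anything else.
def _revision_tree_or_repository(wt, revision_id):
    try:
        return wt.revision_tree(revision_id)
    except errors.NoSuchRevisionInTree:
        return wt.branch.repository.revision_tree(revision_id)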
2189
# XXX: This method should be deprecated in favour of taking in a proper
2190
# new Inventory object.
2191
@needs_tree_write_lock
2192
def set_inventory(self, new_inventory_list):
2193
from bzrlib.inventory import (Inventory,
2197
inv = Inventory(self.get_root_id())
2198
for path, file_id, parent, kind in new_inventory_list:
2199
name = os.path.basename(path)
2202
# FIXME: there should be a factory function inv.add_??
2203
if kind == 'directory':
2204
inv.add(InventoryDirectory(file_id, name, parent))
2205
elif kind == 'file':
2206
inv.add(InventoryFile(file_id, name, parent))
2207
elif kind == 'symlink':
2208
inv.add(InventoryLink(file_id, name, parent))
2210
raise errors.BzrError("unknown kind %r" % kind)
2211
self._write_inventory(inv)
2213
@needs_tree_write_lock
2214
def set_root_id(self, file_id):
2215
"""Set the root id for this tree."""
2219
'WorkingTree.set_root_id with fileid=None')
2220
file_id = osutils.safe_file_id(file_id)
2221
self._set_root_id(file_id)
2223
def _set_root_id(self, file_id):
2224
"""Set the root id for this tree, in a format specific manner.
2226
:param file_id: The file id to assign to the root. It must not be
2227
present in the current inventory or an error will occur. It must
2228
not be None, but rather a valid file id.
2230
inv = self._inventory
2231
orig_root_id = inv.root.file_id
2232
# TODO: it might be nice to exit early if there was nothing
2233
# to do, saving us from triggering a sync on unlock.
2234
self._inventory_is_modified = True
2235
# we preserve the root inventory entry object, but
2236
# unlink it from the _byid index
2237
del inv._byid[inv.root.file_id]
2238
inv.root.file_id = file_id
2239
# and link it into the index with the new changed id.
2240
inv._byid[inv.root.file_id] = inv.root
2241
# and finally update all children to reference the new id.
2242
# XXX: this should be safe to just look at the root.children
2243
# list, not the WHOLE INVENTORY.
2246
if entry.parent_id == orig_root_id:
2247
entry.parent_id = inv.root.file_id
2250
"""See Branch.unlock.
2252
WorkingTree locking just uses the Branch locking facilities.
2253
This is currently the case because all working trees have an embedded branch
2254
within them. If, in the future, we were to make branch data shareable
2255
between multiple working trees, i.e. via shared storage, then we
2256
would probably want to lock both the local tree, and the branch.
2258
raise NotImplementedError(self.unlock)
2262
def update(self, change_reporter=None, possible_transports=None,
2263
revision=None, old_tip=_marker):
2264
"""Update a working tree along its branch.
2266
This will update the branch if it is bound too, which means we have
2267
multiple trees involved:
2269
- The new basis tree of the master.
2270
- The old basis tree of the branch.
2271
- The old basis tree of the working tree.
2272
- The current working tree state.
2274
Pathologically, all of these may be different, and non-ancestors of each
2275
other. Conceptually we want to:
2277
- Preserve the wt.basis->wt.state changes
2278
- Transform the wt.basis to the new master basis.
2279
- Apply a merge of the old branch basis to get any 'local' changes from
2281
- Restore the wt.basis->wt.state changes.
2283
There isn't a single operation at the moment to do that, so we:
2284
- Merge current state -> basis tree of the master w.r.t. the old tree
2286
- Do a 'normal' merge of the old branch basis if it is relevant.
2288
:param revision: The target revision to update to. Must be in the
2290
:param old_tip: If branch.update() has already been run, the value it
2291
returned (old tip of the branch or None). _marker is used
2294
if self.branch.get_bound_location() is not None:
2296
update_branch = (old_tip is self._marker)
2298
self.lock_tree_write()
2299
update_branch = False
2302
old_tip = self.branch.update(possible_transports)
2304
if old_tip is self._marker:
2306
return self._update_tree(old_tip, change_reporter, revision)
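# Illustrative usage sketch (not module code; assumes update() propagates
# the conflict count returned by _update_tree below): updating a checkout
# after its master branch has moved on.
wt = WorkingTree.open('.')
nb_conflicts = wt.update()
if nb_conflicts:
    # resolve the conflicts, then rerun update as the note in _update_tree
    # suggests
    pass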
2310
@needs_tree_write_lock
2311
def _update_tree(self, old_tip=None, change_reporter=None, revision=None):
2312
"""Update a tree to the master branch.
2314
:param old_tip: if supplied, the previous tip revision of the branch,
2315
before it was changed to the master branch's tip.
2317
# here if old_tip is not None, it is the old tip of the branch before
2318
# it was updated from the master branch. This should become a pending
2319
# merge in the working tree to preserve the user's existing work. We
2320
# can't set that until we update the working tree's last revision to be
2321
# one from the new branch, because it will just get absorbed by the
2322
# parent de-duplication logic.
2324
# We MUST save it even if an error occurs, because otherwise the user's
2325
# local work is unreferenced and will appear to have been lost.
2329
last_rev = self.get_parent_ids()[0]
2331
last_rev = _mod_revision.NULL_REVISION
2332
if revision is None:
2333
revision = self.branch.last_revision()
2335
old_tip = old_tip or _mod_revision.NULL_REVISION
2337
if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
2338
# the branch we are bound to was updated
2339
# merge those changes in first
2340
base_tree = self.basis_tree()
2341
other_tree = self.branch.repository.revision_tree(old_tip)
2342
nb_conflicts = merge.merge_inner(self.branch, other_tree,
2343
base_tree, this_tree=self,
2344
change_reporter=change_reporter)
2346
self.add_parent_tree((old_tip, other_tree))
2347
trace.note('Rerun update after fixing the conflicts.')
2350
if last_rev != _mod_revision.ensure_null(revision):
2351
# the working tree is up to date with the branch
2352
# we can merge the specified revision from master
2353
to_tree = self.branch.repository.revision_tree(revision)
2354
to_root_id = to_tree.get_root_id()
2356
basis = self.basis_tree()
2359
if (basis.inventory.root is None
2360
or basis.inventory.root.file_id != to_root_id):
2361
self.set_root_id(to_root_id)
2366
# determine the branch point
2367
graph = self.branch.repository.get_graph()
2368
base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
2370
base_tree = self.branch.repository.revision_tree(base_rev_id)
2372
nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
2374
change_reporter=change_reporter)
2375
self.set_last_revision(revision)
2376
# TODO - dedup parents list with things merged by pull ?
2377
# reuse the tree we've updated to, to set the basis:
2378
parent_trees = [(revision, to_tree)]
2379
merges = self.get_parent_ids()[1:]
2380
# Ideally we ask the tree for the trees here, that way the working
2381
# tree can decide whether to give us the entire tree or give us a
2382
# lazy initialised tree. dirstate for instance will have the trees
2383
# in ram already, whereas a last-revision + basis-inventory tree
2384
# will not, but also does not need them when setting parents.
2385
for parent in merges:
2386
parent_trees.append(
2387
(parent, self.branch.repository.revision_tree(parent)))
2388
if not _mod_revision.is_null(old_tip):
2389
parent_trees.append(
2390
(old_tip, self.branch.repository.revision_tree(old_tip)))
2391
self.set_parent_trees(parent_trees)
2392
last_rev = parent_trees[0][0]
2395
def _write_hashcache_if_dirty(self):
2396
"""Write out the hashcache if it is dirty."""
2397
if self._hashcache.needs_write:
2399
self._hashcache.write()
2401
if e.errno not in (errno.EPERM, errno.EACCES):
2403
# TODO: jam 20061219 Should this be a warning? A single line
2404
# warning might be sufficient to let the user know what
2406
mutter('Could not write hashcache for %s\nError: %s',
2407
self._hashcache.cache_file_name(), e)
2409
@needs_tree_write_lock
2410
def _write_inventory(self, inv):
2411
"""Write inventory as the current inventory."""
2412
self._set_inventory(inv, dirty=True)
2415
def set_conflicts(self, arg):
2416
raise errors.UnsupportedOperation(self.set_conflicts, self)
2418
def add_conflicts(self, arg):
2419
raise errors.UnsupportedOperation(self.add_conflicts, self)
2422
def conflicts(self):
2423
conflicts = _mod_conflicts.ConflictList()
2424
for conflicted in self._iter_conflicts():
2427
if file_kind(self.abspath(conflicted)) != "file":
2429
except errors.NoSuchFile:
2432
for suffix in ('.THIS', '.OTHER'):
2434
kind = file_kind(self.abspath(conflicted+suffix))
2437
except errors.NoSuchFile:
2441
ctype = {True: 'text conflict', False: 'contents conflict'}[text]
2442
conflicts.append(_mod_conflicts.Conflict.factory(ctype,
2444
file_id=self.path2id(conflicted)))
2447
def walkdirs(self, prefix=""):
2448
"""Walk the directories of this tree.
2450
returns a generator which yields items in the form:
2451
((current_directory_path, fileid),
2452
[(file1_path, file1_name, file1_kind, (lstat), file1_id,
2455
This API returns a generator, which is only valid during the current
2456
tree transaction - within a single lock_read or lock_write duration.
2458
If the tree is not locked, it may cause an error to be raised,
2459
depending on the tree implementation.
2461
disk_top = self.abspath(prefix)
2462
if disk_top.endswith('/'):
2463
disk_top = disk_top[:-1]
2464
top_strip_len = len(disk_top) + 1
2465
inventory_iterator = self._walkdirs(prefix)
2466
disk_iterator = osutils.walkdirs(disk_top, prefix)
2468
current_disk = disk_iterator.next()
2469
disk_finished = False
2471
if not (e.errno == errno.ENOENT or
2472
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
2475
disk_finished = True
2477
current_inv = inventory_iterator.next()
2478
inv_finished = False
2479
except StopIteration:
2482
while not inv_finished or not disk_finished:
2484
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2485
cur_disk_dir_content) = current_disk
2487
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2488
cur_disk_dir_content) = ((None, None), None)
2489
if not disk_finished:
2490
# strip out .bzr dirs
2491
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
2492
len(cur_disk_dir_content) > 0):
2493
# osutils.walkdirs can be made nicer -
2494
# yield the path-from-prefix rather than the pathjoined
2496
bzrdir_loc = bisect_left(cur_disk_dir_content,
2498
if (bzrdir_loc < len(cur_disk_dir_content)
2499
and self.bzrdir.is_control_filename(
2500
cur_disk_dir_content[bzrdir_loc][0])):
2501
# we don't yield the contents of .bzr, or .bzr itself.
2502
del cur_disk_dir_content[bzrdir_loc]
2504
# everything is unknown
2507
# everything is missing
2510
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
2512
# disk is before inventory - unknown
2513
dirblock = [(relpath, basename, kind, stat, None, None) for
2514
relpath, basename, kind, stat, top_path in
2515
cur_disk_dir_content]
2516
yield (cur_disk_dir_relpath, None), dirblock
2518
current_disk = disk_iterator.next()
2519
except StopIteration:
2520
disk_finished = True
2522
# inventory is before disk - missing.
2523
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
2524
for relpath, basename, dkind, stat, fileid, kind in
2526
yield (current_inv[0][0], current_inv[0][1]), dirblock
2528
current_inv = inventory_iterator.next()
2529
except StopIteration:
2532
# versioned present directory
2533
# merge the inventory and disk data together
2535
for relpath, subiterator in itertools.groupby(sorted(
2536
current_inv[1] + cur_disk_dir_content,
2537
key=operator.itemgetter(0)), operator.itemgetter(1)):
2538
path_elements = list(subiterator)
2539
if len(path_elements) == 2:
2540
inv_row, disk_row = path_elements
2541
# versioned, present file
2542
dirblock.append((inv_row[0],
2543
inv_row[1], disk_row[2],
2544
disk_row[3], inv_row[4],
2546
elif len(path_elements[0]) == 5:
2548
dirblock.append((path_elements[0][0],
2549
path_elements[0][1], path_elements[0][2],
2550
path_elements[0][3], None, None))
2551
elif len(path_elements[0]) == 6:
2552
# versioned, absent file.
2553
dirblock.append((path_elements[0][0],
2554
path_elements[0][1], 'unknown', None,
2555
path_elements[0][4], path_elements[0][5]))
2557
raise NotImplementedError('unreachable code')
2558
yield current_inv[0], dirblock
2560
current_inv = inventory_iterator.next()
2561
except StopIteration:
2564
current_disk = disk_iterator.next()
2565
except StopIteration:
2566
disk_finished = True
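# Illustrative usage sketch (hypothetical variable names, not module code):
# consuming walkdirs() under a read lock and unpacking the 6-tuples described
# in its docstring; file_id is None for on-disk paths that are not versioned.
wt = WorkingTree.open('.')
wt.lock_read()
try:
    unknown_paths = []
    for (dir_relpath, dir_file_id), entries in wt.walkdirs():
        for relpath, basename, kind, stat_value, file_id, versioned_kind in entries:
            if file_id is None:
                unknown_paths.append(relpath)
finally:
    wt.unlock()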
2568
def _walkdirs(self, prefix=""):
2569
"""Walk the directories of this tree.
2571
:prefix: is used as the directory to start with.
2572
returns a generator which yields items in the form:
2573
((current_directory_path, fileid),
2574
[(file1_path, file1_name, file1_kind, None, file1_id,
2577
_directory = 'directory'
2578
# get the root in the inventory
2579
inv = self.inventory
2580
top_id = inv.path2id(prefix)
2584
pending = [(prefix, '', _directory, None, top_id, None)]
2587
currentdir = pending.pop()
2588
# 0 - relpath, 1 - basename, 2 - kind, 3 - stat, 4 - id, 5 - kind
2589
top_id = currentdir[4]
2591
relroot = currentdir[0] + '/'
2594
# FIXME: stash the node in pending
2596
if entry.kind == 'directory':
2597
for name, child in entry.sorted_children():
2598
dirblock.append((relroot + name, name, child.kind, None,
2599
child.file_id, child.kind
2601
yield (currentdir[0], entry.file_id), dirblock
2602
# push the user specified dirs from dirblock
2603
for dir in reversed(dirblock):
2604
if dir[2] == _directory:
2607
@needs_tree_write_lock
2608
def auto_resolve(self):
2609
"""Automatically resolve text conflicts according to contents.
2611
Only text conflicts are auto_resolvable. Files with no conflict markers
2612
are considered 'resolved', because bzr always puts conflict markers
2613
into files that have text conflicts. The corresponding .THIS .BASE and
2614
.OTHER files are deleted, as per 'resolve'.
2615
:return: a tuple of ConflictLists: (un_resolved, resolved).
2617
un_resolved = _mod_conflicts.ConflictList()
2618
resolved = _mod_conflicts.ConflictList()
2619
conflict_re = re.compile('^(<{7}|={7}|>{7})')
2620
for conflict in self.conflicts():
2621
if (conflict.typestring != 'text conflict' or
2622
self.kind(conflict.file_id) != 'file'):
2623
un_resolved.append(conflict)
2625
my_file = open(self.id2abspath(conflict.file_id), 'rb')
2627
for line in my_file:
2628
if conflict_re.search(line):
2629
un_resolved.append(conflict)
2632
resolved.append(conflict)
2635
resolved.remove_files(self)
2636
self.set_conflicts(un_resolved)
2637
return un_resolved, resolved
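# Illustrative usage sketch (not module code): auto_resolve() looks for the
# seven-character markers matched by conflict_re above ('<<<<<<<', '=======',
# '>>>>>>>'); files without them are reported as resolved and their .THIS,
# .BASE and .OTHER siblings are removed.
wt = WorkingTree.open('.')
un_resolved, resolved = wt.auto_resolve()
for conflict in un_resolved:
    # these paths still contain conflict markers and need manual editing
    pass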
2640
def _check(self, references):
2641
"""Check the tree for consistency.
2643
:param references: A dict with keys matching the items returned by
2644
self._get_check_refs(), and values from looking those keys up in
2647
tree_basis = self.basis_tree()
2648
tree_basis.lock_read()
2650
repo_basis = references[('trees', self.last_revision())]
2651
if len(list(repo_basis.iter_changes(tree_basis))) > 0:
2652
raise errors.BzrCheckError(
2653
"Mismatched basis inventory content.")
2658
def _validate(self):
2659
"""Validate internal structures.
2661
This is meant mostly for the test suite, to give it a chance to detect
2662
corruption after actions have occurred. The default implementation is a
2665
:return: None. An exception should be raised if there is an error.
2669
def _get_rules_searcher(self, default_searcher):
2670
"""See Tree._get_rules_searcher."""
2671
if self._rules_searcher is None:
2672
self._rules_searcher = super(WorkingTree,
2673
self)._get_rules_searcher(default_searcher)
2674
return self._rules_searcher
2676
def get_shelf_manager(self):
2677
"""Return the ShelfManager for this WorkingTree."""
2678
from bzrlib.shelf import ShelfManager
2679
return ShelfManager(self, self._transport)
2682
class WorkingTree2(WorkingTree):
2683
"""This is the Format 2 working tree.
2685
This was the first weave based working tree.
2686
- uses os locks for locking.
2687
- uses the branch last-revision.
2690
def __init__(self, *args, **kwargs):
2691
super(WorkingTree2, self).__init__(*args, **kwargs)
2692
# WorkingTree2 has more of a constraint that self._inventory must
2693
# exist. Because this is an older format, we don't mind the overhead
2694
# caused by the extra computation here.
2696
# Newer WorkingTrees should only have self._inventory set when they
2698
if self._inventory is None:
2699
self.read_working_inventory()
2701
def _get_check_refs(self):
2702
"""Return the references needed to perform a check of this tree."""
2703
return [('trees', self.last_revision())]
2705
def lock_tree_write(self):
2706
"""See WorkingTree.lock_tree_write().
2708
In Format2 WorkingTrees we have a single lock for the branch and tree
2709
so lock_tree_write() degrades to lock_write().
2711
:return: An object with an unlock method which will release the lock
2714
self.branch.lock_write()
2716
self._control_files.lock_write()
2719
self.branch.unlock()
2723
# do non-implementation specific cleanup
2726
# we share control files:
2727
if self._control_files._lock_count == 3:
2728
# _inventory_is_modified is always False during a read lock.
2729
if self._inventory_is_modified:
2731
self._write_hashcache_if_dirty()
2733
# reverse order of locking.
2735
return self._control_files.unlock()
2737
self.branch.unlock()
2740
class WorkingTree3(WorkingTree):
2741
"""This is the Format 3 working tree.
2743
This differs from the base WorkingTree by:
2744
- having its own file lock
2745
- having its own last-revision property.
2747
This is new in bzr 0.8
2751
def _last_revision(self):
2752
"""See Mutable.last_revision."""
2754
return self._transport.get_bytes('last-revision')
2755
except errors.NoSuchFile:
2756
return _mod_revision.NULL_REVISION
2758
def _change_last_revision(self, revision_id):
2759
"""See WorkingTree._change_last_revision."""
2760
if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
2762
self._transport.delete('last-revision')
2763
except errors.NoSuchFile:
2767
self._transport.put_bytes('last-revision', revision_id,
2768
mode=self.bzrdir._get_file_mode())
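# A minimal behavioural sketch (an assumption, exercising the private
# helpers above directly with a made-up revision id, and assuming `wt` is a
# write-locked WorkingTree3): the null revision removes the 'last-revision'
# file, which _last_revision() reports as NULL_REVISION, while any other id
# is written to the file verbatim.
wt._change_last_revision(_mod_revision.NULL_REVISION)
assert wt._last_revision() == _mod_revision.NULL_REVISION
wt._change_last_revision('example-revid-1')
assert wt._last_revision() == 'example-revid-1'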
2771
def _get_check_refs(self):
2772
"""Return the references needed to perform a check of this tree."""
2773
return [('trees', self.last_revision())]
2775
@needs_tree_write_lock
2776
def set_conflicts(self, conflicts):
2777
self._put_rio('conflicts', conflicts.to_stanzas(),
2780
@needs_tree_write_lock
2781
def add_conflicts(self, new_conflicts):
2782
conflict_set = set(self.conflicts())
2783
conflict_set.update(set(list(new_conflicts)))
2784
self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
2785
key=_mod_conflicts.Conflict.sort_key)))
2788
def conflicts(self):
2790
confile = self._transport.get('conflicts')
2791
except errors.NoSuchFile:
2792
return _mod_conflicts.ConflictList()
2795
if confile.next() != CONFLICT_HEADER_1 + '\n':
2796
raise errors.ConflictFormatError()
2797
except StopIteration:
2798
raise errors.ConflictFormatError()
2799
return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
2804
# do non-implementation specific cleanup
2806
if self._control_files._lock_count == 1:
2807
# _inventory_is_modified is always False during a read lock.
2808
if self._inventory_is_modified:
2810
self._write_hashcache_if_dirty()
2811
# reverse order of locking.
2813
return self._control_files.unlock()
2815
self.branch.unlock()
2818
def get_conflicted_stem(path):
2819
for suffix in _mod_conflicts.CONFLICT_SUFFIXES:
2820
if path.endswith(suffix):
2821
return path[:-len(suffix)]
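# A small illustrative check (assuming '.THIS' is one of the
# _mod_conflicts.CONFLICT_SUFFIXES entries): the helper strips the first
# matching conflict suffix and implicitly returns None for a clean path.
assert get_conflicted_stem('README.THIS') == 'README'
assert get_conflicted_stem('README') is None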
2824
class WorkingTreeFormat(object):
2825
"""An encapsulation of the initialization and open routines for a format.
2827
Formats provide three things:
2828
* An initialization routine,
2832
Formats are placed in a dict by their format string for reference
2833
during workingtree opening. It is not required that these be instances; they
2834
can be classes themselves with class methods - it simply depends on
2835
whether state is needed for a given format or not.
2837
Once a format is deprecated, just deprecate the initialize and open
2838
methods on the format class. Do not deprecate the object, as the
2839
object will be created every time regardless.
2842
_default_format = None
2843
"""The default format used for new trees."""
2846
"""The known formats."""
2848
requires_rich_root = False
2850
upgrade_recommended = False
2853
def find_format(klass, a_bzrdir):
2854
"""Return the format for the working tree object in a_bzrdir."""
2856
transport = a_bzrdir.get_workingtree_transport(None)
2857
format_string = transport.get_bytes("format")
2858
return klass._formats[format_string]
2859
except errors.NoSuchFile:
2860
raise errors.NoWorkingTree(base=transport.base)
2862
raise errors.UnknownFormatError(format=format_string,
2863
kind="working tree")
2865
def __eq__(self, other):
2866
return self.__class__ is other.__class__
2868
def __ne__(self, other):
2869
return not (self == other)
2872
def get_default_format(klass):
2873
"""Return the current default format."""
2874
return klass._default_format
2876
def get_format_string(self):
2877
"""Return the ASCII format string that identifies this format."""
2878
raise NotImplementedError(self.get_format_string)
2880
def get_format_description(self):
2881
"""Return the short description for this format."""
2882
raise NotImplementedError(self.get_format_description)
2884
def is_supported(self):
2885
"""Is this format supported?
2887
Supported formats can be initialized and opened.
2888
Unsupported formats may not support initialization or committing or
2889
some other features depending on the reason for not being supported.
2893
def supports_content_filtering(self):
2894
"""True if this format supports content filtering."""
2897
def supports_views(self):
2898
"""True if this format supports stored views."""
2902
def register_format(klass, format):
2903
klass._formats[format.get_format_string()] = format
2906
def set_default_format(klass, format):
2907
klass._default_format = format
2910
def unregister_format(klass, format):
2911
del klass._formats[format.get_format_string()]
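# Illustrative sketch of the registry pattern described in the class
# docstring (a made-up format, not one shipped with bzr): the bytes stored
# in a tree's 'format' file key the _formats dict that find_format()
# consults.
class _DemoWorkingTreeFormat(WorkingTreeFormat):

    def get_format_string(self):
        return "Demo Working Tree format (illustration only)"

    def get_format_description(self):
        return "Demo working tree format"

_demo_format = _DemoWorkingTreeFormat()
WorkingTreeFormat.register_format(_demo_format)
assert WorkingTreeFormat._formats[_demo_format.get_format_string()] is _demo_format
WorkingTreeFormat.unregister_format(_demo_format)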
2914
class WorkingTreeFormat2(WorkingTreeFormat):
2915
"""The second working tree format.
2917
This format modified the hash cache from the format 1 hash cache.
2920
upgrade_recommended = True
2922
def get_format_description(self):
2923
"""See WorkingTreeFormat.get_format_description()."""
2924
return "Working tree format 2"
2926
def _stub_initialize_on_transport(self, transport, file_mode):
2927
"""Workaround: create control files for a remote working tree.
2929
This ensures that it can later be updated and dealt with locally,
2930
since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
2931
no working tree. (See bug #43064).
2934
inv = inventory.Inventory()
2935
xml5.serializer_v5.write_inventory(inv, sio, working=True)
2937
transport.put_file('inventory', sio, file_mode)
2938
transport.put_bytes('pending-merges', '', file_mode)
2940
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
2941
accelerator_tree=None, hardlink=False):
2942
"""See WorkingTreeFormat.initialize()."""
2943
if not isinstance(a_bzrdir.transport, LocalTransport):
2944
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2945
if from_branch is not None:
2946
branch = from_branch
2948
branch = a_bzrdir.open_branch()
2949
if revision_id is None:
2950
revision_id = _mod_revision.ensure_null(branch.last_revision())
2953
branch.generate_revision_history(revision_id)
2956
inv = inventory.Inventory()
2957
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
2963
basis_tree = branch.repository.revision_tree(revision_id)
2964
if basis_tree.inventory.root is not None:
2965
wt.set_root_id(basis_tree.get_root_id())
2966
# set the parent list and cache the basis tree.
2967
if _mod_revision.is_null(revision_id):
2970
parent_trees = [(revision_id, basis_tree)]
2971
wt.set_parent_trees(parent_trees)
2972
transform.build_tree(basis_tree, wt)
2976
super(WorkingTreeFormat2, self).__init__()
2977
self._matchingbzrdir = bzrdir.BzrDirFormat6()
2979
def open(self, a_bzrdir, _found=False):
2980
"""Return the WorkingTree object for a_bzrdir
2982
_found is a private parameter, do not use it. It is used to indicate
2983
if format probing has already been done.
2986
# we are being called directly and must probe.
2987
raise NotImplementedError
2988
if not isinstance(a_bzrdir.transport, LocalTransport):
2989
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2990
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
2996
class WorkingTreeFormat3(WorkingTreeFormat):
2997
"""The second working tree format updated to record a format marker.
3000
- exists within a metadir controlling .bzr
3001
- includes an explicit version marker for the workingtree control
3002
files, separate from the BzrDir format
3003
- modifies the hash cache format
3005
- uses a LockDir to guard access for writes.
3008
upgrade_recommended = True
3010
def get_format_string(self):
3011
"""See WorkingTreeFormat.get_format_string()."""
3012
return "Bazaar-NG Working Tree format 3"
3014
def get_format_description(self):
3015
"""See WorkingTreeFormat.get_format_description()."""
3016
return "Working tree format 3"
3018
_lock_file_name = 'lock'
3019
_lock_class = LockDir
3021
_tree_class = WorkingTree3
3023
def __get_matchingbzrdir(self):
3024
return bzrdir.BzrDirMetaFormat1()
3026
_matchingbzrdir = property(__get_matchingbzrdir)
3028
def _open_control_files(self, a_bzrdir):
3029
transport = a_bzrdir.get_workingtree_transport(None)
3030
return LockableFiles(transport, self._lock_file_name,
3033
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
3034
accelerator_tree=None, hardlink=False):
3035
"""See WorkingTreeFormat.initialize().
3037
:param revision_id: if supplied, create a working tree at a different
3038
revision than the branch is at.
3039
:param accelerator_tree: A tree which can be used for retrieving file
3040
contents more quickly than the revision tree, i.e. a workingtree.
3041
The revision tree will be used for cases where accelerator_tree's
3042
content is different.
3043
:param hardlink: If true, hard-link files from accelerator_tree,
3046
if not isinstance(a_bzrdir.transport, LocalTransport):
3047
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3048
transport = a_bzrdir.get_workingtree_transport(self)
3049
control_files = self._open_control_files(a_bzrdir)
3050
control_files.create_lock()
3051
control_files.lock_write()
3052
transport.put_bytes('format', self.get_format_string(),
3053
mode=a_bzrdir._get_file_mode())
3054
if from_branch is not None:
3055
branch = from_branch
3057
branch = a_bzrdir.open_branch()
3058
if revision_id is None:
3059
revision_id = _mod_revision.ensure_null(branch.last_revision())
3060
# WorkingTree3 can handle an inventory which has a unique root id.
3061
# as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
3062
# those trees. And because there isn't a format bump in between, we
3063
# are maintaining compatibility with older clients.
3064
# inv = Inventory(root_id=gen_root_id())
3065
inv = self._initial_inventory()
3066
wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3072
_control_files=control_files)
3073
wt.lock_tree_write()
3075
basis_tree = branch.repository.revision_tree(revision_id)
3076
# only set an explicit root id if there is one to set.
3077
if basis_tree.inventory.root is not None:
3078
wt.set_root_id(basis_tree.get_root_id())
3079
if revision_id == _mod_revision.NULL_REVISION:
3080
wt.set_parent_trees([])
3082
wt.set_parent_trees([(revision_id, basis_tree)])
3083
transform.build_tree(basis_tree, wt)
3085
# Unlock in this order so that the unlock-triggers-flush in
3086
# WorkingTree is given a chance to fire.
3087
control_files.unlock()
3091
def _initial_inventory(self):
3092
return inventory.Inventory()
3095
super(WorkingTreeFormat3, self).__init__()
3097
def open(self, a_bzrdir, _found=False):
3098
"""Return the WorkingTree object for a_bzrdir
3100
_found is a private parameter, do not use it. It is used to indicate
3101
if format probing has already been done.
3104
# we are being called directly and must probe.
3105
raise NotImplementedError
3106
if not isinstance(a_bzrdir.transport, LocalTransport):
3107
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3108
wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
3111
def _open(self, a_bzrdir, control_files):
3112
"""Open the tree itself.
3114
:param a_bzrdir: the dir for the tree.
3115
:param control_files: the control files for the tree.
3117
return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3121
_control_files=control_files)
3124
return self.get_format_string()
3127
__default_format = WorkingTreeFormat6()
3128
WorkingTreeFormat.register_format(__default_format)
3129
WorkingTreeFormat.register_format(WorkingTreeFormat5())
3130
WorkingTreeFormat.register_format(WorkingTreeFormat4())
3131
WorkingTreeFormat.register_format(WorkingTreeFormat3())
3132
WorkingTreeFormat.set_default_format(__default_format)
3133
# formats which have no format string are not discoverable
3134
# and not independently creatable, so are not registered.
3135
_legacy_formats = [WorkingTreeFormat2(),