        and the working file exists.
        """
        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):
                yield ie.file_id

    def all_file_ids(self):
        """See Tree.iter_all_file_ids"""
        return set(self.inventory)

    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)
    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        # at this point?
        try:
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, it's a problem; if it's
            # not, it's a ghost.
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)

    def _cleanup(self):
        self._flush_ignore_list_cache()
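    # Illustrative sketch (not part of the original source): how a caller
    # might use basis_tree(). The tree name 'wt' is hypothetical; the point
    # is that a ghost or absent leftmost parent still yields a usable
    # (empty) tree rather than an error.
    #
    #     wt = WorkingTree.open('.')
    #     wt.lock_read()
    #     try:
    #         basis = wt.basis_tree()
    #     finally:
    #         wt.unlock()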
    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If it's a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))

    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]
    def get_file_with_stat(self, file_id, path=None, filtered=True,
                           _fstat=osutils.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = _mod_filters.filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        my_file = self.get_file(file_id, path=path, filtered=filtered)
        try:
            return my_file.read()
        finally:
            my_file.close()

    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return _mod_filters.filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()
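    # Illustrative sketch (not in the original source): reading a versioned
    # file's content by file id. 'wt' and 'file_id' are assumed to come from
    # a read-locked WorkingTree; content filters apply unless filtered=False.
    #
    #     text = wt.get_file_text(file_id)
    #     raw = wt.get_file(file_id, filtered=False).read()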
    def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
        """See Tree.annotate_iter

        This implementation will use the basis tree implementation if possible.
        Lines not in the basis are attributed to CURRENT_REVISION

        If there are pending merges, lines added by those merges will be
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        maybe_file_parent_keys = []
        for parent_id in self.get_parent_ids():
            try:
                parent_tree = self.revision_tree(parent_id)
            except errors.NoSuchRevisionInTree:
                parent_tree = self.branch.repository.revision_tree(parent_id)
            parent_tree.lock_read()
            try:
                if file_id not in parent_tree:
                    continue
                ie = parent_tree.inventory[file_id]
                if ie.kind != 'file':
                    # Note: this is slightly unnecessary, because symlinks and
                    # directories have a "text" which is the empty text, and we
                    # know that won't mess up annotations. But it seems cleaner
                    continue
                parent_text_key = (file_id, ie.revision)
                if parent_text_key not in maybe_file_parent_keys:
                    maybe_file_parent_keys.append(parent_text_key)
            finally:
                parent_tree.unlock()
        graph = _mod_graph.Graph(self.branch.repository.texts)
        heads = graph.heads(maybe_file_parent_keys)
        file_parent_keys = []
        for key in maybe_file_parent_keys:
            if key in heads:
                file_parent_keys.append(key)

        # Now we have the parents of this content
        annotator = self.branch.repository.texts.get_annotator()
        text = self.get_file_text(file_id)
        this_key = (file_id, default_revision)
        annotator.add_special_text(this_key, file_parent_keys, text)
        annotations = [(key[-1], line)
                       for key, line in annotator.annotate_flat(this_key)]
        return annotations
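    # Illustrative sketch (not in the original source): annotate_iter returns
    # (revision_id, line) pairs; lines not yet committed carry
    # default_revision (CURRENT_REVISION). 'wt' and 'file_id' are assumed.
    #
    #     for revision_id, line in wt.annotate_iter(file_id):
    #         print revision_id, line,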
609
def _get_ancestors(self, default_revision):
610
ancestors = set([default_revision])
611
for parent_id in self.get_parent_ids():
612
ancestors.update(self.branch.repository.get_ancestry(
613
parent_id, topo_sorted=False))
    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_bytes = self._transport.get_bytes('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in osutils.split_lines(merges_bytes):
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
        return parents
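    # Illustrative sketch (not in the original source): the parents list is
    # the last revision followed by the ids read from the 'pending-merges'
    # file, one revision id per line. The example ids are made up.
    #
    #     wt.get_parent_ids()
    #     # => ['branch-tip-rev', 'pending-merge-rev-1', 'pending-merge-rev-2']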
638
def get_root_id(self):
639
"""Return the id of this trees root"""
640
return self._inventory.root.file_id
    def clone(self, to_bzrdir, revision_id=None):
        """Duplicate this working tree into to_bzr, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see bzrdir.sprout()

        revision
            If not None, the cloned tree will have its last revision set to
            revision, and difference between the source tree's last revision
            and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = to_bzrdir.create_workingtree()
        self.copy_content_into(result, revision_id)
        return result

    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
        else:
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            merge.transform_tree(tree, self)
            tree.set_parent_ids([revision_id])
    def id2abspath(self, file_id):
        return self.abspath(self.id2path(file_id))

    def has_id(self, file_id):
        # files that have been deleted are excluded
        inv = self.inventory
        if not inv.has_id(file_id):
            return False
        path = inv.id2path(file_id)
        return osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    __contains__ = has_id
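    # Illustrative sketch (not in the original source): because __contains__
    # is bound to has_id, membership tests take file ids, and files deleted
    # from disk are excluded even though they remain in the inventory.
    #
    #     if file_id in wt:          # same as wt.has_id(file_id)
    #         path = wt.id2path(file_id)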
    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        # XXX: this returns the on-disk size; it should probably return the
        # canonical size
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None
    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        if file_id is None:
            # For unversioned files on win32, we just assume they are not
            # executable
            return False
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat
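    # Illustrative sketch (not in the original source): on platforms without
    # an executable bit (supports_executable() is False) the answer comes
    # from the inventory entry; elsewhere it is read from the file mode on
    # disk. 'wt' and 'file_id' are assumed.
    #
    #     if wt.is_executable(file_id):
    #         pass  # e.g. preserve the bit when exporting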
742
@needs_tree_write_lock
743
def _add(self, files, ids, kinds):
744
"""See MutableTree._add."""
745
# TODO: Re-adding a file that is removed in the working copy
746
# should probably put it back with the previous ID.
747
# the read and write working inventory should not occur in this
748
# function - they should be part of lock_write and unlock.
750
for f, file_id, kind in zip(files, ids, kinds):
752
inv.add_path(f, kind=kind)
754
inv.add_path(f, kind=kind, file_id=file_id)
755
self._inventory_is_modified = True
757
@needs_tree_write_lock
758
def _gather_kinds(self, files, kinds):
759
"""See MutableTree._gather_kinds."""
760
for pos, f in enumerate(files):
761
if kinds[pos] is None:
762
fullpath = normpath(self.abspath(f))
764
kinds[pos] = file_kind(fullpath)
766
if e.errno == errno.ENOENT:
767
raise errors.NoSuchFile(fullpath)
770
def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
771
"""Add revision_id as a parent.
773
This is equivalent to retrieving the current list of parent ids
774
and setting the list to its value plus revision_id.
776
:param revision_id: The revision id to add to the parent list. It may
777
be a ghost revision as long as it's not the first parent to be added,
778
or the allow_leftmost_as_ghost parameter is set True.
779
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
781
parents = self.get_parent_ids() + [revision_id]
782
self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
783
or allow_leftmost_as_ghost)
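# Illustrative sketch (not in the original source): recording an extra merge
# parent by id only. 'merged_rev' is a made-up revision id; per the docstring
# this is just a convenience over set_parent_ids().
#
#     wt.add_parent_tree_id('merged_rev')
#     # roughly equivalent to:
#     # wt.set_parent_ids(wt.get_parent_ids() + ['merged_rev'])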
785
@needs_tree_write_lock
786
def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
787
"""Add revision_id, tree tuple as a parent.
789
This is equivalent to retrieving the current list of parent trees
790
and setting the list to its value plus parent_tuple. See also
791
add_parent_tree_id - if you only have a parent id available it will be
792
simpler to use that api. If you have the parent already available, using
793
this api is preferred.
795
:param parent_tuple: The (revision id, tree) to add to the parent list.
796
If the revision_id is a ghost, pass None for the tree.
797
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
799
parent_ids = self.get_parent_ids() + [parent_tuple[0]]
800
if len(parent_ids) > 1:
801
# the leftmost may have already been a ghost, preserve that if it
803
allow_leftmost_as_ghost = True
804
self.set_parent_ids(parent_ids,
805
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
807
@needs_tree_write_lock
808
def add_pending_merge(self, *revision_ids):
809
# TODO: Perhaps should check at this point that the
810
# history of the revision is actually present?
811
parents = self.get_parent_ids()
813
for rev_id in revision_ids:
814
if rev_id in parents:
816
parents.append(rev_id)
819
self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
821
def path_content_summary(self, path, _lstat=os.lstat,
822
_mapper=osutils.file_kind_from_stat_mode):
823
"""See Tree.path_content_summary."""
824
abspath = self.abspath(path)
826
stat_result = _lstat(abspath)
828
if getattr(e, 'errno', None) == errno.ENOENT:
830
return ('missing', None, None, None)
831
# propagate other errors
833
kind = _mapper(stat_result.st_mode)
835
return self._file_content_summary(path, stat_result)
836
elif kind == 'directory':
837
# perhaps it looks like a plain directory, but it's really a
839
if self._directory_is_tree_reference(path):
840
kind = 'tree-reference'
841
return kind, None, None, None
842
elif kind == 'symlink':
843
target = osutils.readlink(abspath)
844
return ('symlink', None, None, target)
846
return (kind, None, None, None)
848
def _file_content_summary(self, path, stat_result):
849
size = stat_result.st_size
850
executable = self._is_executable_from_path_and_stat(path, stat_result)
851
# try for a stat cache lookup
852
return ('file', size, executable, self._sha_from_stat(
855
def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
856
"""Common ghost checking functionality from set_parent_*.
858
This checks that the left hand-parent exists if there are any
861
if len(revision_ids) > 0:
862
leftmost_id = revision_ids[0]
863
if (not allow_leftmost_as_ghost and not
864
self.branch.repository.has_revision(leftmost_id)):
865
raise errors.GhostRevisionUnusableHere(leftmost_id)
867
def _set_merges_from_parent_ids(self, parent_ids):
868
merges = parent_ids[1:]
869
self._transport.put_bytes('pending-merges', '\n'.join(merges),
870
mode=self.bzrdir._get_file_mode())
872
def _filter_parent_ids_by_ancestry(self, revision_ids):
873
"""Check that all merged revisions are proper 'heads'.
875
This will always return the first revision_id, and any merged revisions
878
if len(revision_ids) == 0:
880
graph = self.branch.repository.get_graph()
881
heads = graph.heads(revision_ids)
882
new_revision_ids = revision_ids[:1]
883
for revision_id in revision_ids[1:]:
884
if revision_id in heads and revision_id not in new_revision_ids:
885
new_revision_ids.append(revision_id)
886
if new_revision_ids != revision_ids:
887
mutter('requested to set revision_ids = %s,'
888
' but filtered to %s', revision_ids, new_revision_ids)
889
return new_revision_ids
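# Illustrative sketch (not in the original source): a merged revision that is
# already an ancestor of another parent is not a graph head, so it gets
# filtered out; only the first id plus the heads survive. Ids are made up.
#
#     self._filter_parent_ids_by_ancestry(['tip-rev', 'ancestor-of-tip'])
#     # => ['tip-rev']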
891
@needs_tree_write_lock
892
def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
893
"""Set the parent ids to revision_ids.
895
See also set_parent_trees. This api will try to retrieve the tree data
896
for each element of revision_ids from the trees repository. If you have
897
tree data already available, it is more efficient to use
898
set_parent_trees rather than set_parent_ids. set_parent_ids is however
899
an easier API to use.
901
:param revision_ids: The revision_ids to set as the parent ids of this
902
working tree. Any of these may be ghosts.
904
self._check_parents_for_ghosts(revision_ids,
905
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
906
for revision_id in revision_ids:
907
_mod_revision.check_not_reserved_id(revision_id)
909
revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)
911
if len(revision_ids) > 0:
912
self.set_last_revision(revision_ids[0])
914
self.set_last_revision(_mod_revision.NULL_REVISION)
916
self._set_merges_from_parent_ids(revision_ids)
918
@needs_tree_write_lock
919
def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
920
"""See MutableTree.set_parent_trees."""
921
parent_ids = [rev for (rev, tree) in parents_list]
922
for revision_id in parent_ids:
923
_mod_revision.check_not_reserved_id(revision_id)
925
self._check_parents_for_ghosts(parent_ids,
926
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
928
parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)
930
if len(parent_ids) == 0:
931
leftmost_parent_id = _mod_revision.NULL_REVISION
932
leftmost_parent_tree = None
934
leftmost_parent_id, leftmost_parent_tree = parents_list[0]
936
if self._change_last_revision(leftmost_parent_id):
937
if leftmost_parent_tree is None:
938
# If we don't have a tree, fall back to reading the
939
# parent tree from the repository.
940
self._cache_basis_inventory(leftmost_parent_id)
942
inv = leftmost_parent_tree.inventory
943
xml = self._create_basis_xml_from_inventory(
944
leftmost_parent_id, inv)
945
self._write_basis_inventory(xml)
946
self._set_merges_from_parent_ids(parent_ids)
948
@needs_tree_write_lock
949
def set_pending_merges(self, rev_list):
950
parents = self.get_parent_ids()
951
leftmost = parents[:1]
952
new_parents = leftmost + rev_list
953
self.set_parent_ids(new_parents)
955
@needs_tree_write_lock
956
def set_merge_modified(self, modified_hashes):
958
for file_id, hash in modified_hashes.iteritems():
959
yield _mod_rio.Stanza(file_id=file_id.decode('utf8'),
961
self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
963
def _sha_from_stat(self, path, stat_result):
964
"""Get a sha digest from the tree's stat cache.
966
The default implementation assumes no stat cache is present.
968
:param path: The path.
969
:param stat_result: The stat result being looked up.
973
def _put_rio(self, filename, stanzas, header):
974
self._must_be_locked()
975
my_file = _mod_rio.rio_file(stanzas, header)
976
self._transport.put_file(filename, my_file,
977
mode=self.bzrdir._get_file_mode())
979
@needs_write_lock # because merge pulls data into the branch.
980
def merge_from_branch(self, branch, to_revision=None, from_revision=None,
981
merge_type=None, force=False):
982
"""Merge from a branch into this working tree.
984
:param branch: The branch to merge from.
985
:param to_revision: If non-None, the merge will merge to to_revision,
986
but not beyond it. to_revision does not need to be in the history
987
of the branch when it is supplied. If None, to_revision defaults to
988
branch.last_revision().
990
from bzrlib.merge import Merger, Merge3Merger
991
merger = Merger(self.branch, this_tree=self)
992
# check that there are no local alterations
993
if not force and self.has_changes():
994
raise errors.UncommittedChanges(self)
995
if to_revision is None:
996
to_revision = _mod_revision.ensure_null(branch.last_revision())
997
merger.other_rev_id = to_revision
998
if _mod_revision.is_null(merger.other_rev_id):
999
raise errors.NoCommits(branch)
1000
self.branch.fetch(branch, last_revision=merger.other_rev_id)
1001
merger.other_basis = merger.other_rev_id
1002
merger.other_tree = self.branch.repository.revision_tree(
1003
merger.other_rev_id)
1004
merger.other_branch = branch
1005
if from_revision is None:
1008
merger.set_base_revision(from_revision, branch)
1009
if merger.base_rev_id == merger.other_rev_id:
1010
raise errors.PointlessMerge
1011
merger.backup_files = False
1012
if merge_type is None:
1013
merger.merge_type = Merge3Merger
1015
merger.merge_type = merge_type
1016
merger.set_interesting_files(None)
1017
merger.show_base = False
1018
merger.reprocess = False
1019
conflicts = merger.do_merge()
1020
merger.set_pending()
1024
def merge_modified(self):
1025
"""Return a dictionary of files modified by a merge.
1027
The list is initialized by WorkingTree.set_merge_modified, which is
1028
typically called after we make some automatic updates to the tree
1031
This returns a map of file_id->sha1, containing only files which are
1032
still in the working inventory and have that text hash.
1035
hashfile = self._transport.get('merge-hashes')
1036
except errors.NoSuchFile:
1041
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
1042
raise errors.MergeModifiedFormatError()
1043
except StopIteration:
1044
raise errors.MergeModifiedFormatError()
1045
for s in _mod_rio.RioReader(hashfile):
1046
# RioReader reads in Unicode, so convert file_ids back to utf8
1047
file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
1048
if file_id not in self.inventory:
1050
text_hash = s.get("hash")
1051
if text_hash == self.get_file_sha1(file_id):
1052
merge_hashes[file_id] = text_hash
1058
def mkdir(self, path, file_id=None):
1059
"""See MutableTree.mkdir()."""
1061
file_id = generate_ids.gen_file_id(os.path.basename(path))
1062
os.mkdir(self.abspath(path))
1063
self.add(path, file_id, 'directory')
1066
def get_symlink_target(self, file_id):
1067
abspath = self.id2abspath(file_id)
1068
target = osutils.readlink(abspath)
1072
def subsume(self, other_tree):
1073
def add_children(inventory, entry):
1074
for child_entry in entry.children.values():
1075
inventory._byid[child_entry.file_id] = child_entry
1076
if child_entry.kind == 'directory':
1077
add_children(inventory, child_entry)
1078
if other_tree.get_root_id() == self.get_root_id():
1079
raise errors.BadSubsumeSource(self, other_tree,
1080
'Trees have the same root')
1082
other_tree_path = self.relpath(other_tree.basedir)
1083
except errors.PathNotChild:
1084
raise errors.BadSubsumeSource(self, other_tree,
1085
'Tree is not contained by the other')
1086
new_root_parent = self.path2id(osutils.dirname(other_tree_path))
1087
if new_root_parent is None:
1088
raise errors.BadSubsumeSource(self, other_tree,
1089
'Parent directory is not versioned.')
1090
# We need to ensure that the result of a fetch will have a
1091
# versionedfile for the other_tree root, and only fetching into
1092
# RepositoryKnit2 guarantees that.
1093
if not self.branch.repository.supports_rich_root():
1094
raise errors.SubsumeTargetNeedsUpgrade(other_tree)
1095
other_tree.lock_tree_write()
1097
new_parents = other_tree.get_parent_ids()
1098
other_root = other_tree.inventory.root
1099
other_root.parent_id = new_root_parent
1100
other_root.name = osutils.basename(other_tree_path)
1101
self.inventory.add(other_root)
1102
add_children(self.inventory, other_root)
1103
self._write_inventory(self.inventory)
1104
# normally we don't want to fetch whole repositories, but i think
1105
# here we really do want to consolidate the whole thing.
1106
for parent_id in other_tree.get_parent_ids():
1107
self.branch.fetch(other_tree.branch, parent_id)
1108
self.add_parent_tree_id(parent_id)
1111
other_tree.bzrdir.retire_bzrdir()
1113
def _setup_directory_is_tree_reference(self):
1114
if self._branch.repository._format.supports_tree_reference:
1115
self._directory_is_tree_reference = \
1116
self._directory_may_be_tree_reference
1118
self._directory_is_tree_reference = \
1119
self._directory_is_never_tree_reference
1121
def _directory_is_never_tree_reference(self, relpath):
1124
def _directory_may_be_tree_reference(self, relpath):
1125
# as a special case, if a directory contains control files then
1126
# it's a tree reference, except that the root of the tree is not
1127
return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
1128
# TODO: We could ask all the control formats whether they
1129
# recognize this directory, but at the moment there's no cheap api
1130
# to do that. Since we probably can only nest bzr checkouts and
1131
# they always use this name it's ok for now. -- mbp 20060306
1133
# FIXME: There is an unhandled case here of a subdirectory
1134
# containing .bzr but not a branch; that will probably blow up
1135
# when you try to commit it. It might happen if there is a
1136
# checkout in a subdirectory. This can be avoided by not adding
1139
@needs_tree_write_lock
1140
def extract(self, file_id, format=None):
1141
"""Extract a subtree from this tree.
1143
A new branch will be created, relative to the path for this tree.
1147
segments = osutils.splitpath(path)
1148
transport = self.branch.bzrdir.root_transport
1149
for name in segments:
1150
transport = transport.clone(name)
1151
transport.ensure_base()
1154
sub_path = self.id2path(file_id)
1155
branch_transport = mkdirs(sub_path)
1157
format = self.bzrdir.cloning_metadir()
1158
branch_transport.ensure_base()
1159
branch_bzrdir = format.initialize_on_transport(branch_transport)
1161
repo = branch_bzrdir.find_repository()
1162
except errors.NoRepositoryPresent:
1163
repo = branch_bzrdir.create_repository()
1164
if not repo.supports_rich_root():
1165
raise errors.RootNotRich()
1166
new_branch = branch_bzrdir.create_branch()
1167
new_branch.pull(self.branch)
1168
for parent_id in self.get_parent_ids():
1169
new_branch.fetch(self.branch, parent_id)
1170
tree_transport = self.bzrdir.root_transport.clone(sub_path)
1171
if tree_transport.base != branch_transport.base:
1172
tree_bzrdir = format.initialize_on_transport(tree_transport)
1173
branch.BranchReferenceFormat().initialize(tree_bzrdir,
1174
target_branch=new_branch)
1176
tree_bzrdir = branch_bzrdir
1177
wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
1178
wt.set_parent_ids(self.get_parent_ids())
1179
my_inv = self.inventory
1180
child_inv = inventory.Inventory(root_id=None)
1181
new_root = my_inv[file_id]
1182
my_inv.remove_recursive_id(file_id)
1183
new_root.parent_id = None
1184
child_inv.add(new_root)
1185
self._write_inventory(my_inv)
1186
wt._write_inventory(child_inv)
1189
def _serialize(self, inventory, out_file):
1190
xml5.serializer_v5.write_inventory(self._inventory, out_file,
1193
def _deserialize(self, in_file):
1194
return xml5.serializer_v5.read_inventory(in_file)
1197
"""Write the in memory inventory to disk."""
1198
# TODO: Maybe this should only write on dirty ?
1199
if self._control_files._lock_mode != 'w':
1200
raise errors.NotWriteLocked(self)
1202
self._serialize(self._inventory, sio)
1204
self._transport.put_file('inventory', sio,
1205
mode=self.bzrdir._get_file_mode())
1206
self._inventory_is_modified = False
1208
def _kind(self, relpath):
1209
return osutils.file_kind(self.abspath(relpath))
1211
    def list_files(self, include_root=False, from_dir=None, recursive=True):
        """List all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.
        This does not include files that have been deleted in this
        tree. Skips the control directory.

        :param include_root: if True, return an entry for the root
        :param from_dir: start from this directory or None for the root
        :param recursive: whether to recurse into subdirectories or not
        """
1222
# list_files is an iterator, so @needs_read_lock doesn't work properly
1223
# with it. So callers should be careful to always read_lock the tree.
1224
if not self.is_locked():
1225
raise errors.ObjectNotLocked(self)
130
1227
inv = self.inventory
132
def descend(from_dir_relpath, from_dir_id, dp):
1228
if from_dir is None and include_root is True:
1229
yield ('', 'V', 'directory', inv.root.file_id, inv.root)
1230
# Convert these into local objects to save lookup times
1231
pathjoin = osutils.pathjoin
1232
file_kind = self._kind
1234
# transport.base ends in a slash, we want the piece
1235
# between the last two slashes
1236
transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]
1238
fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
1240
# directory file_id, relative path, absolute path, reverse sorted children
1241
if from_dir is not None:
1242
from_dir_id = inv.path2id(from_dir)
1243
if from_dir_id is None:
1244
# Directory not versioned
1246
from_dir_abspath = pathjoin(self.basedir, from_dir)
1248
from_dir_id = inv.root.file_id
1249
from_dir_abspath = self.basedir
1250
children = os.listdir(from_dir_abspath)
1252
# jam 20060527 The kernel sized tree seems equivalent whether we
1253
# use a deque and popleft to keep them sorted, or if we use a plain
1254
# list and just reverse() them.
1255
children = collections.deque(children)
1256
stack = [(from_dir_id, u'', from_dir_abspath, children)]
1258
from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
1261
f = children.popleft()
136
1262
## TODO: If we find a subdirectory with its own .bzr
137
1263
## directory, then that is a separate tree and we
138
1264
## should exclude it.
139
if bzrlib.BZRDIR == f:
1266
# the bzrdir for this tree
1267
if transport_base_dir == f:
143
fp = appendpath(from_dir_relpath, f)
1270
# we know that from_dir_relpath and from_dir_abspath never end in a slash
1271
# and 'f' doesn't begin with one, we can do a string op, rather
1272
# than the checks of pathjoin(), all relative paths will have an extra slash
1274
fp = from_dir_relpath + '/' + f
146
fap = appendpath(dp, f)
148
f_ie = inv.get_child(from_dir_id, f)
1277
fap = from_dir_abspath + '/' + f
1279
dir_ie = inv[from_dir_id]
1280
if dir_ie.kind == 'directory':
1281
f_ie = dir_ie.children.get(f)
151
elif self.is_ignored(fp):
1286
elif self.is_ignored(fp[1:]):
1289
# we may not have found this file, because of a unicode
1290
# issue, or because the directory was actually a symlink.
1291
f_norm, can_access = osutils.normalized_filename(f)
1292
if f == f_norm or not can_access:
1293
# No change, so treat this file normally
1296
# this file can be accessed by a normalized path
1297
# check again if it is versioned
1298
# these lines are repeated here for performance
1300
fp = from_dir_relpath + '/' + f
1301
fap = from_dir_abspath + '/' + f
1302
f_ie = inv.get_child(from_dir_id, f)
1305
elif self.is_ignored(fp[1:]):
156
1310
fk = file_kind(fap)
1312
# make a last minute entry
160
raise BzrCheckError("file %r entered as kind %r id %r, "
162
% (fap, f_ie.kind, f_ie.file_id, fk))
164
yield fp, c, fk, (f_ie and f_ie.file_id)
1314
yield fp[1:], c, fk, f_ie.file_id, f_ie
1317
yield fp[1:], c, fk, None, fk_entries[fk]()
1319
yield fp[1:], c, fk, None, TreeEntry()
166
1322
if fk != 'directory':
170
# don't descend unversioned directories
173
for ff in descend(fp, f_ie.file_id, fap):
176
for f in descend('', inv.root.file_id, self.basedir):
1325
# But do this child first if recursing down
1327
new_children = os.listdir(fap)
1329
new_children = collections.deque(new_children)
1330
stack.append((f_ie.file_id, fp, fap, new_children))
1331
# Break out of inner loop,
1332
# so that we start outer loop with child
1335
# if we finished all children, pop it off the stack
1338
@needs_tree_write_lock
1339
def move(self, from_paths, to_dir=None, after=False):
1342
to_dir must exist in the inventory.
1344
If to_dir exists and is a directory, the files are moved into
1345
it, keeping their old names.
1347
Note that to_dir is only the last component of the new name;
1348
this doesn't change the directory.
1350
For each entry in from_paths the move mode will be determined
1353
The first mode moves the file in the filesystem and updates the
1354
inventory. The second mode only updates the inventory without
1355
touching the file on the filesystem. This is the new mode introduced
1358
move uses the second mode if 'after == True' and the target is not
1359
versioned but present in the working tree.
1361
move uses the second mode if 'after == False' and the source is
1362
versioned but no longer in the working tree, and the target is not
1363
versioned but present in the working tree.
1365
move uses the first mode if 'after == False' and the source is
1366
versioned and present in the working tree, and the target is not
1367
versioned and not present in the working tree.
1369
Everything else results in an error.
1371
This returns a list of (from_path, to_path) pairs for each
1372
entry that is moved.
1377
# check for deprecated use of signature
1379
raise TypeError('You must supply a target directory')
1380
# check destination directory
1381
if isinstance(from_paths, basestring):
1383
inv = self.inventory
1384
to_abs = self.abspath(to_dir)
1385
if not isdir(to_abs):
1386
raise errors.BzrMoveFailedError('',to_dir,
1387
errors.NotADirectory(to_abs))
1388
if not self.has_filename(to_dir):
1389
raise errors.BzrMoveFailedError('',to_dir,
1390
errors.NotInWorkingDirectory(to_dir))
1391
to_dir_id = inv.path2id(to_dir)
1392
if to_dir_id is None:
1393
raise errors.BzrMoveFailedError('',to_dir,
1394
errors.NotVersionedError(path=to_dir))
1396
to_dir_ie = inv[to_dir_id]
1397
if to_dir_ie.kind != 'directory':
1398
raise errors.BzrMoveFailedError('',to_dir,
1399
errors.NotADirectory(to_abs))
1401
# create rename entries and tuples
1402
for from_rel in from_paths:
1403
from_tail = splitpath(from_rel)[-1]
1404
from_id = inv.path2id(from_rel)
1406
raise errors.BzrMoveFailedError(from_rel,to_dir,
1407
errors.NotVersionedError(path=from_rel))
1409
from_entry = inv[from_id]
1410
from_parent_id = from_entry.parent_id
1411
to_rel = pathjoin(to_dir, from_tail)
1412
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1414
from_tail=from_tail,
1415
from_parent_id=from_parent_id,
1416
to_rel=to_rel, to_tail=from_tail,
1417
to_parent_id=to_dir_id)
1418
rename_entries.append(rename_entry)
1419
rename_tuples.append((from_rel, to_rel))
1421
# determine which move mode to use. checks also for movability
1422
rename_entries = self._determine_mv_mode(rename_entries, after)
1424
original_modified = self._inventory_is_modified
1427
self._inventory_is_modified = True
1428
self._move(rename_entries)
1430
# restore the inventory on error
1431
self._inventory_is_modified = original_modified
1433
self._write_inventory(inv)
1434
return rename_tuples
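# Illustrative sketch (not in the original source): moving two versioned
# files into an existing versioned directory. Paths are made up; the return
# value is the list of (from_path, to_path) pairs described in the docstring.
#
#     wt.move(['a.txt', 'b.txt'], 'dir')
#     # => [('a.txt', 'dir/a.txt'), ('b.txt', 'dir/b.txt')]
#
# With after=True only the inventory is updated, for files that were already
# moved on disk by other means.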
1436
def _determine_mv_mode(self, rename_entries, after=False):
1437
"""Determines for each from-to pair if both inventory and working tree
1438
or only the inventory has to be changed.
1440
Also does basic plausibility tests.
1442
inv = self.inventory
1444
for rename_entry in rename_entries:
1445
# store to local variables for easier reference
1446
from_rel = rename_entry.from_rel
1447
from_id = rename_entry.from_id
1448
to_rel = rename_entry.to_rel
1449
to_id = inv.path2id(to_rel)
1450
only_change_inv = False
1452
# check the inventory for source and destination
1454
raise errors.BzrMoveFailedError(from_rel,to_rel,
1455
errors.NotVersionedError(path=from_rel))
1456
if to_id is not None:
1457
raise errors.BzrMoveFailedError(from_rel,to_rel,
1458
errors.AlreadyVersionedError(path=to_rel))
1460
# try to determine the mode for rename (only change inv or change
1461
# inv and file system)
1463
if not self.has_filename(to_rel):
1464
raise errors.BzrMoveFailedError(from_id,to_rel,
1465
errors.NoSuchFile(path=to_rel,
1466
extra="New file has not been created yet"))
1467
only_change_inv = True
1468
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1469
only_change_inv = True
1470
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1471
only_change_inv = False
1472
elif (not self.case_sensitive
1473
and from_rel.lower() == to_rel.lower()
1474
and self.has_filename(from_rel)):
1475
only_change_inv = False
1477
# something is wrong, so lets determine what exactly
1478
if not self.has_filename(from_rel) and \
1479
not self.has_filename(to_rel):
1480
raise errors.BzrRenameFailedError(from_rel,to_rel,
1481
errors.PathsDoNotExist(paths=(str(from_rel),
1484
raise errors.RenameFailedFilesExist(from_rel, to_rel)
1485
rename_entry.only_change_inv = only_change_inv
1486
return rename_entries
1488
def _move(self, rename_entries):
1489
"""Moves a list of files.
1491
Depending on the value of the flag 'only_change_inv', the
1492
file will be moved on the file system or not.
1494
inv = self.inventory
1497
for entry in rename_entries:
1499
self._move_entry(entry)
1501
self._rollback_move(moved)
1505
def _rollback_move(self, moved):
1506
"""Try to rollback a previous move in case of an filesystem error."""
1507
inv = self.inventory
1510
self._move_entry(WorkingTree._RenameEntry(
1511
entry.to_rel, entry.from_id,
1512
entry.to_tail, entry.to_parent_id, entry.from_rel,
1513
entry.from_tail, entry.from_parent_id,
1514
entry.only_change_inv))
1515
except errors.BzrMoveFailedError, e:
1516
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1517
" The working tree is in an inconsistent state."
1518
" Please consider doing a 'bzr revert'."
1519
" Error message is: %s" % e)
1521
def _move_entry(self, entry):
1522
inv = self.inventory
1523
from_rel_abs = self.abspath(entry.from_rel)
1524
to_rel_abs = self.abspath(entry.to_rel)
1525
if from_rel_abs == to_rel_abs:
1526
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1527
"Source and target are identical.")
1529
if not entry.only_change_inv:
1531
osutils.rename(from_rel_abs, to_rel_abs)
1533
raise errors.BzrMoveFailedError(entry.from_rel,
1535
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1537
@needs_tree_write_lock
1538
def rename_one(self, from_rel, to_rel, after=False):
1541
This can change the directory or the filename or both.
1543
rename_one has several 'modes' to work. First, it can rename a physical
1544
file and change the file_id. That is the normal mode. Second, it can
1545
only change the file_id without touching any physical file. This is
1546
the new mode introduced in version 0.15.
1548
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1549
versioned but present in the working tree.
1551
rename_one uses the second mode if 'after == False' and 'from_rel' is
1552
versioned but no longer in the working tree, and 'to_rel' is not
1553
versioned but present in the working tree.
1555
rename_one uses the first mode if 'after == False' and 'from_rel' is
1556
versioned and present in the working tree, and 'to_rel' is not
1557
versioned and not present in the working tree.
1559
Everything else results in an error.
1561
inv = self.inventory
1564
# create rename entries and tuples
1565
from_tail = splitpath(from_rel)[-1]
1566
from_id = inv.path2id(from_rel)
1568
# if file is missing in the inventory maybe it's in the basis_tree
1569
basis_tree = self.branch.basis_tree()
1570
from_id = basis_tree.path2id(from_rel)
1572
raise errors.BzrRenameFailedError(from_rel,to_rel,
1573
errors.NotVersionedError(path=from_rel))
1574
# put entry back in the inventory so we can rename it
1575
from_entry = basis_tree.inventory[from_id].copy()
1578
from_entry = inv[from_id]
1579
from_parent_id = from_entry.parent_id
1580
to_dir, to_tail = os.path.split(to_rel)
1581
to_dir_id = inv.path2id(to_dir)
1582
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1584
from_tail=from_tail,
1585
from_parent_id=from_parent_id,
1586
to_rel=to_rel, to_tail=to_tail,
1587
to_parent_id=to_dir_id)
1588
rename_entries.append(rename_entry)
1590
# determine which move mode to use. checks also for movability
1591
rename_entries = self._determine_mv_mode(rename_entries, after)
1593
# check if the target changed directory and if the target directory is
1595
if to_dir_id is None:
1596
raise errors.BzrMoveFailedError(from_rel,to_rel,
1597
errors.NotVersionedError(path=to_dir))
1599
# all checks done. now we can continue with our actual work
1600
mutter('rename_one:\n'
1605
' to_dir_id {%s}\n',
1606
from_id, from_rel, to_rel, to_dir, to_dir_id)
1608
self._move(rename_entries)
1609
self._write_inventory(inv)
1611
class _RenameEntry(object):
1612
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1613
to_rel, to_tail, to_parent_id, only_change_inv=False):
1614
self.from_rel = from_rel
1615
self.from_id = from_id
1616
self.from_tail = from_tail
1617
self.from_parent_id = from_parent_id
1618
self.to_rel = to_rel
1619
self.to_tail = to_tail
1620
self.to_parent_id = to_parent_id
1621
self.only_change_inv = only_change_inv
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
        return iter(
            [subp for subp in self.extras() if not self.is_ignored(subp)])
1635
@needs_tree_write_lock
1636
def unversion(self, file_ids):
1637
"""Remove the file ids in file_ids from the current versioned set.
1639
When a file_id is unversioned, all of its children are automatically
1642
:param file_ids: The file ids to stop versioning.
1643
:raises: NoSuchId if any fileid is not currently versioned.
1645
for file_id in file_ids:
1646
if file_id not in self._inventory:
1647
raise errors.NoSuchId(self, file_id)
1648
for file_id in file_ids:
1649
if self._inventory.has_id(file_id):
1650
self._inventory.remove_recursive_id(file_id)
1652
# in the future this should just set a dirty bit to wait for the
1653
# final unlock. However, until all methods of workingtree start
1654
# with the current in-memory inventory rather than triggering
1655
# a read, it is more complex - we need to teach read_inventory
1656
# to know when to read, and when to not read first... and possibly
1657
# to save first when the in memory one may be corrupted.
1658
# so for now, we just only write it if it is indeed dirty.
1660
self._write_inventory(self._inventory)
1663
def pull(self, source, overwrite=False, stop_revision=None,
1664
change_reporter=None, possible_transports=None, local=False,
1668
old_revision_info = self.branch.last_revision_info()
1669
basis_tree = self.basis_tree()
1670
count = self.branch.pull(source, overwrite, stop_revision,
1671
possible_transports=possible_transports,
1673
new_revision_info = self.branch.last_revision_info()
1674
if new_revision_info != old_revision_info:
1675
repository = self.branch.repository
1676
basis_tree.lock_read()
1678
new_basis_tree = self.branch.basis_tree()
1685
change_reporter=change_reporter,
1686
show_base=show_base)
1687
basis_root_id = basis_tree.get_root_id()
1688
new_root_id = new_basis_tree.get_root_id()
1689
if basis_root_id != new_root_id:
1690
self.set_root_id(new_root_id)
1693
# TODO - dedup parents list with things merged by pull ?
1694
# reuse the revisiontree we merged against to set the new
1696
parent_trees = [(self.branch.last_revision(), new_basis_tree)]
1697
# we have to pull the merge trees out again, because
1698
# merge_inner has set the ids. - this corner is not yet
1699
# layered well enough to prevent double handling.
1700
# XXX TODO: Fix the double handling: telling the tree about
1701
# the already known parent data is wasteful.
1702
merges = self.get_parent_ids()[1:]
1703
parent_trees.extend([
1704
(parent, repository.revision_tree(parent)) for
1706
self.set_parent_trees(parent_trees)
1712
def put_file_bytes_non_atomic(self, file_id, bytes):
1713
"""See MutableTree.put_file_bytes_non_atomic."""
1714
stream = file(self.id2abspath(file_id), 'wb')
1719
# TODO: update the hashcache here ?
187
1721
    def extras(self):
        """Yield all unversioned files in this WorkingTree.

        If there are any unversioned directories then only the directory is
        returned, not all its children. But if there are unversioned files
        under a versioned subdirectory, they are returned.

        Currently returned depth-first, sorted by name within directories.
        This is the same order used by 'osutils.walkdirs'.
        """
196
1731
## TODO: Work from given directory downwards
197
from osutils import isdir, appendpath
199
1732
for path, dir_entry in self.inventory.directories():
200
mutter("search for unknowns in %r" % path)
1733
# mutter("search for unknowns in %r", path)
201
1734
dirabs = self.abspath(path)
202
1735
if not isdir(dirabs):
203
1736
# e.g. directory deleted
207
1740
for subf in os.listdir(dirabs):
209
and (subf not in dir_entry.children)):
1741
if self.bzrdir.is_control_filename(subf):
1743
if subf not in dir_entry.children:
1746
can_access) = osutils.normalized_filename(subf)
1747
except UnicodeDecodeError:
1748
path_os_enc = path.encode(osutils._fs_enc)
1749
relpath = path_os_enc + '/' + subf
1750
raise errors.BadFilenameEncoding(relpath,
1752
if subf_norm != subf and can_access:
1753
if subf_norm not in dir_entry.children:
1754
fl.append(subf_norm)
214
subp = appendpath(path, subf)
1760
subp = pathjoin(path, subf)
218
1763
    def ignored_files(self):
        """Yield list of PATH, IGNORE_PATTERN"""
        for subp in self.extras():
            pat = self.is_ignored(subp)
            if pat is not None:
                yield subp, pat

    def get_ignore_list(self):
        """Return list of ignore patterns.

        Cached in the Tree object after the first call.
        """
        ignoreset = getattr(self, '_ignoreset', None)
        if ignoreset is not None:
            return ignoreset

        ignore_globs = set()
        ignore_globs.update(ignores.get_runtime_ignores())
        ignore_globs.update(ignores.get_user_ignores())
        if self.has_filename(bzrlib.IGNORE_FILENAME):
            f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
            try:
                ignore_globs.update(ignores.parse_ignore_file(f))
            finally:
                f.close()
        self._ignoreset = ignore_globs
        return ignore_globs
1791
def _flush_ignore_list_cache(self):
1792
"""Resets the cached ignore list to force a cache rebuild."""
1793
self._ignoreset = None
1794
self._ignoreglobster = None
242
1796
    def is_ignored(self, filename):
        r"""Check whether the filename matches an ignore pattern.

        Patterns containing '/' or '\' need to match the whole path;
        others match against only the last component. Patterns starting
        with '!' are ignore exceptions. Exceptions take precedence
        over regular patterns and cause the filename to not be ignored.

        If the file is ignored, returns the pattern which caused it to
        be ignored, otherwise None. So this can simply be used as a
        boolean if desired."""
        if getattr(self, '_ignoreglobster', None) is None:
            self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
        return self._ignoreglobster.match(filename)
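    # Illustrative sketch (not in the original source): is_ignored() returns
    # the matching pattern (usable as a boolean), and '!' exceptions win over
    # ordinary patterns. The patterns shown are made up.
    #
    #     # with an ignore list of ['*.o', '!keep.o']
    #     wt.is_ignored('hello.o')   # => '*.o'
    #     wt.is_ignored('keep.o')    # => None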
1811
def kind(self, file_id):
1812
return file_kind(self.id2abspath(file_id))
1814
def stored_kind(self, file_id):
1815
"""See Tree.stored_kind"""
1816
return self.inventory[file_id].kind
1818
def _comparison_data(self, entry, path):
1819
abspath = self.abspath(path)
1821
stat_value = os.lstat(abspath)
1823
if getattr(e, 'errno', None) == errno.ENOENT:
1830
mode = stat_value.st_mode
1831
kind = osutils.file_kind_from_stat_mode(mode)
1832
if not supports_executable():
1833
executable = entry is not None and entry.executable
1835
executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
1836
return kind, executable, stat_value
1838
def _file_size(self, entry, stat_value):
1839
return stat_value.st_size
1841
def last_revision(self):
1842
"""Return the last revision of the branch for this tree.
1844
This format tree does not support a separate marker for last-revision
1845
compared to the branch.
1847
See MutableTree.last_revision
1849
return self._last_revision()
1852
def _last_revision(self):
1853
"""helper for get_parent_ids."""
1854
return _mod_revision.ensure_null(self.branch.last_revision())
1856
def is_locked(self):
1857
return self._control_files.is_locked()
1859
def _must_be_locked(self):
1860
if not self.is_locked():
1861
raise errors.ObjectNotLocked(self)
1863
def lock_read(self):
1864
"""Lock the tree for reading.
1866
This also locks the branch, and can be unlocked via self.unlock().
1868
:return: A bzrlib.lock.LogicalLockResult.
1870
if not self.is_locked():
1872
self.branch.lock_read()
1874
self._control_files.lock_read()
1875
return LogicalLockResult(self.unlock)
1877
self.branch.unlock()
1880
def lock_tree_write(self):
1881
"""See MutableTree.lock_tree_write, and WorkingTree.unlock.
1883
:return: A bzrlib.lock.LogicalLockResult.
1885
if not self.is_locked():
1887
self.branch.lock_read()
1889
self._control_files.lock_write()
1890
return LogicalLockResult(self.unlock)
1892
self.branch.unlock()
1895
def lock_write(self):
1896
"""See MutableTree.lock_write, and WorkingTree.unlock.
1898
:return: A bzrlib.lock.LogicalLockResult.
1900
if not self.is_locked():
1902
self.branch.lock_write()
1904
self._control_files.lock_write()
1905
return LogicalLockResult(self.unlock)
1907
self.branch.unlock()
1910
def get_physical_lock_status(self):
1911
return self._control_files.get_physical_lock_status()
1913
def _basis_inventory_name(self):
1914
return 'basis-inventory-cache'
1916
def _reset_data(self):
1917
"""Reset transient data that cannot be revalidated."""
1918
self._inventory_is_modified = False
1919
f = self._transport.get('inventory')
1921
result = self._deserialize(f)
1924
self._set_inventory(result, dirty=False)
1926
@needs_tree_write_lock
1927
def set_last_revision(self, new_revision):
1928
"""Change the last revision in the working tree."""
1929
if self._change_last_revision(new_revision):
1930
self._cache_basis_inventory(new_revision)
1932
def _change_last_revision(self, new_revision):
1933
"""Template method part of set_last_revision to perform the change.
1935
This is used to allow WorkingTree3 instances to not affect branch
1936
when their last revision is set.
1938
if _mod_revision.is_null(new_revision):
1939
self.branch.set_revision_history([])
1942
self.branch.generate_revision_history(new_revision)
1943
except errors.NoSuchRevision:
1944
# not present in the repo - don't try to set it deeper than the tip
1945
self.branch.set_revision_history([new_revision])
1948
def _write_basis_inventory(self, xml):
1949
"""Write the basis inventory XML to the basis-inventory file"""
1950
path = self._basis_inventory_name()
1952
self._transport.put_file(path, sio,
1953
mode=self.bzrdir._get_file_mode())
1955
def _create_basis_xml_from_inventory(self, revision_id, inventory):
1956
"""Create the text that will be saved in basis-inventory"""
1957
inventory.revision_id = revision_id
1958
return xml7.serializer_v7.write_inventory_to_string(inventory)
1960
def _cache_basis_inventory(self, new_revision):
1961
"""Cache new_revision as the basis inventory."""
1962
# TODO: this should allow the ready-to-use inventory to be passed in,
1963
# as commit already has that ready-to-use [while the format is the
1966
# this double handles the inventory - unpack and repack -
1967
# but is easier to understand. We can/should put a conditional
1968
# in here based on whether the inventory is in the latest format
1969
# - perhaps we should repack all inventories on a repository
1971
# the fast path is to copy the raw xml from the repository. If the
1972
# xml contains 'revision_id="', then we assume the right
1973
# revision_id is set. We must check for this full string, because a
1974
# root node id can legitimately look like 'revision_id' but cannot
1976
xml = self.branch.repository._get_inventory_xml(new_revision)
1977
firstline = xml.split('\n', 1)[0]
1978
if (not 'revision_id="' in firstline or
1979
'format="7"' not in firstline):
1980
inv = self.branch.repository._serializer.read_inventory_from_string(
1982
xml = self._create_basis_xml_from_inventory(new_revision, inv)
1983
self._write_basis_inventory(xml)
1984
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1987
def read_basis_inventory(self):
1988
"""Read the cached basis inventory."""
1989
path = self._basis_inventory_name()
1990
return self._transport.get_bytes(path)
1993
def read_working_inventory(self):
1994
"""Read the working inventory.
1996
:raises errors.InventoryModified: read_working_inventory will fail
1997
when the current in memory inventory has been modified.
1999
# conceptually this should be an implementation detail of the tree.
2000
# XXX: Deprecate this.
2001
# ElementTree does its own conversion from UTF-8, so open in
2003
if self._inventory_is_modified:
2004
raise errors.InventoryModified(self)
2005
f = self._transport.get('inventory')
2007
result = self._deserialize(f)
2010
self._set_inventory(result, dirty=False)
2013
@needs_tree_write_lock
2014
def remove(self, files, verbose=False, to_file=None, keep_files=True,
2016
"""Remove nominated files from the working inventory.
2018
:files: File paths relative to the basedir.
2019
:keep_files: If true, the files will also be kept.
2020
:force: Delete files and directories, even if they are changed and
2021
even if the directories are not empty.
2023
if isinstance(files, basestring):
2028
all_files = set() # specified and nested files
2029
unknown_nested_files=set()
2031
to_file = sys.stdout
2033
files_to_backup = []
2035
def recurse_directory_to_add_files(directory):
2036
# Recurse directory and add all files
2037
# so we can check if they have changed.
2038
for parent_info, file_infos in self.walkdirs(directory):
2039
for relpath, basename, kind, lstat, fileid, kind in file_infos:
2040
# Is it versioned or ignored?
2041
if self.path2id(relpath):
2042
# Add nested content for deletion.
2043
all_files.add(relpath)
2045
# Files which are not versioned
2046
# should be treated as unknown.
2047
files_to_backup.append(relpath)
2049
for filename in files:
2050
# Get file name into canonical form.
2051
abspath = self.abspath(filename)
2052
filename = self.relpath(abspath)
2053
if len(filename) > 0:
2054
all_files.add(filename)
2055
recurse_directory_to_add_files(filename)
2057
files = list(all_files)
2060
return # nothing to do
2062
# Sort needed to first handle directory content before the directory
2063
files.sort(reverse=True)
2065
# Bail out if we are going to delete files we shouldn't
2066
if not keep_files and not force:
2067
for (file_id, path, content_change, versioned, parent_id, name,
2068
kind, executable) in self.iter_changes(self.basis_tree(),
2069
include_unchanged=True, require_versioned=False,
2070
want_unversioned=True, specific_files=files):
2071
if versioned[0] == False:
2072
# The record is unknown or newly added
2073
files_to_backup.append(path[1])
2074
elif (content_change and (kind[1] is not None) and
2075
osutils.is_inside_any(files, path[1])):
2076
# Versioned and changed, but not deleted, and still
2077
# in one of the dirs to be deleted.
2078
files_to_backup.append(path[1])
2080
def backup(file_to_backup):
2081
backup_name = self.bzrdir._available_backup_name(file_to_backup)
2082
osutils.rename(abs_path, self.abspath(backup_name))
2083
return "removed %s (but kept a copy: %s)" % (file_to_backup,
2086
# Build inv_delta and delete files where applicable,
2087
# do this before any modifications to inventory.
2089
fid = self.path2id(f)
2092
message = "%s is not versioned." % (f,)
2095
# having removed it, it must be either ignored or unknown
2096
if self.is_ignored(f):
2100
# XXX: Really should be a more abstract reporter interface
2101
kind_ch = osutils.kind_marker(self.kind(fid))
2102
to_file.write(new_status + ' ' + f + kind_ch + '\n')
2104
inv_delta.append((f, None, fid, None))
2105
message = "removed %s" % (f,)
2108
abs_path = self.abspath(f)
2109
if osutils.lexists(abs_path):
2110
if (osutils.isdir(abs_path) and
2111
len(os.listdir(abs_path)) > 0):
2113
osutils.rmtree(abs_path)
2114
message = "deleted %s" % (f,)
2118
if f in files_to_backup:
2121
osutils.delete_any(abs_path)
2122
message = "deleted %s" % (f,)
2123
elif message is not None:
2124
# Only care if we haven't done anything yet.
2125
message = "%s does not exist." % (f,)
2127
# Print only one message (if any) per file.
2128
if message is not None:
2130
self.apply_inventory_delta(inv_delta)
@needs_tree_write_lock
def revert(self, filenames=None, old_tree=None, backups=True,
pb=None, report_changes=False):
from bzrlib.conflicts import resolve
symbol_versioning.warn('Using [] to revert all files is deprecated'
' as of bzr 0.91. Please use None (the default) instead.',
DeprecationWarning, stacklevel=2)
if old_tree is None:
basis_tree = self.basis_tree()
basis_tree.lock_read()
old_tree = basis_tree
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
if filenames is None and len(self.get_parent_ids()) > 1:
last_revision = self.last_revision()
if last_revision != _mod_revision.NULL_REVISION:
if basis_tree is None:
basis_tree = self.basis_tree()
basis_tree.lock_read()
parent_trees.append((last_revision, basis_tree))
self.set_parent_trees(parent_trees)
resolve(self, filenames, ignore_misses=True, recursive=True)
if basis_tree is not None:
def revision_tree(self, revision_id):
"""See Tree.revision_tree.
WorkingTree can supply revision_trees for the basis revision only
because there is only one cached inventory in the bzr directory.
if revision_id == self.last_revision():
xml = self.read_basis_inventory()
except errors.NoSuchFile:
inv = xml7.serializer_v7.read_inventory_from_string(xml)
# dont use the repository revision_tree api because we want
# to supply the inventory.
if inv.revision_id == revision_id:
return revisiontree.RevisionTree(self.branch.repository,
except errors.BadInventoryFormat:
# raise if there was no inventory, or if we read the wrong inventory.
raise errors.NoSuchRevisionInTree(self, revision_id)
# XXX: This method should be deprecated in favour of taking in a proper
# new Inventory object.
@needs_tree_write_lock
def set_inventory(self, new_inventory_list):
from bzrlib.inventory import (Inventory,
inv = Inventory(self.get_root_id())
for path, file_id, parent, kind in new_inventory_list:
name = os.path.basename(path)
# fixme, there should be a factory function inv,add_??
if kind == 'directory':
inv.add(InventoryDirectory(file_id, name, parent))
elif kind == 'file':
inv.add(InventoryFile(file_id, name, parent))
elif kind == 'symlink':
inv.add(InventoryLink(file_id, name, parent))
raise errors.BzrError("unknown kind %r" % kind)
self._write_inventory(inv)
@needs_tree_write_lock
def set_root_id(self, file_id):
"""Set the root id for this tree."""
'WorkingTree.set_root_id with fileid=None')
file_id = osutils.safe_file_id(file_id)
self._set_root_id(file_id)
def _set_root_id(self, file_id):
"""Set the root id for this tree, in a format specific manner.
:param file_id: The file id to assign to the root. It must not be
present in the current inventory or an error will occur. It must
not be None, but rather a valid file id.
inv = self._inventory
orig_root_id = inv.root.file_id
# TODO: it might be nice to exit early if there was nothing
# to do, saving us from triggering a sync on unlock.
self._inventory_is_modified = True
# we preserve the root inventory entry object, but
# unlink it from the byid index
del inv._byid[inv.root.file_id]
inv.root.file_id = file_id
# and link it into the index with the new changed id.
inv._byid[inv.root.file_id] = inv.root
# and finally update all children to reference the new id.
# XXX: this should be safe to just look at the root.children
# list, not the WHOLE INVENTORY.
if entry.parent_id == orig_root_id:
entry.parent_id = inv.root.file_id
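# Rough sketch of the re-rooting effect implemented above, assuming an
# open tree `wt` with its in-memory inventory `inv` (the names and the
# new id are illustrative):
#
#     old_id = inv.root.file_id
#     wt.set_root_id('my-new-root-id')
#     # inv.root is still the same InventoryEntry object, now indexed
#     # under the new id, and each direct child's parent_id now points
#     # at 'my-new-root-id'.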
"""See Branch.unlock.
WorkingTree locking just uses the Branch locking facilities.
This is current because all working trees have an embedded branch
within them. If, in the future, we were to make branch data shareable
between multiple working trees, i.e. via shared storage, then we
would probably want to lock both the local tree, and the branch.
raise NotImplementedError(self.unlock)
def update(self, change_reporter=None, possible_transports=None,
revision=None, old_tip=_marker, show_base=False):
"""Update a working tree along its branch.
This will update the branch if it's bound too, which means we have
multiple trees involved:
- The new basis tree of the master.
- The old basis tree of the branch.
- The old basis tree of the working tree.
- The current working tree state.
Pathologically, all of these may be different, and non-ancestors of each
other. Conceptually we want to:
- Preserve the wt.basis->wt.state changes
- Transform the wt.basis to the new master basis.
- Apply a merge of the old branch basis to get any 'local' changes from
- Restore the wt.basis->wt.state changes.
There isn't a single operation at the moment to do that, so we:
- Merge current state -> basis tree of the master w.r.t. the old tree
- Do a 'normal' merge of the old branch basis if it is relevant.
:param revision: The target revision to update to. Must be in the
:param old_tip: If branch.update() has already been run, the value it
returned (old tip of the branch or None). _marker is used
if self.branch.get_bound_location() is not None:
update_branch = (old_tip is self._marker)
self.lock_tree_write()
update_branch = False
old_tip = self.branch.update(possible_transports)
if old_tip is self._marker:
return self._update_tree(old_tip, change_reporter, revision, show_base)
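# Minimal sketch of driving the update flow above from a bound checkout
# (the variable names are illustrative, and it is assumed here that
# update() reports the number of merge conflicts):
#
#     wt = WorkingTree.open('.')
#     conflicts = wt.update()
#     if conflicts:
#         print 'Rerun update after fixing the conflicts.'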
@needs_tree_write_lock
def _update_tree(self, old_tip=None, change_reporter=None, revision=None,
"""Update a tree to the master branch.
:param old_tip: if supplied, the previous tip revision of the branch,
before it was changed to the master branch's tip.
# here if old_tip is not None, it is the old tip of the branch before
# it was updated from the master branch. This should become a pending
# merge in the working tree to preserve the user's existing work. we
# cant set that until we update the working trees last revision to be
# one from the new branch, because it will just get absorbed by the
# parent de-duplication logic.
# We MUST save it even if an error occurs, because otherwise the user's
# local work is unreferenced and will appear to have been lost.
last_rev = self.get_parent_ids()[0]
last_rev = _mod_revision.NULL_REVISION
if revision is None:
revision = self.branch.last_revision()
old_tip = old_tip or _mod_revision.NULL_REVISION
if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
# the branch we are bound to was updated
# merge those changes in first
base_tree = self.basis_tree()
other_tree = self.branch.repository.revision_tree(old_tip)
nb_conflicts = merge.merge_inner(self.branch, other_tree,
base_tree, this_tree=self,
change_reporter=change_reporter,
show_base=show_base)
self.add_parent_tree((old_tip, other_tree))
note('Rerun update after fixing the conflicts.')
if last_rev != _mod_revision.ensure_null(revision):
# the working tree is up to date with the branch
# we can merge the specified revision from master
to_tree = self.branch.repository.revision_tree(revision)
to_root_id = to_tree.get_root_id()
basis = self.basis_tree()
if (basis.inventory.root is None
or basis.inventory.root.file_id != to_root_id):
self.set_root_id(to_root_id)
# determine the branch point
graph = self.branch.repository.get_graph()
base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
base_tree = self.branch.repository.revision_tree(base_rev_id)
nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
change_reporter=change_reporter,
show_base=show_base)
self.set_last_revision(revision)
# TODO - dedup parents list with things merged by pull ?
# reuse the tree we've updated to to set the basis:
parent_trees = [(revision, to_tree)]
merges = self.get_parent_ids()[1:]
# Ideally we ask the tree for the trees here, that way the working
# tree can decide whether to give us the entire tree or give us a
# lazy initialised tree. dirstate for instance will have the trees
# in ram already, whereas a last-revision + basis-inventory tree
# will not, but also does not need them when setting parents.
for parent in merges:
parent_trees.append(
(parent, self.branch.repository.revision_tree(parent)))
if not _mod_revision.is_null(old_tip):
parent_trees.append(
(old_tip, self.branch.repository.revision_tree(old_tip)))
self.set_parent_trees(parent_trees)
last_rev = parent_trees[0][0]
def _write_hashcache_if_dirty(self):
"""Write out the hashcache if it is dirty."""
if self._hashcache.needs_write:
self._hashcache.write()
if e.errno not in (errno.EPERM, errno.EACCES):
# TODO: jam 20061219 Should this be a warning? A single line
# warning might be sufficient to let the user know what
mutter('Could not write hashcache for %s\nError: %s',
self._hashcache.cache_file_name(), e)
@needs_tree_write_lock
def _write_inventory(self, inv):
"""Write inventory as the current inventory."""
self._set_inventory(inv, dirty=True)
def set_conflicts(self, arg):
raise errors.UnsupportedOperation(self.set_conflicts, self)
def add_conflicts(self, arg):
raise errors.UnsupportedOperation(self.add_conflicts, self)
def conflicts(self):
raise NotImplementedError(self.conflicts)
def walkdirs(self, prefix=""):
"""Walk the directories of this tree.
returns a generator which yields items in the form:
((current_directory_path, fileid),
[(file1_path, file1_name, file1_kind, (lstat), file1_id,
This API returns a generator, which is only valid during the current
tree transaction - within a single lock_read or lock_write duration.
If the tree is not locked, it may cause an error to be raised,
depending on the tree implementation.
disk_top = self.abspath(prefix)
if disk_top.endswith('/'):
disk_top = disk_top[:-1]
top_strip_len = len(disk_top) + 1
inventory_iterator = self._walkdirs(prefix)
disk_iterator = osutils.walkdirs(disk_top, prefix)
current_disk = disk_iterator.next()
disk_finished = False
if not (e.errno == errno.ENOENT or
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
disk_finished = True
current_inv = inventory_iterator.next()
inv_finished = False
except StopIteration:
while not inv_finished or not disk_finished:
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
cur_disk_dir_content) = current_disk
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
cur_disk_dir_content) = ((None, None), None)
if not disk_finished:
# strip out .bzr dirs
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
len(cur_disk_dir_content) > 0):
# osutils.walkdirs can be made nicer -
# yield the path-from-prefix rather than the pathjoined
bzrdir_loc = bisect_left(cur_disk_dir_content,
if (bzrdir_loc < len(cur_disk_dir_content)
and self.bzrdir.is_control_filename(
cur_disk_dir_content[bzrdir_loc][0])):
# we dont yield the contents of, or, .bzr itself.
del cur_disk_dir_content[bzrdir_loc]
# everything is unknown
# everything is missing
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
# disk is before inventory - unknown
dirblock = [(relpath, basename, kind, stat, None, None) for
relpath, basename, kind, stat, top_path in
cur_disk_dir_content]
yield (cur_disk_dir_relpath, None), dirblock
current_disk = disk_iterator.next()
except StopIteration:
disk_finished = True
# inventory is before disk - missing.
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
for relpath, basename, dkind, stat, fileid, kind in
yield (current_inv[0][0], current_inv[0][1]), dirblock
current_inv = inventory_iterator.next()
except StopIteration:
# versioned present directory
# merge the inventory and disk data together
for relpath, subiterator in itertools.groupby(sorted(
current_inv[1] + cur_disk_dir_content,
key=operator.itemgetter(0)), operator.itemgetter(1)):
path_elements = list(subiterator)
if len(path_elements) == 2:
inv_row, disk_row = path_elements
# versioned, present file
dirblock.append((inv_row[0],
inv_row[1], disk_row[2],
disk_row[3], inv_row[4],
elif len(path_elements[0]) == 5:
dirblock.append((path_elements[0][0],
path_elements[0][1], path_elements[0][2],
path_elements[0][3], None, None))
elif len(path_elements[0]) == 6:
# versioned, absent file.
dirblock.append((path_elements[0][0],
path_elements[0][1], 'unknown', None,
path_elements[0][4], path_elements[0][5]))
raise NotImplementedError('unreachable code')
yield current_inv[0], dirblock
current_inv = inventory_iterator.next()
except StopIteration:
current_disk = disk_iterator.next()
except StopIteration:
disk_finished = True
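# Sketch of consuming walkdirs(), following the tuple shape described in
# the docstring above; `wt` and the printed fields are illustrative:
#
#     wt.lock_read()
#     try:
#         for (dir_relpath, dir_file_id), dirblock in wt.walkdirs(''):
#             for relpath, name, kind, lstat, file_id, versioned_kind in dirblock:
#                 print relpath, kind, file_id
#     finally:
#         wt.unlock()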
def _walkdirs(self, prefix=""):
"""Walk the directories of this tree.
:prefix: is used as the directory to start with.
returns a generator which yields items in the form:
((current_directory_path, fileid),
[(file1_path, file1_name, file1_kind, None, file1_id,
_directory = 'directory'
# get the root in the inventory
inv = self.inventory
top_id = inv.path2id(prefix)
pending = [(prefix, '', _directory, None, top_id, None)]
currentdir = pending.pop()
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
top_id = currentdir[4]
relroot = currentdir[0] + '/'
# FIXME: stash the node in pending
if entry.kind == 'directory':
for name, child in entry.sorted_children():
dirblock.append((relroot + name, name, child.kind, None,
child.file_id, child.kind
yield (currentdir[0], entry.file_id), dirblock
# push the user specified dirs from dirblock
for dir in reversed(dirblock):
if dir[2] == _directory:
@needs_tree_write_lock
def auto_resolve(self):
"""Automatically resolve text conflicts according to contents.
Only text conflicts are auto_resolvable. Files with no conflict markers
are considered 'resolved', because bzr always puts conflict markers
into files that have text conflicts. The corresponding .THIS .BASE and
.OTHER files are deleted, as per 'resolve'.
:return: a tuple of ConflictLists: (un_resolved, resolved).
un_resolved = _mod_conflicts.ConflictList()
resolved = _mod_conflicts.ConflictList()
conflict_re = re.compile('^(<{7}|={7}|>{7})')
for conflict in self.conflicts():
if (conflict.typestring != 'text conflict' or
self.kind(conflict.file_id) != 'file'):
un_resolved.append(conflict)
my_file = open(self.id2abspath(conflict.file_id), 'rb')
for line in my_file:
if conflict_re.search(line):
un_resolved.append(conflict)
break
else:
resolved.append(conflict)
resolved.remove_files(self)
self.set_conflicts(un_resolved)
return un_resolved, resolved
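# Minimal sketch of using auto_resolve(), assuming a locked tree `wt`
# with recorded text conflicts (names are illustrative):
#
#     un_resolved, resolved = wt.auto_resolve()
#     # entries in `resolved` had no <<<<<<< / ======= / >>>>>>> markers
#     # left, so their .THIS/.BASE/.OTHER files were removed and only the
#     # remaining conflicts were written back via set_conflicts().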
def _check(self, references):
"""Check the tree for consistency.
:param references: A dict with keys matching the items returned by
self._get_check_refs(), and values from looking those keys up in
tree_basis = self.basis_tree()
tree_basis.lock_read()
repo_basis = references[('trees', self.last_revision())]
if len(list(repo_basis.iter_changes(tree_basis))) > 0:
raise errors.BzrCheckError(
"Mismatched basis inventory content.")
def _validate(self):
"""Validate internal structures.
This is meant mostly for the test suite. To give it a chance to detect
corruption after actions have occurred. The default implementation is a
:return: None. An exception should be raised if there is an error.
def check_state(self):
"""Check that the working state is/isn't valid."""
check_refs = self._get_check_refs()
for ref in check_refs:
refs[ref] = self.branch.repository.revision_tree(value)
@needs_tree_write_lock
def reset_state(self, revision_ids=None):
"""Reset the state of the working tree.
This does a hard-reset to a last-known-good state. This is a way to
fix if something got corrupted (like the .bzr/checkout/dirstate file)
if revision_ids is None:
revision_ids = self.get_parent_ids()
if not revision_ids:
rt = self.branch.repository.revision_tree(
_mod_revision.NULL_REVISION)
rt = self.branch.repository.revision_tree(revision_ids[0])
self._write_inventory(rt.inventory)
self.set_parent_ids(revision_ids)
def _get_rules_searcher(self, default_searcher):
"""See Tree._get_rules_searcher."""
if self._rules_searcher is None:
self._rules_searcher = super(WorkingTree,
self)._get_rules_searcher(default_searcher)
return self._rules_searcher
def get_shelf_manager(self):
"""Return the ShelfManager for this WorkingTree."""
from bzrlib.shelf import ShelfManager
return ShelfManager(self, self._transport)
class WorkingTree3(WorkingTree):
"""This is the Format 3 working tree.
This differs from the base WorkingTree by:
- having its own file lock
- having its own last-revision property.
This is new in bzr 0.8
def _last_revision(self):
"""See Mutable.last_revision."""
return self._transport.get_bytes('last-revision')
except errors.NoSuchFile:
return _mod_revision.NULL_REVISION
def _change_last_revision(self, revision_id):
"""See WorkingTree._change_last_revision."""
if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
self._transport.delete('last-revision')
except errors.NoSuchFile:
self._transport.put_bytes('last-revision', revision_id,
mode=self.bzrdir._get_file_mode())
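# The two methods above define this format's storage convention: the
# single last revision lives verbatim in a 'last-revision' file on the
# checkout transport, and deleting that file stands for NULL_REVISION.
# Rough round-trip sketch (the revision id is illustrative):
#
#     wt._change_last_revision('some-revision-id')
#     assert wt._last_revision() == 'some-revision-id'
#     wt._change_last_revision(_mod_revision.NULL_REVISION)
#     assert wt._last_revision() == _mod_revision.NULL_REVISION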
def _get_check_refs(self):
"""Return the references needed to perform a check of this tree."""
return [('trees', self.last_revision())]
@needs_tree_write_lock
def set_conflicts(self, conflicts):
self._put_rio('conflicts', conflicts.to_stanzas(),
@needs_tree_write_lock
def add_conflicts(self, new_conflicts):
conflict_set = set(self.conflicts())
conflict_set.update(set(list(new_conflicts)))
self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
key=_mod_conflicts.Conflict.sort_key)))
def conflicts(self):
confile = self._transport.get('conflicts')
except errors.NoSuchFile:
return _mod_conflicts.ConflictList()
if confile.next() != CONFLICT_HEADER_1 + '\n':
raise errors.ConflictFormatError()
except StopIteration:
raise errors.ConflictFormatError()
reader = _mod_rio.RioReader(confile)
return _mod_conflicts.ConflictList.from_stanzas(reader)
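# The 'conflicts' control file parsed above starts with CONFLICT_HEADER_1
# followed by one rio stanza per conflict. A rough idea of the layout
# that set_conflicts()/conflicts() round-trip (the header text and field
# values shown here are assumptions, not captured output):
#
#     BZR conflict list format 1
#     type: text conflict
#     path: README
#     file_id: readme-xxxxxxxx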
# do non-implementation specific cleanup
if self._control_files._lock_count == 1:
# _inventory_is_modified is always False during a read lock.
if self._inventory_is_modified:
self._write_hashcache_if_dirty()
# reverse order of locking.
return self._control_files.unlock()
self.branch.unlock()
class WorkingTreeFormatRegistry(controldir.ControlComponentFormatRegistry):
"""Registry for working tree formats."""
def __init__(self, other_registry=None):
super(WorkingTreeFormatRegistry, self).__init__(other_registry)
self._default_format = None
def get_default(self):
"""Return the current default format."""
return self._default_format
def set_default(self, format):
self._default_format = format
format_registry = WorkingTreeFormatRegistry()
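# Sketch of how this module-level registry is used; it mirrors the real
# registrations at the bottom of this file (the format string below is a
# shortened placeholder, not a registered format):
#
#     format_registry.register_lazy("Some Working Tree Format\n",
#         "bzrlib.workingtree_4", "WorkingTreeFormat6")
#     format_registry.set_default(WorkingTreeFormat6())
#     current_default = format_registry.get_default()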
class WorkingTreeFormat(controldir.ControlComponentFormat):
"""An encapsulation of the initialization and open routines for a format.
Formats provide three things:
* An initialization routine,
Formats are placed in a dict by their format string for reference
during workingtree opening. It's not required that these be instances, they
can be classes themselves with class methods - it simply depends on
whether state is needed for a given format or not.
Once a format is deprecated, just deprecate the initialize and open
methods on the format class. Do not deprecate the object, as the
object will be created every time regardless.
requires_rich_root = False
upgrade_recommended = False
requires_normalized_unicode_filenames = False
case_sensitive_filename = "FoRMaT"
missing_parent_conflicts = False
"""If this format supports missing parent conflicts."""
def find_format(klass, a_bzrdir):
"""Return the format for the working tree object in a_bzrdir."""
transport = a_bzrdir.get_workingtree_transport(None)
format_string = transport.get_bytes("format")
return format_registry.get(format_string)
except errors.NoSuchFile:
raise errors.NoWorkingTree(base=transport.base)
raise errors.UnknownFormatError(format=format_string,
kind="working tree")
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
accelerator_tree=None, hardlink=False):
"""Initialize a new working tree in a_bzrdir.
:param a_bzrdir: BzrDir to initialize the working tree in.
:param revision_id: allows creating a working tree at a different
revision than the branch is at.
:param from_branch: Branch to checkout
:param accelerator_tree: A tree which can be used for retrieving file
contents more quickly than the revision tree, i.e. a workingtree.
The revision tree will be used for cases where accelerator_tree's
content is different.
:param hardlink: If true, hard-link files from accelerator_tree,
raise NotImplementedError(self.initialize)
def __eq__(self, other):
return self.__class__ is other.__class__
def __ne__(self, other):
return not (self == other)
@symbol_versioning.deprecated_method(
symbol_versioning.deprecated_in((2, 4, 0)))
def get_default_format(klass):
"""Return the current default format."""
return format_registry.get_default()
def get_format_string(self):
"""Return the ASCII format string that identifies this format."""
raise NotImplementedError(self.get_format_string)
def get_format_description(self):
"""Return the short description for this format."""
raise NotImplementedError(self.get_format_description)
def is_supported(self):
"""Is this format supported?
Supported formats can be initialized and opened.
Unsupported formats may not support initialization or committing or
some other features depending on the reason for not being supported.
def supports_content_filtering(self):
"""True if this format supports content filtering."""
def supports_views(self):
"""True if this format supports stored views."""
@symbol_versioning.deprecated_method(
symbol_versioning.deprecated_in((2, 4, 0)))
def register_format(klass, format):
format_registry.register(format)
@symbol_versioning.deprecated_method(
symbol_versioning.deprecated_in((2, 4, 0)))
def register_extra_format(klass, format):
format_registry.register_extra(format)
@symbol_versioning.deprecated_method(
symbol_versioning.deprecated_in((2, 4, 0)))
def unregister_extra_format(klass, format):
format_registry.unregister_extra(format)
@symbol_versioning.deprecated_method(
symbol_versioning.deprecated_in((2, 4, 0)))
def get_formats(klass):
return format_registry._get_all()
@symbol_versioning.deprecated_method(
symbol_versioning.deprecated_in((2, 4, 0)))
def set_default_format(klass, format):
format_registry.set_default(format)
@symbol_versioning.deprecated_method(
symbol_versioning.deprecated_in((2, 4, 0)))
def unregister_format(klass, format):
format_registry.remove(format)
class WorkingTreeFormat3(WorkingTreeFormat):
"""The second working tree format updated to record a format marker.
- exists within a metadir controlling .bzr
- includes an explicit version marker for the workingtree control
files, separate from the BzrDir format
- modifies the hash cache format
- uses a LockDir to guard access for writes.
upgrade_recommended = True
missing_parent_conflicts = True
def get_format_string(self):
"""See WorkingTreeFormat.get_format_string()."""
return "Bazaar-NG Working Tree format 3"
def get_format_description(self):
"""See WorkingTreeFormat.get_format_description()."""
return "Working tree format 3"
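# The string returned by get_format_string() is what find_format() reads
# back from the tree's 'format' control file, so opening a format 3
# checkout resolves roughly like this (sketch; `transport` is assumed to
# be the workingtree transport):
#
#     format_string = transport.get_bytes("format")
#     format = format_registry.get(format_string)
#     # -> the WorkingTreeFormat3 instance registered at the module bottom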
_lock_file_name = 'lock'
_lock_class = LockDir
_tree_class = WorkingTree3
def __get_matchingbzrdir(self):
return bzrdir.BzrDirMetaFormat1()
_matchingbzrdir = property(__get_matchingbzrdir)
def _open_control_files(self, a_bzrdir):
transport = a_bzrdir.get_workingtree_transport(None)
return LockableFiles(transport, self._lock_file_name,
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
accelerator_tree=None, hardlink=False):
"""See WorkingTreeFormat.initialize().
:param revision_id: if supplied, create a working tree at a different
revision than the branch is at.
:param accelerator_tree: A tree which can be used for retrieving file
contents more quickly than the revision tree, i.e. a workingtree.
The revision tree will be used for cases where accelerator_tree's
content is different.
:param hardlink: If true, hard-link files from accelerator_tree,
if not isinstance(a_bzrdir.transport, LocalTransport):
raise errors.NotLocalUrl(a_bzrdir.transport.base)
transport = a_bzrdir.get_workingtree_transport(self)
control_files = self._open_control_files(a_bzrdir)
control_files.create_lock()
control_files.lock_write()
transport.put_bytes('format', self.get_format_string(),
mode=a_bzrdir._get_file_mode())
if from_branch is not None:
branch = from_branch
branch = a_bzrdir.open_branch()
if revision_id is None:
revision_id = _mod_revision.ensure_null(branch.last_revision())
# WorkingTree3 can handle an inventory which has a unique root id.
# as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
# those trees. And because there isn't a format bump inbetween, we
# are maintaining compatibility with older clients.
# inv = Inventory(root_id=gen_root_id())
inv = self._initial_inventory()
wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
_control_files=control_files)
wt.lock_tree_write()
basis_tree = branch.repository.revision_tree(revision_id)
# only set an explicit root id if there is one to set.
if basis_tree.inventory.root is not None:
wt.set_root_id(basis_tree.get_root_id())
if revision_id == _mod_revision.NULL_REVISION:
wt.set_parent_trees([])
else:
wt.set_parent_trees([(revision_id, basis_tree)])
transform.build_tree(basis_tree, wt)
# Unlock in this order so that the unlock-triggers-flush in
# WorkingTree is given a chance to fire.
control_files.unlock()
def _initial_inventory(self):
return inventory.Inventory()
super(WorkingTreeFormat3, self).__init__()
def open(self, a_bzrdir, _found=False):
"""Return the WorkingTree object for a_bzrdir
_found is a private parameter, do not use it. It is used to indicate
if format probing has already been done.
# we are being called directly and must probe.
raise NotImplementedError
if not isinstance(a_bzrdir.transport, LocalTransport):
raise errors.NotLocalUrl(a_bzrdir.transport.base)
wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
def _open(self, a_bzrdir, control_files):
"""Open the tree itself.
:param a_bzrdir: the dir for the tree.
:param control_files: the control files for the tree.
return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
_control_files=control_files)
return self.get_format_string()
__default_format = WorkingTreeFormat6()
format_registry.register_lazy("Bazaar Working Tree Format 4 (bzr 0.15)\n",
"bzrlib.workingtree_4", "WorkingTreeFormat4")
format_registry.register_lazy("Bazaar Working Tree Format 5 (bzr 1.11)\n",
"bzrlib.workingtree_4", "WorkingTreeFormat5")
format_registry.register_lazy("Bazaar Working Tree Format 6 (bzr 1.14)\n",
"bzrlib.workingtree_4", "WorkingTreeFormat6")
format_registry.register(WorkingTreeFormat3())
format_registry.set_default(__default_format)
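# Illustrative sketch of the effect of the registrations above (`a_bzrdir`
# is an assumed control directory object; output not captured):
#
#     default_format = format_registry.get_default()
#     # -> the WorkingTreeFormat6 instance assigned to __default_format
#     fmt = WorkingTreeFormat.find_format(a_bzrdir)
#     print fmt.get_format_description()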