        and the working file exists.
        """
        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):
                yield ie.file_id

    def all_file_ids(self):
        """See Tree.iter_all_file_ids"""
        return set(self.inventory)
    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)
    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                _mod_revision.NULL_REVISION)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        try:
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, it's a problem, if it's
            # not, it's a ghost.
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                _mod_revision.NULL_REVISION)

    def _cleanup(self):
        self._flush_ignore_list_cache()
    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If it is a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))
    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

    def get_file_with_stat(self, file_id, path=None, filtered=True,
                           _fstat=os.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        my_file = self.get_file(file_id, path=path, filtered=filtered)
        try:
            return my_file.read()
        finally:
            my_file.close()

    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()
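    # Usage sketch (illustrative, not part of this module): reading a
    # versioned file's text by path; 'README' is a hypothetical file name.
    #
    #   from bzrlib.workingtree import WorkingTree
    #   tree = WorkingTree.open('.')
    #   tree.lock_read()
    #   try:
    #       text = tree.get_file_text(tree.path2id('README'))
    #   finally:
    #       tree.unlock()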
    def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
        """See Tree.annotate_iter

        This implementation will use the basis tree implementation if possible.
        Lines not in the basis are attributed to CURRENT_REVISION

        If there are pending merges, lines added by those merges will be
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        maybe_file_parent_keys = []
        for parent_id in self.get_parent_ids():
            try:
                parent_tree = self.revision_tree(parent_id)
            except errors.NoSuchRevisionInTree:
                parent_tree = self.branch.repository.revision_tree(parent_id)
            parent_tree.lock_read()
            try:
                if file_id not in parent_tree:
                    continue
                ie = parent_tree.inventory[file_id]
                if ie.kind != 'file':
                    # Note: this is slightly unnecessary, because symlinks and
                    # directories have a "text" which is the empty text, and we
                    # know that won't mess up annotations. But it seems cleaner
                    continue
                parent_text_key = (file_id, ie.revision)
                if parent_text_key not in maybe_file_parent_keys:
                    maybe_file_parent_keys.append(parent_text_key)
            finally:
                parent_tree.unlock()
        graph = _mod_graph.Graph(self.branch.repository.texts)
        heads = graph.heads(maybe_file_parent_keys)
        file_parent_keys = []
        for key in maybe_file_parent_keys:
            if key in heads:
                file_parent_keys.append(key)

        # Now we have the parents of this content
        annotator = self.branch.repository.texts.get_annotator()
        text = self.get_file_text(file_id)
        this_key = (file_id, default_revision)
        annotator.add_special_text(this_key, file_parent_keys, text)
        annotations = [(key[-1], line)
                       for key, line in annotator.annotate_flat(this_key)]
        return annotations
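    # Usage sketch (illustrative): annotate_iter yields (revision_id, line)
    # pairs; uncommitted lines carry default_revision (CURRENT_REVISION).
    #
    #   for revision_id, line in tree.annotate_iter(file_id):
    #       print revision_id, line,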
    def _get_ancestors(self, default_revision):
        ancestors = set([default_revision])
        for parent_id in self.get_parent_ids():
            ancestors.update(self.branch.repository.get_ancestry(
                             parent_id, topo_sorted=False))
        return ancestors
    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_bytes = self._transport.get_bytes('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in osutils.split_lines(merges_bytes):
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
        return parents

    def get_root_id(self):
        """Return the id of this tree's root"""
        return self._inventory.root.file_id
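    # Usage sketch (illustrative): the first parent id is the basis revision,
    # any further ids are pending merges.
    #
    #   parents = tree.get_parent_ids()
    #   if parents:
    #       basis_id, pending_merges = parents[0], parents[1:]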
    def _get_store_filename(self, file_id):
        ## XXX: badly named; this is not in the store at all
        return self.abspath(self.id2path(file_id))
    def clone(self, to_bzrdir, revision_id=None):
        """Duplicate this working tree into to_bzr, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see bzrdir.sprout()

        If not None, the cloned tree will have its last revision set to
        revision, and difference between the source tree's last revision
        and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = to_bzrdir.create_workingtree()
        self.copy_content_into(result, revision_id)
        return result

    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
        else:
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            merge.transform_tree(tree, self)
            tree.set_parent_ids([revision_id])

    def id2abspath(self, file_id):
        return self.abspath(self.id2path(file_id))
    def has_id(self, file_id):
        # files that have been deleted are excluded
        inv = self.inventory
        if not inv.has_id(file_id):
            return False
        path = inv.id2path(file_id)
        return osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    __contains__ = has_id
    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        # XXX: this returns the on-disk size; it should probably return the
        # canonical size
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def file_class(self, filename):
        if self.path2id(filename):
            return 'V'
        elif self.is_ignored(filename):
            return 'I'
        else:
            return '?'
    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        if file_id is None:
            # For unversioned files on win32, we just assume they are not
            # executable
            return False
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat
    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.inventory
        for f, file_id, kind in zip(files, ids, kinds):
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                inv.add_path(f, kind=kind, file_id=file_id)
            self._inventory_is_modified = True

    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it's not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)
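    # Usage sketch (illustrative): recording an additional parent after a
    # hand-done merge; 'other_rev_id' is a hypothetical revision id that has
    # already been fetched into this branch's repository.
    #
    #   tree.add_parent_tree_id(other_rev_id)
    #   # shorthand for:
    #   # tree.set_parent_ids(tree.get_parent_ids() + [other_rev_id])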
    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available,
        using this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
        self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file.
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            return self._file_content_summary(path, stat_result)
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # tree reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            target = osutils.readlink(abspath)
            return ('symlink', None, None, target)
        else:
            return (kind, None, None, None)
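    # Usage sketch (illustrative): the summary 4-tuple is
    # (kind, size, executable, sha1_or_link_target).
    #
    #   kind, size, executable, sha1 = tree.path_content_summary('setup.py')
    #   # e.g. ('file', 1024, False, None) -- sha1 is None unless cached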
    def _file_content_summary(self, path, stat_result):
        size = stat_result.st_size
        executable = self._is_executable_from_path_and_stat(path, stat_result)
        # try for a stat cache lookup
        return ('file', size, executable, self._sha_from_stat(
            path, stat_result))
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self.bzrdir._get_file_mode())
    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        which are not ancestors of another parent.
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids
    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the tree's repository. If you
        have tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)
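    # Usage sketch (illustrative): dropping pending merges by resetting the
    # parents to just the branch tip (assumes the branch has at least one
    # commit and the tree holds a write lock).
    #
    #   tree.set_parent_ids([tree.branch.last_revision()])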
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                                        leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)
    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._transport.put_file(filename, my_file,
            mode=self.bzrdir._get_file_mode())
    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
                          merge_type=None, force=False):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        merger = Merger(self.branch, this_tree=self)
        # check that there are no local alterations
        if not force and self.has_changes():
            raise errors.UncommittedChanges(self)
        if to_revision is None:
            to_revision = _mod_revision.ensure_null(branch.last_revision())
        merger.other_rev_id = to_revision
        if _mod_revision.is_null(merger.other_rev_id):
            raise errors.NoCommits(branch)
        self.branch.fetch(branch, last_revision=merger.other_rev_id)
        merger.other_basis = merger.other_rev_id
        merger.other_tree = self.branch.repository.revision_tree(
            merger.other_rev_id)
        merger.other_branch = branch
        if from_revision is None:
            merger.find_base()
        else:
            merger.set_base_revision(from_revision, branch)
        if merger.base_rev_id == merger.other_rev_id:
            raise errors.PointlessMerge
        merger.backup_files = False
        if merge_type is None:
            merger.merge_type = Merge3Merger
        else:
            merger.merge_type = merge_type
        merger.set_interesting_files(None)
        merger.show_base = False
        merger.reprocess = False
        conflicts = merger.do_merge()
        merger.set_pending()
        return conflicts
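    # Usage sketch (illustrative): merging another branch into this tree,
    # then committing; 'url' is a hypothetical branch location.
    #
    #   from bzrlib.branch import Branch
    #   other = Branch.open(url)
    #   tree.merge_from_branch(other)
    #   tree.commit('Merge other branch')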
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._transport.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        try:
            merge_hashes = {}
            try:
                if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                    raise errors.MergeModifiedFormatError()
            except StopIteration:
                raise errors.MergeModifiedFormatError()
            for s in RioReader(hashfile):
                # RioReader reads in Unicode, so convert file_ids back to utf8
                file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
                if file_id not in self.inventory:
                    continue
                text_hash = s.get("hash")
                if text_hash == self.get_file_sha1(file_id):
                    merge_hashes[file_id] = text_hash
            return merge_hashes
        finally:
            hashfile.close()
    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id

    def get_symlink_target(self, file_id):
        abspath = self.id2abspath(file_id)
        target = osutils.readlink(abspath)
        return target
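    # Usage sketch (illustrative): creating and versioning a directory in
    # one call; a file id is generated when none is supplied.
    #
    #   docs_id = tree.mkdir('docs')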
    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                                          'Trees have the same root')
        try:
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
        try:
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        finally:
            other_tree.unlock()
        other_tree.bzrdir.retire_bzrdir()
    def _setup_directory_is_tree_reference(self):
        if self._branch.repository._format.supports_tree_reference:
            self._directory_is_tree_reference = \
                self._directory_may_be_tree_reference
        else:
            self._directory_is_tree_reference = \
                self._directory_is_never_tree_reference

    def _directory_is_never_tree_reference(self, relpath):
        return False

    def _directory_may_be_tree_reference(self, relpath):
        # as a special case, if a directory contains control files then
        # it's a tree reference, except that the root of the tree is not
        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
        # TODO: We could ask all the control formats whether they
        # recognize this directory, but at the moment there's no cheap api
        # to do that. Since we probably can only nest bzr checkouts and
        # they always use this name it's ok for now. -- mbp 20060306
        #
        # FIXME: There is an unhandled case here of a subdirectory
        # containing .bzr but not a branch; that will probably blow up
        # when you try to commit it. It might happen if there is a
        # checkout in a subdirectory. This can be avoided by not adding
        # it.
    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        def mkdirs(path):
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()
            return transport

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
        if format is None:
            format = self.bzrdir.cloning_metadir()
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
        try:
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
        if not repo.supports_rich_root():
            raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir,
                target_branch=new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = inventory.Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
        return wt
    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file,
            working=True)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)
    def flush(self):
        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._transport.put_file('inventory', sio,
            mode=self.bzrdir._get_file_mode())
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))
    def list_files(self, include_root=False, from_dir=None, recursive=True):
        """List all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.
        This does not include files that have been deleted in this
        tree. Skips the control directory.

        :param include_root: if True, return an entry for the root
        :param from_dir: start from this directory or None for the root
        :param recursive: whether to recurse into subdirectories or not
        """
        # list_files is an iterator, so @needs_read_lock doesn't work properly
        # with it. So callers should be careful to always read_lock the tree.
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)

        inv = self.inventory
        if from_dir is None and include_root is True:
            yield ('', 'V', 'directory', inv.root.file_id, inv.root)
        # Convert these into local objects to save lookup times
        pathjoin = osutils.pathjoin
        file_kind = self._kind

        # transport.base ends in a slash, we want the piece
        # between the last two slashes
        transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]

        fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}

        # directory file_id, relative path, absolute path, reverse sorted children
        if from_dir is not None:
            from_dir_id = inv.path2id(from_dir)
            if from_dir_id is None:
                # Directory not versioned
                return
            from_dir_abspath = pathjoin(self.basedir, from_dir)
        else:
            from_dir_id = inv.root.file_id
            from_dir_abspath = self.basedir
        children = os.listdir(from_dir_abspath)
        children.sort()
        # jam 20060527 The kernel sized tree seems equivalent whether we
        # use a deque and popleft to keep them sorted, or if we use a plain
        # list and just reverse() them.
        children = collections.deque(children)
        stack = [(from_dir_id, u'', from_dir_abspath, children)]
        while stack:
            from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]

            while children:
                f = children.popleft()
                ## TODO: If we find a subdirectory with its own .bzr
                ## directory, then that is a separate tree and we
                ## should exclude it.

                # the bzrdir for this tree
                if transport_base_dir == f:
                    continue

                # we know that from_dir_relpath and from_dir_abspath never end in a slash
                # and 'f' doesn't begin with one, we can do a string op, rather
                # than the checks of pathjoin(), all relative paths will have an extra slash
                # at the beginning
                fp = from_dir_relpath + '/' + f

                # absolute path
                fap = from_dir_abspath + '/' + f

                dir_ie = inv[from_dir_id]
                if dir_ie.kind == 'directory':
                    f_ie = dir_ie.children.get(f)
                else:
                    f_ie = None
                if f_ie:
                    c = 'V'
                elif self.is_ignored(fp[1:]):
                    c = 'I'
                else:
                    # we may not have found this file, because of a unicode
                    # issue, or because the directory was actually a symlink.
                    f_norm, can_access = osutils.normalized_filename(f)
                    if f == f_norm or not can_access:
                        # No change, so treat this file normally
                        c = '?'
                    else:
                        # this file can be accessed by a normalized path
                        # check again if it is versioned
                        # these lines are repeated here for performance
                        f = f_norm
                        fp = from_dir_relpath + '/' + f
                        fap = from_dir_abspath + '/' + f
                        f_ie = inv.get_child(from_dir_id, f)
                        if f_ie:
                            c = 'V'
                        elif self.is_ignored(fp[1:]):
                            c = 'I'
                        else:
                            c = '?'

                fk = file_kind(fap)

                # make a last minute entry
                if f_ie:
                    yield fp[1:], c, fk, f_ie.file_id, f_ie
                else:
                    try:
                        yield fp[1:], c, fk, None, fk_entries[fk]()
                    except KeyError:
                        yield fp[1:], c, fk, None, TreeEntry()
                    continue

                if fk != 'directory':
                    continue

                # But do this child first if recursing down
                if recursive:
                    new_children = os.listdir(fap)
                    new_children.sort()
                    new_children = collections.deque(new_children)
                    stack.append((f_ie.file_id, fp, fap, new_children))
                    # Break out of inner loop,
                    # so that we start outer loop with child
                    break
            else:
                # if we finished all children, pop it off the stack
                stack.pop()
    @needs_tree_write_lock
    def move(self, from_paths, to_dir=None, after=False):
        """Rename files.

        to_dir must exist in the inventory.

        If to_dir exists and is a directory, the files are moved into
        it, keeping their old names.

        Note that to_dir is only the last component of the new name;
        this doesn't change the directory.

        For each entry in from_paths the move mode will be determined
        independently.

        The first mode moves the file in the filesystem and updates the
        inventory. The second mode only updates the inventory without
        touching the file on the filesystem. This is the new mode introduced
        in version 0.15.

        move uses the second mode if 'after == True' and the target is not
        versioned but present in the working tree.

        move uses the second mode if 'after == False' and the source is
        versioned but no longer in the working tree, and the target is not
        versioned but present in the working tree.

        move uses the first mode if 'after == False' and the source is
        versioned and present in the working tree, and the target is not
        versioned and not present in the working tree.

        Everything else results in an error.

        This returns a list of (from_path, to_path) pairs for each
        entry that is moved.
        """
        rename_entries = []
        rename_tuples = []
        # check for deprecated use of signature
        if to_dir is None:
            raise TypeError('You must supply a target directory')
        # check destination directory
        if isinstance(from_paths, basestring):
            raise ValueError()
        inv = self.inventory
        to_abs = self.abspath(to_dir)
        if not isdir(to_abs):
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotADirectory(to_abs))
        if not self.has_filename(to_dir):
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotInWorkingDirectory(to_dir))
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id is None:
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotVersionedError(path=to_dir))

        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind != 'directory':
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotADirectory(to_abs))

        # create rename entries and tuples
        for from_rel in from_paths:
            from_tail = splitpath(from_rel)[-1]
            from_id = inv.path2id(from_rel)
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel, to_dir,
                    errors.NotVersionedError(path=from_rel))

            from_entry = inv[from_id]
            from_parent_id = from_entry.parent_id
            to_rel = pathjoin(to_dir, from_tail)
            rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                         from_id=from_id,
                                         from_tail=from_tail,
                                         from_parent_id=from_parent_id,
                                         to_rel=to_rel, to_tail=from_tail,
                                         to_parent_id=to_dir_id)
            rename_entries.append(rename_entry)
            rename_tuples.append((from_rel, to_rel))

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        original_modified = self._inventory_is_modified
        try:
            if len(from_paths):
                self._inventory_is_modified = True
            self._move(rename_entries)
        except:
            # restore the inventory on error
            self._inventory_is_modified = original_modified
            raise
        self._write_inventory(inv)
        return rename_tuples
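    # Usage sketch (illustrative): moving versioned files into an existing
    # versioned directory; returns the (from_path, to_path) pairs.
    #
    #   tree.move(['a.py', 'b.py'], 'lib')
    #   # -> [('a.py', 'lib/a.py'), ('b.py', 'lib/b.py')]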
    def _determine_mv_mode(self, rename_entries, after=False):
        """Determines for each from-to pair if both inventory and working tree
        or only the inventory has to be changed.

        Also does basic plausibility tests.
        """
        inv = self.inventory

        for rename_entry in rename_entries:
            # store to local variables for easier reference
            from_rel = rename_entry.from_rel
            from_id = rename_entry.from_id
            to_rel = rename_entry.to_rel
            to_id = inv.path2id(to_rel)
            only_change_inv = False

            # check the inventory for source and destination
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel, to_rel,
                    errors.NotVersionedError(path=from_rel))
            if to_id is not None:
                raise errors.BzrMoveFailedError(from_rel, to_rel,
                    errors.AlreadyVersionedError(path=to_rel))

            # try to determine the mode for rename (only change inv or change
            # inv and file system)
            if after:
                if not self.has_filename(to_rel):
                    raise errors.BzrMoveFailedError(from_id, to_rel,
                        errors.NoSuchFile(path=to_rel,
                        extra="New file has not been created yet"))
                only_change_inv = True
            elif not self.has_filename(from_rel) and self.has_filename(to_rel):
                only_change_inv = True
            elif self.has_filename(from_rel) and not self.has_filename(to_rel):
                only_change_inv = False
            elif (not self.case_sensitive
                  and from_rel.lower() == to_rel.lower()
                  and self.has_filename(from_rel)):
                only_change_inv = False
            else:
                # something is wrong, so lets determine what exactly
                if not self.has_filename(from_rel) and \
                   not self.has_filename(to_rel):
                    raise errors.BzrRenameFailedError(from_rel, to_rel,
                        errors.PathsDoNotExist(paths=(str(from_rel),
                        str(to_rel))))
                else:
                    raise errors.RenameFailedFilesExist(from_rel, to_rel)
            rename_entry.only_change_inv = only_change_inv
        return rename_entries
    def _move(self, rename_entries):
        """Moves a list of files.

        Depending on the value of the flag 'only_change_inv', the
        file will be moved on the file system or not.
        """
        inv = self.inventory
        moved = []

        for entry in rename_entries:
            try:
                self._move_entry(entry)
            except:
                self._rollback_move(moved)
                raise
            moved.append(entry)

    def _rollback_move(self, moved):
        """Try to rollback a previous move in case of a filesystem error."""
        inv = self.inventory
        for entry in moved:
            try:
                self._move_entry(WorkingTree._RenameEntry(
                    entry.to_rel, entry.from_id,
                    entry.to_tail, entry.to_parent_id, entry.from_rel,
                    entry.from_tail, entry.from_parent_id,
                    entry.only_change_inv))
            except errors.BzrMoveFailedError, e:
                raise errors.BzrMoveFailedError('', '', "Rollback failed."
                        " The working tree is in an inconsistent state."
                        " Please consider doing a 'bzr revert'."
                        " Error message is: %s" % e)
    def _move_entry(self, entry):
        inv = self.inventory
        from_rel_abs = self.abspath(entry.from_rel)
        to_rel_abs = self.abspath(entry.to_rel)
        if from_rel_abs == to_rel_abs:
            raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
                "Source and target are identical.")

        if not entry.only_change_inv:
            try:
                osutils.rename(from_rel_abs, to_rel_abs)
            except OSError, e:
                raise errors.BzrMoveFailedError(entry.from_rel,
                    entry.to_rel, e[1])
        inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
    @needs_tree_write_lock
    def rename_one(self, from_rel, to_rel, after=False):
        """Rename one file.

        This can change the directory or the filename or both.

        rename_one has several 'modes' to work. First, it can rename a physical
        file and change the file_id. That is the normal mode. Second, it can
        only change the file_id without touching any physical file. This is
        the new mode introduced in version 0.15.

        rename_one uses the second mode if 'after == True' and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the second mode if 'after == False' and 'from_rel' is
        versioned but no longer in the working tree, and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the first mode if 'after == False' and 'from_rel' is
        versioned and present in the working tree, and 'to_rel' is not
        versioned and not present in the working tree.

        Everything else results in an error.
        """
        inv = self.inventory
        rename_entries = []

        # create rename entries and tuples
        from_tail = splitpath(from_rel)[-1]
        from_id = inv.path2id(from_rel)
        if from_id is None:
            # if file is missing in the inventory maybe it's in the basis_tree
            basis_tree = self.branch.basis_tree()
            from_id = basis_tree.path2id(from_rel)
            if from_id is None:
                raise errors.BzrRenameFailedError(from_rel, to_rel,
                    errors.NotVersionedError(path=from_rel))
            # put entry back in the inventory so we can rename it
            from_entry = basis_tree.inventory[from_id].copy()
            inv.add(from_entry)
        else:
            from_entry = inv[from_id]
        from_parent_id = from_entry.parent_id
        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                     from_id=from_id,
                                     from_tail=from_tail,
                                     from_parent_id=from_parent_id,
                                     to_rel=to_rel, to_tail=to_tail,
                                     to_parent_id=to_dir_id)
        rename_entries.append(rename_entry)

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        # check if the target changed directory and if the target directory is
        # versioned
        if to_dir_id is None:
            raise errors.BzrMoveFailedError(from_rel, to_rel,
                errors.NotVersionedError(path=to_dir))

        # all checks done. now we can continue with our actual work
        mutter('rename_one:\n'
               '  from_id   {%s}\n'
               '  from_rel: %r\n'
               '  to_rel: %r\n'
               '  to_dir %r\n'
               '  to_dir_id {%s}\n',
               from_id, from_rel, to_rel, to_dir, to_dir_id)

        self._move(rename_entries)
        self._write_inventory(inv)
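    # Usage sketch (illustrative): renaming a single versioned file; with
    # after=True only the inventory is updated, assuming the file has
    # already been moved on disk.
    #
    #   tree.rename_one('README', 'README.txt')
    #   tree.rename_one('old.cfg', 'new.cfg', after=True)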
    class _RenameEntry(object):
        def __init__(self, from_rel, from_id, from_tail, from_parent_id,
                     to_rel, to_tail, to_parent_id, only_change_inv=False):
            self.from_rel = from_rel
            self.from_id = from_id
            self.from_tail = from_tail
            self.from_parent_id = from_parent_id
            self.to_rel = to_rel
            self.to_tail = to_tail
            self.to_parent_id = to_parent_id
            self.only_change_inv = only_change_inv
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
        return iter(
            [subp for subp in self.extras() if not self.is_ignored(subp)])
    @needs_tree_write_lock
    def unversion(self, file_ids):
        """Remove the file ids in file_ids from the current versioned set.

        When a file_id is unversioned, all of its children are automatically
        unversioned.

        :param file_ids: The file ids to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        for file_id in file_ids:
            if file_id not in self._inventory:
                raise errors.NoSuchId(self, file_id)
        for file_id in file_ids:
            if self._inventory.has_id(file_id):
                self._inventory.remove_recursive_id(file_id)
        # in the future this should just set a dirty bit to wait for the
        # final unlock. However, until all methods of workingtree start
        # with the current in-memory inventory rather than triggering
        # a read, it is more complex - we need to teach read_inventory
        # to know when to read, and when to not read first... and possibly
        # to save first when the in memory one may be corrupted.
        # so for now, we just only write it if it is indeed dirty.
        self._write_inventory(self._inventory)
    def _iter_conflicts(self):
        conflicted = set()
        for info in self.list_files():
            path = info[0]
            stem = get_conflicted_stem(path)
            if stem is None:
                continue
            if stem not in conflicted:
                conflicted.add(stem)
                yield stem
    def pull(self, source, overwrite=False, stop_revision=None,
             change_reporter=None, possible_transports=None, local=False,
             show_base=False):
        source.lock_read()
        try:
            old_revision_info = self.branch.last_revision_info()
            basis_tree = self.basis_tree()
            count = self.branch.pull(source, overwrite, stop_revision,
                                     possible_transports=possible_transports,
                                     local=local)
            new_revision_info = self.branch.last_revision_info()
            if new_revision_info != old_revision_info:
                repository = self.branch.repository
                basis_tree.lock_read()
                try:
                    new_basis_tree = self.branch.basis_tree()
                    merge.merge_inner(
                                self.branch,
                                new_basis_tree,
                                basis_tree,
                                this_tree=self,
                                change_reporter=change_reporter,
                                show_base=show_base)
                    basis_root_id = basis_tree.get_root_id()
                    new_root_id = new_basis_tree.get_root_id()
                    if basis_root_id != new_root_id:
                        self.set_root_id(new_root_id)
                finally:
                    basis_tree.unlock()
                # TODO - dedup parents list with things merged by pull ?
                # reuse the revisiontree we merged against to set the new
                # tree data.
                parent_trees = [(self.branch.last_revision(), new_basis_tree)]
                # we have to pull the merge trees out again, because
                # merge_inner has set the ids. - this corner is not yet
                # layered well enough to prevent double handling.
                # XXX TODO: Fix the double handling: telling the tree about
                # the already known parent data is wasteful.
                merges = self.get_parent_ids()[1:]
                parent_trees.extend([
                    (parent, repository.revision_tree(parent)) for
                     parent in merges])
                self.set_parent_trees(parent_trees)
            return count
        finally:
            source.unlock()
def put_file_bytes_non_atomic(self, file_id, bytes):
1718
"""See MutableTree.put_file_bytes_non_atomic."""
1719
stream = file(self.id2abspath(file_id), 'wb')
1724
# TODO: update the hashcache here ?
187
1726
def extras(self):
188
"""Yield all unknown files in this WorkingTree.
1727
"""Yield all unversioned files in this WorkingTree.
190
If there are any unknown directories then only the directory is
191
returned, not all its children. But if there are unknown files
1729
If there are any unversioned directories then only the directory is
1730
returned, not all its children. But if there are unversioned files
192
1731
under a versioned subdirectory, they are returned.
194
1733
Currently returned depth-first, sorted by name within directories.
1734
This is the same order used by 'osutils.walkdirs'.
196
1736
## TODO: Work from given directory downwards
197
from osutils import isdir, appendpath
199
1737
for path, dir_entry in self.inventory.directories():
200
mutter("search for unknowns in %r" % path)
1738
# mutter("search for unknowns in %r", path)
201
1739
dirabs = self.abspath(path)
202
1740
if not isdir(dirabs):
203
1741
# e.g. directory deleted
207
1745
for subf in os.listdir(dirabs):
209
and (subf not in dir_entry.children)):
1746
if self.bzrdir.is_control_filename(subf):
1748
if subf not in dir_entry.children:
1751
can_access) = osutils.normalized_filename(subf)
1752
except UnicodeDecodeError:
1753
path_os_enc = path.encode(osutils._fs_enc)
1754
relpath = path_os_enc + '/' + subf
1755
raise errors.BadFilenameEncoding(relpath,
1757
if subf_norm != subf and can_access:
1758
if subf_norm not in dir_entry.children:
1759
fl.append(subf_norm)
214
subp = appendpath(path, subf)
1765
subp = pathjoin(path, subf)
218
1768
def ignored_files(self):
219
1769
"""Yield list of PATH, IGNORE_PATTERN"""
220
1770
for subp in self.extras():
221
1771
pat = self.is_ignored(subp)
226
1775
def get_ignore_list(self):
227
1776
"""Return list of ignore patterns.
229
1778
Cached in the Tree object after the first call.
231
if hasattr(self, '_ignorelist'):
232
return self._ignorelist
1780
ignoreset = getattr(self, '_ignoreset', None)
1781
if ignoreset is not None:
234
l = bzrlib.DEFAULT_IGNORE[:]
1784
ignore_globs = set()
1785
ignore_globs.update(ignores.get_runtime_ignores())
1786
ignore_globs.update(ignores.get_user_ignores())
235
1787
if self.has_filename(bzrlib.IGNORE_FILENAME):
236
1788
f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
237
l.extend([line.rstrip("\n\r") for line in f.readlines()])
1790
ignore_globs.update(ignores.parse_ignore_file(f))
1793
self._ignoreset = ignore_globs
1796
def _flush_ignore_list_cache(self):
1797
"""Resets the cached ignore list to force a cache rebuild."""
1798
self._ignoreset = None
1799
self._ignoreglobster = None
242
1801
def is_ignored(self, filename):
243
1802
r"""Check whether the filename matches an ignore pattern.
245
1804
Patterns containing '/' or '\' need to match the whole path;
246
others match against only the last component.
1805
others match against only the last component. Patterns starting
1806
with '!' are ignore exceptions. Exceptions take precedence
1807
over regular patterns and cause the filename to not be ignored.
248
1809
If the file is ignored, returns the pattern which caused it to
249
1810
be ignored, otherwise None. So this can simply be used as a
250
1811
boolean if desired."""
252
# TODO: Use '**' to match directories, and other extended
253
# globbing stuff from cvs/rsync.
255
# XXX: fnmatch is actually not quite what we want: it's only
256
# approximately the same as real Unix fnmatch, and doesn't
257
# treat dotfiles correctly and allows * to match /.
258
# Eventually it should be replaced with something more
262
from osutils import splitpath
264
for pat in self.get_ignore_list():
265
if '/' in pat or '\\' in pat:
267
# as a special case, you can put ./ at the start of a
268
# pattern; this is good to match in the top-level
271
if (pat[:2] == './') or (pat[:2] == '.\\'):
1812
if getattr(self, '_ignoreglobster', None) is None:
1813
self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
1814
return self._ignoreglobster.match(filename)
1816
def kind(self, file_id):
1817
return file_kind(self.id2abspath(file_id))
1819
def stored_kind(self, file_id):
1820
"""See Tree.stored_kind"""
1821
return self.inventory[file_id].kind
1823
def _comparison_data(self, entry, path):
1824
abspath = self.abspath(path)
1826
stat_value = os.lstat(abspath)
1828
if getattr(e, 'errno', None) == errno.ENOENT:
1835
mode = stat_value.st_mode
1836
kind = osutils.file_kind_from_stat_mode(mode)
1837
if not supports_executable():
1838
executable = entry is not None and entry.executable
1840
executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
1841
return kind, executable, stat_value
1843
def _file_size(self, entry, stat_value):
1844
return stat_value.st_size
1846
def last_revision(self):
1847
"""Return the last revision of the branch for this tree.
1849
This format tree does not support a separate marker for last-revision
1850
compared to the branch.
1852
See MutableTree.last_revision
1854
return self._last_revision()
1857
def _last_revision(self):
1858
"""helper for get_parent_ids."""
1859
return _mod_revision.ensure_null(self.branch.last_revision())
1861
def is_locked(self):
1862
return self._control_files.is_locked()
1864
def _must_be_locked(self):
1865
if not self.is_locked():
1866
raise errors.ObjectNotLocked(self)
1868
def lock_read(self):
1869
"""Lock the tree for reading.
1871
This also locks the branch, and can be unlocked via self.unlock().
1873
:return: A bzrlib.lock.LogicalLockResult.
1875
if not self.is_locked():
1877
self.branch.lock_read()
1879
self._control_files.lock_read()
1880
return LogicalLockResult(self.unlock)
1882
self.branch.unlock()
1885
def lock_tree_write(self):
1886
"""See MutableTree.lock_tree_write, and WorkingTree.unlock.
1888
:return: A bzrlib.lock.LogicalLockResult.
1890
if not self.is_locked():
1892
self.branch.lock_read()
1894
self._control_files.lock_write()
1895
return LogicalLockResult(self.unlock)
1897
self.branch.unlock()
1900
def lock_write(self):
1901
"""See MutableTree.lock_write, and WorkingTree.unlock.
1903
:return: A bzrlib.lock.LogicalLockResult.
1905
if not self.is_locked():
1907
self.branch.lock_write()
1909
self._control_files.lock_write()
1910
return LogicalLockResult(self.unlock)
1912
self.branch.unlock()
1915
def get_physical_lock_status(self):
1916
return self._control_files.get_physical_lock_status()
1918
def _basis_inventory_name(self):
1919
return 'basis-inventory-cache'
1921
def _reset_data(self):
1922
"""Reset transient data that cannot be revalidated."""
1923
self._inventory_is_modified = False
1924
f = self._transport.get('inventory')
1926
result = self._deserialize(f)
1929
self._set_inventory(result, dirty=False)
1931
@needs_tree_write_lock
1932
def set_last_revision(self, new_revision):
1933
"""Change the last revision in the working tree."""
1934
if self._change_last_revision(new_revision):
1935
self._cache_basis_inventory(new_revision)
1937
def _change_last_revision(self, new_revision):
1938
"""Template method part of set_last_revision to perform the change.
1940
This is used to allow WorkingTree3 instances to not affect branch
1941
when their last revision is set.
1943
if _mod_revision.is_null(new_revision):
1944
self.branch.set_revision_history([])
1947
self.branch.generate_revision_history(new_revision)
1948
except errors.NoSuchRevision:
1949
# not present in the repo - dont try to set it deeper than the tip
1950
self.branch.set_revision_history([new_revision])
1953
def _write_basis_inventory(self, xml):
1954
"""Write the basis inventory XML to the basis-inventory file"""
1955
path = self._basis_inventory_name()
1957
self._transport.put_file(path, sio,
1958
mode=self.bzrdir._get_file_mode())
1960
def _create_basis_xml_from_inventory(self, revision_id, inventory):
1961
"""Create the text that will be saved in basis-inventory"""
1962
inventory.revision_id = revision_id
1963
return xml7.serializer_v7.write_inventory_to_string(inventory)
1965
def _cache_basis_inventory(self, new_revision):
1966
"""Cache new_revision as the basis inventory."""
1967
# TODO: this should allow the ready-to-use inventory to be passed in,
1968
# as commit already has that ready-to-use [while the format is the
1971
# this double handles the inventory - unpack and repack -
1972
# but is easier to understand. We can/should put a conditional
1973
# in here based on whether the inventory is in the latest format
1974
# - perhaps we should repack all inventories on a repository
1976
# the fast path is to copy the raw xml from the repository. If the
1977
# xml contains 'revision_id="', then we assume the right
1978
# revision_id is set. We must check for this full string, because a
1979
# root node id can legitimately look like 'revision_id' but cannot
1981
xml = self.branch.repository._get_inventory_xml(new_revision)
1982
firstline = xml.split('\n', 1)[0]
1983
if (not 'revision_id="' in firstline or
1984
'format="7"' not in firstline):
1985
inv = self.branch.repository._serializer.read_inventory_from_string(
1987
xml = self._create_basis_xml_from_inventory(new_revision, inv)
1988
self._write_basis_inventory(xml)
1989
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1992
def read_basis_inventory(self):
1993
"""Read the cached basis inventory."""
1994
path = self._basis_inventory_name()
1995
return self._transport.get_bytes(path)
1998
def read_working_inventory(self):
1999
"""Read the working inventory.
2001
:raises errors.InventoryModified: read_working_inventory will fail
2002
when the current in memory inventory has been modified.
2004
# conceptually this should be an implementation detail of the tree.
2005
# XXX: Deprecate this.
2006
# ElementTree does its own conversion from UTF-8, so open in
2008
if self._inventory_is_modified:
2009
raise errors.InventoryModified(self)
2010
f = self._transport.get('inventory')
2012
result = self._deserialize(f)
2015
self._set_inventory(result, dirty=False)
2018
@needs_tree_write_lock
2019
def remove(self, files, verbose=False, to_file=None, keep_files=True,
2021
"""Remove nominated files from the working inventory.
2023
:files: File paths relative to the basedir.
2024
:keep_files: If true, the files will also be kept.
2025
:force: Delete files and directories, even if they are changed and
2026
even if the directories are not empty.
2028
if isinstance(files, basestring):
2033
all_files = set() # specified and nested files
2034
unknown_nested_files=set()
2036
to_file = sys.stdout
2038
files_to_backup = []
2040
def recurse_directory_to_add_files(directory):
2041
# Recurse directory and add all files
2042
# so we can check if they have changed.
2043
for parent_info, file_infos in self.walkdirs(directory):
2044
for relpath, basename, kind, lstat, fileid, kind in file_infos:
2045
# Is it versioned or ignored?
2046
if self.path2id(relpath):
2047
# Add nested content for deletion.
2048
all_files.add(relpath)
2050
# Files which are not versioned
2051
# should be treated as unknown.
2052
files_to_backup.append(relpath)
2054
for filename in files:
2055
# Get file name into canonical form.
2056
abspath = self.abspath(filename)
2057
filename = self.relpath(abspath)
2058
if len(filename) > 0:
2059
all_files.add(filename)
2060
recurse_directory_to_add_files(filename)
2062
files = list(all_files)
2065
return # nothing to do
2067
# Sort needed to first handle directory content before the directory
2068
files.sort(reverse=True)
2070
# Bail out if we are going to delete files we shouldn't
2071
if not keep_files and not force:
2072
for (file_id, path, content_change, versioned, parent_id, name,
2073
kind, executable) in self.iter_changes(self.basis_tree(),
2074
include_unchanged=True, require_versioned=False,
2075
want_unversioned=True, specific_files=files):
2076
if versioned[0] == False:
2077
# The record is unknown or newly added
2078
files_to_backup.append(path[1])
2079
elif (content_change and (kind[1] is not None) and
2080
osutils.is_inside_any(files, path[1])):
2081
# Versioned and changed, but not deleted, and still
2082
# in one of the dirs to be deleted.
2083
files_to_backup.append(path[1])
2085
def backup(file_to_backup):
2086
backup_name = self.bzrdir._available_backup_name(file_to_backup)
2087
osutils.rename(abs_path, self.abspath(backup_name))
2088
return "removed %s (but kept a copy: %s)" % (file_to_backup,
2091
# Build inv_delta and delete files where applicable,
2092
# do this before any modifications to inventory.
2094
fid = self.path2id(f)
2097
message = "%s is not versioned." % (f,)
2100
# having removed it, it must be either ignored or unknown
2101
if self.is_ignored(f):
2105
# XXX: Really should be a more abstract reporter interface
2106
kind_ch = osutils.kind_marker(self.kind(fid))
2107
to_file.write(new_status + ' ' + f + kind_ch + '\n')
2109
inv_delta.append((f, None, fid, None))
2110
message = "removed %s" % (f,)
2113
abs_path = self.abspath(f)
2114
if osutils.lexists(abs_path):
2115
if (osutils.isdir(abs_path) and
2116
len(os.listdir(abs_path)) > 0):
2118
osutils.rmtree(abs_path)
2119
message = "deleted %s" % (f,)
2123
if f in files_to_backup:
2126
osutils.delete_any(abs_path)
2127
message = "deleted %s" % (f,)
2128
elif message is not None:
2129
# Only care if we haven't done anything yet.
2130
message = "%s does not exist." % (f,)
2132
# Print only one message (if any) per file.
2133
if message is not None:
2135
self.apply_inventory_delta(inv_delta)
2137
@needs_tree_write_lock
2138
def revert(self, filenames=None, old_tree=None, backups=True,
2139
pb=None, report_changes=False):
2140
from bzrlib.conflicts import resolve
2143
symbol_versioning.warn('Using [] to revert all files is deprecated'
2144
' as of bzr 0.91. Please use None (the default) instead.',
2145
DeprecationWarning, stacklevel=2)
2146
if old_tree is None:
2147
basis_tree = self.basis_tree()
2148
basis_tree.lock_read()
2149
old_tree = basis_tree
2153
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
2155
if filenames is None and len(self.get_parent_ids()) > 1:
2157
last_revision = self.last_revision()
2158
if last_revision != _mod_revision.NULL_REVISION:
2159
if basis_tree is None:
2160
basis_tree = self.basis_tree()
2161
basis_tree.lock_read()
2162
parent_trees.append((last_revision, basis_tree))
2163
self.set_parent_trees(parent_trees)
2166
resolve(self, filenames, ignore_misses=True, recursive=True)
2168
if basis_tree is not None:
2172
def revision_tree(self, revision_id):
2173
"""See Tree.revision_tree.
2175
WorkingTree can supply revision_trees for the basis revision only
2176
because there is only one cached inventory in the bzr directory.
2178
if revision_id == self.last_revision():
2180
xml = self.read_basis_inventory()
2181
except errors.NoSuchFile:
2185
inv = xml7.serializer_v7.read_inventory_from_string(xml)
2186
# don't use the repository revision_tree api because we want
2187
# to supply the inventory.
2188
if inv.revision_id == revision_id:
2189
return revisiontree.RevisionTree(self.branch.repository,
2191
except errors.BadInventoryFormat:
2193
# raise if there was no inventory, or if we read the wrong inventory.
2194
raise errors.NoSuchRevisionInTree(self, revision_id)
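# Behavioural sketch (illustrative, not part of the original module): only the
# basis revision can be served, because only one inventory is cached in the
# .bzr control directory; any other revision id raises NoSuchRevisionInTree.
#
#   basis = wt.revision_tree(wt.last_revision())   # served from the cache
#   wt.revision_tree('any-other-revid')            # raises NoSuchRevisionInTree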
2196
# XXX: This method should be deprecated in favour of taking in a proper
2197
# new Inventory object.
2198
@needs_tree_write_lock
2199
def set_inventory(self, new_inventory_list):
2200
from bzrlib.inventory import (Inventory,
2204
inv = Inventory(self.get_root_id())
2205
for path, file_id, parent, kind in new_inventory_list:
2206
name = os.path.basename(path)
2209
# FIXME: there should be a factory function inv.add_??
2210
if kind == 'directory':
2211
inv.add(InventoryDirectory(file_id, name, parent))
2212
elif kind == 'file':
2213
inv.add(InventoryFile(file_id, name, parent))
2214
elif kind == 'symlink':
2215
inv.add(InventoryLink(file_id, name, parent))
2217
raise errors.BzrError("unknown kind %r" % kind)
2218
self._write_inventory(inv)
2220
@needs_tree_write_lock
2221
def set_root_id(self, file_id):
2222
"""Set the root id for this tree."""
2226
'WorkingTree.set_root_id with fileid=None')
2227
file_id = osutils.safe_file_id(file_id)
2228
self._set_root_id(file_id)
2230
def _set_root_id(self, file_id):
2231
"""Set the root id for this tree, in a format specific manner.
2233
:param file_id: The file id to assign to the root. It must not be
2234
present in the current inventory or an error will occur. It must
2235
not be None, but rather a valid file id.
2237
inv = self._inventory
2238
orig_root_id = inv.root.file_id
2239
# TODO: it might be nice to exit early if there was nothing
2240
# to do, saving us from triggering a sync on unlock.
2241
self._inventory_is_modified = True
2242
# we preserve the root inventory entry object, but
2243
# unlink it from the _byid index
2244
del inv._byid[inv.root.file_id]
2245
inv.root.file_id = file_id
2246
# and link it into the index with the new changed id.
2247
inv._byid[inv.root.file_id] = inv.root
2248
# and finally update all children to reference the new id.
2249
# XXX: this should be safe to just look at the root.children
2250
# list, not the WHOLE INVENTORY.
2253
if entry.parent_id == orig_root_id:
2254
entry.parent_id = inv.root.file_id
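# Sketch of the invariant maintained above (illustrative, not part of the
# original module): the root entry object itself is preserved, it is re-keyed
# in the _byid index under the new id, and entries parented on the old root
# id are re-parented, so that afterwards:
#
#   inv.root.file_id == file_id
#   inv[file_id] is inv.root
#   every entry whose parent_id was orig_root_id now has parent_id == file_id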
2257
"""See Branch.unlock.
2259
WorkingTree locking just uses the Branch locking facilities.
2260
This is currently the case because all working trees have an embedded branch
2261
within them. If, in the future, we were to make branch data shareable
2262
between multiple working trees, i.e. via shared storage, then we
2263
would probably want to lock both the local tree and the branch.
2265
raise NotImplementedError(self.unlock)
2269
def update(self, change_reporter=None, possible_transports=None,
2270
revision=None, old_tip=_marker, show_base=False):
2271
"""Update a working tree along its branch.
2273
This will update the branch if it's bound too, which means we have
2274
multiple trees involved:
2276
- The new basis tree of the master.
2277
- The old basis tree of the branch.
2278
- The old basis tree of the working tree.
2279
- The current working tree state.
2281
Pathologically, all of these may be different, and non-ancestors of each
2282
other. Conceptually we want to:
2284
- Preserve the wt.basis->wt.state changes
2285
- Transform the wt.basis to the new master basis.
2286
- Apply a merge of the old branch basis to get any 'local' changes from
2288
- Restore the wt.basis->wt.state changes.
2290
There isn't a single operation at the moment to do that, so we:
2291
- Merge current state -> basis tree of the master w.r.t. the old tree
2293
- Do a 'normal' merge of the old branch basis if it is relevant.
2295
:param revision: The target revision to update to. Must be in the
2297
:param old_tip: If branch.update() has already been run, the value it
2298
returned (old tip of the branch or None). _marker is used
2301
if self.branch.get_bound_location() is not None:
2303
update_branch = (old_tip is self._marker)
2305
self.lock_tree_write()
2306
update_branch = False
2309
old_tip = self.branch.update(possible_transports)
2311
if old_tip is self._marker:
2313
return self._update_tree(old_tip, change_reporter, revision, show_base)
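# Illustrative usage sketch (not part of the original module; the checkout
# path is hypothetical). For a tree bound to a master branch, update() pulls
# the branch forward and then updates the tree, returning the number of
# conflicts encountered:
#
#   wt = WorkingTree.open('/path/to/checkout')
#   conflicts = wt.update()
#   if conflicts:
#       print 'conflicts found; resolve them and re-run update'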
2317
@needs_tree_write_lock
2318
def _update_tree(self, old_tip=None, change_reporter=None, revision=None,
2320
"""Update a tree to the master branch.
2322
:param old_tip: if supplied, the previous tip revision of the branch,
2323
before it was changed to the master branch's tip.
2325
# here if old_tip is not None, it is the old tip of the branch before
2326
# it was updated from the master branch. This should become a pending
2327
# merge in the working tree to preserve the user's existing work. We
2328
# can't set that until we update the working tree's last revision to be
2329
# one from the new branch, because it will just get absorbed by the
2330
# parent de-duplication logic.
2332
# We MUST save it even if an error occurs, because otherwise the user's
2333
# local work is unreferenced and will appear to have been lost.
2337
last_rev = self.get_parent_ids()[0]
2339
last_rev = _mod_revision.NULL_REVISION
2340
if revision is None:
2341
revision = self.branch.last_revision()
2343
old_tip = old_tip or _mod_revision.NULL_REVISION
2345
if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
2346
# the branch we are bound to was updated
2347
# merge those changes in first
2348
base_tree = self.basis_tree()
2349
other_tree = self.branch.repository.revision_tree(old_tip)
2350
nb_conflicts = merge.merge_inner(self.branch, other_tree,
2351
base_tree, this_tree=self,
2352
change_reporter=change_reporter,
2353
show_base=show_base)
2355
self.add_parent_tree((old_tip, other_tree))
2356
trace.note('Rerun update after fixing the conflicts.')
2359
if last_rev != _mod_revision.ensure_null(revision):
2360
# the working tree is up to date with the branch
2361
# we can merge the specified revision from master
2362
to_tree = self.branch.repository.revision_tree(revision)
2363
to_root_id = to_tree.get_root_id()
2365
basis = self.basis_tree()
2368
if (basis.inventory.root is None
2369
or basis.inventory.root.file_id != to_root_id):
2370
self.set_root_id(to_root_id)
2375
# determine the branch point
2376
graph = self.branch.repository.get_graph()
2377
base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
2379
base_tree = self.branch.repository.revision_tree(base_rev_id)
2381
nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
2383
change_reporter=change_reporter,
2384
show_base=show_base)
2385
self.set_last_revision(revision)
2386
# TODO - dedup parents list with things merged by pull ?
2387
# reuse the tree we've updated to, to set the basis:
2388
parent_trees = [(revision, to_tree)]
2389
merges = self.get_parent_ids()[1:]
2390
# Ideally we ask the tree for the trees here, that way the working
2391
# tree can decide whether to give us the entire tree or give us a
2392
# lazy initialised tree. dirstate for instance will have the trees
2393
# in ram already, whereas a last-revision + basis-inventory tree
2394
# will not, but also does not need them when setting parents.
2395
for parent in merges:
2396
parent_trees.append(
2397
(parent, self.branch.repository.revision_tree(parent)))
2398
if not _mod_revision.is_null(old_tip):
2399
parent_trees.append(
2400
(old_tip, self.branch.repository.revision_tree(old_tip)))
2401
self.set_parent_trees(parent_trees)
2402
last_rev = parent_trees[0][0]
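# Sketch of the resulting parent list (illustrative only), as assembled above:
# the new basis comes first, pre-existing pending merges are preserved, and
# the old branch tip is appended when it was not null, e.g.
#
#   [(revision, to_tree),          # new basis
#    (merge_revid, merge_tree),    # any pre-existing pending merges
#    (old_tip, old_tip_tree)]      # only if old_tip was not null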
2405
def _write_hashcache_if_dirty(self):
2406
"""Write out the hashcache if it is dirty."""
2407
if self._hashcache.needs_write:
2409
self._hashcache.write()
2411
if e.errno not in (errno.EPERM, errno.EACCES):
2413
# TODO: jam 20061219 Should this be a warning? A single line
2414
# warning might be sufficient to let the user know what
2416
mutter('Could not write hashcache for %s\nError: %s',
2417
self._hashcache.cache_file_name(), e)
2419
@needs_tree_write_lock
2420
def _write_inventory(self, inv):
2421
"""Write inventory as the current inventory."""
2422
self._set_inventory(inv, dirty=True)
2425
def set_conflicts(self, arg):
2426
raise errors.UnsupportedOperation(self.set_conflicts, self)
2428
def add_conflicts(self, arg):
2429
raise errors.UnsupportedOperation(self.add_conflicts, self)
2432
def conflicts(self):
2433
conflicts = _mod_conflicts.ConflictList()
2434
for conflicted in self._iter_conflicts():
2437
if file_kind(self.abspath(conflicted)) != "file":
2439
except errors.NoSuchFile:
2442
for suffix in ('.THIS', '.OTHER'):
2444
kind = file_kind(self.abspath(conflicted+suffix))
2447
except errors.NoSuchFile:
2451
ctype = {True: 'text conflict', False: 'contents conflict'}[text]
2452
conflicts.append(_mod_conflicts.Conflict.factory(ctype,
2454
file_id=self.path2id(conflicted)))
2457
def walkdirs(self, prefix=""):
2458
"""Walk the directories of this tree.
2460
returns a generator which yields items in the form:
2461
((current_directory_path, fileid),
2462
[(file1_path, file1_name, file1_kind, (lstat), file1_id,
2465
This API returns a generator, which is only valid during the current
2466
tree transaction - within a single lock_read or lock_write duration.
2468
If the tree is not locked, it may cause an error to be raised,
2469
depending on the tree implementation.
2471
disk_top = self.abspath(prefix)
2472
if disk_top.endswith('/'):
2473
disk_top = disk_top[:-1]
2474
top_strip_len = len(disk_top) + 1
2475
inventory_iterator = self._walkdirs(prefix)
2476
disk_iterator = osutils.walkdirs(disk_top, prefix)
2478
current_disk = disk_iterator.next()
2479
disk_finished = False
2481
if not (e.errno == errno.ENOENT or
2482
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
2485
disk_finished = True
2487
current_inv = inventory_iterator.next()
2488
inv_finished = False
2489
except StopIteration:
2492
while not inv_finished or not disk_finished:
2494
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2495
cur_disk_dir_content) = current_disk
2497
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2498
cur_disk_dir_content) = ((None, None), None)
2499
if not disk_finished:
2500
# strip out .bzr dirs
2501
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
2502
len(cur_disk_dir_content) > 0):
2503
# osutils.walkdirs can be made nicer -
2504
# yield the path-from-prefix rather than the pathjoined
2506
bzrdir_loc = bisect_left(cur_disk_dir_content,
2508
if (bzrdir_loc < len(cur_disk_dir_content)
2509
and self.bzrdir.is_control_filename(
2510
cur_disk_dir_content[bzrdir_loc][0])):
2511
# we don't yield .bzr itself, or its contents.
2512
del cur_disk_dir_content[bzrdir_loc]
2514
# everything is unknown
2517
# everything is missing
2520
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
2522
# disk is before inventory - unknown
2523
dirblock = [(relpath, basename, kind, stat, None, None) for
2524
relpath, basename, kind, stat, top_path in
2525
cur_disk_dir_content]
2526
yield (cur_disk_dir_relpath, None), dirblock
2528
current_disk = disk_iterator.next()
2529
except StopIteration:
2530
disk_finished = True
2532
# inventory is before disk - missing.
2533
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
2534
for relpath, basename, dkind, stat, fileid, kind in
2536
yield (current_inv[0][0], current_inv[0][1]), dirblock
2538
current_inv = inventory_iterator.next()
2539
except StopIteration:
2542
# versioned present directory
2543
# merge the inventory and disk data together
2545
for relpath, subiterator in itertools.groupby(sorted(
2546
current_inv[1] + cur_disk_dir_content,
2547
key=operator.itemgetter(0)), operator.itemgetter(1)):
2548
path_elements = list(subiterator)
2549
if len(path_elements) == 2:
2550
inv_row, disk_row = path_elements
2551
# versioned, present file
2552
dirblock.append((inv_row[0],
2553
inv_row[1], disk_row[2],
2554
disk_row[3], inv_row[4],
2556
elif len(path_elements[0]) == 5:
2558
dirblock.append((path_elements[0][0],
2559
path_elements[0][1], path_elements[0][2],
2560
path_elements[0][3], None, None))
2561
elif len(path_elements[0]) == 6:
2562
# versioned, absent file.
2563
dirblock.append((path_elements[0][0],
2564
path_elements[0][1], 'unknown', None,
2565
path_elements[0][4], path_elements[0][5]))
2567
raise NotImplementedError('unreachable code')
2568
yield current_inv[0], dirblock
2570
current_inv = inventory_iterator.next()
2571
except StopIteration:
2574
current_disk = disk_iterator.next()
2575
except StopIteration:
2576
disk_finished = True
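# Illustrative iteration sketch for walkdirs() (not part of the original
# module). Each item pairs a directory with a block of entries; unknown files
# carry None for file_id, and versioned-but-missing files report an 'unknown'
# disk kind:
#
#   wt.lock_read()
#   try:
#       for (dir_relpath, dir_file_id), dirblock in wt.walkdirs():
#           for relpath, basename, kind, lstat, file_id, versioned_kind in dirblock:
#               print relpath, kind, file_id
#   finally:
#       wt.unlock()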
2578
def _walkdirs(self, prefix=""):
2579
"""Walk the directories of this tree.
2581
:prefix: is used as the directory to start with.
2582
returns a generator which yields items in the form:
2583
((current_directory_path, fileid),
2584
[(file1_path, file1_name, file1_kind, None, file1_id,
2587
_directory = 'directory'
2588
# get the root in the inventory
2589
inv = self.inventory
2590
top_id = inv.path2id(prefix)
2594
pending = [(prefix, '', _directory, None, top_id, None)]
2597
currentdir = pending.pop()
2598
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
2599
top_id = currentdir[4]
2601
relroot = currentdir[0] + '/'
2604
# FIXME: stash the node in pending
2606
if entry.kind == 'directory':
2607
for name, child in entry.sorted_children():
2608
dirblock.append((relroot + name, name, child.kind, None,
2609
child.file_id, child.kind))
2611
yield (currentdir[0], entry.file_id), dirblock
2612
# push the user specified dirs from dirblock
2613
for dir in reversed(dirblock):
2614
if dir[2] == _directory:
2617
@needs_tree_write_lock
2618
def auto_resolve(self):
2619
"""Automatically resolve text conflicts according to contents.
2621
Only text conflicts are auto_resolvable. Files with no conflict markers
2622
are considered 'resolved', because bzr always puts conflict markers
2623
into files that have text conflicts. The corresponding .THIS .BASE and
2624
.OTHER files are deleted, as per 'resolve'.
2625
:return: a tuple of ConflictLists: (un_resolved, resolved).
2627
un_resolved = _mod_conflicts.ConflictList()
2628
resolved = _mod_conflicts.ConflictList()
2629
conflict_re = re.compile('^(<{7}|={7}|>{7})')
2630
for conflict in self.conflicts():
2631
if (conflict.typestring != 'text conflict' or
2632
self.kind(conflict.file_id) != 'file'):
2633
un_resolved.append(conflict)
2635
my_file = open(self.id2abspath(conflict.file_id), 'rb')
2637
for line in my_file:
2638
if conflict_re.search(line):
2639
un_resolved.append(conflict)
break
else:
2642
resolved.append(conflict)
2645
resolved.remove_files(self)
2646
self.set_conflicts(un_resolved)
2647
return un_resolved, resolved
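# Illustrative usage sketch (not part of the original module): after a merge
# that produced text conflicts, auto_resolve() keeps only the conflicts whose
# files still contain <<<<<<< / ======= / >>>>>>> marker lines:
#
#   un_resolved, resolved = wt.auto_resolve()
#   for conflict in un_resolved:
#       print 'still conflicted:', conflict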
2650
def _check(self, references):
2651
"""Check the tree for consistency.
2653
:param references: A dict with keys matching the items returned by
2654
self._get_check_refs(), and values from looking those keys up in
2657
tree_basis = self.basis_tree()
2658
tree_basis.lock_read()
2660
repo_basis = references[('trees', self.last_revision())]
2661
if len(list(repo_basis.iter_changes(tree_basis))) > 0:
2662
raise errors.BzrCheckError(
2663
"Mismatched basis inventory content.")
2668
def _validate(self):
2669
"""Validate internal structures.
2671
This is meant mostly for the test suite, to give it a chance to detect
2672
corruption after actions have occurred. The default implementation is a
2675
:return: None. An exception should be raised if there is an error.
2680
def check_state(self):
2681
"""Check that the working state is/isn't valid."""
2682
check_refs = self._get_check_refs()
2684
for ref in check_refs:
2687
refs[ref] = self.branch.repository.revision_tree(value)
2690
@needs_tree_write_lock
2691
def reset_state(self, revision_ids=None):
2692
"""Reset the state of the working tree.
2694
This does a hard-reset to a last-known-good state. This is a way to
2695
fix if something got corrupted (like the .bzr/checkout/dirstate file)
2697
if revision_ids is None:
2698
revision_ids = self.get_parent_ids()
2699
if not revision_ids:
2700
rt = self.branch.repository.revision_tree(
2701
_mod_revision.NULL_REVISION)
2703
rt = self.branch.repository.revision_tree(revision_ids[0])
2704
self._write_inventory(rt.inventory)
2705
self.set_parent_ids(revision_ids)
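# Illustrative usage sketch (not part of the original module): reset_state()
# rebuilds the tree's parent list and inventory from the repository, which
# can repair corrupted control data (e.g. the dirstate file) without touching
# the files in the working directory:
#
#   wt.reset_state()                      # reset to the current parent ids
#   wt.reset_state(['some-revision-id'])  # or reset to an explicit parent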
2707
def _get_rules_searcher(self, default_searcher):
2708
"""See Tree._get_rules_searcher."""
2709
if self._rules_searcher is None:
2710
self._rules_searcher = super(WorkingTree,
2711
self)._get_rules_searcher(default_searcher)
2712
return self._rules_searcher
2714
def get_shelf_manager(self):
2715
"""Return the ShelfManager for this WorkingTree."""
2716
from bzrlib.shelf import ShelfManager
2717
return ShelfManager(self, self._transport)
2720
class WorkingTree2(WorkingTree):
2721
"""This is the Format 2 working tree.
2723
This was the first weave based working tree.
2724
- uses os locks for locking.
2725
- uses the branch last-revision.
2728
def __init__(self, *args, **kwargs):
2729
super(WorkingTree2, self).__init__(*args, **kwargs)
2730
# WorkingTree2 has a stronger constraint: self._inventory must
2731
# exist. Because this is an older format, we don't mind the overhead
2732
# caused by the extra computation here.
2734
# Newer WorkingTrees should only have self._inventory set when they
2736
if self._inventory is None:
2737
self.read_working_inventory()
2739
def _get_check_refs(self):
2740
"""Return the references needed to perform a check of this tree."""
2741
return [('trees', self.last_revision())]
2743
def lock_tree_write(self):
2744
"""See WorkingTree.lock_tree_write().
2746
In Format2 WorkingTrees we have a single lock for the branch and tree
2747
so lock_tree_write() degrades to lock_write().
2749
:return: An object with an unlock method which will release the lock
2752
self.branch.lock_write()
2754
self._control_files.lock_write()
2757
self.branch.unlock()
2761
# do non-implementation specific cleanup
2764
# we share control files:
2765
if self._control_files._lock_count == 3:
2766
# _inventory_is_modified is always False during a read lock.
2767
if self._inventory_is_modified:
2769
self._write_hashcache_if_dirty()
2771
# reverse order of locking.
2773
return self._control_files.unlock()
2775
self.branch.unlock()
2778
class WorkingTree3(WorkingTree):
2779
"""This is the Format 3 working tree.
2781
This differs from the base WorkingTree by:
2782
- having its own file lock
2783
- having its own last-revision property.
2785
This is new in bzr 0.8
2789
def _last_revision(self):
2790
"""See Mutable.last_revision."""
2792
return self._transport.get_bytes('last-revision')
2793
except errors.NoSuchFile:
2794
return _mod_revision.NULL_REVISION
2796
def _change_last_revision(self, revision_id):
2797
"""See WorkingTree._change_last_revision."""
2798
if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
2800
self._transport.delete('last-revision')
2801
except errors.NoSuchFile:
2805
self._transport.put_bytes('last-revision', revision_id,
2806
mode=self.bzrdir._get_file_mode())
2809
def _get_check_refs(self):
2810
"""Return the references needed to perform a check of this tree."""
2811
return [('trees', self.last_revision())]
2813
@needs_tree_write_lock
2814
def set_conflicts(self, conflicts):
2815
self._put_rio('conflicts', conflicts.to_stanzas(),
2818
@needs_tree_write_lock
2819
def add_conflicts(self, new_conflicts):
2820
conflict_set = set(self.conflicts())
2821
conflict_set.update(set(list(new_conflicts)))
2822
self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
2823
key=_mod_conflicts.Conflict.sort_key)))
2826
def conflicts(self):
2828
confile = self._transport.get('conflicts')
2829
except errors.NoSuchFile:
2830
return _mod_conflicts.ConflictList()
2833
if confile.next() != CONFLICT_HEADER_1 + '\n':
2834
raise errors.ConflictFormatError()
2835
except StopIteration:
2836
raise errors.ConflictFormatError()
2837
return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
2842
# do non-implementation specific cleanup
2844
if self._control_files._lock_count == 1:
2845
# _inventory_is_modified is always False during a read lock.
2846
if self._inventory_is_modified:
2848
self._write_hashcache_if_dirty()
2849
# reverse order of locking.
2851
return self._control_files.unlock()
2853
self.branch.unlock()
2856
def get_conflicted_stem(path):
2857
for suffix in _mod_conflicts.CONFLICT_SUFFIXES:
2858
if path.endswith(suffix):
2859
return path[:-len(suffix)]
2862
class WorkingTreeFormatRegistry(registry.FormatRegistry):
2863
"""Registry for working tree formats."""
2865
def __init__(self, other_registry=None):
2866
super(WorkingTreeFormatRegistry, self).__init__(other_registry)
2867
self._extra_formats = []
2868
self._default_format = None
2870
def register(self, format):
2871
"""Register a new repository format."""
2872
super(WorkingTreeFormatRegistry, self).register(
2873
format.get_format_string(), format)
2875
def remove(self, format):
2876
"""Remove a registered repository format."""
2877
super(WorkingTreeFormatRegistry, self).remove(format.get_format_string())
2879
def register_extra(self, format):
2880
"""Register a repository format that can not be used in a metadir.
2882
This is mainly useful to allow custom repository formats, such as older
2883
Bazaar formats and foreign formats, to be tested.
2885
self._extra_formats.append(registry._ObjectGetter(format))
2887
def remove_extra(self, format):
2888
"""Remove an extra repository format.
2890
self._extra_formats.remove(registry._ObjectGetter(format))
2892
def register_extra_lazy(self, module_name, member_name):
2893
"""Register a repository format lazily.
2895
self._extra_formats.append(
2896
registry._LazyObjectGetter(module_name, member_name))
2898
def get_default(self):
2899
"""Return the current default format."""
2900
return self._default_format
2902
def set_default(self, format):
2903
self._default_format = format
2905
def _get_extra(self):
2907
for getter in self._extra_formats:
2908
f = getter.get_obj()
2915
"""Return all repository formats, even those not usable in metadirs.
2917
return [self.get(k) for k in self.keys()] + self._get_extra()
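# Illustrative registry sketch (not part of the original module; MyTreeFormat
# and LegacyTreeFormat are hypothetical). Ordinary formats are keyed by their
# format string; register_extra() holds formats without a format string
# (older or foreign formats), which are only reachable via _get_all():
#
#   reg = WorkingTreeFormatRegistry()
#   reg.register(MyTreeFormat())                  # keyed by get_format_string()
#   reg.register_extra(LegacyTreeFormat())        # no format string
#   reg.get(MyTreeFormat().get_format_string())   # -> the registered instance
#   reg._get_all()                                # includes both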
2920
format_registry = WorkingTreeFormatRegistry()
2923
class WorkingTreeFormat(object):
2924
"""An encapsulation of the initialization and open routines for a format.
2926
Formats provide three things:
2927
* An initialization routine,
2931
Formats are placed in a dict by their format string for reference
2932
during workingtree opening. It's not required that these be instances; they
2933
can be classes themselves with class methods - it simply depends on
2934
whether state is needed for a given format or not.
2936
Once a format is deprecated, just deprecate the initialize and open
2937
methods on the format class. Do not deprecate the object, as the
2938
object will be created every time regardless.
2941
requires_rich_root = False
2943
upgrade_recommended = False
2945
requires_normalized_unicode_filenames = False
2947
case_sensitive_filename = "FoRMaT"
2949
missing_parent_conflicts = False
2950
"""If this format supports missing parent conflicts."""
2953
def find_format(klass, a_bzrdir):
2954
"""Return the format for the working tree object in a_bzrdir."""
2956
transport = a_bzrdir.get_workingtree_transport(None)
2957
format_string = transport.get_bytes("format")
2958
return format_registry.get(format_string)
2959
except errors.NoSuchFile:
2960
raise errors.NoWorkingTree(base=transport.base)
2962
raise errors.UnknownFormatError(format=format_string,
2963
kind="working tree")
2965
def __eq__(self, other):
2966
return self.__class__ is other.__class__
2968
def __ne__(self, other):
2969
return not (self == other)
2972
@symbol_versioning.deprecated_method(
2973
symbol_versioning.deprecated_in((2, 4, 0)))
2974
def get_default_format(klass):
2975
"""Return the current default format."""
2976
return format_registry.get_default()
2978
def get_format_string(self):
2979
"""Return the ASCII format string that identifies this format."""
2980
raise NotImplementedError(self.get_format_string)
2982
def get_format_description(self):
2983
"""Return the short description for this format."""
2984
raise NotImplementedError(self.get_format_description)
2986
def is_supported(self):
2987
"""Is this format supported?
2989
Supported formats can be initialized and opened.
2990
Unsupported formats may not support initialization or committing or
2991
some other features depending on the reason for not being supported.
2995
def supports_content_filtering(self):
2996
"""True if this format supports content filtering."""
2999
def supports_views(self):
3000
"""True if this format supports stored views."""
3004
@symbol_versioning.deprecated_method(
3005
symbol_versioning.deprecated_in((2, 4, 0)))
3006
def register_format(klass, format):
3007
format_registry.register(format)
3010
@symbol_versioning.deprecated_method(
3011
symbol_versioning.deprecated_in((2, 4, 0)))
3012
def register_extra_format(klass, format):
3013
format_registry.register_extra(format)
3016
@symbol_versioning.deprecated_method(
3017
symbol_versioning.deprecated_in((2, 4, 0)))
3018
def unregister_extra_format(klass, format):
3019
format_registry.unregister_extra(format)
3022
@symbol_versioning.deprecated_method(
3023
symbol_versioning.deprecated_in((2, 4, 0)))
3024
def get_formats(klass):
3025
return format_registry._get_all()
3028
@symbol_versioning.deprecated_method(
3029
symbol_versioning.deprecated_in((2, 4, 0)))
3030
def set_default_format(klass, format):
3031
format_registry.set_default(format)
3034
@symbol_versioning.deprecated_method(
3035
symbol_versioning.deprecated_in((2, 4, 0)))
3036
def unregister_format(klass, format):
3037
format_registry.remove(format)
3040
class WorkingTreeFormat2(WorkingTreeFormat):
3041
"""The second working tree format.
3043
This format modified the hash cache from the format 1 hash cache.
3046
upgrade_recommended = True
3048
requires_normalized_unicode_filenames = True
3050
case_sensitive_filename = "Branch-FoRMaT"
3052
missing_parent_conflicts = False
3054
def get_format_description(self):
3055
"""See WorkingTreeFormat.get_format_description()."""
3056
return "Working tree format 2"
3058
def _stub_initialize_on_transport(self, transport, file_mode):
3059
"""Workaround: create control files for a remote working tree.
3061
This ensures that it can later be updated and dealt with locally,
3062
since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
3063
no working tree. (See bug #43064).
3066
inv = inventory.Inventory()
3067
xml5.serializer_v5.write_inventory(inv, sio, working=True)
3069
transport.put_file('inventory', sio, file_mode)
3070
transport.put_bytes('pending-merges', '', file_mode)
3072
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
3073
accelerator_tree=None, hardlink=False):
3074
"""See WorkingTreeFormat.initialize()."""
3075
if not isinstance(a_bzrdir.transport, LocalTransport):
3076
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3077
if from_branch is not None:
3078
branch = from_branch
3080
branch = a_bzrdir.open_branch()
3081
if revision_id is None:
3082
revision_id = _mod_revision.ensure_null(branch.last_revision())
3085
branch.generate_revision_history(revision_id)
3088
inv = inventory.Inventory()
3089
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
3095
_control_files=branch.control_files)
3096
basis_tree = branch.repository.revision_tree(revision_id)
3097
if basis_tree.inventory.root is not None:
3098
wt.set_root_id(basis_tree.get_root_id())
3099
# set the parent list and cache the basis tree.
3100
if _mod_revision.is_null(revision_id):
3103
parent_trees = [(revision_id, basis_tree)]
3104
wt.set_parent_trees(parent_trees)
3105
transform.build_tree(basis_tree, wt)
3109
super(WorkingTreeFormat2, self).__init__()
3110
self._matchingbzrdir = bzrdir.BzrDirFormat6()
3112
def open(self, a_bzrdir, _found=False):
3113
"""Return the WorkingTree object for a_bzrdir
3115
_found is a private parameter, do not use it. It is used to indicate
3116
if format probing has already been done.
3119
# we are being called directly and must probe.
3120
raise NotImplementedError
3121
if not isinstance(a_bzrdir.transport, LocalTransport):
3122
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3123
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
3127
_control_files=a_bzrdir.open_branch().control_files)
3130
class WorkingTreeFormat3(WorkingTreeFormat):
3131
"""The second working tree format updated to record a format marker.
3134
- exists within a metadir controlling .bzr
3135
- includes an explicit version marker for the workingtree control
3136
files, separate from the BzrDir format
3137
- modifies the hash cache format
3139
- uses a LockDir to guard access for writes.
3142
upgrade_recommended = True
3144
missing_parent_conflicts = True
3146
def get_format_string(self):
3147
"""See WorkingTreeFormat.get_format_string()."""
3148
return "Bazaar-NG Working Tree format 3"
3150
def get_format_description(self):
3151
"""See WorkingTreeFormat.get_format_description()."""
3152
return "Working tree format 3"
3154
_lock_file_name = 'lock'
3155
_lock_class = LockDir
3157
_tree_class = WorkingTree3
3159
def __get_matchingbzrdir(self):
3160
return bzrdir.BzrDirMetaFormat1()
3162
_matchingbzrdir = property(__get_matchingbzrdir)
3164
def _open_control_files(self, a_bzrdir):
3165
transport = a_bzrdir.get_workingtree_transport(None)
3166
return LockableFiles(transport, self._lock_file_name,
3169
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
3170
accelerator_tree=None, hardlink=False):
3171
"""See WorkingTreeFormat.initialize().
3173
:param revision_id: if supplied, create a working tree at a different
3174
revision than the branch is at.
3175
:param accelerator_tree: A tree which can be used for retrieving file
3176
contents more quickly than the revision tree, i.e. a workingtree.
3177
The revision tree will be used for cases where accelerator_tree's
3178
content is different.
3179
:param hardlink: If true, hard-link files from accelerator_tree,
3182
if not isinstance(a_bzrdir.transport, LocalTransport):
3183
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3184
transport = a_bzrdir.get_workingtree_transport(self)
3185
control_files = self._open_control_files(a_bzrdir)
3186
control_files.create_lock()
3187
control_files.lock_write()
3188
transport.put_bytes('format', self.get_format_string(),
3189
mode=a_bzrdir._get_file_mode())
3190
if from_branch is not None:
3191
branch = from_branch
3193
branch = a_bzrdir.open_branch()
3194
if revision_id is None:
3195
revision_id = _mod_revision.ensure_null(branch.last_revision())
3196
# WorkingTree3 can handle an inventory which has a unique root id.
3197
# as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
3198
# those trees. And because there isn't a format bump in between, we
3199
# are maintaining compatibility with older clients.
3200
# inv = Inventory(root_id=gen_root_id())
3201
inv = self._initial_inventory()
3202
wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3208
_control_files=control_files)
3209
wt.lock_tree_write()
3211
basis_tree = branch.repository.revision_tree(revision_id)
3212
# only set an explicit root id if there is one to set.
3213
if basis_tree.inventory.root is not None:
3214
wt.set_root_id(basis_tree.get_root_id())
3215
if revision_id == _mod_revision.NULL_REVISION:
3216
wt.set_parent_trees([])
else:
3218
wt.set_parent_trees([(revision_id, basis_tree)])
3219
transform.build_tree(basis_tree, wt)
3221
# Unlock in this order so that the unlock-triggers-flush in
3222
# WorkingTree is given a chance to fire.
3223
control_files.unlock()
3227
def _initial_inventory(self):
3228
return inventory.Inventory()
3231
super(WorkingTreeFormat3, self).__init__()
3233
def open(self, a_bzrdir, _found=False):
3234
"""Return the WorkingTree object for a_bzrdir
3236
_found is a private parameter, do not use it. It is used to indicate
3237
if format probing has already been done.
3240
# we are being called directly and must probe.
3241
raise NotImplementedError
3242
if not isinstance(a_bzrdir.transport, LocalTransport):
3243
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3244
wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
3247
def _open(self, a_bzrdir, control_files):
3248
"""Open the tree itself.
3250
:param a_bzrdir: the dir for the tree.
3251
:param control_files: the control files for the tree.
3253
return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3257
_control_files=control_files)
3260
return self.get_format_string()
3263
__default_format = WorkingTreeFormat6()
3264
format_registry.register_lazy("Bazaar Working Tree Format 4 (bzr 0.15)\n",
3265
"bzrlib.workingtree_4", "WorkingTreeFormat4")
3266
format_registry.register_lazy("Bazaar Working Tree Format 5 (bzr 1.11)\n",
3267
"bzrlib.workingtree_4", "WorkingTreeFormat5")
3268
format_registry.register_lazy("Bazaar Working Tree Format 6 (bzr 1.14)\n",
3269
"bzrlib.workingtree_4", "WorkingTreeFormat6")
3270
format_registry.register(WorkingTreeFormat3())
3271
format_registry.set_default(__default_format)
3272
# Register extra formats which have no format string, are not discoverable,
2273
# and are not independently creatable. They are implicitly created as part of
3274
# e.g. older Bazaar formats or foreign formats.
3275
format_registry.register_extra(WorkingTreeFormat2())
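# Illustrative lookup sketch (not part of the original module): with the
# registrations above, the module-level registry can hand back the default
# format and any format registered under its format string, e.g.
#
#   default = format_registry.get_default()    # the WorkingTreeFormat6 above
#   wt4_format = format_registry.get(
#       "Bazaar Working Tree Format 4 (bzr 0.15)\n")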