46
381
and the working file exists.
48
383
inv = self._inventory
49
for file_id in self._inventory:
50
# TODO: This is slightly redundant; we should be able to just
51
# check the statcache but it only includes regular files.
52
# only include files which still exist on disk
55
if ((file_id in self._statcache)
56
or (os.path.exists(self.abspath(inv.id2path(file_id))))):
384
for path, ie in inv.iter_entries():
385
if osutils.lexists(self.abspath(path)):
388
def all_file_ids(self):
389
"""See Tree.iter_all_file_ids"""
390
return set(self.inventory)
61
392
def __repr__(self):
62
393
return "<%s of %s>" % (self.__class__.__name__,
394
getattr(self, 'basedir', None))
65
396
def abspath(self, filename):
66
return os.path.join(self.basedir, filename)
397
return pathjoin(self.basedir, filename)
399
def basis_tree(self):
400
"""Return RevisionTree for the current last revision.
402
If the leftmost parent is a ghost then the returned tree will be an
403
empty tree - one obtained by calling
404
repository.revision_tree(NULL_REVISION).
407
revision_id = self.get_parent_ids()[0]
409
# no parents, return an empty revision tree.
410
# in the future this should return the tree for
411
# 'empty:' - the implicit root empty tree.
412
return self.branch.repository.revision_tree(
413
_mod_revision.NULL_REVISION)
415
return self.revision_tree(revision_id)
416
except errors.NoSuchRevision:
418
# No cached copy available, retrieve from the repository.
419
# FIXME? RBC 20060403 should we cache the inventory locally
422
return self.branch.repository.revision_tree(revision_id)
423
except (errors.RevisionNotPresent, errors.NoSuchRevision):
424
# the basis tree *may* be a ghost or a low level error may have
425
# occurred. If the revision is present, it's a problem; if it's not
427
if self.branch.repository.has_revision(revision_id):
429
# the basis tree is a ghost so return an empty tree.
430
return self.branch.repository.revision_tree(
431
_mod_revision.NULL_REVISION)
434
self._flush_ignore_list_cache()
436
def relpath(self, path):
437
"""Return the local path portion from a given path.
439
The path may be absolute or relative. If it's a relative path it is
440
interpreted relative to the python current working directory.
442
return osutils.relpath(self.basedir, path)
68
444
def has_filename(self, filename):
69
return os.path.exists(self.abspath(filename))
71
def get_file(self, file_id):
72
return self.get_file_byname(self.id2path(file_id))
74
def get_file_byname(self, filename):
75
return file(self.abspath(filename), 'rb')
445
return osutils.lexists(self.abspath(filename))
447
def get_file(self, file_id, path=None, filtered=True):
448
return self.get_file_with_stat(file_id, path, filtered=filtered)[0]
450
def get_file_with_stat(self, file_id, path=None, filtered=True,
452
"""See Tree.get_file_with_stat."""
454
path = self.id2path(file_id)
455
file_obj = self.get_file_byname(path, filtered=False)
456
stat_value = _fstat(file_obj.fileno())
457
if filtered and self.supports_content_filtering():
458
filters = self._content_filter_stack(path)
459
file_obj = filtered_input_file(file_obj, filters)
460
return (file_obj, stat_value)
462
def get_file_text(self, file_id, path=None, filtered=True):
463
return self.get_file(file_id, path=path, filtered=filtered).read()
465
def get_file_byname(self, filename, filtered=True):
466
path = self.abspath(filename)
468
if filtered and self.supports_content_filtering():
469
filters = self._content_filter_stack(filename)
470
return filtered_input_file(f, filters)
474
def get_file_lines(self, file_id, path=None, filtered=True):
475
"""See Tree.get_file_lines()"""
476
file = self.get_file(file_id, path, filtered=filtered)
478
return file.readlines()
483
def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
484
"""See Tree.annotate_iter
486
This implementation will use the basis tree implementation if possible.
487
Lines not in the basis are attributed to CURRENT_REVISION
489
If there are pending merges, lines added by those merges will be
490
incorrectly attributed to CURRENT_REVISION (but after committing, the
491
attribution will be correct).
493
maybe_file_parent_keys = []
494
for parent_id in self.get_parent_ids():
496
parent_tree = self.revision_tree(parent_id)
497
except errors.NoSuchRevisionInTree:
498
parent_tree = self.branch.repository.revision_tree(parent_id)
499
parent_tree.lock_read()
501
if file_id not in parent_tree:
503
ie = parent_tree.inventory[file_id]
504
if ie.kind != 'file':
505
# Note: this is slightly unnecessary, because symlinks and
506
# directories have a "text" which is the empty text, and we
507
# know that won't mess up annotations. But it seems cleaner
509
parent_text_key = (file_id, ie.revision)
510
if parent_text_key not in maybe_file_parent_keys:
511
maybe_file_parent_keys.append(parent_text_key)
514
graph = _mod_graph.Graph(self.branch.repository.texts)
515
heads = graph.heads(maybe_file_parent_keys)
516
file_parent_keys = []
517
for key in maybe_file_parent_keys:
519
file_parent_keys.append(key)
521
# Now we have the parents of this content
522
annotator = self.branch.repository.texts.get_annotator()
523
text = self.get_file(file_id).read()
524
this_key = (file_id, default_revision)
525
annotator.add_special_text(this_key, file_parent_keys, text)
526
annotations = [(key[-1], line)
527
for key, line in annotator.annotate_flat(this_key)]
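# Illustrative sketch (not part of the original source): how annotate_iter
# is typically consumed. Assumes 'wt' is a WorkingTree obtained elsewhere,
# e.g. via WorkingTree.open('.'), and that the tree is read-locked.
#
#   wt.lock_read()
#   try:
#       for origin, line in wt.annotate_iter(wt.path2id('hello.txt')):
#           # 'origin' is a revision id, or CURRENT_REVISION for
#           # uncommitted (or pending-merge) lines.
#           print origin, line,
#   finally:
#       wt.unlock()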
530
def _get_ancestors(self, default_revision):
531
ancestors = set([default_revision])
532
for parent_id in self.get_parent_ids():
533
ancestors.update(self.branch.repository.get_ancestry(
534
parent_id, topo_sorted=False))
537
def get_parent_ids(self):
538
"""See Tree.get_parent_ids.
540
This implementation reads the pending merges list and last_revision
541
value and uses that to decide what the parents list should be.
543
last_rev = _mod_revision.ensure_null(self._last_revision())
544
if _mod_revision.NULL_REVISION == last_rev:
549
merges_bytes = self._transport.get_bytes('pending-merges')
550
except errors.NoSuchFile:
553
for l in osutils.split_lines(merges_bytes):
554
revision_id = l.rstrip('\n')
555
parents.append(revision_id)
559
def get_root_id(self):
560
"""Return the id of this trees root"""
561
return self._inventory.root.file_id
77
563
def _get_store_filename(self, file_id):
## XXX: badly named; this is not in the store at all
return self.abspath(self.id2path(file_id))
568
def clone(self, to_bzrdir, revision_id=None):
569
"""Duplicate this working tree into to_bzr, including all state.
571
Specifically modified files are kept as modified, but
572
ignored and unknown files are discarded.
574
If you want to make a new line of development, see bzrdir.sprout()
577
If not None, the cloned tree will have its last revision set to
578
revision, and the difference between the source tree's last revision
579
and this one merged in.
581
# assumes the target bzr dir format is compatible.
582
result = to_bzrdir.create_workingtree()
583
self.copy_content_into(result, revision_id)
587
def copy_content_into(self, tree, revision_id=None):
588
"""Copy the current content and user files of this tree into tree."""
589
tree.set_root_id(self.get_root_id())
590
if revision_id is None:
591
merge.transform_tree(tree, self)
593
# TODO now merge from tree.last_revision to revision (to preserve
594
# user local changes)
595
merge.transform_tree(tree, self)
596
tree.set_parent_ids([revision_id])
598
def id2abspath(self, file_id):
599
return self.abspath(self.id2path(file_id))
82
601
def has_id(self, file_id):
83
602
# files that have been deleted are excluded
84
if not self.inventory.has_id(file_id):
604
if not inv.has_id(file_id):
86
if file_id in self._statcache:
606
path = inv.id2path(file_id)
607
return osutils.lexists(self.abspath(path))
609
def has_or_had_id(self, file_id):
610
if file_id == self.inventory.root.file_id:
88
return os.path.exists(self.abspath(self.id2path(file_id)))
612
return self.inventory.has_id(file_id)
91
614
__contains__ = has_id
94
def _update_statcache(self):
96
if not self._statcache:
97
self._statcache = statcache.update_cache(self.basedir, self.inventory)
99
616
def get_file_size(self, file_id):
101
return os.stat(self._get_store_filename(file_id))[stat.ST_SIZE]
104
def get_file_sha1(self, file_id):
105
return self._statcache[file_id][statcache.SC_SHA1]
108
def file_class(self, filename):
109
if self.path2id(filename):
111
elif self.is_ignored(filename):
117
def list_files(self):
118
"""Recursively list all files as (path, class, kind, id).
617
"""See Tree.get_file_size"""
618
# XXX: this returns the on-disk size; it should probably return the
621
return os.path.getsize(self.id2abspath(file_id))
623
if e.errno != errno.ENOENT:
629
def get_file_sha1(self, file_id, path=None, stat_value=None):
631
path = self._inventory.id2path(file_id)
632
return self._hashcache.get_sha1(path, stat_value)
634
def get_file_mtime(self, file_id, path=None):
636
path = self.inventory.id2path(file_id)
637
return os.lstat(self.abspath(path)).st_mtime
639
def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
640
file_id = self.path2id(path)
642
# For unversioned files on win32, we just assume they are not
645
return self._inventory[file_id].executable
647
def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
648
mode = stat_result.st_mode
649
return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
651
if not supports_executable():
652
def is_executable(self, file_id, path=None):
653
return self._inventory[file_id].executable
655
_is_executable_from_path_and_stat = \
656
_is_executable_from_path_and_stat_from_basis
658
def is_executable(self, file_id, path=None):
660
path = self.id2path(file_id)
661
mode = os.lstat(self.abspath(path)).st_mode
662
return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
664
_is_executable_from_path_and_stat = \
665
_is_executable_from_path_and_stat_from_stat
667
@needs_tree_write_lock
668
def _add(self, files, ids, kinds):
669
"""See MutableTree._add."""
670
# TODO: Re-adding a file that is removed in the working copy
671
# should probably put it back with the previous ID.
672
# the read and write working inventory should not occur in this
673
# function - they should be part of lock_write and unlock.
675
for f, file_id, kind in zip(files, ids, kinds):
677
inv.add_path(f, kind=kind)
679
inv.add_path(f, kind=kind, file_id=file_id)
680
self._inventory_is_modified = True
682
@needs_tree_write_lock
683
def _gather_kinds(self, files, kinds):
684
"""See MutableTree._gather_kinds."""
685
for pos, f in enumerate(files):
686
if kinds[pos] is None:
687
fullpath = normpath(self.abspath(f))
689
kinds[pos] = file_kind(fullpath)
691
if e.errno == errno.ENOENT:
692
raise errors.NoSuchFile(fullpath)
695
def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
696
"""Add revision_id as a parent.
698
This is equivalent to retrieving the current list of parent ids
699
and setting the list to its value plus revision_id.
701
:param revision_id: The revision id to add to the parent list. It may
702
be a ghost revision as long as it's not the first parent to be added,
703
or the allow_leftmost_as_ghost parameter is set True.
704
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
706
parents = self.get_parent_ids() + [revision_id]
707
self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
708
or allow_leftmost_as_ghost)
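# Illustrative sketch (not part of the original source): recording an extra
# parent on an assumed tree 'wt' when only the revision id is available.
#
#   wt.add_parent_tree_id(merged_rev_id)
#   # roughly equivalent to:
#   wt.set_parent_ids(wt.get_parent_ids() + [merged_rev_id],
#                     allow_leftmost_as_ghost=True)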
710
@needs_tree_write_lock
711
def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
712
"""Add revision_id, tree tuple as a parent.
714
This is equivalent to retrieving the current list of parent trees
715
and setting the list to its value plus parent_tuple. See also
716
add_parent_tree_id - if you only have a parent id available it will be
717
simpler to use that api. If you have the parent already available, using
718
this api is preferred.
720
:param parent_tuple: The (revision id, tree) to add to the parent list.
721
If the revision_id is a ghost, pass None for the tree.
722
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
724
parent_ids = self.get_parent_ids() + [parent_tuple[0]]
725
if len(parent_ids) > 1:
726
# the leftmost may have already been a ghost, preserve that if it
728
allow_leftmost_as_ghost = True
729
self.set_parent_ids(parent_ids,
730
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
732
@needs_tree_write_lock
733
def add_pending_merge(self, *revision_ids):
734
# TODO: Perhaps should check at this point that the
735
# history of the revision is actually present?
736
parents = self.get_parent_ids()
738
for rev_id in revision_ids:
739
if rev_id in parents:
741
parents.append(rev_id)
744
self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
746
def path_content_summary(self, path, _lstat=os.lstat,
747
_mapper=osutils.file_kind_from_stat_mode):
748
"""See Tree.path_content_summary."""
749
abspath = self.abspath(path)
751
stat_result = _lstat(abspath)
753
if getattr(e, 'errno', None) == errno.ENOENT:
755
return ('missing', None, None, None)
756
# propagate other errors
758
kind = _mapper(stat_result.st_mode)
760
return self._file_content_summary(path, stat_result)
761
elif kind == 'directory':
762
# perhaps it looks like a plain directory, but it's really a
764
if self._directory_is_tree_reference(path):
765
kind = 'tree-reference'
766
return kind, None, None, None
767
elif kind == 'symlink':
768
target = osutils.readlink(abspath)
769
return ('symlink', None, None, target)
771
return (kind, None, None, None)
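# Illustrative sketch (not part of the original source): the 4-tuples an
# assumed tree 'wt' returns for different kinds of paths.
#
#   wt.path_content_summary('hello.txt')  # ('file', size, executable, sha1-or-None)
#   wt.path_content_summary('subdir')     # ('directory', None, None, None)
#   wt.path_content_summary('a-link')     # ('symlink', None, None, target)
#   wt.path_content_summary('gone')       # ('missing', None, None, None)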
773
def _file_content_summary(self, path, stat_result):
774
size = stat_result.st_size
775
executable = self._is_executable_from_path_and_stat(path, stat_result)
776
# try for a stat cache lookup
777
return ('file', size, executable, self._sha_from_stat(
780
def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
781
"""Common ghost checking functionality from set_parent_*.
783
This checks that the left-hand parent exists if there are any
786
if len(revision_ids) > 0:
787
leftmost_id = revision_ids[0]
788
if (not allow_leftmost_as_ghost and not
789
self.branch.repository.has_revision(leftmost_id)):
790
raise errors.GhostRevisionUnusableHere(leftmost_id)
792
def _set_merges_from_parent_ids(self, parent_ids):
793
merges = parent_ids[1:]
794
self._transport.put_bytes('pending-merges', '\n'.join(merges),
795
mode=self.bzrdir._get_file_mode())
797
def _filter_parent_ids_by_ancestry(self, revision_ids):
798
"""Check that all merged revisions are proper 'heads'.
800
This will always return the first revision_id, and any merged revisions
803
if len(revision_ids) == 0:
805
graph = self.branch.repository.get_graph()
806
heads = graph.heads(revision_ids)
807
new_revision_ids = revision_ids[:1]
808
for revision_id in revision_ids[1:]:
809
if revision_id in heads and revision_id not in new_revision_ids:
810
new_revision_ids.append(revision_id)
811
if new_revision_ids != revision_ids:
812
trace.mutter('requested to set revision_ids = %s,'
813
' but filtered to %s', revision_ids, new_revision_ids)
814
return new_revision_ids
816
@needs_tree_write_lock
817
def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
818
"""Set the parent ids to revision_ids.
820
See also set_parent_trees. This api will try to retrieve the tree data
821
for each element of revision_ids from the trees repository. If you have
822
tree data already available, it is more efficient to use
823
set_parent_trees rather than set_parent_ids. set_parent_ids is however
824
an easier API to use.
826
:param revision_ids: The revision_ids to set as the parent ids of this
827
working tree. Any of these may be ghosts.
829
self._check_parents_for_ghosts(revision_ids,
830
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
831
for revision_id in revision_ids:
832
_mod_revision.check_not_reserved_id(revision_id)
834
revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)
836
if len(revision_ids) > 0:
837
self.set_last_revision(revision_ids[0])
839
self.set_last_revision(_mod_revision.NULL_REVISION)
841
self._set_merges_from_parent_ids(revision_ids)
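# Illustrative sketch (not part of the original source): setting the parents
# of an assumed tree 'wt' from ids alone; the first id becomes the last
# revision, the rest become pending merges, and merged revisions that are
# not heads are filtered out by _filter_parent_ids_by_ancestry.
#
#   wt.set_parent_ids([rev_a, rev_b])
#   wt.get_parent_ids()   # -> [rev_a, rev_b] (assuming rev_b is a head)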
843
@needs_tree_write_lock
844
def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
845
"""See MutableTree.set_parent_trees."""
846
parent_ids = [rev for (rev, tree) in parents_list]
847
for revision_id in parent_ids:
848
_mod_revision.check_not_reserved_id(revision_id)
850
self._check_parents_for_ghosts(parent_ids,
851
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
853
parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)
855
if len(parent_ids) == 0:
856
leftmost_parent_id = _mod_revision.NULL_REVISION
857
leftmost_parent_tree = None
859
leftmost_parent_id, leftmost_parent_tree = parents_list[0]
861
if self._change_last_revision(leftmost_parent_id):
862
if leftmost_parent_tree is None:
863
# If we don't have a tree, fall back to reading the
864
# parent tree from the repository.
865
self._cache_basis_inventory(leftmost_parent_id)
867
inv = leftmost_parent_tree.inventory
868
xml = self._create_basis_xml_from_inventory(
869
leftmost_parent_id, inv)
870
self._write_basis_inventory(xml)
871
self._set_merges_from_parent_ids(parent_ids)
873
@needs_tree_write_lock
874
def set_pending_merges(self, rev_list):
875
parents = self.get_parent_ids()
876
leftmost = parents[:1]
877
new_parents = leftmost + rev_list
878
self.set_parent_ids(new_parents)
880
@needs_tree_write_lock
881
def set_merge_modified(self, modified_hashes):
883
for file_id, hash in modified_hashes.iteritems():
884
yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
885
self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
887
def _sha_from_stat(self, path, stat_result):
888
"""Get a sha digest from the tree's stat cache.
890
The default implementation assumes no stat cache is present.
892
:param path: The path.
893
:param stat_result: The stat result being looked up.
897
def _put_rio(self, filename, stanzas, header):
898
self._must_be_locked()
899
my_file = rio_file(stanzas, header)
900
self._transport.put_file(filename, my_file,
901
mode=self.bzrdir._get_file_mode())
903
@needs_write_lock # because merge pulls data into the branch.
904
def merge_from_branch(self, branch, to_revision=None, from_revision=None,
905
merge_type=None, force=False):
906
"""Merge from a branch into this working tree.
908
:param branch: The branch to merge from.
909
:param to_revision: If non-None, the merge will merge to to_revision,
910
but not beyond it. to_revision does not need to be in the history
911
of the branch when it is supplied. If None, to_revision defaults to
912
branch.last_revision().
914
from bzrlib.merge import Merger, Merge3Merger
915
pb = ui.ui_factory.nested_progress_bar()
917
merger = Merger(self.branch, this_tree=self, pb=pb)
918
merger.pp = ProgressPhase("Merge phase", 5, pb)
919
merger.pp.next_phase()
920
# check that there are no local alterations
921
if not force and self.has_changes():
922
raise errors.UncommittedChanges(self)
923
if to_revision is None:
924
to_revision = _mod_revision.ensure_null(branch.last_revision())
925
merger.other_rev_id = to_revision
926
if _mod_revision.is_null(merger.other_rev_id):
927
raise errors.NoCommits(branch)
928
self.branch.fetch(branch, last_revision=merger.other_rev_id)
929
merger.other_basis = merger.other_rev_id
930
merger.other_tree = self.branch.repository.revision_tree(
932
merger.other_branch = branch
933
merger.pp.next_phase()
934
if from_revision is None:
937
merger.set_base_revision(from_revision, branch)
938
if merger.base_rev_id == merger.other_rev_id:
939
raise errors.PointlessMerge
940
merger.backup_files = False
941
if merge_type is None:
942
merger.merge_type = Merge3Merger
944
merger.merge_type = merge_type
945
merger.set_interesting_files(None)
946
merger.show_base = False
947
merger.reprocess = False
948
conflicts = merger.do_merge()
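# Illustrative sketch (not part of the original source): merging another
# branch into an assumed tree 'wt', which must have no uncommitted changes
# unless force=True. The return value is assumed to be the conflict count
# produced by the merge.
#
#   from bzrlib.branch import Branch
#   other = Branch.open('../other-branch')
#   conflicts = wt.merge_from_branch(other)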
955
def merge_modified(self):
956
"""Return a dictionary of files modified by a merge.
958
The list is initialized by WorkingTree.set_merge_modified, which is
959
typically called after we make some automatic updates to the tree
962
This returns a map of file_id->sha1, containing only files which are
963
still in the working inventory and have that text hash.
966
hashfile = self._transport.get('merge-hashes')
967
except errors.NoSuchFile:
972
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
973
raise errors.MergeModifiedFormatError()
974
except StopIteration:
975
raise errors.MergeModifiedFormatError()
976
for s in RioReader(hashfile):
977
# RioReader reads in Unicode, so convert file_ids back to utf8
978
file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
979
if file_id not in self.inventory:
981
text_hash = s.get("hash")
982
if text_hash == self.get_file_sha1(file_id):
983
merge_hashes[file_id] = text_hash
989
def mkdir(self, path, file_id=None):
990
"""See MutableTree.mkdir()."""
992
file_id = generate_ids.gen_file_id(os.path.basename(path))
993
os.mkdir(self.abspath(path))
994
self.add(path, file_id, 'directory')
997
def get_symlink_target(self, file_id):
998
abspath = self.id2abspath(file_id)
999
target = osutils.readlink(abspath)
1003
def subsume(self, other_tree):
1004
def add_children(inventory, entry):
1005
for child_entry in entry.children.values():
1006
inventory._byid[child_entry.file_id] = child_entry
1007
if child_entry.kind == 'directory':
1008
add_children(inventory, child_entry)
1009
if other_tree.get_root_id() == self.get_root_id():
1010
raise errors.BadSubsumeSource(self, other_tree,
1011
'Trees have the same root')
1013
other_tree_path = self.relpath(other_tree.basedir)
1014
except errors.PathNotChild:
1015
raise errors.BadSubsumeSource(self, other_tree,
1016
'Tree is not contained by the other')
1017
new_root_parent = self.path2id(osutils.dirname(other_tree_path))
1018
if new_root_parent is None:
1019
raise errors.BadSubsumeSource(self, other_tree,
1020
'Parent directory is not versioned.')
1021
# We need to ensure that the result of a fetch will have a
1022
# versionedfile for the other_tree root, and only fetching into
1023
# RepositoryKnit2 guarantees that.
1024
if not self.branch.repository.supports_rich_root():
1025
raise errors.SubsumeTargetNeedsUpgrade(other_tree)
1026
other_tree.lock_tree_write()
1028
new_parents = other_tree.get_parent_ids()
1029
other_root = other_tree.inventory.root
1030
other_root.parent_id = new_root_parent
1031
other_root.name = osutils.basename(other_tree_path)
1032
self.inventory.add(other_root)
1033
add_children(self.inventory, other_root)
1034
self._write_inventory(self.inventory)
1035
# normally we don't want to fetch whole repositories, but i think
1036
# here we really do want to consolidate the whole thing.
1037
for parent_id in other_tree.get_parent_ids():
1038
self.branch.fetch(other_tree.branch, parent_id)
1039
self.add_parent_tree_id(parent_id)
1042
other_tree.bzrdir.retire_bzrdir()
1044
def _setup_directory_is_tree_reference(self):
1045
if self._branch.repository._format.supports_tree_reference:
1046
self._directory_is_tree_reference = \
1047
self._directory_may_be_tree_reference
1049
self._directory_is_tree_reference = \
1050
self._directory_is_never_tree_reference
1052
def _directory_is_never_tree_reference(self, relpath):
1055
def _directory_may_be_tree_reference(self, relpath):
1056
# as a special case, if a directory contains control files then
1057
# it's a tree reference, except that the root of the tree is not
1058
return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
1059
# TODO: We could ask all the control formats whether they
1060
# recognize this directory, but at the moment there's no cheap api
1061
# to do that. Since we probably can only nest bzr checkouts and
1062
# they always use this name it's ok for now. -- mbp 20060306
1064
# FIXME: There is an unhandled case here of a subdirectory
1065
# containing .bzr but not a branch; that will probably blow up
1066
# when you try to commit it. It might happen if there is a
1067
# checkout in a subdirectory. This can be avoided by not adding
1070
@needs_tree_write_lock
1071
def extract(self, file_id, format=None):
1072
"""Extract a subtree from this tree.
1074
A new branch will be created, relative to the path for this tree.
1078
segments = osutils.splitpath(path)
1079
transport = self.branch.bzrdir.root_transport
1080
for name in segments:
1081
transport = transport.clone(name)
1082
transport.ensure_base()
1085
sub_path = self.id2path(file_id)
1086
branch_transport = mkdirs(sub_path)
1088
format = self.bzrdir.cloning_metadir()
1089
branch_transport.ensure_base()
1090
branch_bzrdir = format.initialize_on_transport(branch_transport)
1092
repo = branch_bzrdir.find_repository()
1093
except errors.NoRepositoryPresent:
1094
repo = branch_bzrdir.create_repository()
1095
if not repo.supports_rich_root():
1096
raise errors.RootNotRich()
1097
new_branch = branch_bzrdir.create_branch()
1098
new_branch.pull(self.branch)
1099
for parent_id in self.get_parent_ids():
1100
new_branch.fetch(self.branch, parent_id)
1101
tree_transport = self.bzrdir.root_transport.clone(sub_path)
1102
if tree_transport.base != branch_transport.base:
1103
tree_bzrdir = format.initialize_on_transport(tree_transport)
1104
branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
1106
tree_bzrdir = branch_bzrdir
1107
wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
1108
wt.set_parent_ids(self.get_parent_ids())
1109
my_inv = self.inventory
1110
child_inv = inventory.Inventory(root_id=None)
1111
new_root = my_inv[file_id]
1112
my_inv.remove_recursive_id(file_id)
1113
new_root.parent_id = None
1114
child_inv.add(new_root)
1115
self._write_inventory(my_inv)
1116
wt._write_inventory(child_inv)
1119
def _serialize(self, inventory, out_file):
1120
xml5.serializer_v5.write_inventory(self._inventory, out_file,
1123
def _deserialize(self, in_file):
1124
return xml5.serializer_v5.read_inventory(in_file)
1127
"""Write the in memory inventory to disk."""
1128
# TODO: Maybe this should only write on dirty ?
1129
if self._control_files._lock_mode != 'w':
1130
raise errors.NotWriteLocked(self)
1132
self._serialize(self._inventory, sio)
1134
self._transport.put_file('inventory', sio,
1135
mode=self.bzrdir._get_file_mode())
1136
self._inventory_is_modified = False
1138
def _kind(self, relpath):
1139
return osutils.file_kind(self.abspath(relpath))
1141
def list_files(self, include_root=False, from_dir=None, recursive=True):
1142
"""List all files as (path, class, kind, id, entry).
120
1144
Lists, but does not descend into unversioned directories.
122
1145
This does not include files that have been deleted in this
1146
tree. Skips the control directory.
125
Skips the control directory.
1148
:param include_root: if True, do not return an entry for the root
1149
:param from_dir: start from this directory or None for the root
1150
:param recursive: whether to recurse into subdirectories or not
127
from osutils import appendpath, file_kind
1152
# list_files is an iterator, so @needs_read_lock doesn't work properly
1153
# with it. So callers should be careful to always read_lock the tree.
1154
if not self.is_locked():
1155
raise errors.ObjectNotLocked(self)
130
1157
inv = self.inventory
132
def descend(from_dir_relpath, from_dir_id, dp):
1158
if from_dir is None and include_root is True:
1159
yield ('', 'V', 'directory', inv.root.file_id, inv.root)
1160
# Convert these into local objects to save lookup times
1161
pathjoin = osutils.pathjoin
1162
file_kind = self._kind
1164
# transport.base ends in a slash, we want the piece
1165
# between the last two slashes
1166
transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]
1168
fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
1170
# directory file_id, relative path, absolute path, reverse sorted children
1171
if from_dir is not None:
1172
from_dir_id = inv.path2id(from_dir)
1173
if from_dir_id is None:
1174
# Directory not versioned
1176
from_dir_abspath = pathjoin(self.basedir, from_dir)
1178
from_dir_id = inv.root.file_id
1179
from_dir_abspath = self.basedir
1180
children = os.listdir(from_dir_abspath)
1182
# jam 20060527 The kernel sized tree seems equivalent whether we
1183
# use a deque and popleft to keep them sorted, or if we use a plain
1184
# list and just reverse() them.
1185
children = collections.deque(children)
1186
stack = [(from_dir_id, u'', from_dir_abspath, children)]
1188
from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
1191
f = children.popleft()
136
1192
## TODO: If we find a subdirectory with its own .bzr
137
1193
## directory, then that is a separate tree and we
138
1194
## should exclude it.
139
if bzrlib.BZRDIR == f:
1196
# the bzrdir for this tree
1197
if transport_base_dir == f:
143
fp = appendpath(from_dir_relpath, f)
1200
# we know that from_dir_relpath and from_dir_abspath never end in a slash
# and 'f' doesn't begin with one, so we can do a string op rather
# than the checks of pathjoin(); all relative paths will have an extra slash
1204
fp = from_dir_relpath + '/' + f
146
fap = appendpath(dp, f)
1207
fap = from_dir_abspath + '/' + f
148
1209
f_ie = inv.get_child(from_dir_id, f)
151
elif self.is_ignored(fp):
1212
elif self.is_ignored(fp[1:]):
1215
# we may not have found this file, because of a unicode issue
1216
f_norm, can_access = osutils.normalized_filename(f)
1217
if f == f_norm or not can_access:
1218
# No change, so treat this file normally
1221
# this file can be accessed by a normalized path
1222
# check again if it is versioned
1223
# these lines are repeated here for performance
1225
fp = from_dir_relpath + '/' + f
1226
fap = from_dir_abspath + '/' + f
1227
f_ie = inv.get_child(from_dir_id, f)
1230
elif self.is_ignored(fp[1:]):
156
1235
fk = file_kind(fap)
1237
# make a last minute entry
160
raise BzrCheckError("file %r entered as kind %r id %r, "
162
% (fap, f_ie.kind, f_ie.file_id, fk))
164
yield fp, c, fk, (f_ie and f_ie.file_id)
1239
yield fp[1:], c, fk, f_ie.file_id, f_ie
1242
yield fp[1:], c, fk, None, fk_entries[fk]()
1244
yield fp[1:], c, fk, None, TreeEntry()
166
1247
if fk != 'directory':
170
# don't descend unversioned directories
173
for ff in descend(fp, f_ie.file_id, fap):
176
for f in descend('', inv.root.file_id, self.basedir):
1250
# But do this child first if recursing down
1252
new_children = os.listdir(fap)
1254
new_children = collections.deque(new_children)
1255
stack.append((f_ie.file_id, fp, fap, new_children))
1256
# Break out of inner loop,
1257
# so that we start outer loop with child
1260
# if we finished all children, pop it off the stack
1263
@needs_tree_write_lock
1264
def move(self, from_paths, to_dir=None, after=False, **kwargs):
1267
to_dir must exist in the inventory.
1269
If to_dir exists and is a directory, the files are moved into
1270
it, keeping their old names.
1272
Note that to_dir is only the last component of the new name;
1273
this doesn't change the directory.
1275
For each entry in from_paths the move mode will be determined
1278
The first mode moves the file in the filesystem and updates the
1279
inventory. The second mode only updates the inventory without
1280
touching the file on the filesystem. This is the new mode introduced
1283
move uses the second mode if 'after == True' and the target is not
1284
versioned but present in the working tree.
1286
move uses the second mode if 'after == False' and the source is
1287
versioned but no longer in the working tree, and the target is not
1288
versioned but present in the working tree.
1290
move uses the first mode if 'after == False' and the source is
1291
versioned and present in the working tree, and the target is not
1292
versioned and not present in the working tree.
1294
Everything else results in an error.
1296
This returns a list of (from_path, to_path) pairs for each
1297
entry that is moved.
1302
# check for deprecated use of signature
1304
to_dir = kwargs.get('to_name', None)
1306
raise TypeError('You must supply a target directory')
1308
symbol_versioning.warn('The parameter to_name was deprecated'
1309
' in version 0.13. Use to_dir instead',
1312
# check destination directory
1313
if isinstance(from_paths, basestring):
1315
inv = self.inventory
1316
to_abs = self.abspath(to_dir)
1317
if not isdir(to_abs):
1318
raise errors.BzrMoveFailedError('',to_dir,
1319
errors.NotADirectory(to_abs))
1320
if not self.has_filename(to_dir):
1321
raise errors.BzrMoveFailedError('',to_dir,
1322
errors.NotInWorkingDirectory(to_dir))
1323
to_dir_id = inv.path2id(to_dir)
1324
if to_dir_id is None:
1325
raise errors.BzrMoveFailedError('',to_dir,
1326
errors.NotVersionedError(path=str(to_dir)))
1328
to_dir_ie = inv[to_dir_id]
1329
if to_dir_ie.kind != 'directory':
1330
raise errors.BzrMoveFailedError('',to_dir,
1331
errors.NotADirectory(to_abs))
1333
# create rename entries and tuples
1334
for from_rel in from_paths:
1335
from_tail = splitpath(from_rel)[-1]
1336
from_id = inv.path2id(from_rel)
1338
raise errors.BzrMoveFailedError(from_rel,to_dir,
1339
errors.NotVersionedError(path=str(from_rel)))
1341
from_entry = inv[from_id]
1342
from_parent_id = from_entry.parent_id
1343
to_rel = pathjoin(to_dir, from_tail)
1344
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1346
from_tail=from_tail,
1347
from_parent_id=from_parent_id,
1348
to_rel=to_rel, to_tail=from_tail,
1349
to_parent_id=to_dir_id)
1350
rename_entries.append(rename_entry)
1351
rename_tuples.append((from_rel, to_rel))
1353
# determine which move mode to use. checks also for movability
1354
rename_entries = self._determine_mv_mode(rename_entries, after)
1356
original_modified = self._inventory_is_modified
1359
self._inventory_is_modified = True
1360
self._move(rename_entries)
1362
# restore the inventory on error
1363
self._inventory_is_modified = original_modified
1365
self._write_inventory(inv)
1366
return rename_tuples
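# Illustrative sketch (not part of the original source): the two modes of
# move() on an assumed tree 'wt'.
#
#   # normal mode: renames the files on disk and updates the inventory
#   wt.move(['a.txt', 'b.txt'], 'subdir')
#
#   # after=True: the files were already moved on disk (e.g. with plain
#   # 'mv'), so only the inventory is updated
#   wt.move(['c.txt'], 'subdir', after=True)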
1368
def _determine_mv_mode(self, rename_entries, after=False):
1369
"""Determines for each from-to pair if both inventory and working tree
1370
or only the inventory has to be changed.
1372
Also does basic plausibility tests.
1374
inv = self.inventory
1376
for rename_entry in rename_entries:
1377
# store to local variables for easier reference
1378
from_rel = rename_entry.from_rel
1379
from_id = rename_entry.from_id
1380
to_rel = rename_entry.to_rel
1381
to_id = inv.path2id(to_rel)
1382
only_change_inv = False
1384
# check the inventory for source and destination
1386
raise errors.BzrMoveFailedError(from_rel,to_rel,
1387
errors.NotVersionedError(path=str(from_rel)))
1388
if to_id is not None:
1389
raise errors.BzrMoveFailedError(from_rel,to_rel,
1390
errors.AlreadyVersionedError(path=str(to_rel)))
1392
# try to determine the mode for rename (only change inv or change
1393
# inv and file system)
1395
if not self.has_filename(to_rel):
1396
raise errors.BzrMoveFailedError(from_id,to_rel,
1397
errors.NoSuchFile(path=str(to_rel),
1398
extra="New file has not been created yet"))
1399
only_change_inv = True
1400
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1401
only_change_inv = True
1402
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1403
only_change_inv = False
1404
elif (not self.case_sensitive
1405
and from_rel.lower() == to_rel.lower()
1406
and self.has_filename(from_rel)):
1407
only_change_inv = False
1409
# something is wrong, so let's determine what exactly
1410
if not self.has_filename(from_rel) and \
1411
not self.has_filename(to_rel):
1412
raise errors.BzrRenameFailedError(from_rel,to_rel,
1413
errors.PathsDoNotExist(paths=(str(from_rel),
1416
raise errors.RenameFailedFilesExist(from_rel, to_rel)
1417
rename_entry.only_change_inv = only_change_inv
1418
return rename_entries
1420
def _move(self, rename_entries):
1421
"""Moves a list of files.
1423
Depending on the value of the flag 'only_change_inv', the
1424
file will be moved on the file system or not.
1426
inv = self.inventory
1429
for entry in rename_entries:
1431
self._move_entry(entry)
1433
self._rollback_move(moved)
1437
def _rollback_move(self, moved):
1438
"""Try to rollback a previous move in case of an filesystem error."""
1439
inv = self.inventory
1442
self._move_entry(WorkingTree._RenameEntry(
1443
entry.to_rel, entry.from_id,
1444
entry.to_tail, entry.to_parent_id, entry.from_rel,
1445
entry.from_tail, entry.from_parent_id,
1446
entry.only_change_inv))
1447
except errors.BzrMoveFailedError, e:
1448
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1449
" The working tree is in an inconsistent state."
1450
" Please consider doing a 'bzr revert'."
1451
" Error message is: %s" % e)
1453
def _move_entry(self, entry):
1454
inv = self.inventory
1455
from_rel_abs = self.abspath(entry.from_rel)
1456
to_rel_abs = self.abspath(entry.to_rel)
1457
if from_rel_abs == to_rel_abs:
1458
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1459
"Source and target are identical.")
1461
if not entry.only_change_inv:
1463
osutils.rename(from_rel_abs, to_rel_abs)
1465
raise errors.BzrMoveFailedError(entry.from_rel,
1467
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1469
@needs_tree_write_lock
1470
def rename_one(self, from_rel, to_rel, after=False):
1473
This can change the directory or the filename or both.
1475
rename_one has several 'modes' of operation. First, it can rename a physical
1476
file and change the file_id. That is the normal mode. Second, it can
1477
only change the file_id without touching any physical file. This is
1478
the new mode introduced in version 0.15.
1480
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1481
versioned but present in the working tree.
1483
rename_one uses the second mode if 'after == False' and 'from_rel' is
1484
versioned but no longer in the working tree, and 'to_rel' is not
1485
versioned but present in the working tree.
1487
rename_one uses the first mode if 'after == False' and 'from_rel' is
1488
versioned and present in the working tree, and 'to_rel' is not
1489
versioned and not present in the working tree.
1491
Everything else results in an error.
1493
inv = self.inventory
1496
# create rename entries and tuples
1497
from_tail = splitpath(from_rel)[-1]
1498
from_id = inv.path2id(from_rel)
1500
# if file is missing in the inventory maybe it's in the basis_tree
1501
basis_tree = self.branch.basis_tree()
1502
from_id = basis_tree.path2id(from_rel)
1504
raise errors.BzrRenameFailedError(from_rel,to_rel,
1505
errors.NotVersionedError(path=str(from_rel)))
1506
# put entry back in the inventory so we can rename it
1507
from_entry = basis_tree.inventory[from_id].copy()
1510
from_entry = inv[from_id]
1511
from_parent_id = from_entry.parent_id
1512
to_dir, to_tail = os.path.split(to_rel)
1513
to_dir_id = inv.path2id(to_dir)
1514
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1516
from_tail=from_tail,
1517
from_parent_id=from_parent_id,
1518
to_rel=to_rel, to_tail=to_tail,
1519
to_parent_id=to_dir_id)
1520
rename_entries.append(rename_entry)
1522
# determine which move mode to use. checks also for movability
1523
rename_entries = self._determine_mv_mode(rename_entries, after)
1525
# check if the target changed directory and if the target directory is
1527
if to_dir_id is None:
1528
raise errors.BzrMoveFailedError(from_rel,to_rel,
1529
errors.NotVersionedError(path=str(to_dir)))
1531
# all checks done. now we can continue with our actual work
1532
mutter('rename_one:\n'
1537
' to_dir_id {%s}\n',
1538
from_id, from_rel, to_rel, to_dir, to_dir_id)
1540
self._move(rename_entries)
1541
self._write_inventory(inv)
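# Illustrative sketch (not part of the original source): the two modes of
# rename_one() on an assumed tree 'wt'.
#
#   wt.rename_one('old.txt', 'subdir/new.txt')              # rename on disk + inventory
#   wt.rename_one('old.txt', 'subdir/new.txt', after=True)  # inventory only; file already moved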
1543
class _RenameEntry(object):
1544
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1545
to_rel, to_tail, to_parent_id, only_change_inv=False):
1546
self.from_rel = from_rel
1547
self.from_id = from_id
1548
self.from_tail = from_tail
1549
self.from_parent_id = from_parent_id
1550
self.to_rel = to_rel
1551
self.to_tail = to_tail
1552
self.to_parent_id = to_parent_id
1553
self.only_change_inv = only_change_inv
181
1556
def unknowns(self):
182
for subp in self.extras():
183
if not self.is_ignored(subp):
1557
"""Return all unknown files.
1559
These are files in the working directory that are not versioned,
not control files, and not ignored.
1562
# force the extras method to be fully executed before returning, to
1563
# prevent race conditions with the lock
1565
[subp for subp in self.extras() if not self.is_ignored(subp)])
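# Illustrative sketch (not part of the original source): the relationship
# between extras(), unknowns() and ignored_files() on an assumed tree 'wt'.
#
#   list(wt.extras())         # every unversioned path, ignored or not
#   list(wt.unknowns())       # unversioned and not matching any ignore pattern
#   list(wt.ignored_files())  # (path, pattern) pairs for the ignored extras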
1567
@needs_tree_write_lock
1568
def unversion(self, file_ids):
1569
"""Remove the file ids in file_ids from the current versioned set.
1571
When a file_id is unversioned, all of its children are automatically
1574
:param file_ids: The file ids to stop versioning.
1575
:raises: NoSuchId if any fileid is not currently versioned.
1577
for file_id in file_ids:
1578
if file_id not in self._inventory:
1579
raise errors.NoSuchId(self, file_id)
1580
for file_id in file_ids:
1581
if self._inventory.has_id(file_id):
1582
self._inventory.remove_recursive_id(file_id)
1584
# in the future this should just set a dirty bit to wait for the
1585
# final unlock. However, until all methods of workingtree start
1586
# with the current in-memory inventory rather than triggering
1587
# a read, it is more complex - we need to teach read_inventory
1588
# to know when to read, and when to not read first... and possibly
1589
# to save first when the in memory one may be corrupted.
1590
# so for now, we just only write it if it is indeed dirty.
1592
self._write_inventory(self._inventory)
1594
def _iter_conflicts(self):
1596
for info in self.list_files():
1598
stem = get_conflicted_stem(path)
1601
if stem not in conflicted:
1602
conflicted.add(stem)
1606
def pull(self, source, overwrite=False, stop_revision=None,
1607
change_reporter=None, possible_transports=None, local=False):
1608
top_pb = ui.ui_factory.nested_progress_bar()
1611
pp = ProgressPhase("Pull phase", 2, top_pb)
1613
old_revision_info = self.branch.last_revision_info()
1614
basis_tree = self.basis_tree()
1615
count = self.branch.pull(source, overwrite, stop_revision,
1616
possible_transports=possible_transports,
1618
new_revision_info = self.branch.last_revision_info()
1619
if new_revision_info != old_revision_info:
1621
repository = self.branch.repository
1622
pb = ui.ui_factory.nested_progress_bar()
1623
basis_tree.lock_read()
1625
new_basis_tree = self.branch.basis_tree()
1632
change_reporter=change_reporter)
1633
basis_root_id = basis_tree.get_root_id()
1634
new_root_id = new_basis_tree.get_root_id()
1635
if basis_root_id != new_root_id:
1636
self.set_root_id(new_root_id)
1640
# TODO - dedup parents list with things merged by pull ?
1641
# reuse the revisiontree we merged against to set the new
1643
parent_trees = [(self.branch.last_revision(), new_basis_tree)]
1644
# we have to pull the merge trees out again, because
1645
# merge_inner has set the ids. - this corner is not yet
1646
# layered well enough to prevent double handling.
1647
# XXX TODO: Fix the double handling: telling the tree about
1648
# the already known parent data is wasteful.
1649
merges = self.get_parent_ids()[1:]
1650
parent_trees.extend([
1651
(parent, repository.revision_tree(parent)) for
1653
self.set_parent_trees(parent_trees)
1660
def put_file_bytes_non_atomic(self, file_id, bytes):
1661
"""See MutableTree.put_file_bytes_non_atomic."""
1662
stream = file(self.id2abspath(file_id), 'wb')
1667
# TODO: update the hashcache here ?
187
1669
def extras(self):
188
"""Yield all unknown files in this WorkingTree.
1670
"""Yield all unversioned files in this WorkingTree.
190
If there are any unknown directories then only the directory is
191
returned, not all its children. But if there are unknown files
1672
If there are any unversioned directories then only the directory is
1673
returned, not all its children. But if there are unversioned files
192
1674
under a versioned subdirectory, they are returned.
194
1676
Currently returned depth-first, sorted by name within directories.
1677
This is the same order used by 'osutils.walkdirs'.
196
1679
## TODO: Work from given directory downwards
197
from osutils import isdir, appendpath
199
1680
for path, dir_entry in self.inventory.directories():
200
mutter("search for unknowns in %r" % path)
1681
# mutter("search for unknowns in %r", path)
201
1682
dirabs = self.abspath(path)
202
1683
if not isdir(dirabs):
203
1684
# e.g. directory deleted
207
1688
for subf in os.listdir(dirabs):
209
and (subf not in dir_entry.children)):
1689
if self.bzrdir.is_control_filename(subf):
1691
if subf not in dir_entry.children:
1694
can_access) = osutils.normalized_filename(subf)
1695
except UnicodeDecodeError:
1696
path_os_enc = path.encode(osutils._fs_enc)
1697
relpath = path_os_enc + '/' + subf
1698
raise errors.BadFilenameEncoding(relpath,
1700
if subf_norm != subf and can_access:
1701
if subf_norm not in dir_entry.children:
1702
fl.append(subf_norm)
214
subp = appendpath(path, subf)
1708
subp = pathjoin(path, subf)
218
1711
def ignored_files(self):
219
1712
"""Yield list of PATH, IGNORE_PATTERN"""
220
1713
for subp in self.extras():
221
1714
pat = self.is_ignored(subp)
226
1718
def get_ignore_list(self):
227
1719
"""Return list of ignore patterns.
229
1721
Cached in the Tree object after the first call.
231
if hasattr(self, '_ignorelist'):
232
return self._ignorelist
1723
ignoreset = getattr(self, '_ignoreset', None)
1724
if ignoreset is not None:
234
l = bzrlib.DEFAULT_IGNORE[:]
1727
ignore_globs = set()
1728
ignore_globs.update(ignores.get_runtime_ignores())
1729
ignore_globs.update(ignores.get_user_ignores())
235
1730
if self.has_filename(bzrlib.IGNORE_FILENAME):
236
1731
f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
237
l.extend([line.rstrip("\n\r") for line in f.readlines()])
1733
ignore_globs.update(ignores.parse_ignore_file(f))
1736
self._ignoreset = ignore_globs
1739
def _flush_ignore_list_cache(self):
1740
"""Resets the cached ignore list to force a cache rebuild."""
1741
self._ignoreset = None
1742
self._ignoreglobster = None
242
1744
def is_ignored(self, filename):
243
1745
r"""Check whether the filename matches an ignore pattern.
245
1747
Patterns containing '/' or '\' need to match the whole path;
246
others match against only the last component.
1748
others match against only the last component. Patterns starting
1749
with '!' are ignore exceptions. Exceptions take precedence
1750
over regular patterns and cause the filename to not be ignored.
248
1752
If the file is ignored, returns the pattern which caused it to
249
1753
be ignored, otherwise None. So this can simply be used as a
250
1754
boolean if desired."""
252
# TODO: Use '**' to match directories, and other extended
253
# globbing stuff from cvs/rsync.
255
# XXX: fnmatch is actually not quite what we want: it's only
256
# approximately the same as real Unix fnmatch, and doesn't
257
# treat dotfiles correctly and allows * to match /.
258
# Eventually it should be replaced with something more
262
from osutils import splitpath
264
for pat in self.get_ignore_list():
265
if '/' in pat or '\\' in pat:
267
# as a special case, you can put ./ at the start of a
268
# pattern; this is good to match in the top-level
271
if (pat[:2] == './') or (pat[:2] == '.\\'):
1755
if getattr(self, '_ignoreglobster', None) is None:
1756
self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
1757
return self._ignoreglobster.match(filename)
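# Illustrative sketch (not part of the original source): is_ignored() returns
# the matching pattern (so it can be used as a boolean), and '!' exception
# patterns override regular ones. Assumes the tree 'wt' has an ignore list
# like ['*.pyc', '!keep.pyc'].
#
#   wt.is_ignored('foo.pyc')    # -> '*.pyc'
#   wt.is_ignored('keep.pyc')   # -> None (exception pattern wins)
#   wt.is_ignored('foo.py')     # -> None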
1759
def kind(self, file_id):
1760
return file_kind(self.id2abspath(file_id))
1762
def stored_kind(self, file_id):
1763
"""See Tree.stored_kind"""
1764
return self.inventory[file_id].kind
1766
def _comparison_data(self, entry, path):
1767
abspath = self.abspath(path)
1769
stat_value = os.lstat(abspath)
1771
if getattr(e, 'errno', None) == errno.ENOENT:
1778
mode = stat_value.st_mode
1779
kind = osutils.file_kind_from_stat_mode(mode)
1780
if not supports_executable():
1781
executable = entry is not None and entry.executable
1783
executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
1784
return kind, executable, stat_value
1786
def _file_size(self, entry, stat_value):
1787
return stat_value.st_size
1789
def last_revision(self):
1790
"""Return the last revision of the branch for this tree.
1792
This format tree does not support a separate marker for last-revision
1793
compared to the branch.
1795
See MutableTree.last_revision
1797
return self._last_revision()
1800
def _last_revision(self):
1801
"""helper for get_parent_ids."""
1802
return _mod_revision.ensure_null(self.branch.last_revision())
1804
def is_locked(self):
1805
return self._control_files.is_locked()
1807
def _must_be_locked(self):
1808
if not self.is_locked():
1809
raise errors.ObjectNotLocked(self)
1811
def lock_read(self):
1812
"""See Branch.lock_read, and WorkingTree.unlock."""
1813
if not self.is_locked():
1815
self.branch.lock_read()
1817
return self._control_files.lock_read()
1819
self.branch.unlock()
1822
def lock_tree_write(self):
1823
"""See MutableTree.lock_tree_write, and WorkingTree.unlock."""
1824
if not self.is_locked():
1826
self.branch.lock_read()
1828
return self._control_files.lock_write()
1830
self.branch.unlock()
1833
def lock_write(self):
1834
"""See MutableTree.lock_write, and WorkingTree.unlock."""
1835
if not self.is_locked():
1837
self.branch.lock_write()
1839
return self._control_files.lock_write()
1841
self.branch.unlock()
1844
def get_physical_lock_status(self):
1845
return self._control_files.get_physical_lock_status()
1847
def _basis_inventory_name(self):
1848
return 'basis-inventory-cache'
1850
def _reset_data(self):
1851
"""Reset transient data that cannot be revalidated."""
1852
self._inventory_is_modified = False
1853
f = self._transport.get('inventory')
1855
result = self._deserialize(f)
1858
self._set_inventory(result, dirty=False)
1860
@needs_tree_write_lock
1861
def set_last_revision(self, new_revision):
1862
"""Change the last revision in the working tree."""
1863
if self._change_last_revision(new_revision):
1864
self._cache_basis_inventory(new_revision)
1866
def _change_last_revision(self, new_revision):
1867
"""Template method part of set_last_revision to perform the change.
1869
This is used to allow WorkingTree3 instances to not affect branch
1870
when their last revision is set.
1872
if _mod_revision.is_null(new_revision):
1873
self.branch.set_revision_history([])
1876
self.branch.generate_revision_history(new_revision)
1877
except errors.NoSuchRevision:
1878
# not present in the repo - dont try to set it deeper than the tip
1879
self.branch.set_revision_history([new_revision])
1882
def _write_basis_inventory(self, xml):
1883
"""Write the basis inventory XML to the basis-inventory file"""
1884
path = self._basis_inventory_name()
1886
self._transport.put_file(path, sio,
1887
mode=self.bzrdir._get_file_mode())
1889
def _create_basis_xml_from_inventory(self, revision_id, inventory):
1890
"""Create the text that will be saved in basis-inventory"""
1891
inventory.revision_id = revision_id
1892
return xml7.serializer_v7.write_inventory_to_string(inventory)
1894
def _cache_basis_inventory(self, new_revision):
1895
"""Cache new_revision as the basis inventory."""
1896
# TODO: this should allow the ready-to-use inventory to be passed in,
1897
# as commit already has that ready-to-use [while the format is the
1900
# this double handles the inventory - unpack and repack -
1901
# but is easier to understand. We can/should put a conditional
1902
# in here based on whether the inventory is in the latest format
1903
# - perhaps we should repack all inventories on a repository
1905
# the fast path is to copy the raw xml from the repository. If the
1906
# xml contains 'revision_id="', then we assume the right
1907
# revision_id is set. We must check for this full string, because a
1908
# root node id can legitimately look like 'revision_id' but cannot
1910
xml = self.branch.repository._get_inventory_xml(new_revision)
1911
firstline = xml.split('\n', 1)[0]
1912
if (not 'revision_id="' in firstline or
1913
'format="7"' not in firstline):
1914
inv = self.branch.repository._serializer.read_inventory_from_string(
1916
xml = self._create_basis_xml_from_inventory(new_revision, inv)
1917
self._write_basis_inventory(xml)
1918
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1921
def read_basis_inventory(self):
1922
"""Read the cached basis inventory."""
1923
path = self._basis_inventory_name()
1924
return self._transport.get_bytes(path)
1927
def read_working_inventory(self):
1928
"""Read the working inventory.
1930
:raises errors.InventoryModified: read_working_inventory will fail
1931
when the current in memory inventory has been modified.
1933
# conceptually this should be an implementation detail of the tree.
1934
# XXX: Deprecate this.
1935
# ElementTree does its own conversion from UTF-8, so open in
1937
if self._inventory_is_modified:
1938
raise errors.InventoryModified(self)
1939
f = self._transport.get('inventory')
1941
result = self._deserialize(f)
1944
self._set_inventory(result, dirty=False)
1947
@needs_tree_write_lock
1948
def remove(self, files, verbose=False, to_file=None, keep_files=True,
1950
"""Remove nominated files from the working inventory.
1952
:files: File paths relative to the basedir.
1953
:keep_files: If true, the files will also be kept.
1954
:force: Delete files and directories, even if they are changed and
1955
even if the directories are not empty.
1957
if isinstance(files, basestring):
1963
unknown_nested_files=set()
1965
to_file = sys.stdout
1967
def recurse_directory_to_add_files(directory):
1968
# Recurse directory and add all files
1969
# so we can check if they have changed.
1970
for parent_info, file_infos in\
1971
self.walkdirs(directory):
1972
for relpath, basename, kind, lstat, fileid, kind in file_infos:
1973
# Is it versioned or ignored?
1974
if self.path2id(relpath) or self.is_ignored(relpath):
1975
# Add nested content for deletion.
1976
new_files.add(relpath)
1978
# Files which are not versioned and not ignored
1979
# should be treated as unknown.
1980
unknown_nested_files.add((relpath, None, kind))
1982
for filename in files:
1983
# Get file name into canonical form.
1984
abspath = self.abspath(filename)
1985
filename = self.relpath(abspath)
1986
if len(filename) > 0:
1987
new_files.add(filename)
1988
recurse_directory_to_add_files(filename)
1990
files = list(new_files)
1993
return # nothing to do
1995
# Sort needed to first handle directory content before the directory
1996
files.sort(reverse=True)
1998
# Bail out if we are going to delete files we shouldn't
1999
if not keep_files and not force:
2000
has_changed_files = len(unknown_nested_files) > 0
2001
if not has_changed_files:
2002
for (file_id, path, content_change, versioned, parent_id, name,
2003
kind, executable) in self.iter_changes(self.basis_tree(),
2004
include_unchanged=True, require_versioned=False,
2005
want_unversioned=True, specific_files=files):
2006
if versioned == (False, False):
2007
# The record is unknown ...
2008
if not self.is_ignored(path[1]):
2009
# ... but not ignored
2010
has_changed_files = True
2012
elif content_change and (kind[1] is not None):
2013
# Versioned and changed, but not deleted
2014
has_changed_files = True
2017
if has_changed_files:
2018
# Make delta show ALL applicable changes in error message.
2019
tree_delta = self.changes_from(self.basis_tree(),
2020
require_versioned=False, want_unversioned=True,
2021
specific_files=files)
2022
for unknown_file in unknown_nested_files:
2023
if unknown_file not in tree_delta.unversioned:
2024
tree_delta.unversioned.extend((unknown_file,))
2025
raise errors.BzrRemoveChangedFilesError(tree_delta)
2027
# Build inv_delta and delete files where applicable,
2028
# do this before any modifications to inventory.
2030
fid = self.path2id(f)
2033
message = "%s is not versioned." % (f,)
2036
# having removed it, it must be either ignored or unknown
2037
if self.is_ignored(f):
2041
# XXX: Really should be a more abstract reporter interface
2042
kind_ch = osutils.kind_marker(self.kind(fid))
2043
to_file.write(new_status + ' ' + f + kind_ch + '\n')
2045
inv_delta.append((f, None, fid, None))
2046
message = "removed %s" % (f,)
2049
abs_path = self.abspath(f)
2050
if osutils.lexists(abs_path):
2051
if (osutils.isdir(abs_path) and
2052
len(os.listdir(abs_path)) > 0):
2054
osutils.rmtree(abs_path)
2056
message = "%s is not an empty directory "\
2057
"and won't be deleted." % (f,)
2059
osutils.delete_any(abs_path)
2060
message = "deleted %s" % (f,)
2061
elif message is not None:
2062
# Only care if we haven't done anything yet.
2063
message = "%s does not exist." % (f,)
2065
# Print only one message (if any) per file.
2066
if message is not None:
2068
self.apply_inventory_delta(inv_delta)
2070
@needs_tree_write_lock
2071
def revert(self, filenames=None, old_tree=None, backups=True,
2072
pb=DummyProgress(), report_changes=False):
2073
from bzrlib.conflicts import resolve
2076
symbol_versioning.warn('Using [] to revert all files is deprecated'
2077
' as of bzr 0.91. Please use None (the default) instead.',
2078
DeprecationWarning, stacklevel=2)
2079
if old_tree is None:
2080
basis_tree = self.basis_tree()
2081
basis_tree.lock_read()
2082
old_tree = basis_tree
2086
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
2088
if filenames is None and len(self.get_parent_ids()) > 1:
2090
last_revision = self.last_revision()
2091
if last_revision != _mod_revision.NULL_REVISION:
2092
if basis_tree is None:
2093
basis_tree = self.basis_tree()
2094
basis_tree.lock_read()
2095
parent_trees.append((last_revision, basis_tree))
2096
self.set_parent_trees(parent_trees)
2099
resolve(self, filenames, ignore_misses=True, recursive=True)
2101
if basis_tree is not None:
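# Illustrative usage sketch (not part of the original module): reverting a
# few paths, then the whole tree.  Paths are assumptions for the example.
#
#   tree = workingtree.WorkingTree.open('path/to/tree')
#   # revert selected files against the basis tree, keeping .~N~ backups
#   tree.revert(['hello.c', 'Makefile'])
#   # revert everything (filenames=None); with multiple parents this also
#   # drops the pending merges, as implemented above
#   tree.revert()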
2105
def revision_tree(self, revision_id):
2106
"""See Tree.revision_tree.
2108
WorkingTree can supply revision_trees for the basis revision only
2109
because there is only one cached inventory in the bzr directory.
2111
if revision_id == self.last_revision():
2113
xml = self.read_basis_inventory()
2114
except errors.NoSuchFile:
2118
inv = xml7.serializer_v7.read_inventory_from_string(xml)
2119
# don't use the repository revision_tree API because we want
2120
# to supply the inventory.
2121
if inv.revision_id == revision_id:
2122
return revisiontree.RevisionTree(self.branch.repository,
2124
except errors.BadInventoryFormat:
2126
# raise if there was no inventory, or if we read the wrong inventory.
2127
raise errors.NoSuchRevisionInTree(self, revision_id)
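# Illustrative usage sketch (not part of the original module): a working tree
# can only supply the tree for its basis revision; asking for anything else
# raises errors.NoSuchRevisionInTree.
#
#   tree = workingtree.WorkingTree.open('path/to/tree')
#   basis = tree.revision_tree(tree.last_revision())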
2129
# XXX: This method should be deprecated in favour of taking in a proper
2130
# new Inventory object.
2131
@needs_tree_write_lock
2132
def set_inventory(self, new_inventory_list):
2133
from bzrlib.inventory import (Inventory,
2137
inv = Inventory(self.get_root_id())
2138
for path, file_id, parent, kind in new_inventory_list:
2139
name = os.path.basename(path)
2142
# FIXME: there should be a factory function inv.add_??
2143
if kind == 'directory':
2144
inv.add(InventoryDirectory(file_id, name, parent))
2145
elif kind == 'file':
2146
inv.add(InventoryFile(file_id, name, parent))
2147
elif kind == 'symlink':
2148
inv.add(InventoryLink(file_id, name, parent))
2150
raise errors.BzrError("unknown kind %r" % kind)
2151
self._write_inventory(inv)
2153
@needs_tree_write_lock
2154
def set_root_id(self, file_id):
2155
"""Set the root id for this tree."""
2159
'WorkingTree.set_root_id with fileid=None')
2160
file_id = osutils.safe_file_id(file_id)
2161
self._set_root_id(file_id)
2163
def _set_root_id(self, file_id):
2164
"""Set the root id for this tree, in a format specific manner.
2166
:param file_id: The file id to assign to the root. It must not be
2167
present in the current inventory or an error will occur. It must
2168
not be None, but rather a valid file id.
2170
inv = self._inventory
2171
orig_root_id = inv.root.file_id
2172
# TODO: it might be nice to exit early if there was nothing
2173
# to do, saving us from triggering a sync on unlock.
2174
self._inventory_is_modified = True
2175
# we preserve the root inventory entry object, but
2176
# unlink it from the _byid index
2177
del inv._byid[inv.root.file_id]
2178
inv.root.file_id = file_id
2179
# and link it into the index with the new changed id.
2180
inv._byid[inv.root.file_id] = inv.root
2181
# and finally update all children to reference the new id.
2182
# XXX: this should be safe to just look at the root.children
2183
# list, not the WHOLE INVENTORY.
2186
if entry.parent_id == orig_root_id:
2187
entry.parent_id = inv.root.file_id
2190
"""See Branch.unlock.
2192
WorkingTree locking just uses the Branch locking facilities.
2193
This is current because all working trees have an embedded branch
2194
within them. IF in the future, we were to make branch data shareable
2195
between multiple working trees, i.e. via shared storage, then we
2196
would probably want to lock both the local tree, and the branch.
2198
raise NotImplementedError(self.unlock)
2202
def update(self, change_reporter=None, possible_transports=None,
2203
revision=None, old_tip=_marker):
2204
"""Update a working tree along its branch.
2206
This will update the branch if its bound too, which means we have
2207
multiple trees involved:
2209
- The new basis tree of the master.
2210
- The old basis tree of the branch.
2211
- The old basis tree of the working tree.
2212
- The current working tree state.
2214
Pathologically, all three may be different, and non-ancestors of each
2215
other. Conceptually we want to:
2217
- Preserve the wt.basis->wt.state changes
2218
- Transform the wt.basis to the new master basis.
2219
- Apply a merge of the old branch basis to get any 'local' changes from
2221
- Restore the wt.basis->wt.state changes.
2223
There isn't a single operation at the moment to do that, so we:
2224
- Merge current state -> basis tree of the master w.r.t. the old tree
2226
- Do a 'normal' merge of the old branch basis if it is relevant.
2228
:param revision: The target revision to update to. Must be in the
2230
:param old_tip: If branch.update() has already been run, the value it
2231
returned (old tip of the branch or None). _marker is used
2234
if self.branch.get_bound_location() is not None:
2236
update_branch = (old_tip is self._marker)
2238
self.lock_tree_write()
2239
update_branch = False
2242
old_tip = self.branch.update(possible_transports)
2244
if old_tip is self._marker:
2246
return self._update_tree(old_tip, change_reporter, revision)
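# Illustrative usage sketch (not part of the original module): updating a
# checkout and reporting how many conflicts the merge produced.  The path is
# an assumption.
#
#   tree = workingtree.WorkingTree.open('path/to/checkout')
#   conflict_count = tree.update()
#   if conflict_count:
#       print '%d conflicts encountered during update' % conflict_count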
2250
@needs_tree_write_lock
2251
def _update_tree(self, old_tip=None, change_reporter=None, revision=None):
2252
"""Update a tree to the master branch.
2254
:param old_tip: if supplied, the previous tip revision of the branch,
2255
before it was changed to the master branch's tip.
2257
# here if old_tip is not None, it is the old tip of the branch before
2258
# it was updated from the master branch. This should become a pending
2259
# merge in the working tree to preserve the user's existing work.  We
# can't set that until we update the working tree's last revision to be
2261
# one from the new branch, because it will just get absorbed by the
2262
# parent de-duplication logic.
2264
# We MUST save it even if an error occurs, because otherwise the users
2265
# local work is unreferenced and will appear to have been lost.
2269
last_rev = self.get_parent_ids()[0]
2271
last_rev = _mod_revision.NULL_REVISION
2272
if revision is None:
2273
revision = self.branch.last_revision()
2275
if revision not in self.branch.revision_history():
2276
raise errors.NoSuchRevision(self.branch, revision)
2277
if last_rev != _mod_revision.ensure_null(revision):
2278
# merge tree state up to specified revision.
2279
basis = self.basis_tree()
2282
to_tree = self.branch.repository.revision_tree(revision)
2283
to_root_id = to_tree.get_root_id()
2284
if (basis.inventory.root is None
2285
or basis.inventory.root.file_id != to_root_id):
2286
self.set_root_id(to_root_id)
2288
result += merge.merge_inner(
2293
change_reporter=change_reporter)
2294
self.set_last_revision(revision)
2297
# TODO - dedup parents list with things merged by pull ?
2298
# reuse the tree we've updated to to set the basis:
2299
parent_trees = [(revision, to_tree)]
2300
merges = self.get_parent_ids()[1:]
2301
# Ideally we ask the tree for the trees here, that way the working
2302
# tree can decide whether to give us the entire tree or give us a
2303
# lazy initialised tree. dirstate for instance will have the trees
2304
# in ram already, whereas a last-revision + basis-inventory tree
2305
# will not, but also does not need them when setting parents.
2306
for parent in merges:
2307
parent_trees.append(
2308
(parent, self.branch.repository.revision_tree(parent)))
2309
if (old_tip is not None and not _mod_revision.is_null(old_tip)):
2310
parent_trees.append(
2311
(old_tip, self.branch.repository.revision_tree(old_tip)))
2312
self.set_parent_trees(parent_trees)
2313
last_rev = parent_trees[0][0]
2315
# the working tree had the same last-revision as the master
2316
# branch did. We may still have pivot local work from the local
2317
# branch into old_tip:
2318
if (old_tip is not None and not _mod_revision.is_null(old_tip)):
2319
self.add_parent_tree_id(old_tip)
2320
if (old_tip is not None and not _mod_revision.is_null(old_tip)
2321
and old_tip != last_rev):
2322
# our last revision was not the prior branch last revision
2323
# and we have converted that last revision to a pending merge.
2324
# base is somewhere between the branch tip now
2325
# and the now pending merge
2327
# Since we just modified the working tree and inventory, flush out
2328
# the current state, before we modify it again.
2329
# TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
2330
# requires it only because TreeTransform directly munges the
2331
# inventory and calls tree._write_inventory(). Ultimately we
2332
# should be able to remove this extra flush.
2334
graph = self.branch.repository.get_graph()
2335
base_rev_id = graph.find_unique_lca(revision, old_tip)
2336
base_tree = self.branch.repository.revision_tree(base_rev_id)
2337
other_tree = self.branch.repository.revision_tree(old_tip)
2338
result += merge.merge_inner(
2343
change_reporter=change_reporter)
2346
def _write_hashcache_if_dirty(self):
2347
"""Write out the hashcache if it is dirty."""
2348
if self._hashcache.needs_write:
2350
self._hashcache.write()
2352
if e.errno not in (errno.EPERM, errno.EACCES):
2354
# TODO: jam 20061219 Should this be a warning? A single line
2355
# warning might be sufficient to let the user know what
2357
mutter('Could not write hashcache for %s\nError: %s',
2358
self._hashcache.cache_file_name(), e)
2360
@needs_tree_write_lock
2361
def _write_inventory(self, inv):
2362
"""Write inventory as the current inventory."""
2363
self._set_inventory(inv, dirty=True)
2366
def set_conflicts(self, arg):
2367
raise errors.UnsupportedOperation(self.set_conflicts, self)
2369
def add_conflicts(self, arg):
2370
raise errors.UnsupportedOperation(self.add_conflicts, self)
2373
def conflicts(self):
2374
conflicts = _mod_conflicts.ConflictList()
2375
for conflicted in self._iter_conflicts():
2378
if file_kind(self.abspath(conflicted)) != "file":
2380
except errors.NoSuchFile:
2383
for suffix in ('.THIS', '.OTHER'):
2385
kind = file_kind(self.abspath(conflicted+suffix))
2388
except errors.NoSuchFile:
2392
ctype = {True: 'text conflict', False: 'contents conflict'}[text]
2393
conflicts.append(_mod_conflicts.Conflict.factory(ctype,
2395
file_id=self.path2id(conflicted)))
2398
def walkdirs(self, prefix=""):
2399
"""Walk the directories of this tree.
2401
returns a generator which yields items in the form:
2402
((curren_directory_path, fileid),
2403
[(file1_path, file1_name, file1_kind, (lstat), file1_id,
2406
This API returns a generator, which is only valid during the current
2407
tree transaction - within a single lock_read or lock_write duration.
2409
If the tree is not locked, it may cause an error to be raised,
2410
depending on the tree implementation.
2412
disk_top = self.abspath(prefix)
2413
if disk_top.endswith('/'):
2414
disk_top = disk_top[:-1]
2415
top_strip_len = len(disk_top) + 1
2416
inventory_iterator = self._walkdirs(prefix)
2417
disk_iterator = osutils.walkdirs(disk_top, prefix)
2419
current_disk = disk_iterator.next()
2420
disk_finished = False
2422
if not (e.errno == errno.ENOENT or
2423
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
2426
disk_finished = True
2428
current_inv = inventory_iterator.next()
2429
inv_finished = False
2430
except StopIteration:
2433
while not inv_finished or not disk_finished:
2435
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2436
cur_disk_dir_content) = current_disk
2438
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2439
cur_disk_dir_content) = ((None, None), None)
2440
if not disk_finished:
2441
# strip out .bzr dirs
2442
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
2443
len(cur_disk_dir_content) > 0):
2444
# osutils.walkdirs can be made nicer -
2445
# yield the path-from-prefix rather than the pathjoined
2447
bzrdir_loc = bisect_left(cur_disk_dir_content,
2449
if (bzrdir_loc < len(cur_disk_dir_content)
2450
and self.bzrdir.is_control_filename(
2451
cur_disk_dir_content[bzrdir_loc][0])):
2452
# we don't yield the contents of .bzr, or .bzr itself.
2453
del cur_disk_dir_content[bzrdir_loc]
2455
# everything is unknown
2458
# everything is missing
2461
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
2463
# disk is before inventory - unknown
2464
dirblock = [(relpath, basename, kind, stat, None, None) for
2465
relpath, basename, kind, stat, top_path in
2466
cur_disk_dir_content]
2467
yield (cur_disk_dir_relpath, None), dirblock
2469
current_disk = disk_iterator.next()
2470
except StopIteration:
2471
disk_finished = True
2473
# inventory is before disk - missing.
2474
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
2475
for relpath, basename, dkind, stat, fileid, kind in
2477
yield (current_inv[0][0], current_inv[0][1]), dirblock
2479
current_inv = inventory_iterator.next()
2480
except StopIteration:
2483
# versioned present directory
2484
# merge the inventory and disk data together
2486
for relpath, subiterator in itertools.groupby(sorted(
2487
current_inv[1] + cur_disk_dir_content,
2488
key=operator.itemgetter(0)), operator.itemgetter(1)):
2489
path_elements = list(subiterator)
2490
if len(path_elements) == 2:
2491
inv_row, disk_row = path_elements
2492
# versioned, present file
2493
dirblock.append((inv_row[0],
2494
inv_row[1], disk_row[2],
2495
disk_row[3], inv_row[4],
2497
elif len(path_elements[0]) == 5:
2499
dirblock.append((path_elements[0][0],
2500
path_elements[0][1], path_elements[0][2],
2501
path_elements[0][3], None, None))
2502
elif len(path_elements[0]) == 6:
2503
# versioned, absent file.
2504
dirblock.append((path_elements[0][0],
2505
path_elements[0][1], 'unknown', None,
2506
path_elements[0][4], path_elements[0][5]))
2508
raise NotImplementedError('unreachable code')
2509
yield current_inv[0], dirblock
2511
current_inv = inventory_iterator.next()
2512
except StopIteration:
2515
current_disk = disk_iterator.next()
2516
except StopIteration:
2517
disk_finished = True
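# Illustrative usage sketch (not part of the original module): consuming the
# (directory, entries) pairs yielded by walkdirs() under a read lock.  The
# tree path is an assumption.
#
#   tree = workingtree.WorkingTree.open('path/to/tree')
#   tree.lock_read()
#   try:
#       for (dir_relpath, dir_file_id), entries in tree.walkdirs():
#           for relpath, basename, kind, lstat, file_id, v_kind in entries:
#               print relpath, kind, file_id
#   finally:
#       tree.unlock()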
2519
def _walkdirs(self, prefix=""):
2520
"""Walk the directories of this tree.
2522
:prefix: is used as the directrory to start with.
2523
returns a generator which yields items in the form:
2524
((curren_directory_path, fileid),
2525
[(file1_path, file1_name, file1_kind, None, file1_id,
2528
_directory = 'directory'
2529
# get the root in the inventory
2530
inv = self.inventory
2531
top_id = inv.path2id(prefix)
2535
pending = [(prefix, '', _directory, None, top_id, None)]
2538
currentdir = pending.pop()
2539
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
2540
top_id = currentdir[4]
2542
relroot = currentdir[0] + '/'
2545
# FIXME: stash the node in pending
2547
if entry.kind == 'directory':
2548
for name, child in entry.sorted_children():
2549
dirblock.append((relroot + name, name, child.kind, None,
2550
child.file_id, child.kind
2552
yield (currentdir[0], entry.file_id), dirblock
2553
# push the user specified dirs from dirblock
2554
for dir in reversed(dirblock):
2555
if dir[2] == _directory:
2558
@needs_tree_write_lock
2559
def auto_resolve(self):
2560
"""Automatically resolve text conflicts according to contents.
2562
Only text conflicts are auto_resolvable. Files with no conflict markers
2563
are considered 'resolved', because bzr always puts conflict markers
2564
into files that have text conflicts. The corresponding .THIS .BASE and
2565
.OTHER files are deleted, as per 'resolve'.
2566
:return: a tuple of ConflictLists: (un_resolved, resolved).
2568
un_resolved = _mod_conflicts.ConflictList()
2569
resolved = _mod_conflicts.ConflictList()
2570
conflict_re = re.compile('^(<{7}|={7}|>{7})')
2571
for conflict in self.conflicts():
2572
if (conflict.typestring != 'text conflict' or
2573
self.kind(conflict.file_id) != 'file'):
2574
un_resolved.append(conflict)
2576
my_file = open(self.id2abspath(conflict.file_id), 'rb')
2578
for line in my_file:
2579
if conflict_re.search(line):
2580
un_resolved.append(conflict)
2583
resolved.append(conflict)
2586
resolved.remove_files(self)
2587
self.set_conflicts(un_resolved)
2588
return un_resolved, resolved
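# Illustrative usage sketch (not part of the original module): marking text
# conflicts whose markers have been edited away as resolved.
#
#   tree = workingtree.WorkingTree.open('path/to/tree')
#   un_resolved, resolved = tree.auto_resolve()
#   print '%d resolved, %d still conflicted' % (len(resolved), len(un_resolved))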
2591
def _check(self, references):
2592
"""Check the tree for consistency.
2594
:param references: A dict with keys matching the items returned by
2595
self._get_check_refs(), and values from looking those keys up in
2598
tree_basis = self.basis_tree()
2599
tree_basis.lock_read()
2601
repo_basis = references[('trees', self.last_revision())]
2602
if len(list(repo_basis.iter_changes(tree_basis))) > 0:
2603
raise errors.BzrCheckError(
2604
"Mismatched basis inventory content.")
2609
def _validate(self):
2610
"""Validate internal structures.
2612
This is meant mostly for the test suite. To give it a chance to detect
2613
corruption after actions have occurred. The default implementation is a
2616
:return: None. An exception should be raised if there is an error.
2620
def _get_rules_searcher(self, default_searcher):
2621
"""See Tree._get_rules_searcher."""
2622
if self._rules_searcher is None:
2623
self._rules_searcher = super(WorkingTree,
2624
self)._get_rules_searcher(default_searcher)
2625
return self._rules_searcher
2627
def get_shelf_manager(self):
2628
"""Return the ShelfManager for this WorkingTree."""
2629
from bzrlib.shelf import ShelfManager
2630
return ShelfManager(self, self._transport)
2633
class WorkingTree2(WorkingTree):
2634
"""This is the Format 2 working tree.
2636
This was the first weave based working tree.
2637
- uses os locks for locking.
2638
- uses the branch last-revision.
2641
def __init__(self, *args, **kwargs):
2642
super(WorkingTree2, self).__init__(*args, **kwargs)
2643
# WorkingTree2 has more of a constraint that self._inventory must
2644
# exist. Because this is an older format, we don't mind the overhead
2645
# caused by the extra computation here.
2647
# Newer WorkingTrees should only have self._inventory set when they
2649
if self._inventory is None:
2650
self.read_working_inventory()
2652
def _get_check_refs(self):
2653
"""Return the references needed to perform a check of this tree."""
2654
return [('trees', self.last_revision())]
2656
def lock_tree_write(self):
2657
"""See WorkingTree.lock_tree_write().
2659
In Format2 WorkingTrees we have a single lock for the branch and tree
2660
so lock_tree_write() degrades to lock_write().
2662
self.branch.lock_write()
2664
return self._control_files.lock_write()
2666
self.branch.unlock()
2670
# do non-implementation specific cleanup
2673
# we share control files:
2674
if self._control_files._lock_count == 3:
2675
# _inventory_is_modified is always False during a read lock.
2676
if self._inventory_is_modified:
2678
self._write_hashcache_if_dirty()
2680
# reverse order of locking.
2682
return self._control_files.unlock()
2684
self.branch.unlock()
2687
class WorkingTree3(WorkingTree):
2688
"""This is the Format 3 working tree.
2690
This differs from the base WorkingTree by:
2691
- having its own file lock
2692
- having its own last-revision property.
2694
This is new in bzr 0.8
2698
def _last_revision(self):
2699
"""See Mutable.last_revision."""
2701
return self._transport.get_bytes('last-revision')
2702
except errors.NoSuchFile:
2703
return _mod_revision.NULL_REVISION
2705
def _change_last_revision(self, revision_id):
2706
"""See WorkingTree._change_last_revision."""
2707
if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
2709
self._transport.delete('last-revision')
2710
except errors.NoSuchFile:
2714
self._transport.put_bytes('last-revision', revision_id,
2715
mode=self.bzrdir._get_file_mode())
2718
def _get_check_refs(self):
2719
"""Return the references needed to perform a check of this tree."""
2720
return [('trees', self.last_revision())]
2722
@needs_tree_write_lock
2723
def set_conflicts(self, conflicts):
2724
self._put_rio('conflicts', conflicts.to_stanzas(),
2727
@needs_tree_write_lock
2728
def add_conflicts(self, new_conflicts):
2729
conflict_set = set(self.conflicts())
2730
conflict_set.update(set(list(new_conflicts)))
2731
self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
2732
key=_mod_conflicts.Conflict.sort_key)))
2735
def conflicts(self):
2737
confile = self._transport.get('conflicts')
2738
except errors.NoSuchFile:
2739
return _mod_conflicts.ConflictList()
2742
if confile.next() != CONFLICT_HEADER_1 + '\n':
2743
raise errors.ConflictFormatError()
2744
except StopIteration:
2745
raise errors.ConflictFormatError()
2746
return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
2751
# do non-implementation specific cleanup
2753
if self._control_files._lock_count == 1:
2754
# _inventory_is_modified is always False during a read lock.
2755
if self._inventory_is_modified:
2757
self._write_hashcache_if_dirty()
2758
# reverse order of locking.
2760
return self._control_files.unlock()
2762
self.branch.unlock()
2765
def get_conflicted_stem(path):
2766
for suffix in _mod_conflicts.CONFLICT_SUFFIXES:
2767
if path.endswith(suffix):
2768
return path[:-len(suffix)]
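# Example (derived from the function above): a known conflict suffix is
# stripped, and a path without one falls through and returns None.
#
#   get_conflicted_stem('README.OTHER')   # -> 'README'
#   get_conflicted_stem('README')         # -> None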
2771
class WorkingTreeFormat(object):
2772
"""An encapsulation of the initialization and open routines for a format.
2774
Formats provide three things:
2775
* An initialization routine,
2779
Formats are placed in a dict by their format string for reference
during workingtree opening. It's not required that these be instances;
they can be classes themselves with class methods - it simply depends on
2782
whether state is needed for a given format or not.
2784
Once a format is deprecated, just deprecate the initialize and open
2785
methods on the format class. Do not deprecate the object, as the
2786
object will be created every time regardless.
2789
_default_format = None
2790
"""The default format used for new trees."""
2793
"""The known formats."""
2795
requires_rich_root = False
2797
upgrade_recommended = False
2800
def find_format(klass, a_bzrdir):
2801
"""Return the format for the working tree object in a_bzrdir."""
2803
transport = a_bzrdir.get_workingtree_transport(None)
2804
format_string = transport.get_bytes("format")
2805
return klass._formats[format_string]
2806
except errors.NoSuchFile:
2807
raise errors.NoWorkingTree(base=transport.base)
2809
raise errors.UnknownFormatError(format=format_string,
2810
kind="working tree")
2812
def __eq__(self, other):
2813
return self.__class__ is other.__class__
2815
def __ne__(self, other):
2816
return not (self == other)
2819
def get_default_format(klass):
2820
"""Return the current default format."""
2821
return klass._default_format
2823
def get_format_string(self):
2824
"""Return the ASCII format string that identifies this format."""
2825
raise NotImplementedError(self.get_format_string)
2827
def get_format_description(self):
2828
"""Return the short description for this format."""
2829
raise NotImplementedError(self.get_format_description)
2831
def is_supported(self):
2832
"""Is this format supported?
2834
Supported formats can be initialized and opened.
2835
Unsupported formats may not support initialization or committing or
2836
some other features depending on the reason for not being supported.
2840
def supports_content_filtering(self):
2841
"""True if this format supports content filtering."""
2844
def supports_views(self):
2845
"""True if this format supports stored views."""
2849
def register_format(klass, format):
2850
klass._formats[format.get_format_string()] = format
2853
def set_default_format(klass, format):
2854
klass._default_format = format
2857
def unregister_format(klass, format):
2858
del klass._formats[format.get_format_string()]
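# Sketch (illustrative only): how a hypothetical format subclass could be
# registered with, and later removed from, the registry kept on
# WorkingTreeFormat.  WorkingTreeFormatX is an assumption, not a real format.
#
#   class WorkingTreeFormatX(WorkingTreeFormat):
#       def get_format_string(self):
#           return "Example Working Tree format X"
#       def get_format_description(self):
#           return "Example working tree format X"
#
#   WorkingTreeFormat.register_format(WorkingTreeFormatX())
#   ...
#   WorkingTreeFormat.unregister_format(WorkingTreeFormatX())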
2861
class WorkingTreeFormat2(WorkingTreeFormat):
2862
"""The second working tree format.
2864
This format modified the hash cache from the format 1 hash cache.
2867
upgrade_recommended = True
2869
def get_format_description(self):
2870
"""See WorkingTreeFormat.get_format_description()."""
2871
return "Working tree format 2"
2873
def _stub_initialize_on_transport(self, transport, file_mode):
2874
"""Workaround: create control files for a remote working tree.
2876
This ensures that it can later be updated and dealt with locally,
2877
since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
2878
no working tree. (See bug #43064).
2881
inv = inventory.Inventory()
2882
xml5.serializer_v5.write_inventory(inv, sio, working=True)
2884
transport.put_file('inventory', sio, file_mode)
2885
transport.put_bytes('pending-merges', '', file_mode)
2887
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
2888
accelerator_tree=None, hardlink=False):
2889
"""See WorkingTreeFormat.initialize()."""
2890
if not isinstance(a_bzrdir.transport, LocalTransport):
2891
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2892
if from_branch is not None:
2893
branch = from_branch
2895
branch = a_bzrdir.open_branch()
2896
if revision_id is None:
2897
revision_id = _mod_revision.ensure_null(branch.last_revision())
2900
branch.generate_revision_history(revision_id)
2903
inv = inventory.Inventory()
2904
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
2910
basis_tree = branch.repository.revision_tree(revision_id)
2911
if basis_tree.inventory.root is not None:
2912
wt.set_root_id(basis_tree.get_root_id())
2913
# set the parent list and cache the basis tree.
2914
if _mod_revision.is_null(revision_id):
2917
parent_trees = [(revision_id, basis_tree)]
2918
wt.set_parent_trees(parent_trees)
2919
transform.build_tree(basis_tree, wt)
2923
super(WorkingTreeFormat2, self).__init__()
2924
self._matchingbzrdir = bzrdir.BzrDirFormat6()
2926
def open(self, a_bzrdir, _found=False):
2927
"""Return the WorkingTree object for a_bzrdir
2929
_found is a private parameter, do not use it. It is used to indicate
2930
if format probing has already been done.
2933
# we are being called directly and must probe.
2934
raise NotImplementedError
2935
if not isinstance(a_bzrdir.transport, LocalTransport):
2936
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2937
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
2943
class WorkingTreeFormat3(WorkingTreeFormat):
2944
"""The second working tree format updated to record a format marker.
2947
- exists within a metadir controlling .bzr
2948
- includes an explicit version marker for the workingtree control
2949
files, separate from the BzrDir format
2950
- modifies the hash cache format
2952
- uses a LockDir to guard access for writes.
2955
upgrade_recommended = True
2957
def get_format_string(self):
2958
"""See WorkingTreeFormat.get_format_string()."""
2959
return "Bazaar-NG Working Tree format 3"
2961
def get_format_description(self):
2962
"""See WorkingTreeFormat.get_format_description()."""
2963
return "Working tree format 3"
2965
_lock_file_name = 'lock'
2966
_lock_class = LockDir
2968
_tree_class = WorkingTree3
2970
def __get_matchingbzrdir(self):
2971
return bzrdir.BzrDirMetaFormat1()
2973
_matchingbzrdir = property(__get_matchingbzrdir)
2975
def _open_control_files(self, a_bzrdir):
2976
transport = a_bzrdir.get_workingtree_transport(None)
2977
return LockableFiles(transport, self._lock_file_name,
2980
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
2981
accelerator_tree=None, hardlink=False):
2982
"""See WorkingTreeFormat.initialize().
2984
:param revision_id: if supplied, create a working tree at a different
2985
revision than the branch is at.
2986
:param accelerator_tree: A tree which can be used for retrieving file
2987
contents more quickly than the revision tree, i.e. a workingtree.
2988
The revision tree will be used for cases where accelerator_tree's
2989
content is different.
2990
:param hardlink: If true, hard-link files from accelerator_tree,
2993
if not isinstance(a_bzrdir.transport, LocalTransport):
2994
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2995
transport = a_bzrdir.get_workingtree_transport(self)
2996
control_files = self._open_control_files(a_bzrdir)
2997
control_files.create_lock()
2998
control_files.lock_write()
2999
transport.put_bytes('format', self.get_format_string(),
3000
mode=a_bzrdir._get_file_mode())
3001
if from_branch is not None:
3002
branch = from_branch
3004
branch = a_bzrdir.open_branch()
3005
if revision_id is None:
3006
revision_id = _mod_revision.ensure_null(branch.last_revision())
3007
# WorkingTree3 can handle an inventory which has a unique root id.
3008
# as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
3009
# those trees. And because there isn't a format bump in between, we
3010
# are maintaining compatibility with older clients.
3011
# inv = Inventory(root_id=gen_root_id())
3012
inv = self._initial_inventory()
3013
wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3019
_control_files=control_files)
3020
wt.lock_tree_write()
3022
basis_tree = branch.repository.revision_tree(revision_id)
3023
# only set an explicit root id if there is one to set.
3024
if basis_tree.inventory.root is not None:
3025
wt.set_root_id(basis_tree.get_root_id())
3026
if revision_id == _mod_revision.NULL_REVISION:
3027
wt.set_parent_trees([])
3029
wt.set_parent_trees([(revision_id, basis_tree)])
3030
transform.build_tree(basis_tree, wt)
3032
# Unlock in this order so that the unlock-triggers-flush in
3033
# WorkingTree is given a chance to fire.
3034
control_files.unlock()
3038
def _initial_inventory(self):
3039
return inventory.Inventory()
3042
super(WorkingTreeFormat3, self).__init__()
3044
def open(self, a_bzrdir, _found=False):
3045
"""Return the WorkingTree object for a_bzrdir
3047
_found is a private parameter, do not use it. It is used to indicate
3048
if format probing has already been done.
3051
# we are being called directly and must probe.
3052
raise NotImplementedError
3053
if not isinstance(a_bzrdir.transport, LocalTransport):
3054
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3055
wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
3058
def _open(self, a_bzrdir, control_files):
3059
"""Open the tree itself.
3061
:param a_bzrdir: the dir for the tree.
3062
:param control_files: the control files for the tree.
3064
return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3068
_control_files=control_files)
3071
return self.get_format_string()
3074
__default_format = WorkingTreeFormat6()
3075
WorkingTreeFormat.register_format(__default_format)
3076
WorkingTreeFormat.register_format(WorkingTreeFormat5())
3077
WorkingTreeFormat.register_format(WorkingTreeFormat4())
3078
WorkingTreeFormat.register_format(WorkingTreeFormat3())
3079
WorkingTreeFormat.set_default_format(__default_format)
3080
# formats which have no format string are not discoverable
3081
# and not independently creatable, so are not registered.
3082
_legacy_formats = [WorkingTreeFormat2(),