        and the working file exists.
        """
        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):
                yield ie.file_id

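    # Illustrative sketch (not from the original source; 'wt' stands for an
    # already opened and read-locked WorkingTree): iterating the tree yields
    # only the file ids whose working files still exist on disk.
    #
    #   present_ids = list(wt)          # uses __iter__ above
    #   assert all(wt.has_id(fid) for fid in present_ids)
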
    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)

    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling repository.revision_tree(None).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(None)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        # at this point?
        try:
            return self.branch.repository.revision_tree(revision_id)
        except errors.RevisionNotPresent:
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, it's a problem; if it's
            # not, it's a ghost.
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(None)

    @staticmethod
    @deprecated_method(zero_eight)
    def create(branch, directory):
        """Create a workingtree for branch at directory.

        If the directory already exists it must have a .bzr directory.
        If it does not exist, it will be created.

        This returns a new WorkingTree object for the new checkout.

        TODO FIXME RBC 20060124 when we have checkout formats in place this
        should accept an optional revisionid to checkout [and reject this if
        checking out into the same dir as a pre-checkout-aware branch format.]

        XXX: When BzrDir is present, these should be created through that
        interface instead.
        """
        warnings.warn('delete WorkingTree.create', stacklevel=3)
        transport = get_transport(directory)
        if branch.bzrdir.root_transport.base == transport.base:
            # same directory
            return branch.bzrdir.create_workingtree()
        # different directory,
        # create a branch reference
        # and now a working tree.
        raise NotImplementedError

    @staticmethod
    @deprecated_method(zero_eight)
    def create_standalone(directory):
        """Create a checkout, a branch and a repository at directory.

        Directory must exist and be empty.

        Please use BzrDir.create_standalone_workingtree instead.
        """
        return bzrdir.BzrDir.create_standalone_workingtree(directory)

    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If it's a relative path it is
        interpreted relative to the Python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))

    def get_file(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        return self.get_file_byname(self.id2path(file_id))

    def get_file_text(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        return self.get_file(file_id).read()

    def get_file_byname(self, filename):
        return file(self.abspath(filename), 'rb')

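    # Usage sketch (assumed names, not part of the original source): the three
    # accessors above layer on each other - get_file_text() reads the whole
    # file object that get_file() opened via get_file_byname().
    #
    #   text = wt.get_file_text(file_id)             # bytes of the working file
    #   same = wt.get_file(file_id).read() == text   # True for an unchanged file
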
    def annotate_iter(self, file_id):
        """See Tree.annotate_iter

        This implementation will use the basis tree implementation if possible.
        Lines not in the basis are attributed to CURRENT_REVISION

        If there are pending merges, lines added by those merges will be
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        file_id = osutils.safe_file_id(file_id)
        basis = self.basis_tree()
        changes = self._iter_changes(basis, True, [self.id2path(file_id)],
            require_versioned=True).next()
        changed_content, kind = changes[2], changes[6]
        if not changed_content:
            return basis.annotate_iter(file_id)
        if kind[0] != 'file':
            old_lines = []
        else:
            old_lines = list(basis.annotate_iter(file_id))
        old = [old_lines]
        for tree in self.branch.repository.revision_trees(
                self.get_parent_ids()[1:]):
            if file_id not in tree:
                continue
            old.append(list(tree.annotate_iter(file_id)))
        return annotate.reannotate(old, self.get_file(file_id).readlines(),
                                   CURRENT_REVISION)

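    # Hedged example of the contract described in the docstring above (names
    # assumed): each yielded pair attributes a line either to a committed
    # revision id or to CURRENT_REVISION for uncommitted edits.
    #
    #   for origin, line in wt.annotate_iter(file_id):
    #       print origin, line,
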
    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = self._last_revision()
        if last_rev is None:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_file = self._control_files.get('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in merges_file.readlines():
                revision_id = osutils.safe_revision_id(l.rstrip('\n'))
                parents.append(revision_id)
        return parents

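    # Sketch of the parent list shape built above (illustrative values): the
    # first element is the basis (last) revision, any further elements are
    # pending merges read from the 'pending-merges' control file.
    #
    #   parents = wt.get_parent_ids()   # e.g. ['rev-123', 'merged-rev-9']
    #   basis, pending = parents[:1], parents[1:]
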
    def get_root_id(self):
        """Return the id of this tree's root."""
        return self._inventory.root.file_id

    def _get_store_filename(self, file_id):
        ## XXX: badly named; this is not in the store at all
        file_id = osutils.safe_file_id(file_id)
        return self.abspath(self.id2path(file_id))

    def clone(self, to_bzrdir, revision_id=None):
        """Duplicate this working tree into to_bzrdir, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see bzrdir.sprout()

        revision
            If not None, the cloned tree will have its last revision set to
            revision, and any difference between the source tree's last
            revision and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = self._format.initialize(to_bzrdir)
        self.copy_content_into(result, revision_id)
        return result

    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
        else:
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            merge.transform_tree(tree, self)
            tree.set_parent_ids([revision_id])

    def id2abspath(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        return self.abspath(self.id2path(file_id))

    def has_id(self, file_id):
        # files that have been deleted are excluded
        file_id = osutils.safe_file_id(file_id)
        inv = self.inventory
        if not inv.has_id(file_id):
            return False
        path = inv.id2path(file_id)
        return osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    __contains__ = has_id

    def get_file_size(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        return os.path.getsize(self.id2abspath(file_id))

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        file_id = osutils.safe_file_id(file_id)
        if path is None:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        file_id = osutils.safe_file_id(file_id)
        if path is None:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            file_id = osutils.safe_file_id(file_id)
            return self._inventory[file_id].executable
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                file_id = osutils.safe_file_id(file_id)
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.read_working_inventory()
        for f, file_id, kind in zip(files, ids, kinds):
            assert kind is not None
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                file_id = osutils.safe_file_id(file_id)
                inv.add_path(f, kind=kind, file_id=file_id)
        self._write_inventory(inv)

    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)

    @needs_tree_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it's not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available, using
        this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        updated = False
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
            updated = True
        if updated:
            self.set_parent_ids(parents, allow_leftmost_as_ghost=True)

    @deprecated_method(zero_eleven)
    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.

        As of 0.11 this is deprecated. Please see WorkingTree.get_parent_ids()
        instead - which is available on all tree objects.
        """
        return self.get_parent_ids()[1:]

    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._control_files.put_bytes('pending-merges', '\n'.join(merges))

    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the tree's repository. If you
        have tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        revision_ids = [osutils.safe_revision_id(r) for r in revision_ids]
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(None)

        self._set_merges_from_parent_ids(revision_ids)

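    # Illustrative sketch (not from the original source; revision ids assumed):
    # set_parent_ids() is the id-only counterpart of set_parent_trees(); the
    # leftmost id becomes the new last revision and the rest become pending
    # merges recorded in 'pending-merges'.
    #
    #   wt.set_parent_ids(['basis-rev', 'other-rev'])
    #   assert wt.get_parent_ids()[0] == 'basis-rev'
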
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [osutils.safe_revision_id(rev) for (rev, tree) in parents_list]

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        if len(parent_ids) == 0:
            leftmost_parent_id = None
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                    leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)

    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)

    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._control_files.put(filename, my_file)

    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            merger = Merger(self.branch, this_tree=self, pb=pb)
            merger.pp = ProgressPhase("Merge phase", 5, pb)
            merger.pp.next_phase()
            # check that there are no local alterations
            merger.check_basis(check_clean=True, require_commits=False)
            if to_revision is None:
                to_revision = branch.last_revision()
            else:
                to_revision = osutils.safe_revision_id(to_revision)
            merger.other_rev_id = to_revision
            if merger.other_rev_id is None:
                raise errors.NoCommits(branch)
            self.branch.fetch(branch, last_revision=merger.other_rev_id)
            merger.other_basis = merger.other_rev_id
            merger.other_tree = self.branch.repository.revision_tree(
                merger.other_rev_id)
            merger.other_branch = branch
            merger.pp.next_phase()
            merger.find_base()
            if merger.base_rev_id == merger.other_rev_id:
                raise errors.PointlessMerge
            merger.backup_files = False
            merger.merge_type = Merge3Merger
            merger.set_interesting_files(None)
            merger.show_base = False
            merger.reprocess = False
            conflicts = merger.do_merge()
            merger.set_pending()
        finally:
            pb.finished()
        return conflicts

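    # Hedged usage sketch (names assumed, not part of the original source):
    # merging another branch up to its tip records the merged tip as a pending
    # merge and returns whatever do_merge() above reports as conflicts.
    #
    #   conflicts = wt.merge_from_branch(other_branch)
    #   if conflicts:
    #       print 'resolve the conflicts before committing'
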
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._control_files.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        merge_hashes = {}
        try:
            if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                raise errors.MergeModifiedFormatError()
        except StopIteration:
            raise errors.MergeModifiedFormatError()
        for s in RioReader(hashfile):
            # RioReader reads in Unicode, so convert file_ids back to utf8
            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
            if file_id not in self.inventory:
                continue
            text_hash = s.get("hash")
            if text_hash == self.get_file_sha1(file_id):
                merge_hashes[file_id] = text_hash
        return merge_hashes

    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id

    def get_symlink_target(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        return os.readlink(self.id2abspath(file_id))

    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                                          'Trees have the same root')
        try:
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
        try:
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        finally:
            other_tree.unlock()
        other_tree.bzrdir.retire_bzrdir()

    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        def mkdirs(path):
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()
            return transport

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
        if format is None:
            format = bzrdir.format_registry.make_bzrdir('dirstate-with-subtree')
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
        try:
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
            assert repo.supports_rich_root()
        else:
            if not repo.supports_rich_root():
                raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
        return wt

    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)

    def flush(self):
        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._control_files.put('inventory', sio)
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))

994
def list_files(self, include_root=False):
995
"""Recursively list all files as (path, class, kind, id, entry).
997
120
Lists, but does not descend into unversioned directories.
1002
125
Skips the control directory.
1004
# list_files is an iterator, so @needs_read_lock doesn't work properly
1005
# with it. So callers should be careful to always read_lock the tree.
1006
if not self.is_locked():
1007
raise errors.ObjectNotLocked(self)
127
from osutils import appendpath, file_kind
1009
130
inv = self.inventory
1010
if include_root is True:
1011
yield ('', 'V', 'directory', inv.root.file_id, inv.root)
1012
# Convert these into local objects to save lookup times
1013
pathjoin = osutils.pathjoin
1014
file_kind = self._kind
1016
# transport.base ends in a slash, we want the piece
1017
# between the last two slashes
1018
transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]
1020
fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
1022
# directory file_id, relative path, absolute path, reverse sorted children
1023
children = os.listdir(self.basedir)
1025
# jam 20060527 The kernel sized tree seems equivalent whether we
1026
# use a deque and popleft to keep them sorted, or if we use a plain
1027
# list and just reverse() them.
1028
children = collections.deque(children)
1029
stack = [(inv.root.file_id, u'', self.basedir, children)]
1031
from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
1034
f = children.popleft()
132
def descend(from_dir_relpath, from_dir_id, dp):
1035
136
## TODO: If we find a subdirectory with its own .bzr
1036
137
## directory, then that is a separate tree and we
1037
138
## should exclude it.
1039
# the bzrdir for this tree
1040
if transport_base_dir == f:
139
if bzrlib.BZRDIR == f:
1043
# we know that from_dir_relpath and from_dir_abspath never end in a slash
1044
# and 'f' doesn't begin with one, we can do a string op, rather
1045
# than the checks of pathjoin(), all relative paths will have an extra slash
1047
fp = from_dir_relpath + '/' + f
143
fp = appendpath(from_dir_relpath, f)
1050
fap = from_dir_abspath + '/' + f
146
fap = appendpath(dp, f)
1052
148
f_ie = inv.get_child(from_dir_id, f)
1055
elif self.is_ignored(fp[1:]):
151
elif self.is_ignored(fp):
1058
# we may not have found this file, because of a unicode issue
1059
f_norm, can_access = osutils.normalized_filename(f)
1060
if f == f_norm or not can_access:
1061
# No change, so treat this file normally
1064
# this file can be accessed by a normalized path
1065
# check again if it is versioned
1066
# these lines are repeated here for performance
1068
fp = from_dir_relpath + '/' + f
1069
fap = from_dir_abspath + '/' + f
1070
f_ie = inv.get_child(from_dir_id, f)
1073
elif self.is_ignored(fp[1:]):
1078
156
fk = file_kind(fap)
1080
# make a last minute entry
1082
yield fp[1:], c, fk, f_ie.file_id, f_ie
1085
yield fp[1:], c, fk, None, fk_entries[fk]()
1087
yield fp[1:], c, fk, None, TreeEntry()
160
raise BzrCheckError("file %r entered as kind %r id %r, "
162
% (fap, f_ie.kind, f_ie.file_id, fk))
164
yield fp, c, fk, (f_ie and f_ie.file_id)
1090
166
if fk != 'directory':
1093
# But do this child first
1094
new_children = os.listdir(fap)
1096
new_children = collections.deque(new_children)
1097
stack.append((f_ie.file_id, fp, fap, new_children))
1098
# Break out of inner loop,
1099
# so that we start outer loop with child
1102
# if we finished all children, pop it off the stack
1105
@needs_tree_write_lock
1106
def move(self, from_paths, to_dir=None, after=False, **kwargs):
1109
to_dir must exist in the inventory.
1111
If to_dir exists and is a directory, the files are moved into
1112
it, keeping their old names.
1114
Note that to_dir is only the last component of the new name;
1115
this doesn't change the directory.
1117
For each entry in from_paths the move mode will be determined
1120
The first mode moves the file in the filesystem and updates the
1121
inventory. The second mode only updates the inventory without
1122
touching the file on the filesystem. This is the new mode introduced
1125
move uses the second mode if 'after == True' and the target is not
1126
versioned but present in the working tree.
1128
move uses the second mode if 'after == False' and the source is
1129
versioned but no longer in the working tree, and the target is not
1130
versioned but present in the working tree.
1132
move uses the first mode if 'after == False' and the source is
1133
versioned and present in the working tree, and the target is not
1134
versioned and not present in the working tree.
1136
Everything else results in an error.
1138
This returns a list of (from_path, to_path) pairs for each
1139
entry that is moved.
# check for deprecated use of signature
if to_dir is None:
    to_dir = kwargs.get('to_name', None)
    if to_dir is None:
        raise TypeError('You must supply a target directory')
    else:
        symbol_versioning.warn('The parameter to_name was deprecated'
                               ' in version 0.13. Use to_dir instead',
                               DeprecationWarning)
1154
# check destination directory
1155
assert not isinstance(from_paths, basestring)
1156
inv = self.inventory
1157
to_abs = self.abspath(to_dir)
1158
if not isdir(to_abs):
1159
raise errors.BzrMoveFailedError('',to_dir,
1160
errors.NotADirectory(to_abs))
1161
if not self.has_filename(to_dir):
1162
raise errors.BzrMoveFailedError('',to_dir,
1163
errors.NotInWorkingDirectory(to_dir))
1164
to_dir_id = inv.path2id(to_dir)
1165
if to_dir_id is None:
1166
raise errors.BzrMoveFailedError('',to_dir,
1167
errors.NotVersionedError(path=str(to_dir)))
1169
to_dir_ie = inv[to_dir_id]
1170
if to_dir_ie.kind != 'directory':
1171
raise errors.BzrMoveFailedError('',to_dir,
1172
errors.NotADirectory(to_abs))
1174
# create rename entries and tuples
1175
for from_rel in from_paths:
1176
from_tail = splitpath(from_rel)[-1]
1177
from_id = inv.path2id(from_rel)
1179
raise errors.BzrMoveFailedError(from_rel,to_dir,
1180
errors.NotVersionedError(path=str(from_rel)))
1182
from_entry = inv[from_id]
1183
from_parent_id = from_entry.parent_id
1184
to_rel = pathjoin(to_dir, from_tail)
1185
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1187
from_tail=from_tail,
1188
from_parent_id=from_parent_id,
1189
to_rel=to_rel, to_tail=from_tail,
1190
to_parent_id=to_dir_id)
1191
rename_entries.append(rename_entry)
1192
rename_tuples.append((from_rel, to_rel))
1194
# determine which move mode to use. checks also for movability
1195
rename_entries = self._determine_mv_mode(rename_entries, after)
1197
original_modified = self._inventory_is_modified
1200
self._inventory_is_modified = True
1201
self._move(rename_entries)
1203
# restore the inventory on error
1204
self._inventory_is_modified = original_modified
1206
self._write_inventory(inv)
1207
return rename_tuples
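# Illustrative sketch of the two modes described in the move() docstring above
# (assumed file names, not part of the original source):
#
#   wt.move(['a.txt', 'b.txt'], 'subdir')        # moves files and inventory
#   os.rename(wt.abspath('c.txt'), wt.abspath('subdir/c.txt'))
#   wt.move(['c.txt'], 'subdir', after=True)     # inventory-only fix-up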
1209
def _determine_mv_mode(self, rename_entries, after=False):
1210
"""Determines for each from-to pair if both inventory and working tree
1211
or only the inventory has to be changed.
1213
Also does basic plausibility tests.
1215
inv = self.inventory
1217
for rename_entry in rename_entries:
1218
# store to local variables for easier reference
1219
from_rel = rename_entry.from_rel
1220
from_id = rename_entry.from_id
1221
to_rel = rename_entry.to_rel
1222
to_id = inv.path2id(to_rel)
1223
only_change_inv = False
1225
# check the inventory for source and destination
1227
raise errors.BzrMoveFailedError(from_rel,to_rel,
1228
errors.NotVersionedError(path=str(from_rel)))
1229
if to_id is not None:
1230
raise errors.BzrMoveFailedError(from_rel,to_rel,
1231
errors.AlreadyVersionedError(path=str(to_rel)))
1233
# try to determine the mode for rename (only change inv or change
1234
# inv and file system)
1236
if not self.has_filename(to_rel):
1237
raise errors.BzrMoveFailedError(from_id,to_rel,
1238
errors.NoSuchFile(path=str(to_rel),
1239
extra="New file has not been created yet"))
1240
only_change_inv = True
1241
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1242
only_change_inv = True
1243
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1244
only_change_inv = False
1246
# something is wrong, so lets determine what exactly
1247
if not self.has_filename(from_rel) and \
1248
not self.has_filename(to_rel):
1249
raise errors.BzrRenameFailedError(from_rel,to_rel,
1250
errors.PathsDoNotExist(paths=(str(from_rel),
1253
raise errors.RenameFailedFilesExist(from_rel, to_rel,
1254
extra="(Use --after to update the Bazaar id)")
1255
rename_entry.only_change_inv = only_change_inv
1256
return rename_entries
1258
def _move(self, rename_entries):
1259
"""Moves a list of files.
1261
Depending on the value of the flag 'only_change_inv', the
1262
file will be moved on the file system or not.
1264
inv = self.inventory
1267
for entry in rename_entries:
1269
self._move_entry(entry)
1271
self._rollback_move(moved)
1275
def _rollback_move(self, moved):
1276
"""Try to rollback a previous move in case of an filesystem error."""
1277
inv = self.inventory
1280
self._move_entry(_RenameEntry(entry.to_rel, entry.from_id,
1281
entry.to_tail, entry.to_parent_id, entry.from_rel,
1282
entry.from_tail, entry.from_parent_id,
1283
entry.only_change_inv))
1284
except errors.BzrMoveFailedError, e:
1285
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1286
" The working tree is in an inconsistent state."
1287
" Please consider doing a 'bzr revert'."
1288
" Error message is: %s" % e)
1290
def _move_entry(self, entry):
1291
inv = self.inventory
1292
from_rel_abs = self.abspath(entry.from_rel)
1293
to_rel_abs = self.abspath(entry.to_rel)
1294
if from_rel_abs == to_rel_abs:
1295
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1296
"Source and target are identical.")
1298
if not entry.only_change_inv:
1300
osutils.rename(from_rel_abs, to_rel_abs)
1302
raise errors.BzrMoveFailedError(entry.from_rel,
1304
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1306
@needs_tree_write_lock
1307
def rename_one(self, from_rel, to_rel, after=False):
1310
This can change the directory or the filename or both.
1312
rename_one has several 'modes' to work. First, it can rename a physical
1313
file and change the file_id. That is the normal mode. Second, it can
1314
only change the file_id without touching any physical file. This is
1315
the new mode introduced in version 0.15.
1317
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1318
versioned but present in the working tree.
1320
rename_one uses the second mode if 'after == False' and 'from_rel' is
1321
versioned but no longer in the working tree, and 'to_rel' is not
1322
versioned but present in the working tree.
1324
rename_one uses the first mode if 'after == False' and 'from_rel' is
1325
versioned and present in the working tree, and 'to_rel' is not
1326
versioned and not present in the working tree.
1328
Everything else results in an error.
1330
inv = self.inventory
1333
# create rename entries and tuples
1334
from_tail = splitpath(from_rel)[-1]
1335
from_id = inv.path2id(from_rel)
1337
raise errors.BzrRenameFailedError(from_rel,to_rel,
1338
errors.NotVersionedError(path=str(from_rel)))
1339
from_entry = inv[from_id]
1340
from_parent_id = from_entry.parent_id
1341
to_dir, to_tail = os.path.split(to_rel)
1342
to_dir_id = inv.path2id(to_dir)
1343
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1345
from_tail=from_tail,
1346
from_parent_id=from_parent_id,
1347
to_rel=to_rel, to_tail=to_tail,
1348
to_parent_id=to_dir_id)
1349
rename_entries.append(rename_entry)
1351
# determine which move mode to use. checks also for movability
1352
rename_entries = self._determine_mv_mode(rename_entries, after)
1354
# check if the target changed directory and if the target directory is
1356
if to_dir_id is None:
1357
raise errors.BzrMoveFailedError(from_rel,to_rel,
1358
errors.NotVersionedError(path=str(to_dir)))
1360
# all checks done. now we can continue with our actual work
1361
mutter('rename_one:\n'
1366
' to_dir_id {%s}\n',
1367
from_id, from_rel, to_rel, to_dir, to_dir_id)
1369
self._move(rename_entries)
1370
self._write_inventory(inv)
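# Hedged example mirroring rename_one's modes (assumed file names):
#
#   wt.rename_one('old.txt', 'new.txt')            # renames file + inventory
#   os.rename(wt.abspath('x.txt'), wt.abspath('y.txt'))
#   wt.rename_one('x.txt', 'y.txt', after=True)    # record an existing rename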
1372
class _RenameEntry(object):
1373
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1374
to_rel, to_tail, to_parent_id, only_change_inv=False):
1375
self.from_rel = from_rel
1376
self.from_id = from_id
1377
self.from_tail = from_tail
1378
self.from_parent_id = from_parent_id
1379
self.to_rel = to_rel
1380
self.to_tail = to_tail
1381
self.to_parent_id = to_parent_id
1382
self.only_change_inv = only_change_inv
1385
181
def unknowns(self):
1386
"""Return all unknown files.
1388
These are files in the working directory that are not versioned or
1389
control files or ignored.
1391
# force the extras method to be fully executed before returning, to
1392
# prevent race conditions with the lock
1394
return iter(
    [subp for subp in self.extras() if not self.is_ignored(subp)])
1396
@needs_tree_write_lock
1397
def unversion(self, file_ids):
1398
"""Remove the file ids in file_ids from the current versioned set.
1400
When a file_id is unversioned, all of its children are automatically
unversioned.
1403
:param file_ids: The file ids to stop versioning.
1404
:raises: NoSuchId if any fileid is not currently versioned.
1406
for file_id in file_ids:
1407
file_id = osutils.safe_file_id(file_id)
1408
if self._inventory.has_id(file_id):
1409
self._inventory.remove_recursive_id(file_id)
1411
raise errors.NoSuchId(self, file_id)
1413
# in the future this should just set a dirty bit to wait for the
1414
# final unlock. However, until all methods of workingtree start
1415
# with the current in-memory inventory rather than triggering
1416
# a read, it is more complex - we need to teach read_inventory
1417
# to know when to read, and when to not read first... and possibly
1418
# to save first when the in memory one may be corrupted.
1419
# so for now, we just only write it if it is indeed dirty.
1421
self._write_inventory(self._inventory)
1423
@deprecated_method(zero_eight)
1424
def iter_conflicts(self):
1425
"""List all files in the tree that have text or content conflicts.
1426
DEPRECATED. Use conflicts instead."""
1427
return self._iter_conflicts()
1429
def _iter_conflicts(self):
1431
for info in self.list_files():
1433
stem = get_conflicted_stem(path)
1436
if stem not in conflicted:
1437
conflicted.add(stem)
1441
def pull(self, source, overwrite=False, stop_revision=None,
1442
change_reporter=None):
1443
top_pb = bzrlib.ui.ui_factory.nested_progress_bar()
1446
pp = ProgressPhase("Pull phase", 2, top_pb)
1448
old_revision_info = self.branch.last_revision_info()
1449
basis_tree = self.basis_tree()
1450
count = self.branch.pull(source, overwrite, stop_revision)
1451
new_revision_info = self.branch.last_revision_info()
1452
if new_revision_info != old_revision_info:
1454
repository = self.branch.repository
1455
pb = bzrlib.ui.ui_factory.nested_progress_bar()
1456
basis_tree.lock_read()
1458
new_basis_tree = self.branch.basis_tree()
1465
change_reporter=change_reporter)
1466
if (basis_tree.inventory.root is None and
1467
new_basis_tree.inventory.root is not None):
1468
self.set_root_id(new_basis_tree.inventory.root.file_id)
1472
# TODO - dedup parents list with things merged by pull ?
1473
# reuse the revisiontree we merged against to set the new
1475
parent_trees = [(self.branch.last_revision(), new_basis_tree)]
1476
# we have to pull the merge trees out again, because
1477
# merge_inner has set the ids. - this corner is not yet
1478
# layered well enough to prevent double handling.
1479
# XXX TODO: Fix the double handling: telling the tree about
1480
# the already known parent data is wasteful.
1481
merges = self.get_parent_ids()[1:]
1482
parent_trees.extend([
1483
(parent, repository.revision_tree(parent)) for
1485
self.set_parent_trees(parent_trees)
1492
def put_file_bytes_non_atomic(self, file_id, bytes):
1493
"""See MutableTree.put_file_bytes_non_atomic."""
1494
file_id = osutils.safe_file_id(file_id)
1495
stream = file(self.id2abspath(file_id), 'wb')
1500
# TODO: update the hashcache here ?
1502
187
def extras(self):
1503
"""Yield all unversioned files in this WorkingTree.
1505
If there are any unversioned directories then only the directory is
1506
returned, not all its children. But if there are unversioned files
1507
192
under a versioned subdirectory, they are returned.
1509
194
Currently returned depth-first, sorted by name within directories.
1510
This is the same order used by 'osutils.walkdirs'.
1512
196
## TODO: Work from given directory downwards
1513
199
for path, dir_entry in self.inventory.directories():
1514
# mutter("search for unknowns in %r", path)
1515
201
dirabs = self.abspath(path)
1516
202
if not isdir(dirabs):
1517
203
# e.g. directory deleted
If the file is ignored, returns the pattern which caused it to
be ignored, otherwise None. So this can simply be used as a
boolean if desired."""
if getattr(self, '_ignoreglobster', None) is None:
    self._ignoreglobster = globbing.Globster(self.get_ignore_list())
return self._ignoreglobster.match(filename)
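# Usage sketch (hypothetical path and pattern): is_ignored() returns the
# matching ignore pattern, so it doubles as a boolean test.
#
#   pattern = wt.is_ignored('build/output.o')   # e.g. '*.o', or None
#   if pattern:
#       print 'ignored by', pattern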
1583
def kind(self, file_id):
1584
return file_kind(self.id2abspath(file_id))
1586
def _comparison_data(self, entry, path):
1587
abspath = self.abspath(path)
1589
stat_value = os.lstat(abspath)
1591
if getattr(e, 'errno', None) == errno.ENOENT:
1598
mode = stat_value.st_mode
1599
kind = osutils.file_kind_from_stat_mode(mode)
1600
if not supports_executable():
1601
executable = entry is not None and entry.executable
1603
executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
1604
return kind, executable, stat_value
1606
def _file_size(self, entry, stat_value):
1607
return stat_value.st_size
1609
def last_revision(self):
1610
"""Return the last revision of the branch for this tree.
1612
This format tree does not support a separate marker for last-revision
1613
compared to the branch.
1615
See MutableTree.last_revision
1617
return self._last_revision()
1620
def _last_revision(self):
1621
"""helper for get_parent_ids."""
1622
return self.branch.last_revision()
1624
def is_locked(self):
1625
return self._control_files.is_locked()
1627
def _must_be_locked(self):
1628
if not self.is_locked():
1629
raise errors.ObjectNotLocked(self)
1631
def lock_read(self):
1632
"""See Branch.lock_read, and WorkingTree.unlock."""
1633
if not self.is_locked():
1635
self.branch.lock_read()
1637
return self._control_files.lock_read()
1639
self.branch.unlock()
1642
def lock_tree_write(self):
1643
"""See MutableTree.lock_tree_write, and WorkingTree.unlock."""
1644
if not self.is_locked():
1646
self.branch.lock_read()
1648
return self._control_files.lock_write()
1650
self.branch.unlock()
1653
def lock_write(self):
1654
"""See MutableTree.lock_write, and WorkingTree.unlock."""
1655
if not self.is_locked():
1657
self.branch.lock_write()
1659
return self._control_files.lock_write()
1661
self.branch.unlock()
1664
def get_physical_lock_status(self):
1665
return self._control_files.get_physical_lock_status()
1667
def _basis_inventory_name(self):
1668
return 'basis-inventory-cache'
1670
def _reset_data(self):
1671
"""Reset transient data that cannot be revalidated."""
1672
self._inventory_is_modified = False
1673
result = self._deserialize(self._control_files.get('inventory'))
1674
self._set_inventory(result, dirty=False)
1676
@needs_tree_write_lock
1677
def set_last_revision(self, new_revision):
1678
"""Change the last revision in the working tree."""
1679
new_revision = osutils.safe_revision_id(new_revision)
1680
if self._change_last_revision(new_revision):
1681
self._cache_basis_inventory(new_revision)
1683
def _change_last_revision(self, new_revision):
1684
"""Template method part of set_last_revision to perform the change.
1686
This is used to allow WorkingTree3 instances to not affect branch
1687
when their last revision is set.
1689
if new_revision is None:
1690
self.branch.set_revision_history([])
1693
self.branch.generate_revision_history(new_revision)
1694
except errors.NoSuchRevision:
1695
# not present in the repo - don't try to set it deeper than the tip
1696
self.branch.set_revision_history([new_revision])
1699
def _write_basis_inventory(self, xml):
1700
"""Write the basis inventory XML to the basis-inventory file"""
1701
assert isinstance(xml, str), 'serialised xml must be bytestring.'
1702
path = self._basis_inventory_name()
1704
self._control_files.put(path, sio)
1706
def _create_basis_xml_from_inventory(self, revision_id, inventory):
1707
"""Create the text that will be saved in basis-inventory"""
1708
# TODO: jam 20070209 This should be redundant, as the revision_id
1709
# as all callers should have already converted the revision_id to
1711
inventory.revision_id = osutils.safe_revision_id(revision_id)
1712
return xml7.serializer_v7.write_inventory_to_string(inventory)
1714
def _cache_basis_inventory(self, new_revision):
1715
"""Cache new_revision as the basis inventory."""
1716
# TODO: this should allow the ready-to-use inventory to be passed in,
1717
# as commit already has that ready-to-use [while the format is the
1720
# this double handles the inventory - unpack and repack -
1721
# but is easier to understand. We can/should put a conditional
1722
# in here based on whether the inventory is in the latest format
1723
# - perhaps we should repack all inventories on a repository
1725
# the fast path is to copy the raw xml from the repository. If the
1726
# xml contains 'revision_id="', then we assume the right
1727
# revision_id is set. We must check for this full string, because a
1728
# root node id can legitimately look like 'revision_id' but cannot
1730
xml = self.branch.repository.get_inventory_xml(new_revision)
1731
firstline = xml.split('\n', 1)[0]
1732
if (not 'revision_id="' in firstline or
1733
'format="7"' not in firstline):
1734
inv = self.branch.repository.deserialise_inventory(
1736
xml = self._create_basis_xml_from_inventory(new_revision, inv)
1737
self._write_basis_inventory(xml)
1738
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1741
def read_basis_inventory(self):
1742
"""Read the cached basis inventory."""
1743
path = self._basis_inventory_name()
1744
return self._control_files.get(path).read()
1747
def read_working_inventory(self):
1748
"""Read the working inventory.
1750
:raises errors.InventoryModified: read_working_inventory will fail
1751
when the current in memory inventory has been modified.
1753
# conceptually this should be an implementation detail of the tree.
1754
# XXX: Deprecate this.
1755
# ElementTree does its own conversion from UTF-8, so open in
1757
if self._inventory_is_modified:
1758
raise errors.InventoryModified(self)
1759
result = self._deserialize(self._control_files.get('inventory'))
1760
self._set_inventory(result, dirty=False)
1763
@needs_tree_write_lock
1764
def remove(self, files, verbose=False, to_file=None, keep_files=True,
1766
"""Remove nominated files from the working inventor.
1768
:files: File paths relative to the basedir.
1769
:keep_files: If true, the files will also be kept.
1770
:force: Delete files and directories, even if they are changed and
1771
even if the directories are not empty.
1773
## TODO: Normalize names
1775
if isinstance(files, basestring):
1778
inv = self.inventory
1781
unknown_files_in_directory=set()
1783
def recurse_directory_to_add_files(directory):
1784
# recurse directory and add all files
1785
# so we can check if they have changed.
1786
for contained_dir_info in self.walkdirs(directory):
1787
for file_info in contained_dir_info[1]:
1788
if file_info[2] == 'file':
1789
relpath = self.relpath(file_info[0])
1790
if file_info[4]: #is it versioned?
1791
new_files.add(relpath)
1793
unknown_files_in_directory.add(
1794
(relpath, None, file_info[2]))
1796
for filename in files:
1797
# Get file name into canonical form.
1798
filename = self.relpath(self.abspath(filename))
1799
if len(filename) > 0:
1800
new_files.add(filename)
1801
if osutils.isdir(filename) and len(os.listdir(filename)) > 0:
1802
recurse_directory_to_add_files(filename)
1803
files = [f for f in new_files]
1805
# Sort needed to first handle directory content before the directory
1806
files.sort(reverse=True)
1807
if not keep_files and not force:
1808
tree_delta = self.changes_from(self.basis_tree(),
1809
specific_files=files)
1810
for unknown_file in unknown_files_in_directory:
1811
tree_delta.unversioned.extend((unknown_file,))
1812
if bool(tree_delta.modified
1814
or tree_delta.renamed
1815
or tree_delta.kind_changed
1816
or tree_delta.unversioned):
1817
raise errors.BzrRemoveChangedFilesError(tree_delta)
1819
# do this before any modifications
1821
fid = inv.path2id(f)
1824
message="%s is not versioned." % (f,)
1827
# having removed it, it must be either ignored or unknown
1828
if self.is_ignored(f):
1832
textui.show_status(new_status, inv[fid].kind, f,
1836
message="removed %s" % (f,)
1839
abs_path = self.abspath(f)
1840
if osutils.lexists(abs_path):
1841
if (osutils.isdir(abs_path) and
1842
len(os.listdir(abs_path)) > 0):
1843
message="%s is not empty directory "\
1844
"and won't be deleted." % (f,)
1846
osutils.delete_any(abs_path)
1847
message="deleted %s" % (f,)
1848
elif message is not None:
1849
# only care if we haven't done anything yet.
1850
message="%s does not exist." % (f,)
1852
# print only one message (if any) per file.
1853
if message is not None:
1855
self._write_inventory(inv)
1857
@needs_tree_write_lock
1858
def revert(self, filenames, old_tree=None, backups=True,
1859
pb=DummyProgress(), report_changes=False):
1860
from bzrlib.conflicts import resolve
1861
if old_tree is None:
1862
old_tree = self.basis_tree()
1863
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
1865
if not len(filenames):
1866
self.set_parent_ids(self.get_parent_ids()[:1])
1869
resolve(self, filenames, ignore_misses=True)
1872
def revision_tree(self, revision_id):
1873
"""See Tree.revision_tree.
1875
WorkingTree can supply revision_trees for the basis revision only
1876
because there is only one cached inventory in the bzr directory.
1878
if revision_id == self.last_revision():
1880
xml = self.read_basis_inventory()
1881
except errors.NoSuchFile:
1885
inv = xml7.serializer_v7.read_inventory_from_string(xml)
1886
# dont use the repository revision_tree api because we want
1887
# to supply the inventory.
1888
if inv.revision_id == revision_id:
1889
return revisiontree.RevisionTree(self.branch.repository,
1891
except errors.BadInventoryFormat:
1893
# raise if there was no inventory, or if we read the wrong inventory.
1894
raise errors.NoSuchRevisionInTree(self, revision_id)
1896
# XXX: This method should be deprecated in favour of taking in a proper
1897
# new Inventory object.
1898
@needs_tree_write_lock
1899
def set_inventory(self, new_inventory_list):
1900
from bzrlib.inventory import (Inventory,
1905
inv = Inventory(self.get_root_id())
1906
for path, file_id, parent, kind in new_inventory_list:
1907
name = os.path.basename(path)
1910
# fixme, there should be a factory function inv,add_??
1911
if kind == 'directory':
1912
inv.add(InventoryDirectory(file_id, name, parent))
1913
elif kind == 'file':
1914
inv.add(InventoryFile(file_id, name, parent))
1915
elif kind == 'symlink':
1916
inv.add(InventoryLink(file_id, name, parent))
1918
raise errors.BzrError("unknown kind %r" % kind)
1919
self._write_inventory(inv)
1921
@needs_tree_write_lock
1922
def set_root_id(self, file_id):
1923
"""Set the root id for this tree."""
1926
symbol_versioning.warn(symbol_versioning.zero_twelve
1927
% 'WorkingTree.set_root_id with fileid=None',
1932
file_id = osutils.safe_file_id(file_id)
1933
self._set_root_id(file_id)
1935
def _set_root_id(self, file_id):
1936
"""Set the root id for this tree, in a format specific manner.
1938
:param file_id: The file id to assign to the root. It must not be
1939
present in the current inventory or an error will occur. It must
1940
not be None, but rather a valid file id.
1942
inv = self._inventory
1943
orig_root_id = inv.root.file_id
1944
# TODO: it might be nice to exit early if there was nothing
1945
# to do, saving us from trigger a sync on unlock.
1946
self._inventory_is_modified = True
1947
# we preserve the root inventory entry object, but
1948
# unlink it from the byid index
1949
del inv._byid[inv.root.file_id]
1950
inv.root.file_id = file_id
1951
# and link it into the index with the new changed id.
1952
inv._byid[inv.root.file_id] = inv.root
1953
# and finally update all children to reference the new id.
1954
# XXX: this should be safe to just look at the root.children
1955
# list, not the WHOLE INVENTORY.
1958
if entry.parent_id == orig_root_id:
1959
entry.parent_id = inv.root.file_id
1962
"""See Branch.unlock.
1964
WorkingTree locking just uses the Branch locking facilities.
1965
This is current because all working trees have an embedded branch
1966
within them. If in the future, we were to make branch data shareable
1967
between multiple working trees, i.e. via shared storage, then we
1968
would probably want to lock both the local tree, and the branch.
1970
raise NotImplementedError(self.unlock)
1973
"""Update a working tree along its branch.
1975
This will update the branch if its bound too, which means we have
1976
multiple trees involved:
1978
- The new basis tree of the master.
1979
- The old basis tree of the branch.
1980
- The old basis tree of the working tree.
1981
- The current working tree state.
1983
Pathologically, all three may be different, and non-ancestors of each
1984
other. Conceptually we want to:
1986
- Preserve the wt.basis->wt.state changes
1987
- Transform the wt.basis to the new master basis.
1988
- Apply a merge of the old branch basis to get any 'local' changes from
1990
- Restore the wt.basis->wt.state changes.
1992
There isn't a single operation at the moment to do that, so we:
1993
- Merge current state -> basis tree of the master w.r.t. the old tree
1995
- Do a 'normal' merge of the old branch basis if it is relevant.
1997
if self.branch.get_master_branch() is not None:
1999
update_branch = True
2001
self.lock_tree_write()
2002
update_branch = False
2005
old_tip = self.branch.update()
2008
return self._update_tree(old_tip)
2012
@needs_tree_write_lock
2013
def _update_tree(self, old_tip=None):
2014
"""Update a tree to the master branch.
2016
:param old_tip: if supplied, the previous tip revision the branch,
2017
before it was changed to the master branch's tip.
2019
# here if old_tip is not None, it is the old tip of the branch before
2020
# it was updated from the master branch. This should become a pending
2021
# merge in the working tree to preserve the user existing work. we
2022
# cant set that until we update the working trees last revision to be
2023
# one from the new branch, because it will just get absorbed by the
2024
# parent de-duplication logic.
2026
# We MUST save it even if an error occurs, because otherwise the users
2027
# local work is unreferenced and will appear to have been lost.
2031
last_rev = self.get_parent_ids()[0]
2034
if last_rev != self.branch.last_revision():
2035
# merge tree state up to new branch tip.
2036
basis = self.basis_tree()
2039
to_tree = self.branch.basis_tree()
2040
if basis.inventory.root is None:
2041
self.set_root_id(to_tree.inventory.root.file_id)
2043
result += merge.merge_inner(
2050
# TODO - dedup parents list with things merged by pull ?
2051
# reuse the tree we've updated to to set the basis:
2052
parent_trees = [(self.branch.last_revision(), to_tree)]
2053
merges = self.get_parent_ids()[1:]
2054
# Ideally we ask the tree for the trees here, that way the working
2055
# tree can decide whether to give us the entire tree or give us a
2056
# lazy initialised tree. dirstate for instance will have the trees
2057
# in ram already, whereas a last-revision + basis-inventory tree
2058
# will not, but also does not need them when setting parents.
2059
for parent in merges:
2060
parent_trees.append(
2061
(parent, self.branch.repository.revision_tree(parent)))
2062
if old_tip is not None:
2063
parent_trees.append(
2064
(old_tip, self.branch.repository.revision_tree(old_tip)))
2065
self.set_parent_trees(parent_trees)
2066
last_rev = parent_trees[0][0]
2068
# the working tree had the same last-revision as the master
2069
# branch did. We may still have pivot local work from the local
2070
# branch into old_tip:
2071
if old_tip is not None:
2072
self.add_parent_tree_id(old_tip)
2073
if old_tip and old_tip != last_rev:
2074
# our last revision was not the prior branch last revision
2075
# and we have converted that last revision to a pending merge.
2076
# base is somewhere between the branch tip now
2077
# and the now pending merge
2079
# Since we just modified the working tree and inventory, flush out
2080
# the current state, before we modify it again.
2081
# TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
2082
# requires it only because TreeTransform directly munges the
2083
# inventory and calls tree._write_inventory(). Ultimately we
2084
# should be able to remove this extra flush.
2086
from bzrlib.revision import common_ancestor
2088
base_rev_id = common_ancestor(self.branch.last_revision(),
2090
self.branch.repository)
2091
except errors.NoCommonAncestor:
2093
base_tree = self.branch.repository.revision_tree(base_rev_id)
2094
other_tree = self.branch.repository.revision_tree(old_tip)
2095
result += merge.merge_inner(
2102
def _write_hashcache_if_dirty(self):
2103
"""Write out the hashcache if it is dirty."""
2104
if self._hashcache.needs_write:
2106
self._hashcache.write()
2108
if e.errno not in (errno.EPERM, errno.EACCES):
2110
# TODO: jam 20061219 Should this be a warning? A single line
2111
# warning might be sufficient to let the user know what
2113
mutter('Could not write hashcache for %s\nError: %s',
2114
self._hashcache.cache_file_name(), e)
2116
@needs_tree_write_lock
2117
def _write_inventory(self, inv):
2118
"""Write inventory as the current inventory."""
2119
self._set_inventory(inv, dirty=True)

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    @needs_read_lock
    def conflicts(self):
        conflicts = _mod_conflicts.ConflictList()
        for conflicted in self._iter_conflicts():
            text = True
            try:
                if file_kind(self.abspath(conflicted)) != "file":
                    text = False
            except errors.NoSuchFile:
                text = False
            if text is True:
                for suffix in ('.THIS', '.OTHER'):
                    try:
                        kind = file_kind(self.abspath(conflicted+suffix))
                        if kind != "file":
                            text = False
                    except errors.NoSuchFile:
                        text = False
                    if text == False:
                        break
            ctype = {True: 'text conflict', False: 'contents conflict'}[text]
            conflicts.append(_mod_conflicts.Conflict.factory(ctype,
                             path=conflicted,
                             file_id=self.path2id(conflicted)))
        return conflicts

    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, (lstat), file1_id,
                   file1_kind), ... ])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
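        # Illustrative usage sketch (not part of the original source): a
        # caller is expected to hold a lock for the whole iteration, e.g.
        #
        #   tree.lock_read()
        #   try:
        #       for (dir_path, dir_id), entries in tree.walkdirs():
        #           for path, name, kind, lstat, file_id, versioned_kind in entries:
        #               pass  # inspect each directory entry here
        #   finally:
        #       tree.unlock()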
        disk_top = self.abspath(prefix)
        if disk_top.endswith('/'):
            disk_top = disk_top[:-1]
        top_strip_len = len(disk_top) + 1
        inventory_iterator = self._walkdirs(prefix)
        disk_iterator = osutils.walkdirs(disk_top, prefix)
        try:
            current_disk = disk_iterator.next()
            disk_finished = False
        except OSError, e:
            if not (e.errno == errno.ENOENT or
                (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
                raise
            current_disk = None
            disk_finished = True
        try:
            current_inv = inventory_iterator.next()
            inv_finished = False
        except StopIteration:
            current_inv = None
            inv_finished = True
        while not inv_finished or not disk_finished:
            if not disk_finished:
                # strip out .bzr dirs
                if current_disk[0][1][top_strip_len:] == '':
                    # osutils.walkdirs can be made nicer -
                    # yield the path-from-prefix rather than the pathjoined
                    # value.
                    bzrdir_loc = bisect_left(current_disk[1], ('.bzr', '.bzr'))
                    if current_disk[1][bzrdir_loc][0] == '.bzr':
                        # we don't yield the contents of, or, .bzr itself.
                        del current_disk[1][bzrdir_loc]
            if inv_finished:
                # everything is unknown
                direction = 1
            elif disk_finished:
                # everything is missing
                direction = -1
            else:
                direction = cmp(current_inv[0][0], current_disk[0][0])
            if direction > 0:
                # disk is before inventory - unknown
                dirblock = [(relpath, basename, kind, stat, None, None) for
                    relpath, basename, kind, stat, top_path in current_disk[1]]
                yield (current_disk[0][0], None), dirblock
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
            elif direction < 0:
                # inventory is before disk - missing.
                dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
                    for relpath, basename, dkind, stat, fileid, kind in
                    current_inv[1]]
                yield (current_inv[0][0], current_inv[0][1]), dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
            else:
                # versioned present directory
                # merge the inventory and disk data together
                dirblock = []
                for relpath, subiterator in itertools.groupby(sorted(
                    current_inv[1] + current_disk[1], key=operator.itemgetter(0)), operator.itemgetter(1)):
                    path_elements = list(subiterator)
                    if len(path_elements) == 2:
                        inv_row, disk_row = path_elements
                        # versioned, present file
                        dirblock.append((inv_row[0],
                            inv_row[1], disk_row[2],
                            disk_row[3], inv_row[4],
                            inv_row[5]))
                    elif len(path_elements[0]) == 5:
                        # unknown disk file
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], path_elements[0][2],
                            path_elements[0][3], None, None))
                    elif len(path_elements[0]) == 6:
                        # versioned, absent file.
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], 'unknown', None,
                            path_elements[0][4], path_elements[0][5]))
                    else:
                        raise NotImplementedError('unreachable code')
                yield current_inv[0], dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True

    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :prefix: is used as the directory to start with.
        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, None, file1_id,
                   file1_kind), ... ])
        """
        _directory = 'directory'
        # get the root in the inventory
        inv = self.inventory
        top_id = inv.path2id(prefix)
        if top_id is None:
            pending = []
        else:
            pending = [(prefix, '', _directory, None, top_id, None)]
        while pending:
            dirblock = []
            currentdir = pending.pop()
            # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
            top_id = currentdir[4]
            if currentdir[0]:
                relroot = currentdir[0] + '/'
            else:
                relroot = ""
            # FIXME: stash the node in pending
            entry = inv[top_id]
            for name, child in entry.sorted_children():
                dirblock.append((relroot + name, name, child.kind, None,
                    child.file_id, child.kind
                    ))
            yield (currentdir[0], entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append(dir)

    @needs_tree_write_lock
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts. The corresponding .THIS .BASE and
        .OTHER files are deleted, as per 'resolve'.
        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
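        # Illustrative usage sketch (not part of the original source):
        #
        #   un_resolved, resolved = tree.auto_resolve()
        #
        # Conflicts whose files no longer contain any <<<<<<<, ======= or
        # >>>>>>> markers are reported in 'resolved' and have their
        # .THIS/.BASE/.OTHER files removed; the rest stay in 'un_resolved'.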

    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite, to give it a chance to detect
        corruption after actions have occurred. The default implementation is a
        no-op.

        :return: None. An exception should be raised if there is an error.
        """


class WorkingTree2(WorkingTree):
    """This is the Format 2 working tree.

    This was the first weave based working tree.
     - uses os locks for locking.
     - uses the branch last-revision.
    """

    def __init__(self, *args, **kwargs):
        super(WorkingTree2, self).__init__(*args, **kwargs)
        # WorkingTree2 has more of a constraint that self._inventory must
        # exist. Because this is an older format, we don't mind the overhead
        # caused by the extra computation here.
        #
        # Newer WorkingTree's should only have self._inventory set when they
        # are locked.
        if self._inventory is None:
            self.read_working_inventory()

    def lock_tree_write(self):
        """See WorkingTree.lock_tree_write().

        In Format2 WorkingTrees we have a single lock for the branch and tree
        so lock_tree_write() degrades to lock_write().
        """
        self.branch.lock_write()
        try:
            return self._control_files.lock_write()
        except:
            self.branch.unlock()
            raise

    def unlock(self):
        # we share control files:
        if self._control_files._lock_count == 3:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()

        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()
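    # Illustrative sketch (not part of the original source): because the
    # branch and the tree share a single physical lock in this format, the
    # lock is always taken and released as a pair, e.g.
    #
    #   tree.lock_tree_write()   # locks the branch, then the control files
    #   try:
    #       pass  # mutate the tree here
    #   finally:
    #       tree.unlock()        # unlocks the control files, then the branch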


class WorkingTree3(WorkingTree):
    """This is the Format 3 working tree.

    This differs from the base WorkingTree by:
     - having its own file lock
     - having its own last-revision property.

    This is new in bzr 0.8
    """

    @needs_read_lock
    def _last_revision(self):
        """See Mutable.last_revision."""
        try:
            return osutils.safe_revision_id(
                        self._control_files.get('last-revision').read())
        except errors.NoSuchFile:
            return None

    def _change_last_revision(self, revision_id):
        """See WorkingTree._change_last_revision."""
        if revision_id is None or revision_id == NULL_REVISION:
            try:
                self._control_files._transport.delete('last-revision')
            except errors.NoSuchFile:
                pass
            return False
        else:
            self._control_files.put_bytes('last-revision', revision_id)
            return True

    @needs_tree_write_lock
    def set_conflicts(self, conflicts):
        self._put_rio('conflicts', conflicts.to_stanzas(),
                      CONFLICT_HEADER_1)

    @needs_tree_write_lock
    def add_conflicts(self, new_conflicts):
        conflict_set = set(self.conflicts())
        conflict_set.update(set(list(new_conflicts)))
        self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
                                       key=_mod_conflicts.Conflict.sort_key)))

    @needs_read_lock
    def conflicts(self):
        try:
            confile = self._control_files.get('conflicts')
        except errors.NoSuchFile:
            return _mod_conflicts.ConflictList()
        try:
            if confile.next() != CONFLICT_HEADER_1 + '\n':
                raise errors.ConflictFormatError()
        except StopIteration:
            raise errors.ConflictFormatError()
        return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
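    # Illustrative usage sketch (not part of the original source): conflicts
    # survive a round trip through the 'conflicts' control file, e.g.
    #
    #   tree.add_conflicts(new_conflicts)   # merged with existing, re-sorted
    #   current = tree.conflicts()          # re-read from the control file
    #
    # The file starts with CONFLICT_HEADER_1 followed by one rio stanza per
    # conflict, which is why conflicts() checks the header before parsing.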

    def unlock(self):
        if self._control_files._lock_count == 1:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()
        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()


def get_conflicted_stem(path):
    for suffix in _mod_conflicts.CONFLICT_SUFFIXES:
        if path.endswith(suffix):
            return path[:-len(suffix)]
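# Example (illustrative, not part of the original source):
#   get_conflicted_stem('foo.txt.THIS') -> 'foo.txt'
#   get_conflicted_stem('foo.txt')      -> None (no conflict suffix)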

@deprecated_function(zero_eight)
def is_control_file(filename):
    """See WorkingTree.is_control_filename(filename)."""
    ## FIXME: better check
    filename = normpath(filename)
    while filename != '':
        head, tail = os.path.split(filename)
        ## mutter('check %r for control file' % ((head, tail),))
        if tail == '.bzr':
            return True
        if filename == head:
            break
        filename = head
    return False


class WorkingTreeFormat(object):
    """An encapsulation of the initialization and open routines for a format.

    Formats provide three things:
     * An initialization routine,
     * a format string,
     * an open routine.

    Formats are placed in a dict by their format string for reference
    during workingtree opening. It's not required that these be instances, they
    can be classes themselves with class methods - it simply depends on
    whether state is needed for a given format or not.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every time regardless.
    """

    _default_format = None
    """The default format used for new trees."""

    _formats = {}
    """The known formats."""

    requires_rich_root = False

    upgrade_recommended = False

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the working tree object in a_bzrdir."""
        try:
            transport = a_bzrdir.get_workingtree_transport(None)
            format_string = transport.get("format").read()
            return klass._formats[format_string]
        except errors.NoSuchFile:
            raise errors.NoWorkingTree(base=transport.base)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string)

    def __eq__(self, other):
        return self.__class__ is other.__class__

    def __ne__(self, other):
        return not (self == other)

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        return klass._default_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format."""
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def is_supported(self):
        """Is this format supported?

        Supported formats can be initialized and opened.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    @classmethod
    def register_format(klass, format):
        klass._formats[format.get_format_string()] = format

    @classmethod
    def set_default_format(klass, format):
        klass._default_format = format

    @classmethod
    def unregister_format(klass, format):
        assert klass._formats[format.get_format_string()] is format
        del klass._formats[format.get_format_string()]
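    # Illustrative sketch (not part of the original source): a format becomes
    # discoverable by registering an instance, after which find_format() can
    # map the on-disk format marker back to that instance, e.g.
    #
    #   WorkingTreeFormat.register_format(WorkingTreeFormat3())
    #   fmt = WorkingTreeFormat.find_format(a_bzrdir)  # reads the 'format' file
    #   tree = fmt.open(a_bzrdir, _found=True)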


class WorkingTreeFormat2(WorkingTreeFormat):
    """The second working tree format.

    This format modified the hash cache from the format 1 hash cache.
    """

    upgrade_recommended = True

    def get_format_description(self):
        """See WorkingTreeFormat.get_format_description()."""
        return "Working tree format 2"

    def stub_initialize_remote(self, control_files):
        """As a special workaround create critical control files for a remote working tree.

        This ensures that it can later be updated and dealt with locally,
        since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
        no working tree. (See bug #43064).
        """
        sio = StringIO()
        inv = Inventory()
        xml5.serializer_v5.write_inventory(inv, sio)
        sio.seek(0)
        control_files.put('inventory', sio)

        control_files.put_bytes('pending-merges', '')

    def initialize(self, a_bzrdir, revision_id=None):
        """See WorkingTreeFormat.initialize()."""
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        branch = a_bzrdir.open_branch()
        if revision_id is not None:
            revision_id = osutils.safe_revision_id(revision_id)
            branch.lock_write()
            try:
                revision_history = branch.revision_history()
                try:
                    position = revision_history.index(revision_id)
                except ValueError:
                    raise errors.NoSuchRevision(branch, revision_id)
                branch.set_revision_history(revision_history[:position + 1])
            finally:
                branch.unlock()
        revision = branch.last_revision()
        inv = Inventory()
        wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
                          branch,
                          inv,
                          _internal=True,
                          _format=self,
                          _bzrdir=a_bzrdir)
        basis_tree = branch.repository.revision_tree(revision)
        if basis_tree.inventory.root is not None:
            wt.set_root_id(basis_tree.inventory.root.file_id)
        # set the parent list and cache the basis tree.
        wt.set_parent_trees([(revision, basis_tree)])
        transform.build_tree(basis_tree, wt)
        return wt

    def __init__(self):
        super(WorkingTreeFormat2, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat6()

    def open(self, a_bzrdir, _found=False):
        """Return the WorkingTree object for a_bzrdir

        _found is a private parameter, do not use it. It is used to indicate
        if format probing has already been done.
        """
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
                          _internal=True,
                          _format=self,
                          _bzrdir=a_bzrdir)
        return wt


class WorkingTreeFormat3(WorkingTreeFormat):
    """The second working tree format updated to record a format marker.

    This format:
        - exists within a metadir controlling .bzr
        - includes an explicit version marker for the workingtree control
          files, separate from the BzrDir format
        - modifies the hash cache format
        - is new in bzr 0.8
        - uses a LockDir to guard access for writes.
    """

    upgrade_recommended = True

    def get_format_string(self):
        """See WorkingTreeFormat.get_format_string()."""
        return "Bazaar-NG Working Tree format 3"

    def get_format_description(self):
        """See WorkingTreeFormat.get_format_description()."""
        return "Working tree format 3"

    _lock_file_name = 'lock'
    _lock_class = LockDir

    _tree_class = WorkingTree3

    def __get_matchingbzrdir(self):
        return bzrdir.BzrDirMetaFormat1()

    _matchingbzrdir = property(__get_matchingbzrdir)

    def _open_control_files(self, a_bzrdir):
        transport = a_bzrdir.get_workingtree_transport(None)
        return LockableFiles(transport, self._lock_file_name,
                             self._lock_class)

    def initialize(self, a_bzrdir, revision_id=None):
        """See WorkingTreeFormat.initialize().

        revision_id allows creating a working tree at a different
        revision than the branch is at.
        """
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        transport = a_bzrdir.get_workingtree_transport(self)
        control_files = self._open_control_files(a_bzrdir)
        control_files.create_lock()
        control_files.lock_write()
        control_files.put_utf8('format', self.get_format_string())
        branch = a_bzrdir.open_branch()
        if revision_id is None:
            revision_id = branch.last_revision()
        else:
            revision_id = osutils.safe_revision_id(revision_id)
        # WorkingTree3 can handle an inventory which has a unique root id
        # as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
        # those trees. And because there isn't a format bump in between, we
        # are maintaining compatibility with older clients.
        # inv = Inventory(root_id=gen_root_id())
        inv = self._initial_inventory()
        wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
                              branch,
                              inv,
                              _internal=True,
                              _format=self,
                              _bzrdir=a_bzrdir,
                              _control_files=control_files)
        wt.lock_tree_write()
        try:
            basis_tree = branch.repository.revision_tree(revision_id)
            # only set an explicit root id if there is one to set.
            if basis_tree.inventory.root is not None:
                wt.set_root_id(basis_tree.inventory.root.file_id)
            if revision_id == NULL_REVISION:
                wt.set_parent_trees([])
            else:
                wt.set_parent_trees([(revision_id, basis_tree)])
            transform.build_tree(basis_tree, wt)
        finally:
            # Unlock in this order so that the unlock-triggers-flush in
            # WorkingTree is given a chance to fire.
            control_files.unlock()
            wt.unlock()
        return wt

    def _initial_inventory(self):
        return Inventory()

    def __init__(self):
        super(WorkingTreeFormat3, self).__init__()

    def open(self, a_bzrdir, _found=False):
        """Return the WorkingTree object for a_bzrdir

        _found is a private parameter, do not use it. It is used to indicate
        if format probing has already been done.
        """
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
        return wt

    def _open(self, a_bzrdir, control_files):
        """Open the tree itself.

        :param a_bzrdir: the dir for the tree.
        :param control_files: the control files for the tree.
        """
        return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
                                _internal=True,
                                _format=self,
                                _bzrdir=a_bzrdir,
                                _control_files=control_files)

    def __str__(self):
        return self.get_format_string()


__default_format = WorkingTreeFormat4()
WorkingTreeFormat.register_format(__default_format)
WorkingTreeFormat.register_format(WorkingTreeFormat3())
WorkingTreeFormat.set_default_format(__default_format)
# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
_legacy_formats = [WorkingTreeFormat2(),
                   ]


class WorkingTreeTestProviderAdapter(object):
    """A tool to generate a suite testing multiple workingtree formats at once.

    This is done by copying the test once for each transport and injecting
    the transport_server, transport_readonly_server, and workingtree_format
    classes into each copy. Each copy is also given a new id() to make it
    easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def _clone_test(self, test, bzrdir_format, workingtree_format, variation):
        """Clone test for adaption."""
        new_test = deepcopy(test)
        new_test.transport_server = self._transport_server
        new_test.transport_readonly_server = self._transport_readonly_server
        new_test.bzrdir_format = bzrdir_format
        new_test.workingtree_format = workingtree_format
        def make_new_test_id():
            new_id = "%s(%s)" % (test.id(), variation)
            return lambda: new_id
        new_test.id = make_new_test_id()
        return new_test

    def adapt(self, test):
        from bzrlib.tests import TestSuite
        result = TestSuite()
        for workingtree_format, bzrdir_format in self._formats:
            new_test = self._clone_test(
                test,
                bzrdir_format,
                workingtree_format, workingtree_format.__class__.__name__)
            result.addTest(new_test)
        return result
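
# Illustrative usage sketch (not part of the original source): a test loader
# might fan one test case out across the registered formats like this,
# assuming suitable transport server classes are available:
#
#   formats = [(format, format._matchingbzrdir) for format in
#              WorkingTreeFormat._formats.values() + _legacy_formats]
#   adapter = WorkingTreeTestProviderAdapter(transport_server,
#                                            transport_readonly_server,
#                                            formats)
#   suite = adapter.adapt(some_test_case)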