        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):

    def all_file_ids(self):
        """See Tree.iter_all_file_ids"""
        return set(self.inventory)

    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)
    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
            revision_id = self.get_parent_ids()[0]
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                _mod_revision.NULL_REVISION)
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, it's a problem, if it's not
            if self.branch.repository.has_revision(revision_id):
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                _mod_revision.NULL_REVISION)

        self._flush_ignore_list_cache()
    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If it's a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))

    def get_file(self, file_id, path=None):
        return self.get_file_with_stat(file_id, path)[0]

    def get_file_with_stat(self, file_id, path=None, _fstat=os.fstat):
        """See MutableTree.get_file_with_stat."""
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path)
        return (file_obj, _fstat(file_obj.fileno()))

    def get_file_byname(self, filename):
        return file(self.abspath(filename), 'rb')

    def get_file_lines(self, file_id, path=None):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path)
        return file.readlines()
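
    # Illustrative sketch (editor's addition, not part of the original source):
    # how a caller might read a versioned file's text through the accessors
    # above. The path '/tmp/example-tree' and the file name 'README' are
    # made-up placeholders.
    #
    #   from bzrlib.workingtree import WorkingTree
    #   wt = WorkingTree.open('/tmp/example-tree')
    #   wt.lock_read()
    #   try:
    #       file_id = wt.path2id('README')
    #       text = wt.get_file(file_id).read()
    #   finally:
    #       wt.unlock()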
    def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
        """See Tree.annotate_iter

        This implementation will use the basis tree implementation if possible.
        Lines not in the basis are attributed to CURRENT_REVISION

        If there are pending merges, lines added by those merges will be
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        basis = self.basis_tree()
            changes = self.iter_changes(basis, True, [self.id2path(file_id)],
                require_versioned=True).next()
            changed_content, kind = changes[2], changes[6]
            if not changed_content:
                return basis.annotate_iter(file_id)
            if kind[0] != 'file':
                old_lines = list(basis.annotate_iter(file_id))
            for tree in self.branch.repository.revision_trees(
                    self.get_parent_ids()[1:]):
                if file_id not in tree:
                old.append(list(tree.annotate_iter(file_id)))
            return annotate.reannotate(old, self.get_file(file_id).readlines(),

    def _get_ancestors(self, default_revision):
        ancestors = set([default_revision])
        for parent_id in self.get_parent_ids():
            ancestors.update(self.branch.repository.get_ancestry(
                parent_id, topo_sorted=False))
    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            merges_file = self._transport.get('pending-merges')
        except errors.NoSuchFile:
            for l in merges_file.readlines():
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
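
    # Illustrative sketch (editor's addition): get_parent_ids() returns the
    # left-most (basis) revision first, followed by any pending merges read
    # from the 'pending-merges' file. The revision ids below are hypothetical.
    #
    #   wt.lock_read()
    #   try:
    #       parents = wt.get_parent_ids()
    #       # e.g. ['basis-rev-id', 'merged-rev-id']; an empty list means
    #       # the tree has no commits yet.
    #   finally:
    #       wt.unlock()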
    def get_root_id(self):
        """Return the id of this tree's root"""
        return self._inventory.root.file_id

    def _get_store_filename(self, file_id):
        ## XXX: badly named; this is not in the store at all
        return self.abspath(self.id2path(file_id))
    def clone(self, to_bzrdir, revision_id=None):
        """Duplicate this working tree into to_bzrdir, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see bzrdir.sprout()

            If not None, the cloned tree will have its last revision set to
            revision, and any difference between the source tree's last revision
            and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = to_bzrdir.create_workingtree()
        self.copy_content_into(result, revision_id)

    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            merge.transform_tree(tree, self)
            tree.set_parent_ids([revision_id])

    def id2abspath(self, file_id):
        return self.abspath(self.id2path(file_id))
    def has_id(self, file_id):
        # files that have been deleted are excluded
        if not inv.has_id(file_id):
        path = inv.id2path(file_id)
        return osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
        return self.inventory.has_id(file_id)

    __contains__ = has_id

    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
            return os.path.getsize(self.id2abspath(file_id))
            if e.errno != errno.ENOENT:
    def get_file_sha1(self, file_id, path=None, stat_value=None):
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis

        def is_executable(self, file_id, path=None):
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat
    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        for f, file_id, kind in zip(files, ids, kinds):
                inv.add_path(f, kind=kind)
                inv.add_path(f, kind=kind, file_id=file_id)
            self._inventory_is_modified = True

    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                    kinds[pos] = file_kind(fullpath)
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it's not the first parent to be added,
            or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available, using
        this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        for rev_id in revision_ids:
            if rev_id in parents:
            parents.append(rev_id)
        self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
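
    # Illustrative sketch (editor's addition): recording an extra parent after
    # fetching its revision, as callers of add_parent_tree_id() typically do.
    # 'other_branch' and 'other-revision-id' are made-up placeholders.
    #
    #   wt.lock_write()
    #   try:
    #       wt.branch.fetch(other_branch, 'other-revision-id')
    #       wt.add_parent_tree_id('other-revision-id')
    #   finally:
    #       wt.unlock()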
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
            stat_result = _lstat(abspath)
            if getattr(e, 'errno', None) == errno.ENOENT:
                return ('missing', None, None, None)
            # propagate other errors
        kind = _mapper(stat_result.st_mode)
            size = stat_result.st_size
            # try for a stat cache lookup
            executable = self._is_executable_from_path_and_stat(path, stat_result)
            return (kind, size, executable, self._sha_from_stat(
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            return ('symlink', None, None, os.readlink(abspath.encode(osutils._fs_enc)))
        return (kind, None, None, None)
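
    # Illustrative sketch (editor's addition): the 4-tuple returned by
    # path_content_summary() is (kind, size, executable, sha1-or-link-target),
    # with None for fields that do not apply. 'hello.c' is a made-up path.
    #
    #   kind, size, executable, sha1 = wt.path_content_summary('hello.c')
    #   if kind == 'missing':
    #       print 'not present on disk'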
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self._control_files._file_mode)

    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        if len(revision_ids) == 0:
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids
    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)
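
    # Illustrative sketch (editor's addition): after a merge, the first parent
    # stays the basis revision and the merged revision is appended. The ids
    # below are placeholders.
    #
    #   wt.lock_tree_write()
    #   try:
    #       wt.set_parent_ids(['basis-rev-id', 'merged-rev-id'])
    #   finally:
    #       wt.unlock()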
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                    leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)

    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)

    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._transport.put_file(filename, my_file,
            mode=self._control_files._file_mode)
    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
            merger = Merger(self.branch, this_tree=self, pb=pb)
            merger.pp = ProgressPhase("Merge phase", 5, pb)
            merger.pp.next_phase()
            # check that there are no
            merger.check_basis(check_clean=True, require_commits=False)
            if to_revision is None:
                to_revision = _mod_revision.ensure_null(branch.last_revision())
            merger.other_rev_id = to_revision
            if _mod_revision.is_null(merger.other_rev_id):
                raise errors.NoCommits(branch)
            self.branch.fetch(branch, last_revision=merger.other_rev_id)
            merger.other_basis = merger.other_rev_id
            merger.other_tree = self.branch.repository.revision_tree(
            merger.other_branch = branch
            merger.pp.next_phase()
            if from_revision is None:
                merger.set_base_revision(from_revision, branch)
            if merger.base_rev_id == merger.other_rev_id:
                raise errors.PointlessMerge
            merger.backup_files = False
            if merge_type is None:
                merger.merge_type = Merge3Merger
                merger.merge_type = merge_type
            merger.set_interesting_files(None)
            merger.show_base = False
            merger.reprocess = False
            conflicts = merger.do_merge()
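
    # Illustrative sketch (editor's addition): merging another branch's tip
    # into this working tree. The URL is a made-up placeholder; the return
    # value reports the conflicts produced by the merge.
    #
    #   from bzrlib.branch import Branch
    #   other = Branch.open('http://example.com/other-branch')
    #   conflicts = wt.merge_from_branch(other)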
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
            hashfile = self._transport.get('merge-hashes')
        except errors.NoSuchFile:
            if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                raise errors.MergeModifiedFormatError()
        except StopIteration:
            raise errors.MergeModifiedFormatError()
        for s in RioReader(hashfile):
            # RioReader reads in Unicode, so convert file_ids back to utf8
            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
            if file_id not in self.inventory:
            text_hash = s.get("hash")
            if text_hash == self.get_file_sha1(file_id):
                merge_hashes[file_id] = text_hash
    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')

    def get_symlink_target(self, file_id):
        return os.readlink(self.id2abspath(file_id).encode(osutils._fs_enc))
    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                'Trees have the same root')
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        other_tree.bzrdir.retire_bzrdir()
    def _setup_directory_is_tree_reference(self):
        if self._branch.repository._format.supports_tree_reference:
            self._directory_is_tree_reference = \
                self._directory_may_be_tree_reference
            self._directory_is_tree_reference = \
                self._directory_is_never_tree_reference

    def _directory_is_never_tree_reference(self, relpath):

    def _directory_may_be_tree_reference(self, relpath):
        # as a special case, if a directory contains control files then
        # it's a tree reference, except that the root of the tree is not
        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
        # TODO: We could ask all the control formats whether they
        # recognize this directory, but at the moment there's no cheap api
        # to do that. Since we probably can only nest bzr checkouts and
        # they always use this name it's ok for now. -- mbp 20060306
        # FIXME: There is an unhandled case here of a subdirectory
        # containing .bzr but not a branch; that will probably blow up
        # when you try to commit it. It might happen if there is a
        # checkout in a subdirectory. This can be avoided by not adding
    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
            format = self.bzrdir.cloning_metadir()
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
        if not repo.supports_rich_root():
            raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file,

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)

        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        self._serialize(self._inventory, sio)
        self._transport.put_file('inventory', sio,
            mode=self._control_files._file_mode)
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))
    def list_files(self, include_root=False):
        """Recursively list all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.

        Skips the control directory.
        """
        # list_files is an iterator, so @needs_read_lock doesn't work properly
        # with it. So callers should be careful to always read_lock the tree.
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)

        inv = self.inventory
        if include_root is True:
            yield ('', 'V', 'directory', inv.root.file_id, inv.root)
        # Convert these into local objects to save lookup times
        pathjoin = osutils.pathjoin
        file_kind = self._kind

        # transport.base ends in a slash, we want the piece
        # between the last two slashes
        transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]

        fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}

        # directory file_id, relative path, absolute path, reverse sorted children
        children = os.listdir(self.basedir)
        # jam 20060527 The kernel sized tree seems equivalent whether we
        # use a deque and popleft to keep them sorted, or if we use a plain
        # list and just reverse() them.
        children = collections.deque(children)
        stack = [(inv.root.file_id, u'', self.basedir, children)]
            from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]

                f = children.popleft()
                ## TODO: If we find a subdirectory with its own .bzr
                ## directory, then that is a separate tree and we
                ## should exclude it.

                # the bzrdir for this tree
                if transport_base_dir == f:

                # we know that from_dir_relpath and from_dir_abspath never end in a slash
                # and 'f' doesn't begin with one, we can do a string op, rather
                # than the checks of pathjoin(), all relative paths will have an extra slash
                fp = from_dir_relpath + '/' + f

                fap = from_dir_abspath + '/' + f

                f_ie = inv.get_child(from_dir_id, f)
                elif self.is_ignored(fp[1:]):
                    # we may not have found this file, because of a unicode issue
                    f_norm, can_access = osutils.normalized_filename(f)
                    if f == f_norm or not can_access:
                        # No change, so treat this file normally
                        # this file can be accessed by a normalized path
                        # check again if it is versioned
                        # these lines are repeated here for performance
                        fp = from_dir_relpath + '/' + f
                        fap = from_dir_abspath + '/' + f
                        f_ie = inv.get_child(from_dir_id, f)
                        elif self.is_ignored(fp[1:]):

                fk = file_kind(fap)

                # make a last minute entry
                    yield fp[1:], c, fk, f_ie.file_id, f_ie
                        yield fp[1:], c, fk, None, fk_entries[fk]()
                        yield fp[1:], c, fk, None, TreeEntry()

                if fk != 'directory':

                # But do this child first
                new_children = os.listdir(fap)
                new_children = collections.deque(new_children)
                stack.append((f_ie.file_id, fp, fap, new_children))
                # Break out of inner loop,
                # so that we start outer loop with child
            # if we finished all children, pop it off the stack
    @needs_tree_write_lock
    def move(self, from_paths, to_dir=None, after=False, **kwargs):
        """Rename files.

        to_dir must exist in the inventory.

        If to_dir exists and is a directory, the files are moved into
        it, keeping their old names.

        Note that to_dir is only the last component of the new name;
        this doesn't change the directory.

        For each entry in from_paths the move mode will be determined

        The first mode moves the file in the filesystem and updates the
        inventory. The second mode only updates the inventory without
        touching the file on the filesystem. This is the new mode introduced

        move uses the second mode if 'after == True' and the target is not
        versioned but present in the working tree.

        move uses the second mode if 'after == False' and the source is
        versioned but no longer in the working tree, and the target is not
        versioned but present in the working tree.

        move uses the first mode if 'after == False' and the source is
        versioned and present in the working tree, and the target is not
        versioned and not present in the working tree.

        Everything else results in an error.

        This returns a list of (from_path, to_path) pairs for each
        entry that is moved.
        """
        # check for deprecated use of signature
            to_dir = kwargs.get('to_name', None)
                raise TypeError('You must supply a target directory')
                symbol_versioning.warn('The parameter to_name was deprecated'
                                       ' in version 0.13. Use to_dir instead',

        # check destination directory
        if isinstance(from_paths, basestring):
        inv = self.inventory
        to_abs = self.abspath(to_dir)
        if not isdir(to_abs):
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotADirectory(to_abs))
        if not self.has_filename(to_dir):
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotInWorkingDirectory(to_dir))
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id is None:
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotVersionedError(path=str(to_dir)))

        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind != 'directory':
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotADirectory(to_abs))

        # create rename entries and tuples
        for from_rel in from_paths:
            from_tail = splitpath(from_rel)[-1]
            from_id = inv.path2id(from_rel)
                raise errors.BzrMoveFailedError(from_rel,to_dir,
                    errors.NotVersionedError(path=str(from_rel)))

            from_entry = inv[from_id]
            from_parent_id = from_entry.parent_id
            to_rel = pathjoin(to_dir, from_tail)
            rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                from_tail=from_tail,
                from_parent_id=from_parent_id,
                to_rel=to_rel, to_tail=from_tail,
                to_parent_id=to_dir_id)
            rename_entries.append(rename_entry)
            rename_tuples.append((from_rel, to_rel))

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        original_modified = self._inventory_is_modified
                self._inventory_is_modified = True
            self._move(rename_entries)
            # restore the inventory on error
            self._inventory_is_modified = original_modified
        self._write_inventory(inv)
        return rename_tuples
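
    # Illustrative sketch (editor's addition): moving versioned files into an
    # already-versioned directory; returns (from_path, to_path) pairs. The
    # file names are placeholders.
    #
    #   pairs = wt.move(['a.txt', 'b.txt'], 'docs')
    #   # pairs == [('a.txt', 'docs/a.txt'), ('b.txt', 'docs/b.txt')]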
    def _determine_mv_mode(self, rename_entries, after=False):
        """Determines for each from-to pair if both inventory and working tree
        or only the inventory has to be changed.

        Also does basic plausibility tests.
        """
        inv = self.inventory

        for rename_entry in rename_entries:
            # store to local variables for easier reference
            from_rel = rename_entry.from_rel
            from_id = rename_entry.from_id
            to_rel = rename_entry.to_rel
            to_id = inv.path2id(to_rel)
            only_change_inv = False

            # check the inventory for source and destination
                raise errors.BzrMoveFailedError(from_rel,to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            if to_id is not None:
                raise errors.BzrMoveFailedError(from_rel,to_rel,
                    errors.AlreadyVersionedError(path=str(to_rel)))

            # try to determine the mode for rename (only change inv or change
            # inv and file system)
                if not self.has_filename(to_rel):
                    raise errors.BzrMoveFailedError(from_id,to_rel,
                        errors.NoSuchFile(path=str(to_rel),
                        extra="New file has not been created yet"))
                only_change_inv = True
            elif not self.has_filename(from_rel) and self.has_filename(to_rel):
                only_change_inv = True
            elif self.has_filename(from_rel) and not self.has_filename(to_rel):
                only_change_inv = False
            elif (not self.case_sensitive
                  and from_rel.lower() == to_rel.lower()
                  and self.has_filename(from_rel)):
                only_change_inv = False
                # something is wrong, so let's determine what exactly
                if not self.has_filename(from_rel) and \
                   not self.has_filename(to_rel):
                    raise errors.BzrRenameFailedError(from_rel,to_rel,
                        errors.PathsDoNotExist(paths=(str(from_rel),
                    raise errors.RenameFailedFilesExist(from_rel, to_rel)
            rename_entry.only_change_inv = only_change_inv
        return rename_entries
    def _move(self, rename_entries):
        """Moves a list of files.

        Depending on the value of the flag 'only_change_inv', the
        file will be moved on the file system or not.
        """
        inv = self.inventory

        for entry in rename_entries:
                self._move_entry(entry)
                self._rollback_move(moved)

    def _rollback_move(self, moved):
        """Try to rollback a previous move in case of a filesystem error."""
        inv = self.inventory
                self._move_entry(_RenameEntry(entry.to_rel, entry.from_id,
                    entry.to_tail, entry.to_parent_id, entry.from_rel,
                    entry.from_tail, entry.from_parent_id,
                    entry.only_change_inv))
            except errors.BzrMoveFailedError, e:
                raise errors.BzrMoveFailedError('', '', "Rollback failed."
                    " The working tree is in an inconsistent state."
                    " Please consider doing a 'bzr revert'."
                    " Error message is: %s" % e)

    def _move_entry(self, entry):
        inv = self.inventory
        from_rel_abs = self.abspath(entry.from_rel)
        to_rel_abs = self.abspath(entry.to_rel)
        if from_rel_abs == to_rel_abs:
            raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
                "Source and target are identical.")

        if not entry.only_change_inv:
                osutils.rename(from_rel_abs, to_rel_abs)
                raise errors.BzrMoveFailedError(entry.from_rel,
        inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
    @needs_tree_write_lock
    def rename_one(self, from_rel, to_rel, after=False):
        This can change the directory or the filename or both.

        rename_one has several 'modes' to work. First, it can rename a physical
        file and change the file_id. That is the normal mode. Second, it can
        only change the file_id without touching any physical file. This is
        the new mode introduced in version 0.15.

        rename_one uses the second mode if 'after == True' and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the second mode if 'after == False' and 'from_rel' is
        versioned but no longer in the working tree, and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the first mode if 'after == False' and 'from_rel' is
        versioned and present in the working tree, and 'to_rel' is not
        versioned and not present in the working tree.

        Everything else results in an error.
        inv = self.inventory

        # create rename entries and tuples
        from_tail = splitpath(from_rel)[-1]
        from_id = inv.path2id(from_rel)
            raise errors.BzrRenameFailedError(from_rel,to_rel,
                errors.NotVersionedError(path=str(from_rel)))
        from_entry = inv[from_id]
        from_parent_id = from_entry.parent_id
        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
            from_tail=from_tail,
            from_parent_id=from_parent_id,
            to_rel=to_rel, to_tail=to_tail,
            to_parent_id=to_dir_id)
        rename_entries.append(rename_entry)

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        # check if the target changed directory and if the target directory is
        if to_dir_id is None:
            raise errors.BzrMoveFailedError(from_rel,to_rel,
                errors.NotVersionedError(path=str(to_dir)))

        # all checks done. now we can continue with our actual work
        mutter('rename_one:\n'
               '  to_dir_id {%s}\n',
               from_id, from_rel, to_rel, to_dir, to_dir_id)

        self._move(rename_entries)
        self._write_inventory(inv)

    class _RenameEntry(object):
        def __init__(self, from_rel, from_id, from_tail, from_parent_id,
                     to_rel, to_tail, to_parent_id, only_change_inv=False):
            self.from_rel = from_rel
            self.from_id = from_id
            self.from_tail = from_tail
            self.from_parent_id = from_parent_id
            self.to_rel = to_rel
            self.to_tail = to_tail
            self.to_parent_id = to_parent_id
            self.only_change_inv = only_change_inv
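
    # Illustrative sketch (editor's addition): rename_one() in its two modes.
    # File names are placeholders.
    #
    #   wt.rename_one('old.txt', 'new.txt')
    #   # or, if the file was already moved on disk by hand:
    #   wt.rename_one('old.txt', 'new.txt', after=True)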
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
            [subp for subp in self.extras() if not self.is_ignored(subp)])
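
    # Illustrative sketch (editor's addition): listing unknown files, i.e.
    # unversioned files that are not ignored.
    #
    #   wt.lock_read()
    #   try:
    #       for path in wt.unknowns():
    #           print path
    #   finally:
    #       wt.unlock()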
    @needs_tree_write_lock
    def unversion(self, file_ids):
        """Remove the file ids in file_ids from the current versioned set.

        When a file_id is unversioned, all of its children are automatically

        :param file_ids: The file ids to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        for file_id in file_ids:
            if self._inventory.has_id(file_id):
                self._inventory.remove_recursive_id(file_id)
                raise errors.NoSuchId(self, file_id)
            # in the future this should just set a dirty bit to wait for the
            # final unlock. However, until all methods of workingtree start
            # with the current in-memory inventory rather than triggering
            # a read, it is more complex - we need to teach read_inventory
            # to know when to read, and when to not read first... and possibly
            # to save first when the in memory one may be corrupted.
            # so for now, we just only write it if it is indeed dirty.
            self._write_inventory(self._inventory)

    def _iter_conflicts(self):
        for info in self.list_files():
            stem = get_conflicted_stem(path)
            if stem not in conflicted:
                conflicted.add(stem)
    def pull(self, source, overwrite=False, stop_revision=None,
             change_reporter=None, possible_transports=None):
        top_pb = bzrlib.ui.ui_factory.nested_progress_bar()
            pp = ProgressPhase("Pull phase", 2, top_pb)
            old_revision_info = self.branch.last_revision_info()
            basis_tree = self.basis_tree()
            count = self.branch.pull(source, overwrite, stop_revision,
                                     possible_transports=possible_transports)
            new_revision_info = self.branch.last_revision_info()
            if new_revision_info != old_revision_info:
                repository = self.branch.repository
                pb = bzrlib.ui.ui_factory.nested_progress_bar()
                basis_tree.lock_read()
                    new_basis_tree = self.branch.basis_tree()
                        change_reporter=change_reporter)
                    if (basis_tree.inventory.root is None and
                        new_basis_tree.inventory.root is not None):
                        self.set_root_id(new_basis_tree.get_root_id())
                # TODO - dedup parents list with things merged by pull ?
                # reuse the revisiontree we merged against to set the new
                parent_trees = [(self.branch.last_revision(), new_basis_tree)]
                # we have to pull the merge trees out again, because
                # merge_inner has set the ids. - this corner is not yet
                # layered well enough to prevent double handling.
                # XXX TODO: Fix the double handling: telling the tree about
                # the already known parent data is wasteful.
                merges = self.get_parent_ids()[1:]
                parent_trees.extend([
                    (parent, repository.revision_tree(parent)) for
                self.set_parent_trees(parent_trees)

    def put_file_bytes_non_atomic(self, file_id, bytes):
        """See MutableTree.put_file_bytes_non_atomic."""
        stream = file(self.id2abspath(file_id), 'wb')
        # TODO: update the hashcache here ?
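
    # Illustrative sketch (editor's addition): replacing a versioned file's
    # bytes directly by file id. The path is a placeholder.
    #
    #   wt.lock_write()
    #   try:
    #       wt.put_file_bytes_non_atomic(wt.path2id('VERSION'), '1.2.3\n')
    #   finally:
    #       wt.unlock()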
    def extras(self):
        """Yield all unversioned files in this WorkingTree.

        If there are any unversioned directories then only the directory is
        returned, not all its children. But if there are unversioned files
        under a versioned subdirectory, they are returned.

        Currently returned depth-first, sorted by name within directories.
        This is the same order used by 'osutils.walkdirs'.
        """
        ## TODO: Work from given directory downwards
        for path, dir_entry in self.inventory.directories():
            # mutter("search for unknowns in %r", path)
            dirabs = self.abspath(path)
            if not isdir(dirabs):
                # e.g. directory deleted
        If the file is ignored, returns the pattern which caused it to
        be ignored, otherwise None. So this can simply be used as a
        boolean if desired."""
        if getattr(self, '_ignoreglobster', None) is None:
            self._ignoreglobster = globbing.Globster(self.get_ignore_list())
        return self._ignoreglobster.match(filename)
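
    # Illustrative sketch (editor's addition): is_ignored() returns the
    # matching ignore pattern (useful for reporting) or None. The path is a
    # placeholder.
    #
    #   pattern = wt.is_ignored('build/output.o')
    #   if pattern is not None:
    #       print 'ignored by', pattern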
    def kind(self, file_id):
        return file_kind(self.id2abspath(file_id))

    def stored_kind(self, file_id):
        """See Tree.stored_kind"""
        return self.inventory[file_id].kind

    def _comparison_data(self, entry, path):
        abspath = self.abspath(path)
            stat_value = os.lstat(abspath)
            if getattr(e, 'errno', None) == errno.ENOENT:
            mode = stat_value.st_mode
            kind = osutils.file_kind_from_stat_mode(mode)
            if not supports_executable():
                executable = entry is not None and entry.executable
                executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
        return kind, executable, stat_value

    def _file_size(self, entry, stat_value):
        return stat_value.st_size

    def last_revision(self):
        """Return the last revision of the branch for this tree.

        This format tree does not support a separate marker for last-revision
        compared to the branch.

        See MutableTree.last_revision
        """
        return self._last_revision()

    def _last_revision(self):
        """helper for get_parent_ids."""
        return _mod_revision.ensure_null(self.branch.last_revision())

    def is_locked(self):
        return self._control_files.is_locked()

    def _must_be_locked(self):
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
    def lock_read(self):
        """See Branch.lock_read, and WorkingTree.unlock."""
        if not self.is_locked():
        self.branch.lock_read()
            return self._control_files.lock_read()
            self.branch.unlock()

    def lock_tree_write(self):
        """See MutableTree.lock_tree_write, and WorkingTree.unlock."""
        if not self.is_locked():
        self.branch.lock_read()
            return self._control_files.lock_write()
            self.branch.unlock()

    def lock_write(self):
        """See MutableTree.lock_write, and WorkingTree.unlock."""
        if not self.is_locked():
        self.branch.lock_write()
            return self._control_files.lock_write()
            self.branch.unlock()

    def get_physical_lock_status(self):
        return self._control_files.get_physical_lock_status()

    def _basis_inventory_name(self):
        return 'basis-inventory-cache'
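
    # Illustrative sketch (editor's addition): the usual lock/unlock pattern
    # for read-only access; lock_tree_write()/lock_write() follow the same
    # shape for mutating operations.
    #
    #   wt.lock_read()
    #   try:
    #       pass  # read-only operations on wt
    #   finally:
    #       wt.unlock()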
    def _reset_data(self):
        """Reset transient data that cannot be revalidated."""
        self._inventory_is_modified = False
        result = self._deserialize(self._transport.get('inventory'))
        self._set_inventory(result, dirty=False)

    @needs_tree_write_lock
    def set_last_revision(self, new_revision):
        """Change the last revision in the working tree."""
        if self._change_last_revision(new_revision):
            self._cache_basis_inventory(new_revision)

    def _change_last_revision(self, new_revision):
        """Template method part of set_last_revision to perform the change.

        This is used to allow WorkingTree3 instances to not affect branch
        when their last revision is set.
        """
        if _mod_revision.is_null(new_revision):
            self.branch.set_revision_history([])
            self.branch.generate_revision_history(new_revision)
        except errors.NoSuchRevision:
            # not present in the repo - don't try to set it deeper than the tip
            self.branch.set_revision_history([new_revision])

    def _write_basis_inventory(self, xml):
        """Write the basis inventory XML to the basis-inventory file"""
        path = self._basis_inventory_name()
        self._transport.put_file(path, sio,
            mode=self._control_files._file_mode)

    def _create_basis_xml_from_inventory(self, revision_id, inventory):
        """Create the text that will be saved in basis-inventory"""
        inventory.revision_id = revision_id
        return xml7.serializer_v7.write_inventory_to_string(inventory)
    def _cache_basis_inventory(self, new_revision):
        """Cache new_revision as the basis inventory."""
        # TODO: this should allow the ready-to-use inventory to be passed in,
        # as commit already has that ready-to-use [while the format is the
            # this double handles the inventory - unpack and repack -
            # but is easier to understand. We can/should put a conditional
            # in here based on whether the inventory is in the latest format
            # - perhaps we should repack all inventories on a repository
            # the fast path is to copy the raw xml from the repository. If the
            # xml contains 'revision_id="', then we assume the right
            # revision_id is set. We must check for this full string, because a
            # root node id can legitimately look like 'revision_id' but cannot
            xml = self.branch.repository.get_inventory_xml(new_revision)
            firstline = xml.split('\n', 1)[0]
            if (not 'revision_id="' in firstline or
                'format="7"' not in firstline):
                inv = self.branch.repository.deserialise_inventory(
                xml = self._create_basis_xml_from_inventory(new_revision, inv)
            self._write_basis_inventory(xml)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):

    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._transport.get_bytes(path)
    def read_working_inventory(self):
        """Read the working inventory.

        :raises errors.InventoryModified: read_working_inventory will fail
            when the current in memory inventory has been modified.
        """
        # conceptually this should be an implementation detail of the tree.
        # XXX: Deprecate this.
        # ElementTree does its own conversion from UTF-8, so open in
        if self._inventory_is_modified:
            raise errors.InventoryModified(self)
        result = self._deserialize(self._transport.get('inventory'))
        self._set_inventory(result, dirty=False)
    @needs_tree_write_lock
    def remove(self, files, verbose=False, to_file=None, keep_files=True,
        """Remove nominated files from the working inventory.

        :files: File paths relative to the basedir.
        :keep_files: If true, the files will also be kept.
        :force: Delete files and directories, even if they are changed and
            even if the directories are not empty.
        """
        if isinstance(files, basestring):

        unknown_nested_files=set()

        def recurse_directory_to_add_files(directory):
            # Recurse directory and add all files
            # so we can check if they have changed.
            for parent_info, file_infos in\
                self.walkdirs(directory):
                for relpath, basename, kind, lstat, fileid, kind in file_infos:
                    # Is it versioned or ignored?
                    if self.path2id(relpath) or self.is_ignored(relpath):
                        # Add nested content for deletion.
                        new_files.add(relpath)
                        # Files which are not versioned and not ignored
                        # should be treated as unknown.
                        unknown_nested_files.add((relpath, None, kind))

        for filename in files:
            # Get file name into canonical form.
            abspath = self.abspath(filename)
            filename = self.relpath(abspath)
            if len(filename) > 0:
                new_files.add(filename)
                recurse_directory_to_add_files(filename)

        files = list(new_files)

            return # nothing to do

        # Sort needed to first handle directory content before the directory
        files.sort(reverse=True)

        # Bail out if we are going to delete files we shouldn't
        if not keep_files and not force:
            has_changed_files = len(unknown_nested_files) > 0
            if not has_changed_files:
                for (file_id, path, content_change, versioned, parent_id, name,
                     kind, executable) in self.iter_changes(self.basis_tree(),
                         include_unchanged=True, require_versioned=False,
                         want_unversioned=True, specific_files=files):
                    if versioned == (False, False):
                        # The record is unknown ...
                        if not self.is_ignored(path[1]):
                            # ... but not ignored
                            has_changed_files = True
                    elif content_change and (kind[1] is not None):
                        # Versioned and changed, but not deleted
                        has_changed_files = True

            if has_changed_files:
                # Make delta show ALL applicable changes in error message.
                tree_delta = self.changes_from(self.basis_tree(),
                    require_versioned=False, want_unversioned=True,
                    specific_files=files)
                for unknown_file in unknown_nested_files:
                    if unknown_file not in tree_delta.unversioned:
                        tree_delta.unversioned.extend((unknown_file,))
                raise errors.BzrRemoveChangedFilesError(tree_delta)
        # Build inv_delta and delete files where applicable,
        # do this before any modifications to inventory.
            fid = self.path2id(f)
                message = "%s is not versioned." % (f,)
                    # having removed it, it must be either ignored or unknown
                    if self.is_ignored(f):
                    textui.show_status(new_status, self.kind(fid), f,
                inv_delta.append((f, None, fid, None))
                message = "removed %s" % (f,)

                abs_path = self.abspath(f)
                if osutils.lexists(abs_path):
                    if (osutils.isdir(abs_path) and
                        len(os.listdir(abs_path)) > 0):
                            osutils.rmtree(abs_path)
                            message = "%s is not an empty directory "\
                                "and won't be deleted." % (f,)
                        osutils.delete_any(abs_path)
                        message = "deleted %s" % (f,)
                elif message is not None:
                    # Only care if we haven't done anything yet.
                    message = "%s does not exist." % (f,)

            # Print only one message (if any) per file.
            if message is not None:
        self.apply_inventory_delta(inv_delta)
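
    # Illustrative sketch (editor's addition): unversioning files and deleting
    # them from disk in one call. File names are placeholders; force also
    # removes non-empty directories, as documented above.
    #
    #   wt.remove(['obsolete.txt'], keep_files=False)
    #   wt.remove(['generated'], keep_files=False, force=True)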
    @needs_tree_write_lock
    def revert(self, filenames=None, old_tree=None, backups=True,
               pb=DummyProgress(), report_changes=False):
        from bzrlib.conflicts import resolve
            symbol_versioning.warn('Using [] to revert all files is deprecated'
                ' as of bzr 0.91. Please use None (the default) instead.',
                DeprecationWarning, stacklevel=2)
        if old_tree is None:
            basis_tree = self.basis_tree()
            basis_tree.lock_read()
            old_tree = basis_tree
            conflicts = transform.revert(self, old_tree, filenames, backups, pb,
            if filenames is None and len(self.get_parent_ids()) > 1:
                last_revision = self.last_revision()
                if last_revision != NULL_REVISION:
                    if basis_tree is None:
                        basis_tree = self.basis_tree()
                        basis_tree.lock_read()
                    parent_trees.append((last_revision, basis_tree))
                self.set_parent_trees(parent_trees)
                resolve(self, filenames, ignore_misses=True, recursive=True)
            if basis_tree is not None:
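
    # Illustrative sketch (editor's addition): reverting selected files back
    # to the basis tree, or everything when filenames is None. The file name
    # is a placeholder.
    #
    #   wt.revert(['hello.c'])      # revert one file
    #   wt.revert()                 # revert the whole tree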
    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree can supply revision_trees for the basis revision only
        because there is only one cached inventory in the bzr directory.
        """
        if revision_id == self.last_revision():
                xml = self.read_basis_inventory()
            except errors.NoSuchFile:
                    inv = xml7.serializer_v7.read_inventory_from_string(xml)
                    # don't use the repository revision_tree api because we want
                    # to supply the inventory.
                    if inv.revision_id == revision_id:
                        return revisiontree.RevisionTree(self.branch.repository,
                except errors.BadInventoryFormat:
        # raise if there was no inventory, or if we read the wrong inventory.
        raise errors.NoSuchRevisionInTree(self, revision_id)
    # XXX: This method should be deprecated in favour of taking in a proper
    # new Inventory object.
    @needs_tree_write_lock
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import (Inventory,
        inv = Inventory(self.get_root_id())
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            # fixme, there should be a factory function inv.add_??
            if kind == 'directory':
                inv.add(InventoryDirectory(file_id, name, parent))
            elif kind == 'file':
                inv.add(InventoryFile(file_id, name, parent))
            elif kind == 'symlink':
                inv.add(InventoryLink(file_id, name, parent))
                raise errors.BzrError("unknown kind %r" % kind)
        self._write_inventory(inv)
    @needs_tree_write_lock
    def set_root_id(self, file_id):
        """Set the root id for this tree."""
                'WorkingTree.set_root_id with fileid=None')
        file_id = osutils.safe_file_id(file_id)
        self._set_root_id(file_id)

    def _set_root_id(self, file_id):
        """Set the root id for this tree, in a format specific manner.

        :param file_id: The file id to assign to the root. It must not be
            present in the current inventory or an error will occur. It must
            not be None, but rather a valid file id.
        """
        inv = self._inventory
        orig_root_id = inv.root.file_id
        # TODO: it might be nice to exit early if there was nothing
        # to do, saving us from triggering a sync on unlock.
        self._inventory_is_modified = True
        # we preserve the root inventory entry object, but
        # unlink it from the byid index
        del inv._byid[inv.root.file_id]
        inv.root.file_id = file_id
        # and link it into the index with the new changed id.
        inv._byid[inv.root.file_id] = inv.root
        # and finally update all children to reference the new id.
        # XXX: this should be safe to just look at the root.children
        # list, not the WHOLE INVENTORY.
            if entry.parent_id == orig_root_id:
                entry.parent_id = inv.root.file_id
        """See Branch.unlock.

        WorkingTree locking just uses the Branch locking facilities.
        This is current because all working trees have an embedded branch
        within them. If in the future, we were to make branch data shareable
        between multiple working trees, i.e. via shared storage, then we
        would probably want to lock both the local tree, and the branch.
        """
        raise NotImplementedError(self.unlock)
    def update(self, change_reporter=None, possible_transports=None):
        """Update a working tree along its branch.

        This will update the branch if it's bound too, which means we have
        multiple trees involved:

        - The new basis tree of the master.
        - The old basis tree of the branch.
        - The old basis tree of the working tree.
        - The current working tree state.

        Pathologically, all three may be different, and non-ancestors of each
        other. Conceptually we want to:

        - Preserve the wt.basis->wt.state changes
        - Transform the wt.basis to the new master basis.
        - Apply a merge of the old branch basis to get any 'local' changes from
        - Restore the wt.basis->wt.state changes.

        There isn't a single operation at the moment to do that, so we:
        - Merge current state -> basis tree of the master w.r.t. the old tree
        - Do a 'normal' merge of the old branch basis if it is relevant.
        """
        if self.branch.get_bound_location() is not None:
            update_branch = True
            self.lock_tree_write()
            update_branch = False
                old_tip = self.branch.update(possible_transports)
            return self._update_tree(old_tip, change_reporter)

    @needs_tree_write_lock
    def _update_tree(self, old_tip=None, change_reporter=None):
        """Update a tree to the master branch.

        :param old_tip: if supplied, the previous tip revision of the branch,
            before it was changed to the master branch's tip.
        """
        # here if old_tip is not None, it is the old tip of the branch before
        # it was updated from the master branch. This should become a pending
        # merge in the working tree to preserve the user's existing work. We
        # can't set that until we update the working tree's last revision to
        # be one from the new branch, because it will just get absorbed by
        # the parent de-duplication logic.
        #
        # We MUST save it even if an error occurs, because otherwise the
        # user's local work is unreferenced and will appear to have been lost.
        #
        result = 0
        try:
            last_rev = self.get_parent_ids()[0]
        except IndexError:
            last_rev = _mod_revision.NULL_REVISION
        if last_rev != _mod_revision.ensure_null(self.branch.last_revision()):
            # merge tree state up to new branch tip.
            basis = self.basis_tree()
            basis.lock_read()
            try:
                to_tree = self.branch.basis_tree()
                if basis.inventory.root is None:
                    self.set_root_id(to_tree.get_root_id())
                    self.flush()
                result += merge.merge_inner(
                                      self.branch,
                                      to_tree,
                                      basis,
                                      this_tree=self,
                                      change_reporter=change_reporter)
            finally:
                basis.unlock()
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to, to set the basis:
            parent_trees = [(self.branch.last_revision(), to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in ram already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if (old_tip is not None and not _mod_revision.is_null(old_tip)):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
        else:
            # the working tree had the same last-revision as the master
            # branch did. We may still have to pivot local work from the
            # local branch into old_tip:
            if (old_tip is not None and not _mod_revision.is_null(old_tip)):
                self.add_parent_tree_id(old_tip)
        if (old_tip is not None and not _mod_revision.is_null(old_tip)
            and old_tip != last_rev):
            # our last revision was not the prior branch last revision
            # and we have converted that last revision to a pending merge.
            # base is somewhere between the branch tip now
            # and the now pending merge
            #
            # Since we just modified the working tree and inventory, flush out
            # the current state, before we modify it again.
            # TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
            #       requires it only because TreeTransform directly munges the
            #       inventory and calls tree._write_inventory(). Ultimately we
            #       should be able to remove this extra flush.
            self.flush()
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                old_tip)
            base_tree = self.branch.repository.revision_tree(base_rev_id)
            other_tree = self.branch.repository.revision_tree(old_tip)
            result += merge.merge_inner(
                                  self.branch,
                                  other_tree,
                                  base_tree,
                                  this_tree=self,
                                  change_reporter=change_reporter)
        return result
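    # Illustrative sketch (not part of the original module): the parent_trees
    # list built above pairs revision ids with tree objects, new basis first,
    # followed by any pending merges. With made-up revision ids it would be:
    #
    #   parent_trees = [('rev-new-basis', to_tree),
    #                   ('rev-merged-1',
    #                    self.branch.repository.revision_tree('rev-merged-1'))]
    #   self.set_parent_trees(parent_trees)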

    def _write_hashcache_if_dirty(self):
        """Write out the hashcache if it is dirty."""
        if self._hashcache.needs_write:
            try:
                self._hashcache.write()
            except OSError, e:
                if e.errno not in (errno.EPERM, errno.EACCES):
                    raise
                # TODO: jam 20061219 Should this be a warning? A single line
                #       warning might be sufficient to let the user know what
                #       is going on.
                mutter('Could not write hashcache for %s\nError: %s',
                       self._hashcache.cache_file_name(), e)

    @needs_tree_write_lock
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        self._set_inventory(inv, dirty=True)
        self.flush()

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    @needs_read_lock
    def conflicts(self):
        conflicts = _mod_conflicts.ConflictList()
        for conflicted in self._iter_conflicts():
            text = True
            try:
                if file_kind(self.abspath(conflicted)) != "file":
                    text = False
            except errors.NoSuchFile:
                text = False
            if text is True:
                for suffix in ('.THIS', '.OTHER'):
                    try:
                        kind = file_kind(self.abspath(conflicted+suffix))
                        if kind != "file":
                            text = False
                    except errors.NoSuchFile:
                        text = False
                    if text == False:
                        break
            ctype = {True: 'text conflict', False: 'contents conflict'}[text]
            conflicts.append(_mod_conflicts.Conflict.factory(ctype,
                             path=conflicted,
                             file_id=self.path2id(conflicted)))
        return conflicts
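    # Illustrative sketch (not part of the original module): enumerating the
    # conflicts reported by the method above, assuming the usual
    # WorkingTree.open() entry point:
    #
    #   tree = WorkingTree.open('.')
    #   for conflict in tree.conflicts():
    #       print conflict.typestring, conflict.path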

    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, (lstat), file1_id,
                   file1_kind), ... ])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
        disk_top = self.abspath(prefix)
        if disk_top.endswith('/'):
            disk_top = disk_top[:-1]
        top_strip_len = len(disk_top) + 1
        inventory_iterator = self._walkdirs(prefix)
        disk_iterator = osutils.walkdirs(disk_top, prefix)
        try:
            current_disk = disk_iterator.next()
            disk_finished = False
        except OSError, e:
            if not (e.errno == errno.ENOENT or
                (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
                raise
            current_disk = None
            disk_finished = True
        try:
            current_inv = inventory_iterator.next()
            inv_finished = False
        except StopIteration:
            current_inv = None
            inv_finished = True
        while not inv_finished or not disk_finished:
            if current_disk:
                ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
                    cur_disk_dir_content) = current_disk
            else:
                ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
                    cur_disk_dir_content) = ((None, None), None)
            if not disk_finished:
                # strip out .bzr dirs
                if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
                    len(cur_disk_dir_content) > 0):
                    # osutils.walkdirs can be made nicer -
                    # yield the path-from-prefix rather than the pathjoined
                    # value.
                    bzrdir_loc = bisect_left(cur_disk_dir_content,
                        ('.bzr', '.bzr'))
                    if (bzrdir_loc < len(cur_disk_dir_content)
                        and cur_disk_dir_content[bzrdir_loc][0] == '.bzr'):
                        # we don't yield the contents of .bzr, or .bzr itself.
                        del cur_disk_dir_content[bzrdir_loc]
            if inv_finished:
                # everything is unknown
                direction = 1
            elif disk_finished:
                # everything is missing
                direction = -1
            else:
                direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
            if direction > 0:
                # disk is before inventory - unknown
                dirblock = [(relpath, basename, kind, stat, None, None) for
                    relpath, basename, kind, stat, top_path in
                    cur_disk_dir_content]
                yield (cur_disk_dir_relpath, None), dirblock
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
            elif direction < 0:
                # inventory is before disk - missing.
                dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
                    for relpath, basename, dkind, stat, fileid, kind in
                    current_inv[1]]
                yield (current_inv[0][0], current_inv[0][1]), dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
            else:
                # versioned present directory
                # merge the inventory and disk data together
                dirblock = []
                for relpath, subiterator in itertools.groupby(sorted(
                    current_inv[1] + cur_disk_dir_content,
                    key=operator.itemgetter(0)), operator.itemgetter(1)):
                    path_elements = list(subiterator)
                    if len(path_elements) == 2:
                        inv_row, disk_row = path_elements
                        # versioned, present file
                        dirblock.append((inv_row[0],
                            inv_row[1], disk_row[2],
                            disk_row[3], inv_row[4],
                            inv_row[5]))
                    elif len(path_elements[0]) == 5:
                        # unknown disk file
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], path_elements[0][2],
                            path_elements[0][3], None, None))
                    elif len(path_elements[0]) == 6:
                        # versioned, absent file.
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], 'unknown', None,
                            path_elements[0][4], path_elements[0][5]))
                    else:
                        raise NotImplementedError('unreachable code')
                yield current_inv[0], dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
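    # Illustrative sketch (not part of the original module): consuming
    # walkdirs() inside a read lock, as the docstring above requires,
    # assuming the usual WorkingTree.open() entry point:
    #
    #   tree = WorkingTree.open('.')
    #   tree.lock_read()
    #   try:
    #       for (dir_relpath, dir_file_id), dirblock in tree.walkdirs():
    #           for relpath, name, kind, stat, file_id, inv_kind in dirblock:
    #               print kind, relpath, file_id
    #   finally:
    #       tree.unlock()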

    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :prefix: is used as the directory to start with.
        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, None, file1_id,
                   file1_kind), ... ])
        """
        _directory = 'directory'
        # get the root in the inventory
        inv = self.inventory
        top_id = inv.path2id(prefix)
        if top_id is None:
            pending = []
        else:
            pending = [(prefix, '', _directory, None, top_id, None)]
        while pending:
            dirblock = []
            currentdir = pending.pop()
            # 0 - relpath, 1 - basename, 2 - kind, 3 - stat, 4 - id, 5 - kind
            top_id = currentdir[4]
            if currentdir[0]:
                relroot = currentdir[0] + '/'
            else:
                relroot = ""
            # FIXME: stash the node in pending
            entry = inv[top_id]
            if entry.kind == 'directory':
                for name, child in entry.sorted_children():
                    dirblock.append((relroot + name, name, child.kind, None,
                        child.file_id, child.kind
                        ))
            yield (currentdir[0], entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append(dir)

    @needs_tree_write_lock
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts.  The corresponding .THIS .BASE and
        .OTHER files are deleted, as per 'resolve'.

        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
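    # Illustrative sketch (not part of the original module): resolving
    # whichever text conflicts no longer contain conflict markers, assuming
    # the usual WorkingTree.open() entry point (auto_resolve takes its own
    # tree write lock via the decorator above):
    #
    #   tree = WorkingTree.open('.')
    #   un_resolved, resolved = tree.auto_resolve()
    #   print '%d resolved, %d still conflicted' % (len(resolved),
    #                                               len(un_resolved))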

    @needs_read_lock
    def _check(self):
        tree_basis = self.basis_tree()
        tree_basis.lock_read()
        try:
            repo_basis = self.branch.repository.revision_tree(
                self.last_revision())
            if len(list(repo_basis.iter_changes(tree_basis))) > 0:
                raise errors.BzrCheckError(
                    "Mismatched basis inventory content.")
            self._validate()
        finally:
            tree_basis.unlock()

    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite. To give it a chance to detect
        corruption after actions have occurred. The default implementation is
        just a no-op.

        :return: None. An exception should be raised if there is an error.
        """
        return

    def _get_rules_searcher(self, default_searcher):
        """See Tree._get_rules_searcher."""
        if self._rules_searcher is None:
            self._rules_searcher = super(WorkingTree,
                self)._get_rules_searcher(default_searcher)
        return self._rules_searcher

    def get_shelf_manager(self):
        """Return the ShelfManager for this WorkingTree."""
        from bzrlib.shelf import ShelfManager
        return ShelfManager(self, self._transport)


class WorkingTree2(WorkingTree):
    """This is the Format 2 working tree.

    This was the first weave based working tree.
     - uses os locks for locking.
     - uses the branch last-revision.
    """

    def __init__(self, *args, **kwargs):
        super(WorkingTree2, self).__init__(*args, **kwargs)
        # WorkingTree2 has more of a constraint that self._inventory must
        # exist. Because this is an older format, we don't mind the overhead
        # caused by the extra computation here.
        #
        # Newer WorkingTrees should only have self._inventory set when they
        # have a read lock.
        if self._inventory is None:
            self.read_working_inventory()

    def lock_tree_write(self):
        """See WorkingTree.lock_tree_write().

        In Format2 WorkingTrees we have a single lock for the branch and tree
        so lock_tree_write() degrades to lock_write().
        """
        self.branch.lock_write()
        try:
            return self._control_files.lock_write()
        except:
            self.branch.unlock()
            raise

    def unlock(self):
        # do non-implementation specific cleanup
        self._cleanup()

        # we share control files:
        if self._control_files._lock_count == 3:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()

        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()


class WorkingTree3(WorkingTree):
    """This is the Format 3 working tree.

    This differs from the base WorkingTree by:
     - having its own file lock
     - having its own last-revision property.

    This is new in bzr 0.8
    """

    @needs_read_lock
    def _last_revision(self):
        """See Mutable.last_revision."""
        try:
            return self._transport.get_bytes('last-revision')
        except errors.NoSuchFile:
            return _mod_revision.NULL_REVISION

    def _change_last_revision(self, revision_id):
        """See WorkingTree._change_last_revision."""
        if revision_id is None or revision_id == NULL_REVISION:
            try:
                self._transport.delete('last-revision')
            except errors.NoSuchFile:
                pass
            return False
        else:
            self._transport.put_bytes('last-revision', revision_id,
                mode=self._control_files._file_mode)
            return True
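    # Illustrative sketch (not part of the original module): the two methods
    # above round-trip the tree's last revision through the 'last-revision'
    # control file - deleting it for the null revision, otherwise storing the
    # raw revision id bytes. With a made-up revision id:
    #
    #   tree._change_last_revision('some-revision-id')   # writes the file
    #   tree._last_revision()                  # -> 'some-revision-id'
    #   tree._change_last_revision(NULL_REVISION)         # removes the file
    #   tree._last_revision()                  # -> NULL_REVISION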

    @needs_tree_write_lock
    def set_conflicts(self, conflicts):
        self._put_rio('conflicts', conflicts.to_stanzas(),
                      CONFLICT_HEADER_1)

    @needs_tree_write_lock
    def add_conflicts(self, new_conflicts):
        conflict_set = set(self.conflicts())
        conflict_set.update(set(list(new_conflicts)))
        self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
                                       key=_mod_conflicts.Conflict.sort_key)))

    @needs_read_lock
    def conflicts(self):
        try:
            confile = self._transport.get('conflicts')
        except errors.NoSuchFile:
            return _mod_conflicts.ConflictList()
        try:
            try:
                if confile.next() != CONFLICT_HEADER_1 + '\n':
                    raise errors.ConflictFormatError()
            except StopIteration:
                raise errors.ConflictFormatError()
            return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
        finally:
            confile.close()

    def unlock(self):
        # do non-implementation specific cleanup
        self._cleanup()
        if self._control_files._lock_count == 1:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()
        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()


def get_conflicted_stem(path):
    for suffix in _mod_conflicts.CONFLICT_SUFFIXES:
        if path.endswith(suffix):
            return path[:-len(suffix)]
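# Illustrative sketch (not part of the original module): get_conflicted_stem
# strips a known conflict suffix from a path, e.g.
#
#   get_conflicted_stem('foo.txt.THIS')   # -> 'foo.txt'
#   get_conflicted_stem('foo.txt')        # -> None (no conflict suffix)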


class WorkingTreeFormat(object):
    """An encapsulation of the initialization and open routines for a format.

    Formats provide three things:
     * An initialization routine,
     * a format string,
     * an open routine.

    Formats are placed in a dict by their format string for reference
    during workingtree opening. It's not required that these be instances,
    they can be classes themselves with class methods - it simply depends on
    whether state is needed for a given format or not.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every time regardless.
    """

    _default_format = None
    """The default format used for new trees."""

    _formats = {}
    """The known formats."""

    requires_rich_root = False

    upgrade_recommended = False

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the working tree object in a_bzrdir."""
        try:
            transport = a_bzrdir.get_workingtree_transport(None)
            format_string = transport.get("format").read()
            return klass._formats[format_string]
        except errors.NoSuchFile:
            raise errors.NoWorkingTree(base=transport.base)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind="working tree")

    def __eq__(self, other):
        return self.__class__ is other.__class__

    def __ne__(self, other):
        return not (self == other)

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        return klass._default_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format."""
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def is_supported(self):
        """Is this format supported?

        Supported formats can be initialized and opened.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def supports_content_filtering(self):
        """True if this format supports content filtering."""
        return False

    def supports_views(self):
        """True if this format supports stored views."""
        return False

    @classmethod
    def register_format(klass, format):
        klass._formats[format.get_format_string()] = format

    @classmethod
    def set_default_format(klass, format):
        klass._default_format = format

    @classmethod
    def unregister_format(klass, format):
        del klass._formats[format.get_format_string()]


class WorkingTreeFormat2(WorkingTreeFormat):
    """The second working tree format.

    This format modified the hash cache from the format 1 hash cache.
    """

    upgrade_recommended = True

    def get_format_description(self):
        """See WorkingTreeFormat.get_format_description()."""
        return "Working tree format 2"

    def _stub_initialize_on_transport(self, transport, file_mode):
        """Workaround: create control files for a remote working tree.

        This ensures that it can later be updated and dealt with locally,
        since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
        no working tree.  (See bug #43064).
        """
        sio = StringIO()
        inv = Inventory()
        xml5.serializer_v5.write_inventory(inv, sio, working=True)
        sio.seek(0)
        transport.put_file('inventory', sio, file_mode)
        transport.put_bytes('pending-merges', '', file_mode)

    def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
                   accelerator_tree=None, hardlink=False):
        """See WorkingTreeFormat.initialize()."""
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        if from_branch is not None:
            branch = from_branch
        else:
            branch = a_bzrdir.open_branch()
        if revision_id is None:
            revision_id = _mod_revision.ensure_null(branch.last_revision())
        branch.lock_write()
        try:
            branch.generate_revision_history(revision_id)
        finally:
            branch.unlock()
        inv = Inventory()
        wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
                          branch,
                          inv,
                          _internal=True,
                          _format=self,
                          _bzrdir=a_bzrdir)
        basis_tree = branch.repository.revision_tree(revision_id)
        if basis_tree.inventory.root is not None:
            wt.set_root_id(basis_tree.get_root_id())
        # set the parent list and cache the basis tree.
        if _mod_revision.is_null(revision_id):
            parent_trees = []
        else:
            parent_trees = [(revision_id, basis_tree)]
        wt.set_parent_trees(parent_trees)
        transform.build_tree(basis_tree, wt)
        return wt

    def __init__(self):
        super(WorkingTreeFormat2, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat6()

    def open(self, a_bzrdir, _found=False):
        """Return the WorkingTree object for a_bzrdir

        _found is a private parameter, do not use it. It is used to indicate
        if format probing has already been done.
        """
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
                          _internal=True,
                          _format=self,
                          _bzrdir=a_bzrdir)
        return wt


class WorkingTreeFormat3(WorkingTreeFormat):
    """The second working tree format updated to record a format marker.

    This format:
        - exists within a metadir controlling .bzr
        - includes an explicit version marker for the workingtree control
          files, separate from the BzrDir format
        - modifies the hash cache format
        - uses a LockDir to guard access for writes.
    """

    upgrade_recommended = True

    def get_format_string(self):
        """See WorkingTreeFormat.get_format_string()."""
        return "Bazaar-NG Working Tree format 3"

    def get_format_description(self):
        """See WorkingTreeFormat.get_format_description()."""
        return "Working tree format 3"

    _lock_file_name = 'lock'
    _lock_class = LockDir

    _tree_class = WorkingTree3

    def __get_matchingbzrdir(self):
        return bzrdir.BzrDirMetaFormat1()

    _matchingbzrdir = property(__get_matchingbzrdir)

    def _open_control_files(self, a_bzrdir):
        transport = a_bzrdir.get_workingtree_transport(None)
        return LockableFiles(transport, self._lock_file_name,
                             self._lock_class)

    def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
                   accelerator_tree=None, hardlink=False):
        """See WorkingTreeFormat.initialize().

        :param revision_id: if supplied, create a working tree at a different
            revision than the branch is at.
        :param accelerator_tree: A tree which can be used for retrieving file
            contents more quickly than the revision tree, i.e. a workingtree.
            The revision tree will be used for cases where accelerator_tree's
            content is different.
        :param hardlink: If true, hard-link files from accelerator_tree,
            where possible.
        """
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        transport = a_bzrdir.get_workingtree_transport(self)
        control_files = self._open_control_files(a_bzrdir)
        control_files.create_lock()
        control_files.lock_write()
        transport.put_bytes('format', self.get_format_string(),
            mode=control_files._file_mode)
        if from_branch is not None:
            branch = from_branch
        else:
            branch = a_bzrdir.open_branch()
        if revision_id is None:
            revision_id = _mod_revision.ensure_null(branch.last_revision())
        # WorkingTree3 can handle an inventory which has a unique root id.
        # as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
        # those trees. And because there isn't a format bump in between, we
        # are maintaining compatibility with older clients.
        # inv = Inventory(root_id=gen_root_id())
        inv = self._initial_inventory()
        wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
                              branch,
                              inv,
                              _internal=True,
                              _format=self,
                              _bzrdir=a_bzrdir,
                              _control_files=control_files)
        wt.lock_tree_write()
        try:
            basis_tree = branch.repository.revision_tree(revision_id)
            # only set an explicit root id if there is one to set.
            if basis_tree.inventory.root is not None:
                wt.set_root_id(basis_tree.get_root_id())
            if revision_id == NULL_REVISION:
                wt.set_parent_trees([])
            else:
                wt.set_parent_trees([(revision_id, basis_tree)])
            transform.build_tree(basis_tree, wt)
        finally:
            # Unlock in this order so that the unlock-triggers-flush in
            # WorkingTree is given a chance to fire.
            control_files.unlock()
            wt.unlock()
        return wt
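    # Illustrative sketch (not part of the original module): creating a new
    # format 3 tree for an existing branch via the initialize() above, given a
    # control directory a_bzrdir that already has a branch and repository:
    #
    #   format = WorkingTreeFormat3()
    #   wt = format.initialize(a_bzrdir)
    #   # or, at a specific (made-up) revision:
    #   wt = format.initialize(a_bzrdir, revision_id='some-revision-id')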

    def _initial_inventory(self):
        return Inventory()

    def __init__(self):
        super(WorkingTreeFormat3, self).__init__()

    def open(self, a_bzrdir, _found=False):
        """Return the WorkingTree object for a_bzrdir

        _found is a private parameter, do not use it. It is used to indicate
        if format probing has already been done.
        """
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
        return wt

    def _open(self, a_bzrdir, control_files):
        """Open the tree itself.

        :param a_bzrdir: the dir for the tree.
        :param control_files: the control files for the tree.
        """
        return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
                                branch=a_bzrdir.open_branch(),
                                _internal=True,
                                _format=self,
                                _bzrdir=a_bzrdir,
                                _control_files=control_files)

    def __str__(self):
        return self.get_format_string()


__default_format = WorkingTreeFormat4()
WorkingTreeFormat.register_format(__default_format)
WorkingTreeFormat.register_format(WorkingTreeFormat5())
WorkingTreeFormat.register_format(WorkingTreeFormat3())
WorkingTreeFormat.set_default_format(__default_format)
# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
_legacy_formats = [WorkingTreeFormat2(),
                   ]