@@ -13 +13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

 """WorkingTree object and friends.

 A WorkingTree represents the editable working copy of a branch.
 Operations which represent the WorkingTree are also done here,
 such as renaming or adding files. The WorkingTree has an inventory
 which is updated by these operations. A commit produces a
 new revision based on the workingtree and its inventory.

 At the moment every WorkingTree has its own branch. Remote
@@ -59 +51 @@
     conflicts as _mod_conflicts,
-    revision as _mod_revision,
@@ -80 +70 @@
-from bzrlib.transport import get_transport
-from bzrlib.workingtree_4 import WorkingTreeFormat4
+from bzrlib.workingtree_4 import (
@@ -85 +77 @@
 from bzrlib import symbol_versioning
 from bzrlib.decorators import needs_read_lock, needs_write_lock
-from bzrlib.inventory import InventoryEntry, Inventory, ROOT_ID, TreeReference
-from bzrlib.lockable_files import LockableFiles, TransportLock
+from bzrlib.lock import LogicalLockResult
+from bzrlib.lockable_files import LockableFiles
 from bzrlib.lockdir import LockDir
 import bzrlib.mutabletree
 from bzrlib.mutabletree import needs_tree_write_lock
+from bzrlib import osutils
 from bzrlib.osutils import (
@@ -102 +93 @@
     supports_executable,
+from bzrlib.filters import filtered_input_file
 from bzrlib.trace import mutter, note
 from bzrlib.transport.local import LocalTransport
-from bzrlib.progress import DummyProgress, ProgressPhase
-from bzrlib.revision import NULL_REVISION, CURRENT_REVISION
+from bzrlib.revision import CURRENT_REVISION
 from bzrlib.rio import RioReader, rio_file, Stanza
-from bzrlib.symbol_versioning import (deprecated_passed,
-        DEPRECATED_PARAMETER,
+from bzrlib.symbol_versioning import (
+    DEPRECATED_PARAMETER,
@@ -119 +106 @@
 MERGE_MODIFIED_HEADER_1 = "BZR merge-modified list format 1"
+# TODO: Modifying the conflict objects or their type is currently nearly
+# impossible as there is no clear relationship between the working tree format
+# and the conflict list file format.
 CONFLICT_HEADER_1 = "BZR conflict list format 1"

 ERROR_PATH_NOT_FOUND = 3 # WindowsError errno code, equivalent to ENOENT

-@deprecated_function(zero_thirteen)
-def gen_file_id(name):
-    """Return new file id for the basename 'name'.
-    Use bzrlib.generate_ids.gen_file_id() instead
-    return generate_ids.gen_file_id(name)
-@deprecated_function(zero_thirteen)
-    """Return a new tree-root file id.
-    This has been deprecated in favor of bzrlib.generate_ids.gen_root_id()
-    return generate_ids.gen_root_id()
@@ -143 +115 @@
 class TreeEntry(object):
     """An entry that implements the minimum interface used by commands.

     This needs further inspection, it may be better to have
     InventoryEntries without ids - though that seems wrong. For now,
     this is a parallel hierarchy to InventoryEntry, and needs to become
     one of several things: decorates to that hierarchy, children of, or
@@ -269 +246 @@
         # the Format factory and creation methods that are
         # permitted to do this.
         self._set_inventory(_inventory, dirty=False)
+        self._detect_case_handling()
+        self._rules_searcher = None
+        self.views = self._make_views()

+    def user_transport(self):
+        return self.bzrdir.user_transport

+    def control_transport(self):
+        return self._transport

+    def _detect_case_handling(self):
+        wt_trans = self.bzrdir.get_workingtree_transport(None)
+            wt_trans.stat("FoRMaT")
+        except errors.NoSuchFile:
+            self.case_sensitive = True
+            self.case_sensitive = False
+        self._setup_directory_is_tree_reference()

     branch = property(
         fget=lambda self: self._branch,
@@ -289 +288 @@
         self._control_files.break_lock()
         self.branch.break_lock()

+    def _get_check_refs(self):
+        """Return the references needed to perform a check of this tree.
+        The default implementation returns no refs, and is only suitable for
+        trees that have no local caching and can commit on ghosts at any time.
+        :seealso: bzrlib.check for details about check_refs.

     def requires_rich_root(self):
         return self._format.requires_rich_root

     def supports_tree_reference(self):
+    def supports_content_filtering(self):
+        return self._format.supports_content_filtering()

+    def supports_views(self):
+        return self.views.supports_views()

     def _set_inventory(self, inv, dirty):
         """Set the internal cached inventory.
@@ -389 +425 @@
         # at this point ?
             return self.branch.repository.revision_tree(revision_id)
-        except errors.RevisionNotPresent:
+        except (errors.RevisionNotPresent, errors.NoSuchRevision):
             # the basis tree *may* be a ghost or a low level error may have
-            # occured. If the revision is present, its a problem, if its not
+            # occurred. If the revision is present, its a problem, if its not
             if self.branch.repository.has_revision(revision_id):
             # the basis tree is a ghost so return an empty tree.
-            return self.branch.repository.revision_tree(None)
-    @deprecated_method(zero_eight)
-    def create(branch, directory):
-        """Create a workingtree for branch at directory.
-        If existing_directory already exists it must have a .bzr directory.
-        If it does not exist, it will be created.
-        This returns a new WorkingTree object for the new checkout.
-        TODO FIXME RBC 20060124 when we have checkout formats in place this
-        should accept an optional revisionid to checkout [and reject this if
-        checking out into the same dir as a pre-checkout-aware branch format.]
-        XXX: When BzrDir is present, these should be created through that
-        warnings.warn('delete WorkingTree.create', stacklevel=3)
-        transport = get_transport(directory)
-        if branch.bzrdir.root_transport.base == transport.base:
-            return branch.bzrdir.create_workingtree()
-        # different directory,
-        # create a branch reference
-        # and now a working tree.
-        raise NotImplementedError
-    @deprecated_method(zero_eight)
-    def create_standalone(directory):
-        """Create a checkout and a branch and a repo at directory.
-        Directory must exist and be empty.
-        please use BzrDir.create_standalone_workingtree
-        return bzrdir.BzrDir.create_standalone_workingtree(directory)
+            return self.branch.repository.revision_tree(
+                _mod_revision.NULL_REVISION)
+        self._flush_ignore_list_cache()

     def relpath(self, path):
         """Return the local path portion from a given path.

         The path may be absolute or relative. If its a relative path it is
         interpreted relative to the python current working directory.
         return osutils.relpath(self.basedir, path)

     def has_filename(self, filename):
         return osutils.lexists(self.abspath(filename))
-    def get_file(self, file_id):
-        file_id = osutils.safe_file_id(file_id)
-        return self.get_file_byname(self.id2path(file_id))
-    def get_file_text(self, file_id):
-        file_id = osutils.safe_file_id(file_id)
-        return self.get_file(file_id).read()
-    def get_file_byname(self, filename):
-        return file(self.abspath(filename), 'rb')
+    def get_file(self, file_id, path=None, filtered=True):
+        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

+    def get_file_with_stat(self, file_id, path=None, filtered=True,
+        """See Tree.get_file_with_stat."""
+            path = self.id2path(file_id)
+        file_obj = self.get_file_byname(path, filtered=False)
+        stat_value = _fstat(file_obj.fileno())
+        if filtered and self.supports_content_filtering():
+            filters = self._content_filter_stack(path)
+            file_obj = filtered_input_file(file_obj, filters)
+        return (file_obj, stat_value)

+    def get_file_text(self, file_id, path=None, filtered=True):
+        my_file = self.get_file(file_id, path=path, filtered=filtered)
+            return my_file.read()

+    def get_file_byname(self, filename, filtered=True):
+        path = self.abspath(filename)
+        if filtered and self.supports_content_filtering():
+            filters = self._content_filter_stack(filename)
+            return filtered_input_file(f, filters)

+    def get_file_lines(self, file_id, path=None, filtered=True):
+        """See Tree.get_file_lines()"""
+        file = self.get_file(file_id, path, filtered=filtered)
+            return file.readlines()
-    def annotate_iter(self, file_id):
+    def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
         """See Tree.annotate_iter

         This implementation will use the basis tree implementation if possible.
         incorrectly attributed to CURRENT_REVISION (but after committing, the
         attribution will be correct).
-        file_id = osutils.safe_file_id(file_id)
-        basis = self.basis_tree()
-        changes = self._iter_changes(basis, True, [self.id2path(file_id)],
-                                     require_versioned=True).next()
-        changed_content, kind = changes[2], changes[6]
-        if not changed_content:
-            return basis.annotate_iter(file_id)
-        if kind[0] != 'file':
-        old_lines = list(basis.annotate_iter(file_id))
-        for tree in self.branch.repository.revision_trees(
-                self.get_parent_ids()[1:]):
-            if file_id not in tree:
-            old.append(list(tree.annotate_iter(file_id)))
-        return annotate.reannotate(old, self.get_file(file_id).readlines(),
+        maybe_file_parent_keys = []
+        for parent_id in self.get_parent_ids():
+                parent_tree = self.revision_tree(parent_id)
+            except errors.NoSuchRevisionInTree:
+                parent_tree = self.branch.repository.revision_tree(parent_id)
+            parent_tree.lock_read()
+                if file_id not in parent_tree:
+                ie = parent_tree.inventory[file_id]
+                if ie.kind != 'file':
+                    # Note: this is slightly unnecessary, because symlinks and
+                    # directories have a "text" which is the empty text, and we
+                    # know that won't mess up annotations. But it seems cleaner
+                parent_text_key = (file_id, ie.revision)
+                if parent_text_key not in maybe_file_parent_keys:
+                    maybe_file_parent_keys.append(parent_text_key)
+        graph = _mod_graph.Graph(self.branch.repository.texts)
+        heads = graph.heads(maybe_file_parent_keys)
+        file_parent_keys = []
+        for key in maybe_file_parent_keys:
+                file_parent_keys.append(key)
+        # Now we have the parents of this content
+        annotator = self.branch.repository.texts.get_annotator()
+        text = self.get_file_text(file_id)
+        this_key = (file_id, default_revision)
+        annotator.add_special_text(this_key, file_parent_keys, text)
+        annotations = [(key[-1], line)
+                       for key, line in annotator.annotate_flat(this_key)]

+    def _get_ancestors(self, default_revision):
+        ancestors = set([default_revision])
+        for parent_id in self.get_parent_ids():
+            ancestors.update(self.branch.repository.get_ancestry(
+                parent_id, topo_sorted=False))

     def get_parent_ids(self):
         """See Tree.get_parent_ids.

         This implementation reads the pending merges list and last_revision
         value and uses that to decide what the parents list should be.
-        last_rev = self._last_revision()
+        last_rev = _mod_revision.ensure_null(self._last_revision())
+        if _mod_revision.NULL_REVISION == last_rev:
             parents = [last_rev]
-            merges_file = self._control_files.get('pending-merges')
+            merges_bytes = self._transport.get_bytes('pending-merges')
         except errors.NoSuchFile:
-            for l in merges_file.readlines():
-                revision_id = osutils.safe_revision_id(l.rstrip('\n'))
+            for l in osutils.split_lines(merges_bytes):
+                revision_id = l.rstrip('\n')
                 parents.append(revision_id)
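# A minimal usage sketch of the parent-id API above: the first entry of
# get_parent_ids() is the tree's last revision, any further entries are the
# pending merges read back from the 'pending-merges' file. It assumes an
# existing bzr working tree at the current directory.
from bzrlib.workingtree import WorkingTree

tree = WorkingTree.open('.')
tree.lock_read()
try:
    parents = tree.get_parent_ids()
    if parents:
        print 'last revision: %s' % parents[0]
        print 'pending merges: %s' % (parents[1:],)
finally:
    tree.unlock()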

     def get_root_id(self):
         """Return the id of this trees root"""
         return self._inventory.root.file_id

     def _get_store_filename(self, file_id):
         ## XXX: badly named; this is not in the store at all
-        file_id = osutils.safe_file_id(file_id)
         return self.abspath(self.id2path(file_id))

     def clone(self, to_bzrdir, revision_id=None):
         """Duplicate this working tree into to_bzr, including all state.

         Specifically modified files are kept as modified, but
         ignored and unknown files are discarded.

         If you want to make a new line of development, see bzrdir.sprout()
-            If not None, the cloned tree will have its last revision set to
-            revision, and and difference between the source trees last revision
+            If not None, the cloned tree will have its last revision set to
+            revision, and difference between the source trees last revision
             and this one merged in.
         # assumes the target bzr dir format is compatible.
-        result = self._format.initialize(to_bzrdir)
+        result = to_bzrdir.create_workingtree()
         self.copy_content_into(result, revision_id)
@@ -580 +623 @@
     __contains__ = has_id

     def get_file_size(self, file_id):
-        file_id = osutils.safe_file_id(file_id)
-        return os.path.getsize(self.id2abspath(file_id))
+        """See Tree.get_file_size"""
+        # XXX: this returns the on-disk size; it should probably return the
+            return os.path.getsize(self.id2abspath(file_id))
+            if e.errno != errno.ENOENT:

     def get_file_sha1(self, file_id, path=None, stat_value=None):
-        file_id = osutils.safe_file_id(file_id)
             path = self._inventory.id2path(file_id)
         return self._hashcache.get_sha1(path, stat_value)

     def get_file_mtime(self, file_id, path=None):
-        file_id = osutils.safe_file_id(file_id)
             path = self.inventory.id2path(file_id)
         return os.lstat(self.abspath(path)).st_mtime

+    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
+        file_id = self.path2id(path)
+            # For unversioned files on win32, we just assume they are not
+        return self._inventory[file_id].executable

+    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
+        mode = stat_result.st_mode
+        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

     if not supports_executable():
         def is_executable(self, file_id, path=None):
-            file_id = osutils.safe_file_id(file_id)
             return self._inventory[file_id].executable
+        _is_executable_from_path_and_stat = \
+            _is_executable_from_path_and_stat_from_basis
         def is_executable(self, file_id, path=None):
-            file_id = osutils.safe_file_id(file_id)
             path = self.id2path(file_id)
             mode = os.lstat(self.abspath(path)).st_mode
             return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
+        _is_executable_from_path_and_stat = \
+            _is_executable_from_path_and_stat_from_stat

     @needs_tree_write_lock
     def _add(self, files, ids, kinds):
         """See MutableTree._add."""
         # TODO: Re-adding a file that is removed in the working copy
         # should probably put it back with the previous ID.
         # the read and write working inventory should not occur in this
         # function - they should be part of lock_write and unlock.
         inv = self.inventory
         for f, file_id, kind in zip(files, ids, kinds):
-            assert kind is not None
             if file_id is None:
                 inv.add_path(f, kind=kind)
-                file_id = osutils.safe_file_id(file_id)
                 inv.add_path(f, kind=kind, file_id=file_id)
             self._inventory_is_modified = True
@@ -690 +753 @@
         self.set_parent_ids(parents, allow_leftmost_as_ghost=True)

-    @deprecated_method(zero_eleven)
-    def pending_merges(self):
-        """Return a list of pending merges.
-        These are revisions that have been merged into the working
-        directory but not yet committed.
-        As of 0.11 this is deprecated. Please see WorkingTree.get_parent_ids()
-        instead - which is available on all tree objects.
-        return self.get_parent_ids()[1:]
+    def path_content_summary(self, path, _lstat=os.lstat,
+        _mapper=osutils.file_kind_from_stat_mode):
+        """See Tree.path_content_summary."""
+        abspath = self.abspath(path)
+            stat_result = _lstat(abspath)
+            if getattr(e, 'errno', None) == errno.ENOENT:
+                return ('missing', None, None, None)
+            # propagate other errors
+        kind = _mapper(stat_result.st_mode)
+            return self._file_content_summary(path, stat_result)
+        elif kind == 'directory':
+            # perhaps it looks like a plain directory, but it's really a
+            if self._directory_is_tree_reference(path):
+                kind = 'tree-reference'
+            return kind, None, None, None
+        elif kind == 'symlink':
+            target = osutils.readlink(abspath)
+            return ('symlink', None, None, target)
+            return (kind, None, None, None)

+    def _file_content_summary(self, path, stat_result):
+        size = stat_result.st_size
+        executable = self._is_executable_from_path_and_stat(path, stat_result)
+        # try for a stat cache lookup
+        return ('file', size, executable, self._sha_from_stat(

     def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
         """Common ghost checking functionality from set_parent_*.
@@ -717 +801 @@
     def _set_merges_from_parent_ids(self, parent_ids):
         merges = parent_ids[1:]
-        self._control_files.put_bytes('pending-merges', '\n'.join(merges))
+        self._transport.put_bytes('pending-merges', '\n'.join(merges),
+            mode=self.bzrdir._get_file_mode())

+    def _filter_parent_ids_by_ancestry(self, revision_ids):
+        """Check that all merged revisions are proper 'heads'.
+        This will always return the first revision_id, and any merged revisions
+        if len(revision_ids) == 0:
+        graph = self.branch.repository.get_graph()
+        heads = graph.heads(revision_ids)
+        new_revision_ids = revision_ids[:1]
+        for revision_id in revision_ids[1:]:
+            if revision_id in heads and revision_id not in new_revision_ids:
+                new_revision_ids.append(revision_id)
+        if new_revision_ids != revision_ids:
+            trace.mutter('requested to set revision_ids = %s,'
+                         ' but filtered to %s', revision_ids, new_revision_ids)
+        return new_revision_ids
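# The filtering just above keeps the leftmost parent unconditionally and then
# keeps each merged revision only if the repository graph reports it as a head
# and it is not already listed. A small self-contained sketch of the same rule,
# with a hand-written heads set standing in for graph.heads():
def filter_parent_ids(revision_ids, heads):
    if len(revision_ids) == 0:
        return []
    new_revision_ids = revision_ids[:1]
    for revision_id in revision_ids[1:]:
        if revision_id in heads and revision_id not in new_revision_ids:
            new_revision_ids.append(revision_id)
    return new_revision_ids

# 'rev-b' is an ancestor of 'rev-c' (so not a head) and gets dropped:
print filter_parent_ids(['rev-a', 'rev-b', 'rev-c'], set(['rev-a', 'rev-c']))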

     @needs_tree_write_lock
     def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
         """Set the parent ids to revision_ids.

         See also set_parent_trees. This api will try to retrieve the tree data
         for each element of revision_ids from the trees repository. If you have
         tree data already available, it is more efficient to use
         :param revision_ids: The revision_ids to set as the parent ids of this
             working tree. Any of these may be ghosts.
-        revision_ids = [osutils.safe_revision_id(r) for r in revision_ids]
         self._check_parents_for_ghosts(revision_ids,
             allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+        for revision_id in revision_ids:
+            _mod_revision.check_not_reserved_id(revision_id)
+        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)
         if len(revision_ids) > 0:
             self.set_last_revision(revision_ids[0])
-            self.set_last_revision(None)
+            self.set_last_revision(_mod_revision.NULL_REVISION)
         self._set_merges_from_parent_ids(revision_ids)

     @needs_tree_write_lock
     def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
         """See MutableTree.set_parent_trees."""
-        parent_ids = [osutils.safe_revision_id(rev) for (rev, tree) in parents_list]
+        parent_ids = [rev for (rev, tree) in parents_list]
+        for revision_id in parent_ids:
+            _mod_revision.check_not_reserved_id(revision_id)
         self._check_parents_for_ghosts(parent_ids,
             allow_leftmost_as_ghost=allow_leftmost_as_ghost)
+        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)
         if len(parent_ids) == 0:
-            leftmost_parent_id = None
+            leftmost_parent_id = _mod_revision.NULL_REVISION
             leftmost_parent_tree = None
             leftmost_parent_id, leftmost_parent_tree = parents_list[0]
@@ -782 +893 @@
                 yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
         self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)

+    def _sha_from_stat(self, path, stat_result):
+        """Get a sha digest from the tree's stat cache.
+        The default implementation assumes no stat cache is present.
+        :param path: The path.
+        :param stat_result: The stat result being looked up.

     def _put_rio(self, filename, stanzas, header):
         self._must_be_locked()
         my_file = rio_file(stanzas, header)
-        self._control_files.put(filename, my_file)
+        self._transport.put_file(filename, my_file,
+            mode=self.bzrdir._get_file_mode())

     @needs_write_lock # because merge pulls data into the branch.
-    def merge_from_branch(self, branch, to_revision=None):
+    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
+                          merge_type=None, force=False):
         """Merge from a branch into this working tree.

         :param branch: The branch to merge from.
             branch.last_revision().
         from bzrlib.merge import Merger, Merge3Merger
-        pb = bzrlib.ui.ui_factory.nested_progress_bar()
-            merger = Merger(self.branch, this_tree=self, pb=pb)
-            merger.pp = ProgressPhase("Merge phase", 5, pb)
-            merger.pp.next_phase()
-            # check that there are no
-            merger.check_basis(check_clean=True, require_commits=False)
-            if to_revision is None:
-                to_revision = branch.last_revision()
-                to_revision = osutils.safe_revision_id(to_revision)
-            merger.other_rev_id = to_revision
-            if merger.other_rev_id is None:
-                raise errors.NoCommits(branch)
-            self.branch.fetch(branch, last_revision=merger.other_rev_id)
-            merger.other_basis = merger.other_rev_id
-            merger.other_tree = self.branch.repository.revision_tree(
-            merger.other_branch = branch
-            merger.pp.next_phase()
+        merger = Merger(self.branch, this_tree=self)
+        # check that there are no local alterations
+        if not force and self.has_changes():
+            raise errors.UncommittedChanges(self)
+        if to_revision is None:
+            to_revision = _mod_revision.ensure_null(branch.last_revision())
+        merger.other_rev_id = to_revision
+        if _mod_revision.is_null(merger.other_rev_id):
+            raise errors.NoCommits(branch)
+        self.branch.fetch(branch, last_revision=merger.other_rev_id)
+        merger.other_basis = merger.other_rev_id
+        merger.other_tree = self.branch.repository.revision_tree(
+        merger.other_branch = branch
+        if from_revision is None:
             merger.find_base()
-            if merger.base_rev_id == merger.other_rev_id:
-                raise errors.PointlessMerge
-            merger.backup_files = False
+            merger.set_base_revision(from_revision, branch)
+        if merger.base_rev_id == merger.other_rev_id:
+            raise errors.PointlessMerge
+        merger.backup_files = False
+        if merge_type is None:
             merger.merge_type = Merge3Merger
-            merger.set_interesting_files(None)
-            merger.show_base = False
-            merger.reprocess = False
-            conflicts = merger.do_merge()
+            merger.merge_type = merge_type
+        merger.set_interesting_files(None)
+        merger.show_base = False
+        merger.reprocess = False
+        conflicts = merger.do_merge()
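# A minimal sketch of calling merge_from_branch() as defined above: merge the
# tip of another branch into this working tree. With the newer signature,
# force=False refuses to merge over uncommitted changes. '../other' is only an
# example path; any branch location works.
from bzrlib.branch import Branch
from bzrlib.workingtree import WorkingTree

tree = WorkingTree.open('.')
other = Branch.open('../other')
conflicts = tree.merge_from_branch(other)
print 'merge reported %s conflicts' % (conflicts,)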

     def merge_modified(self):
         """Return a dictionary of files modified by a merge.

         The list is initialized by WorkingTree.set_merge_modified, which is
         typically called after we make some automatic updates to the tree
         because of a merge.
         still in the working inventory and have that text hash.
-            hashfile = self._control_files.get('merge-hashes')
+            hashfile = self._transport.get('merge-hashes')
         except errors.NoSuchFile:
-            if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
+                if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
+                    raise errors.MergeModifiedFormatError()
+            except StopIteration:
                 raise errors.MergeModifiedFormatError()
-        except StopIteration:
-            raise errors.MergeModifiedFormatError()
-        for s in RioReader(hashfile):
-            # RioReader reads in Unicode, so convert file_ids back to utf8
-            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
-            if file_id not in self.inventory:
-            text_hash = s.get("hash")
-            if text_hash == self.get_file_sha1(file_id):
-                merge_hashes[file_id] = text_hash
+            for s in RioReader(hashfile):
+                # RioReader reads in Unicode, so convert file_ids back to utf8
+                file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
+                if file_id not in self.inventory:
+                text_hash = s.get("hash")
+                if text_hash == self.get_file_sha1(file_id):
+                    merge_hashes[file_id] = text_hash

     @needs_write_lock
     def mkdir(self, path, file_id=None):
@@ -919 +1043 @@
             other_tree.unlock()
         other_tree.bzrdir.retire_bzrdir()

+    def _setup_directory_is_tree_reference(self):
+        if self._branch.repository._format.supports_tree_reference:
+            self._directory_is_tree_reference = \
+                self._directory_may_be_tree_reference
+            self._directory_is_tree_reference = \
+                self._directory_is_never_tree_reference

+    def _directory_is_never_tree_reference(self, relpath):

+    def _directory_may_be_tree_reference(self, relpath):
+        # as a special case, if a directory contains control files then
+        # it's a tree reference, except that the root of the tree is not
+        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
+        # TODO: We could ask all the control formats whether they
+        # recognize this directory, but at the moment there's no cheap api
+        # to do that. Since we probably can only nest bzr checkouts and
+        # they always use this name it's ok for now. -- mbp 20060306
+        # FIXME: There is an unhandled case here of a subdirectory
+        # containing .bzr but not a branch; that will probably blow up
+        # when you try to commit it. It might happen if there is a
+        # checkout in a subdirectory. This can be avoided by not adding

     @needs_tree_write_lock
     def extract(self, file_id, format=None):
         """Extract a subtree from this tree.

         A new branch will be created, relative to the path for this tree.
             transport = transport.clone(name)
             transport.ensure_base()
             return transport

         sub_path = self.id2path(file_id)
         branch_transport = mkdirs(sub_path)
         if format is None:
-            format = bzrdir.format_registry.make_bzrdir('dirstate-with-subtree')
+            format = self.bzrdir.cloning_metadir()
         branch_transport.ensure_base()
         branch_bzrdir = format.initialize_on_transport(branch_transport)
             repo = branch_bzrdir.find_repository()
         except errors.NoRepositoryPresent:
             repo = branch_bzrdir.create_repository()
-        assert repo.supports_rich_root()
-        if not repo.supports_rich_root():
-            raise errors.RootNotRich()
+        if not repo.supports_rich_root():
+            raise errors.RootNotRich()
         new_branch = branch_bzrdir.create_branch()
         new_branch.pull(self.branch)
         for parent_id in self.get_parent_ids():
         tree_transport = self.bzrdir.root_transport.clone(sub_path)
         if tree_transport.base != branch_transport.base:
             tree_bzrdir = format.initialize_on_transport(tree_transport)
-            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
+            branch.BranchReferenceFormat().initialize(tree_bzrdir,
+                target_branch=new_branch)
             tree_bzrdir = branch_bzrdir
-        wt = tree_bzrdir.create_workingtree(NULL_REVISION)
+        wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
         wt.set_parent_ids(self.get_parent_ids())
         my_inv = self.inventory
-        child_inv = Inventory(root_id=None)
+        child_inv = inventory.Inventory(root_id=None)
         new_root = my_inv[file_id]
         my_inv.remove_recursive_id(file_id)
         new_root.parent_id = None
@@ -984 +1134 @@
         sio = StringIO()
         self._serialize(self._inventory, sio)
-        self._control_files.put('inventory', sio)
+        self._transport.put_file('inventory', sio,
+            mode=self.bzrdir._get_file_mode())
         self._inventory_is_modified = False

     def _kind(self, relpath):
         return osutils.file_kind(self.abspath(relpath))

-    def list_files(self, include_root=False):
-        """Recursively list all files as (path, class, kind, id, entry).
+    def list_files(self, include_root=False, from_dir=None, recursive=True):
+        """List all files as (path, class, kind, id, entry).

         Lists, but does not descend into unversioned directories.
         This does not include files that have been deleted in this
+        tree. Skips the control directory.
-        Skips the control directory.
+        :param include_root: if True, return an entry for the root
+        :param from_dir: start from this directory or None for the root
+        :param recursive: whether to recurse into subdirectories or not
         # list_files is an iterator, so @needs_read_lock doesn't work properly
         # with it. So callers should be careful to always read_lock the tree.
@@ -1019 +1171 @@
         fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}

         # directory file_id, relative path, absolute path, reverse sorted children
-        children = os.listdir(self.basedir)
+        if from_dir is not None:
+            from_dir_id = inv.path2id(from_dir)
+            if from_dir_id is None:
+                # Directory not versioned
+            from_dir_abspath = pathjoin(self.basedir, from_dir)
+            from_dir_id = inv.root.file_id
+            from_dir_abspath = self.basedir
+        children = os.listdir(from_dir_abspath)
         children.sort()
         # jam 20060527 The kernel sized tree seems equivalent whether we
         # use a deque and popleft to keep them sorted, or if we use a plain
         # list and just reverse() them.
         children = collections.deque(children)
-        stack = [(inv.root.file_id, u'', self.basedir, children)]
+        stack = [(from_dir_id, u'', from_dir_abspath, children)]
             from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
@@ -1085 +1246 @@
                 except KeyError:
                     yield fp[1:], c, fk, None, TreeEntry()
                 if fk != 'directory':
-                    # But do this child first
-                    new_children = os.listdir(fap)
-                    new_children = collections.deque(new_children)
-                    stack.append((f_ie.file_id, fp, fap, new_children))
-                    # Break out of inner loop,
-                    # so that we start outer loop with child
+                    # But do this child first if recursing down
+                        new_children = os.listdir(fap)
+                        new_children = collections.deque(new_children)
+                        stack.append((f_ie.file_id, fp, fap, new_children))
+                        # Break out of inner loop,
+                        # so that we start outer loop with child
             # if we finished all children, pop it off the stack
@@ -1333 +1500 @@
         from_tail = splitpath(from_rel)[-1]
         from_id = inv.path2id(from_rel)
         if from_id is None:
-            raise errors.BzrRenameFailedError(from_rel,to_rel,
-                errors.NotVersionedError(path=str(from_rel)))
-        from_entry = inv[from_id]
+            # if file is missing in the inventory maybe it's in the basis_tree
+            basis_tree = self.branch.basis_tree()
+            from_id = basis_tree.path2id(from_rel)
+                raise errors.BzrRenameFailedError(from_rel,to_rel,
+                    errors.NotVersionedError(path=str(from_rel)))
+            # put entry back in the inventory so we can rename it
+            from_entry = basis_tree.inventory[from_id].copy()
+            from_entry = inv[from_id]
         from_parent_id = from_entry.parent_id
         to_dir, to_tail = os.path.split(to_rel)
         to_dir_id = inv.path2id(to_dir)
@@ -1403 +1578 @@
         :raises: NoSuchId if any fileid is not currently versioned.
         for file_id in file_ids:
-            file_id = osutils.safe_file_id(file_id)
+            if file_id not in self._inventory:
+                raise errors.NoSuchId(self, file_id)
+        for file_id in file_ids:
             if self._inventory.has_id(file_id):
                 self._inventory.remove_recursive_id(file_id)
-                raise errors.NoSuchId(self, file_id)
         if len(file_ids):
             # in the future this should just set a dirty bit to wait for the
             # final unlock. However, until all methods of workingtree start
             # with the current in -memory inventory rather than triggering
             # a read, it is more complex - we need to teach read_inventory
             # to know when to read, and when to not read first... and possibly
             # to save first when the in memory one may be corrupted.
             # so for now, we just only write it if it is indeed dirty.
             # - RBC 20060907
             self._write_inventory(self._inventory)

-    @deprecated_method(zero_eight)
-    def iter_conflicts(self):
-        """List all files in the tree that have text or content conflicts.
-        DEPRECATED. Use conflicts instead."""
-        return self._iter_conflicts()

     def _iter_conflicts(self):
         conflicted = set()
@@ -1439 +1608 @@
     @needs_write_lock
     def pull(self, source, overwrite=False, stop_revision=None,
-             change_reporter=None):
-        top_pb = bzrlib.ui.ui_factory.nested_progress_bar()
+             change_reporter=None, possible_transports=None, local=False):
         source.lock_read()
-            pp = ProgressPhase("Pull phase", 2, top_pb)
             old_revision_info = self.branch.last_revision_info()
             basis_tree = self.basis_tree()
-            count = self.branch.pull(source, overwrite, stop_revision)
+            count = self.branch.pull(source, overwrite, stop_revision,
+                                     possible_transports=possible_transports,
             new_revision_info = self.branch.last_revision_info()
             if new_revision_info != old_revision_info:
                 repository = self.branch.repository
-                pb = bzrlib.ui.ui_factory.nested_progress_bar()
                 basis_tree.lock_read()
                     new_basis_tree = self.branch.basis_tree()
                         new_basis_tree,
                         this_tree=self,
                         change_reporter=change_reporter)
-                    if (basis_tree.inventory.root is None and
-                            new_basis_tree.inventory.root is not None):
-                        self.set_root_id(new_basis_tree.inventory.root.file_id)
+                    basis_root_id = basis_tree.get_root_id()
+                    new_root_id = new_basis_tree.get_root_id()
+                    if basis_root_id != new_root_id:
+                        self.set_root_id(new_root_id)
                     basis_tree.unlock()
                 # TODO - dedup parents list with things merged by pull ?
                 # reuse the revisiontree we merged against to set the new
                 parent_trees = [(self.branch.last_revision(), new_basis_tree)]
                 # we have to pull the merge trees out again, because
                 # merge_inner has set the ids. - this corner is not yet
                 # layered well enough to prevent double handling.
                 # XXX TODO: Fix the double handling: telling the tree about
                 # the already known parent data is wasteful.
@@ -1570 +1741 @@
         r"""Check whether the filename matches an ignore pattern.

         Patterns containing '/' or '\' need to match the whole path;
-        others match against only the last component.
+        others match against only the last component. Patterns starting
+        with '!' are ignore exceptions. Exceptions take precedence
+        over regular patterns and cause the filename to not be ignored.

         If the file is ignored, returns the pattern which caused it to
         be ignored, otherwise None. So this can simply be used as a
         boolean if desired."""
         if getattr(self, '_ignoreglobster', None) is None:
-            self._ignoreglobster = globbing.Globster(self.get_ignore_list())
+            self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
         return self._ignoreglobster.match(filename)
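# A minimal sketch of the '!' exception behaviour documented above, assuming a
# .bzrignore that contains the two patterns '*.tmp' and '!important.tmp' (the
# file names are only examples):
from bzrlib.workingtree import WorkingTree

tree = WorkingTree.open('.')
print tree.is_ignored('scratch.tmp')    # -> '*.tmp', the pattern that matched
print tree.is_ignored('important.tmp')  # -> None, the '!' exception wins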

     def kind(self, file_id):
         return file_kind(self.id2abspath(file_id))

+    def stored_kind(self, file_id):
+        """See Tree.stored_kind"""
+        return self.inventory[file_id].kind

     def _comparison_data(self, entry, path):
         abspath = self.abspath(path)
@@ -1628 +1805 @@
             raise errors.ObjectNotLocked(self)

     def lock_read(self):
-        """See Branch.lock_read, and WorkingTree.unlock."""
+        """Lock the tree for reading.
+        This also locks the branch, and can be unlocked via self.unlock().
+        :return: A bzrlib.lock.LogicalLockResult.
         if not self.is_locked():
             self._reset_data()
         self.branch.lock_read()
-            return self._control_files.lock_read()
+            self._control_files.lock_read()
+            return LogicalLockResult(self.unlock)
             self.branch.unlock()

     def lock_tree_write(self):
-        """See MutableTree.lock_tree_write, and WorkingTree.unlock."""
+        """See MutableTree.lock_tree_write, and WorkingTree.unlock.
+        :return: A bzrlib.lock.LogicalLockResult.
         if not self.is_locked():
             self._reset_data()
         self.branch.lock_read()
-            return self._control_files.lock_write()
+            self._control_files.lock_write()
+            return LogicalLockResult(self.unlock)
             self.branch.unlock()

     def lock_write(self):
-        """See MutableTree.lock_write, and WorkingTree.unlock."""
+        """See MutableTree.lock_write, and WorkingTree.unlock.
+        :return: A bzrlib.lock.LogicalLockResult.
         if not self.is_locked():
             self._reset_data()
         self.branch.lock_write()
-            return self._control_files.lock_write()
+            self._control_files.lock_write()
+            return LogicalLockResult(self.unlock)
             self.branch.unlock()
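# A minimal sketch of the locking contract above: each lock_* method also
# locks the branch and must be balanced by unlock(); the newer code returns a
# LogicalLockResult, but pairing the call with unlock() in a finally block
# works the same way in both versions.
from bzrlib.workingtree import WorkingTree

tree = WorkingTree.open('.')
tree.lock_tree_write()
try:
    pass  # mutate the working tree (but not the branch) here
finally:
    tree.unlock()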
@@ -1669 +1860 @@
     def _reset_data(self):
         """Reset transient data that cannot be revalidated."""
         self._inventory_is_modified = False
-        result = self._deserialize(self._control_files.get('inventory'))
+        f = self._transport.get('inventory')
+            result = self._deserialize(f)
         self._set_inventory(result, dirty=False)

     @needs_tree_write_lock
     def set_last_revision(self, new_revision):
         """Change the last revision in the working tree."""
-        new_revision = osutils.safe_revision_id(new_revision)
         if self._change_last_revision(new_revision):
             self._cache_basis_inventory(new_revision)

     def _change_last_revision(self, new_revision):
         """Template method part of set_last_revision to perform the change.

         This is used to allow WorkingTree3 instances to not affect branch
         when their last revision is set.
-        if new_revision is None:
+        if _mod_revision.is_null(new_revision):
             self.branch.set_revision_history([])
@@ -1698 +1892 @@
     def _write_basis_inventory(self, xml):
         """Write the basis inventory XML to the basis-inventory file"""
-        assert isinstance(xml, str), 'serialised xml must be bytestring.'
         path = self._basis_inventory_name()
         sio = StringIO(xml)
-        self._control_files.put(path, sio)
+        self._transport.put_file(path, sio,
+            mode=self.bzrdir._get_file_mode())

     def _create_basis_xml_from_inventory(self, revision_id, inventory):
         """Create the text that will be saved in basis-inventory"""
-        # TODO: jam 20070209 This should be redundant, as the revision_id
-        # as all callers should have already converted the revision_id to
-        inventory.revision_id = osutils.safe_revision_id(revision_id)
+        inventory.revision_id = revision_id
         return xml7.serializer_v7.write_inventory_to_string(inventory)

     def _cache_basis_inventory(self, new_revision):
         # as commit already has that ready-to-use [while the format is the
         # same, that is].
         # this double handles the inventory - unpack and repack -
         # but is easier to understand. We can/should put a conditional
         # in here based on whether the inventory is in the latest format
         # - perhaps we should repack all inventories on a repository
         # the fast path is to copy the raw xml from the repository. If the
         # xml contains 'revision_id="', then we assume the right
         # revision_id is set. We must check for this full string, because a
         # root node id can legitimately look like 'revision_id' but cannot
         # contain a '"'.
-            xml = self.branch.repository.get_inventory_xml(new_revision)
+            xml = self.branch.repository._get_inventory_xml(new_revision)
             firstline = xml.split('\n', 1)[0]
             if (not 'revision_id="' in firstline or
                 'format="7"' not in firstline):
-                inv = self.branch.repository.deserialise_inventory(
+                inv = self.branch.repository._serializer.read_inventory_from_string(
                 xml = self._create_basis_xml_from_inventory(new_revision, inv)
             self._write_basis_inventory(xml)
         except (errors.NoSuchRevision, errors.RevisionNotPresent):

     def read_basis_inventory(self):
         """Read the cached basis inventory."""
         path = self._basis_inventory_name()
-        return self._control_files.get(path).read()
+        return self._transport.get_bytes(path)

     @needs_read_lock
     def read_working_inventory(self):
         """Read the working inventory.

         :raises errors.InventoryModified: read_working_inventory will fail
             when the current in memory inventory has been modified.
         # conceptually this should be an implementation detail of the tree.
         # XXX: Deprecate this.
         # ElementTree does its own conversion from UTF-8, so open in
         if self._inventory_is_modified:
             raise errors.InventoryModified(self)
-        result = self._deserialize(self._control_files.get('inventory'))
+        f = self._transport.get('inventory')
+            result = self._deserialize(f)
         self._set_inventory(result, dirty=False)

     @needs_tree_write_lock
     def remove(self, files, verbose=False, to_file=None, keep_files=True,
-        """Remove nominated files from the working inventor.
+        """Remove nominated files from the working inventory.

         :files: File paths relative to the basedir.
         :keep_files: If true, the files will also be kept.
         :force: Delete files and directories, even if they are changed and
             even if the directories are not empty.
-        ## TODO: Normalize names
         if isinstance(files, basestring):
             files = [files]

         new_files=set()
-        unknown_files_in_directory=set()
+        unknown_nested_files=set()
+            to_file = sys.stdout

         def recurse_directory_to_add_files(directory):
-            # recurse directory and add all files
+            # Recurse directory and add all files
             # so we can check if they have changed.
-            for parent_info, file_infos in\
-                osutils.walkdirs(self.abspath(directory),
-                for relpath, basename, kind, lstat, abspath in file_infos:
-                    if self.path2id(relpath): #is it versioned?
-                        new_files.add(relpath)
-                        unknown_files_in_directory.add(
-                            (relpath, None, kind))
+            for parent_info, file_infos in self.walkdirs(directory):
+                for relpath, basename, kind, lstat, fileid, kind in file_infos:
+                    # Is it versioned or ignored?
+                    if self.path2id(relpath) or self.is_ignored(relpath):
+                        # Add nested content for deletion.
+                        new_files.add(relpath)
+                        # Files which are not versioned and not ignored
+                        # should be treated as unknown.
+                        unknown_nested_files.add((relpath, None, kind))

         for filename in files:
             # Get file name into canonical form.
             filename = self.relpath(abspath)
             if len(filename) > 0:
                 new_files.add(filename)
-                if osutils.isdir(abspath):
-                    recurse_directory_to_add_files(filename)
-        files = [f for f in new_files]
+                recurse_directory_to_add_files(filename)
+        files = list(new_files)
+            return # nothing to do

         # Sort needed to first handle directory content before the directory
         files.sort(reverse=True)
+        # Bail out if we are going to delete files we shouldn't
         if not keep_files and not force:
-            tree_delta = self.changes_from(self.basis_tree(),
-                specific_files=files)
-            for unknown_file in unknown_files_in_directory:
-                tree_delta.unversioned.extend((unknown_file,))
-            if bool(tree_delta.modified
-                    or tree_delta.renamed
-                    or tree_delta.kind_changed
-                    or tree_delta.unversioned):
+            has_changed_files = len(unknown_nested_files) > 0
+            if not has_changed_files:
+                for (file_id, path, content_change, versioned, parent_id, name,
+                     kind, executable) in self.iter_changes(self.basis_tree(),
+                         include_unchanged=True, require_versioned=False,
+                         want_unversioned=True, specific_files=files):
+                    if versioned == (False, False):
+                        # The record is unknown ...
+                        if not self.is_ignored(path[1]):
+                            # ... but not ignored
+                            has_changed_files = True
+                    elif (content_change and (kind[1] is not None) and
+                            osutils.is_inside_any(files, path[1])):
+                        # Versioned and changed, but not deleted, and still
+                        # in one of the dirs to be deleted.
+                        has_changed_files = True
+            if has_changed_files:
+                # Make delta show ALL applicable changes in error message.
+                tree_delta = self.changes_from(self.basis_tree(),
+                    require_versioned=False, want_unversioned=True,
+                    specific_files=files)
+                for unknown_file in unknown_nested_files:
+                    if unknown_file not in tree_delta.unversioned:
+                        tree_delta.unversioned.extend((unknown_file,))
                 raise errors.BzrRemoveChangedFilesError(tree_delta)

-        # do this before any modifications
+        # Build inv_delta and delete files where applicable,
+        # do this before any modifications to inventory.
         for f in files:
             fid = self.path2id(f)
-                message="%s is not versioned." % (f,)
+                message = "%s is not versioned." % (f,)
                 # having removed it, it must be either ignored or unknown
                     new_status = 'I'
                     new_status = '?'
-                textui.show_status(new_status, self.kind(fid), f,
+                # XXX: Really should be a more abstract reporter interface
+                kind_ch = osutils.kind_marker(self.kind(fid))
+                to_file.write(new_status + ' ' + f + kind_ch + '\n')
                 inv_delta.append((f, None, fid, None))
-                message="removed %s" % (f,)
+                message = "removed %s" % (f,)

             if not keep_files:
                 abs_path = self.abspath(f)
                 if osutils.lexists(abs_path):
                     if (osutils.isdir(abs_path) and
                         len(os.listdir(abs_path)) > 0):
-                        message="%s is not empty directory "\
-                            "and won't be deleted." % (f,)
+                            osutils.rmtree(abs_path)
+                            message = "%s is not an empty directory "\
+                                "and won't be deleted." % (f,)
                         osutils.delete_any(abs_path)
-                        message="deleted %s" % (f,)
+                        message = "deleted %s" % (f,)
                 elif message is not None:
-                    # only care if we haven't done anything yet.
-                    message="%s does not exist." % (f,)
+                    # Only care if we haven't done anything yet.
+                    message = "%s does not exist." % (f,)

-            # print only one message (if any) per file.
+            # Print only one message (if any) per file.
             if message is not None:
         self.apply_inventory_delta(inv_delta)

     @needs_tree_write_lock
-    def revert(self, filenames, old_tree=None, backups=True,
-               pb=DummyProgress(), report_changes=False):
+    def revert(self, filenames=None, old_tree=None, backups=True,
+               pb=None, report_changes=False):
         from bzrlib.conflicts import resolve
+            symbol_versioning.warn('Using [] to revert all files is deprecated'
+                ' as of bzr 0.91. Please use None (the default) instead.',
+                DeprecationWarning, stacklevel=2)
         if old_tree is None:
-            old_tree = self.basis_tree()
-        conflicts = transform.revert(self, old_tree, filenames, backups, pb,
-        if not len(filenames):
-            self.set_parent_ids(self.get_parent_ids()[:1])
+            basis_tree = self.basis_tree()
+            basis_tree.lock_read()
+            old_tree = basis_tree
-            resolve(self, filenames, ignore_misses=True)
+            conflicts = transform.revert(self, old_tree, filenames, backups, pb,
+            if filenames is None and len(self.get_parent_ids()) > 1:
+                last_revision = self.last_revision()
+                if last_revision != _mod_revision.NULL_REVISION:
+                    if basis_tree is None:
+                        basis_tree = self.basis_tree()
+                        basis_tree.lock_read()
+                    parent_trees.append((last_revision, basis_tree))
+                self.set_parent_trees(parent_trees)
+                resolve(self, filenames, ignore_misses=True, recursive=True)
+            if basis_tree is not None:
         return conflicts
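# A minimal sketch of the revert() API after this change: with filenames=None
# (the new default) the whole tree is reverted to its basis and pending merges
# are dropped; passing an explicit list only reverts those paths. 'README' is
# just an example path.
from bzrlib.workingtree import WorkingTree

tree = WorkingTree.open('.')
tree.revert()                      # revert everything
tree.revert(filenames=['README'])  # revert a single file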

     def revision_tree(self, revision_id):
@@ -1922 +2164 @@
     @needs_tree_write_lock
     def set_root_id(self, file_id):
         """Set the root id for this tree."""
         if file_id is None:
-            symbol_versioning.warn(symbol_versioning.zero_twelve
-                % 'WorkingTree.set_root_id with fileid=None',
-            file_id = osutils.safe_file_id(file_id)
+                'WorkingTree.set_root_id with fileid=None')
+        file_id = osutils.safe_file_id(file_id)
         self._set_root_id(file_id)

     def _set_root_id(self, file_id):
         """Set the root id for this tree, in a format specific manner.

         :param file_id: The file id to assign to the root. It must not be
             present in the current inventory or an error will occur. It must
             not be None, but rather a valid file id.
@@ -1962 +2200 @@
     def unlock(self):
         """See Branch.unlock.

         WorkingTree locking just uses the Branch locking facilities.
         This is current because all working trees have an embedded branch
         within them. IF in the future, we were to make branch data shareable
         between multiple working trees, i.e. via shared storage, then we
         would probably want to lock both the local tree, and the branch.
         raise NotImplementedError(self.unlock)

-    def update(self, change_reporter=None):
+    def update(self, change_reporter=None, possible_transports=None,
+               revision=None, old_tip=_marker):
         """Update a working tree along its branch.

         This will update the branch if its bound too, which means we have
@@ -1994 +2235 @@
         - Merge current state -> basis tree of the master w.r.t. the old tree
         - Do a 'normal' merge of the old branch basis if it is relevant.
+        :param revision: The target revision to update to. Must be in the
+        :param old_tip: If branch.update() has already been run, the value it
+            returned (old tip of the branch or None). _marker is used
-        if self.branch.get_master_branch() is not None:
+        if self.branch.get_bound_location() is not None:
             self.lock_write()
-            update_branch = True
+            update_branch = (old_tip is self._marker)
             self.lock_tree_write()
             update_branch = False
             if update_branch:
-                old_tip = self.branch.update()
+                old_tip = self.branch.update(possible_transports)
-            return self._update_tree(old_tip, change_reporter)
+                if old_tip is self._marker:
+            return self._update_tree(old_tip, change_reporter, revision)
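# A minimal sketch of update() with the newer signature: bring a checkout back
# in sync with its branch (and its master branch, if bound), optionally to a
# specific revision. The conflict count it returns is what 'bzr update'
# reports to the user.
from bzrlib.workingtree import WorkingTree

tree = WorkingTree.open('.')
nb_conflicts = tree.update()
if nb_conflicts:
    print 'update left %d conflicts to resolve' % nb_conflicts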

     @needs_tree_write_lock
-    def _update_tree(self, old_tip=None, change_reporter=None):
+    def _update_tree(self, old_tip=None, change_reporter=None, revision=None):
         """Update a tree to the master branch.

         :param old_tip: if supplied, the previous tip revision the branch,
         # cant set that until we update the working trees last revision to be
         # one from the new branch, because it will just get absorbed by the
         # parent de-duplication logic.
         # We MUST save it even if an error occurs, because otherwise the users
         # local work is unreferenced and will appear to have been lost.
             last_rev = self.get_parent_ids()[0]
         except IndexError:
-        if last_rev != self.branch.last_revision():
-            # merge tree state up to new branch tip.
+            last_rev = _mod_revision.NULL_REVISION
+        if revision is None:
+            revision = self.branch.last_revision()
+        old_tip = old_tip or _mod_revision.NULL_REVISION
+        if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
+            # the branch we are bound to was updated
+            # merge those changes in first
+            base_tree = self.basis_tree()
+            other_tree = self.branch.repository.revision_tree(old_tip)
+            nb_conflicts = merge.merge_inner(self.branch, other_tree,
+                                             base_tree, this_tree=self,
+                                             change_reporter=change_reporter)
+                self.add_parent_tree((old_tip, other_tree))
+                trace.note('Rerun update after fixing the conflicts.')
+        if last_rev != _mod_revision.ensure_null(revision):
+            # the working tree is up to date with the branch
+            # we can merge the specified revision from master
+            to_tree = self.branch.repository.revision_tree(revision)
+            to_root_id = to_tree.get_root_id()
             basis = self.basis_tree()
             basis.lock_read()
-                to_tree = self.branch.basis_tree()
-                if basis.inventory.root is None:
-                    self.set_root_id(to_tree.inventory.root.file_id)
+                if (basis.inventory.root is None
+                    or basis.inventory.root.file_id != to_root_id):
+                    self.set_root_id(to_root_id)
-            result += merge.merge_inner(
-                                  change_reporter=change_reporter)
+            # determine the branch point
+            graph = self.branch.repository.get_graph()
+            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
+            base_tree = self.branch.repository.revision_tree(base_rev_id)
+            nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
+                                             change_reporter=change_reporter)
+            self.set_last_revision(revision)
             # TODO - dedup parents list with things merged by pull ?
             # reuse the tree we've updated to to set the basis:
-            parent_trees = [(self.branch.last_revision(), to_tree)]
+            parent_trees = [(revision, to_tree)]
             merges = self.get_parent_ids()[1:]
             # Ideally we ask the tree for the trees here, that way the working
-            # tree can decide whether to give us teh entire tree or give us a
+            # tree can decide whether to give us the entire tree or give us a
             # lazy initialised tree. dirstate for instance will have the trees
             # in ram already, whereas a last-revision + basis-inventory tree
             # will not, but also does not need them when setting parents.
             for parent in merges:
                 parent_trees.append(
                     (parent, self.branch.repository.revision_tree(parent)))
-            if old_tip is not None:
+            if not _mod_revision.is_null(old_tip):
                 parent_trees.append(
                     (old_tip, self.branch.repository.revision_tree(old_tip)))
             self.set_parent_trees(parent_trees)
             last_rev = parent_trees[0][0]
-            # the working tree had the same last-revision as the master
-            # branch did. We may still have pivot local work from the local
-            # branch into old_tip:
-            if old_tip is not None:
-                self.add_parent_tree_id(old_tip)
-        if old_tip and old_tip != last_rev:
-            # our last revision was not the prior branch last revision
-            # and we have converted that last revision to a pending merge.
-            # base is somewhere between the branch tip now
-            # and the now pending merge
-            # Since we just modified the working tree and inventory, flush out
-            # the current state, before we modify it again.
-            # TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
-            # requires it only because TreeTransform directly munges the
-            # inventory and calls tree._write_inventory(). Ultimately we
-            # should be able to remove this extra flush.
-            graph = self.branch.repository.get_graph()
-            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
-            base_tree = self.branch.repository.revision_tree(base_rev_id)
-            other_tree = self.branch.repository.revision_tree(old_tip)
-            result += merge.merge_inner(
-                                  change_reporter=change_reporter)
@@ -2101 +2346 @@
     def _write_hashcache_if_dirty(self):
         """Write out the hashcache if it is dirty."""
 2186/2431  current_inv = None
 2187/2432  inv_finished = True
 2188/2433  while not inv_finished or not disk_finished:
+2435           ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
+2436               cur_disk_dir_content) = current_disk
+2438           ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
+2439               cur_disk_dir_content) = ((None, None), None)
 2189/2440      if not disk_finished:
 2190/2441          # strip out .bzr dirs
-2191               if current_disk[0][1][top_strip_len:] == '':
-2192                   # osutils.walkdirs can be made nicer -
+2442               if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
+2443                       len(cur_disk_dir_content) > 0):
+2444                   # osutils.walkdirs can be made nicer -
 2193/2445              # yield the path-from-prefix rather than the pathjoined
-2195                   bzrdir_loc = bisect_left(current_disk[1], ('.bzr', '.bzr'))
-2196                   if current_disk[1][bzrdir_loc][0] == '.bzr':
+2447                   bzrdir_loc = bisect_left(cur_disk_dir_content,
+2449                   if (bzrdir_loc < len(cur_disk_dir_content)
+2450                       and self.bzrdir.is_control_filename(
+2451                           cur_disk_dir_content[bzrdir_loc][0])):
 2197/2452              # we dont yield the contents of, or, .bzr itself.
-2198                   del current_disk[1][bzrdir_loc]
+2453                   del cur_disk_dir_content[bzrdir_loc]
 2199/2454      if inv_finished:
 2200/2455          # everything is unknown
 2203/2458          # everything is missing
-2206           direction = cmp(current_inv[0][0], current_disk[0][0])
+2461           direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
 2207/2462      if direction > 0:
 2208/2463          # disk is before inventory - unknown
 2209/2464          dirblock = [(relpath, basename, kind, stat, None, None) for
-2210                   relpath, basename, kind, stat, top_path in current_disk[1]]
-2211               yield (current_disk[0][0], None), dirblock
+2465                   relpath, basename, kind, stat, top_path in
+2466                   cur_disk_dir_content]
+2467               yield (cur_disk_dir_relpath, None), dirblock
 2213/2469          current_disk = disk_iterator.next()
 2214/2470      except StopIteration:
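In the new code the control directory is located with `bisect_left` and confirmed with `bzrdir.is_control_filename()` instead of comparing the literal name '.bzr'. A self-contained sketch of that filtering step; the dirblock entries and the predicate wiring are invented for illustration, only the bisect-and-check idea comes from the lines above.

    from bisect import bisect_left

    def strip_control_dir(dir_content, is_control_filename):
        # dir_content is a sorted list of (name, path) pairs for one directory,
        # in the spirit of the blocks produced by osutils.walkdirs.
        loc = bisect_left(dir_content, ('.bzr', '.bzr'))
        if loc < len(dir_content) and is_control_filename(dir_content[loc][0]):
            # Drop the control directory so it is never reported or recursed into.
            del dir_content[loc]

    block = [('.bzr', 'proj/.bzr'), ('README', 'proj/README'), ('src', 'proj/src')]
    strip_control_dir(block, lambda name: name == '.bzr')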
 2288/2545  # FIXME: stash the node in pending
 2289/2546  entry = inv[top_id]
-2290       for name, child in entry.sorted_children():
-2291           dirblock.append((relroot + name, name, child.kind, None,
-2292               child.file_id, child.kind
+2547       if entry.kind == 'directory':
+2548           for name, child in entry.sorted_children():
+2549               dirblock.append((relroot + name, name, child.kind, None,
+2550                   child.file_id, child.kind
 2294/2552  yield (currentdir[0], entry.file_id), dirblock
 2295/2553  # push the user specified dirs from dirblock
 2296/2554  for dir in reversed(dirblock):
+2620       def _get_rules_searcher(self, default_searcher):
+2621           """See Tree._get_rules_searcher."""
+2622           if self._rules_searcher is None:
+2623               self._rules_searcher = super(WorkingTree,
+2624                   self)._get_rules_searcher(default_searcher)
+2625           return self._rules_searcher
+2627       def get_shelf_manager(self):
+2628           """Return the ShelfManager for this WorkingTree."""
+2629           from bzrlib.shelf import ShelfManager
+2630           return ShelfManager(self, self._transport)
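Both additions are accessors: `_get_rules_searcher` memoises the searcher built by the base `Tree` class, while `get_shelf_manager` builds a `ShelfManager` over the tree's transport on each call. A hedged usage sketch follows; the `active_shelves()` call is an assumption about the bzrlib.shelf API and is shown only to indicate how callers reach the manager.

    tree = WorkingTree.open('.')          # any working tree
    manager = tree.get_shelf_manager()    # ShelfManager(tree, tree._transport)
    # Assumed API: list the shelf ids currently stored for this tree.
    print manager.active_shelves()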
 2344/2633  class WorkingTree2(WorkingTree):
 2345/2634      """This is the Format 2 working tree.
-2347       This was the first weave based working tree.
+2636       This was the first weave based working tree.
 2348/2637      - uses os locks for locking.
 2349/2638      - uses the branch last-revision.
 2360/2649          if self._inventory is None:
 2361/2650              self.read_working_inventory()
+2652       def _get_check_refs(self):
+2653           """Return the references needed to perform a check of this tree."""
+2654           return [('trees', self.last_revision())]
 2363/2656  def lock_tree_write(self):
 2364/2657      """See WorkingTree.lock_tree_write().
 2366/2659      In Format2 WorkingTrees we have a single lock for the branch and tree
 2367/2660      so lock_tree_write() degrades to lock_write().
+2662           :return: An object with an unlock method which will release the lock
 2369/2665      self.branch.lock_write()
-2371           return self._control_files.lock_write()
+2667           self._control_files.lock_write()
 2373/2670          self.branch.unlock()
 2376/2673  def unlock(self):
+2674           # do non-implementation specific cleanup
 2377/2677      # we share control files:
 2378/2678      if self._control_files._lock_count == 3:
 2379/2679          # _inventory_is_modified is always False during a read lock.
 2380/2680          if self._inventory_is_modified:
 2382/2682          self._write_hashcache_if_dirty()
 2384/2684      # reverse order of locking.
 2386/2686      return self._control_files.unlock()
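The changed lines stop returning the raw control-files token; the docstring now promises an object with an unlock method, and `LogicalLockResult` is already imported at the top of this file. A sketch of the degraded lock under that assumption, including the cleanup path the excerpt only hints at:

    def lock_tree_write(self):
        # Branch and tree share one lock in format 2, so take both, and make
        # sure the branch lock is released if the tree lock cannot be taken.
        self.branch.lock_write()
        try:
            self._control_files.lock_write()
            return LogicalLockResult(self.unlock)
        except:
            self.branch.unlock()
            raise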
 2402/2702  def _last_revision(self):
 2403/2703      """See Mutable.last_revision."""
-2405           return osutils.safe_revision_id(
-2406               self._control_files.get('last-revision').read())
+2705           return self._transport.get_bytes('last-revision')
 2407/2706      except errors.NoSuchFile:
+2707               return _mod_revision.NULL_REVISION
 2410/2709  def _change_last_revision(self, revision_id):
 2411/2710      """See WorkingTree._change_last_revision."""
-2412           if revision_id is None or revision_id == NULL_REVISION:
+2711           if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
-2414               self._control_files._transport.delete('last-revision')
+2713               self._transport.delete('last-revision')
 2415/2714          except errors.NoSuchFile:
-2419           self._control_files.put_bytes('last-revision', revision_id)
+2718           self._transport.put_bytes('last-revision', revision_id,
+2719               mode=self.bzrdir._get_file_mode())
+2722       def _get_check_refs(self):
+2723           """Return the references needed to perform a check of this tree."""
+2724           return [('trees', self.last_revision())]
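Read and write of the `last-revision` control file now go through the tree's own transport. A sketch with the `try:` lines that the excerpt elides restored; the overall structure and return values are inferred, so treat this as illustrative rather than the verbatim source.

    def _last_revision(self):
        """See Mutable.last_revision."""
        try:
            return self._transport.get_bytes('last-revision')
        except errors.NoSuchFile:
            return _mod_revision.NULL_REVISION

    def _change_last_revision(self, revision_id):
        """See WorkingTree._change_last_revision."""
        if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
            try:
                self._transport.delete('last-revision')
            except errors.NoSuchFile:
                pass
            return False
        self._transport.put_bytes('last-revision', revision_id,
            mode=self.bzrdir._get_file_mode())
        return True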
 2422/2726  @needs_tree_write_lock
 2423/2727  def set_conflicts(self, conflicts):
-2424           self._put_rio('conflicts', conflicts.to_stanzas(),
+2728           self._put_rio('conflicts', conflicts.to_stanzas(),
 2425/2729          CONFLICT_HEADER_1)
 2427/2731  @needs_tree_write_lock
 2434/2738  @needs_read_lock
 2435/2739  def conflicts(self):
-2437           confile = self._control_files.get('conflicts')
+2741           confile = self._transport.get('conflicts')
 2438/2742      except errors.NoSuchFile:
 2439/2743          return _mod_conflicts.ConflictList()
-2441           if confile.next() != CONFLICT_HEADER_1 + '\n':
+2746               if confile.next() != CONFLICT_HEADER_1 + '\n':
+2747                   raise errors.ConflictFormatError()
+2748           except StopIteration:
 2442/2749          raise errors.ConflictFormatError()
-2443           except StopIteration:
-2444               raise errors.ConflictFormatError()
-2445           return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
+2750           return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
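The conflict list is read from the `conflicts` control file, validated against CONFLICT_HEADER_1, and parsed with RioReader. A sketch of the new read path; the try/finally around the file handle is an assumption about the lines the excerpt elides.

    @needs_read_lock
    def conflicts(self):
        try:
            confile = self._transport.get('conflicts')
        except errors.NoSuchFile:
            return _mod_conflicts.ConflictList()
        try:
            try:
                # The first line must be the known header, otherwise the file
                # is from an unrecognised (or corrupted) format.
                if confile.next() != CONFLICT_HEADER_1 + '\n':
                    raise errors.ConflictFormatError()
            except StopIteration:
                raise errors.ConflictFormatError()
            return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
        finally:
            confile.close()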
 2447/2754  def unlock(self):
+2755           # do non-implementation specific cleanup
 2448/2757      if self._control_files._lock_count == 1:
 2449/2758          # _inventory_is_modified is always False during a read lock.
 2450/2759          if self._inventory_is_modified:
 2573/2874      """See WorkingTreeFormat.get_format_description()."""
 2574/2875      return "Working tree format 2"
-2576       def stub_initialize_remote(self, control_files):
-2577           """As a special workaround create critical control files for a remote working tree
+2877       def _stub_initialize_on_transport(self, transport, file_mode):
+2878           """Workaround: create control files for a remote working tree.
 2579/2880      This ensures that it can later be updated and dealt with locally,
-2580           since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
+2881           since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
 2581/2882      no working tree. (See bug #43064).
 2583/2884      sio = StringIO()
-2585           xml5.serializer_v5.write_inventory(inv, sio)
+2885           inv = inventory.Inventory()
+2886           xml5.serializer_v5.write_inventory(inv, sio, working=True)
-2587           control_files.put('inventory', sio)
-2589           control_files.put_bytes('pending-merges', '')
-2592       def initialize(self, a_bzrdir, revision_id=None):
+2888           transport.put_file('inventory', sio, file_mode)
+2889           transport.put_bytes('pending-merges', '', file_mode)
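The workaround now serialises a fresh, empty inventory (with working=True) straight onto the transport instead of going through control_files. A compact sketch of the new helper; the seek before the upload is an assumption about an elided line.

    def _stub_initialize_on_transport(self, transport, file_mode):
        """Workaround: create control files for a remote working tree."""
        # An empty inventory keeps BzrDirFormat5/6 directories openable even
        # though no real working tree exists there (bug #43064).
        sio = StringIO()
        inv = inventory.Inventory()
        xml5.serializer_v5.write_inventory(inv, sio, working=True)
        sio.seek(0)
        transport.put_file('inventory', sio, file_mode)
        transport.put_bytes('pending-merges', '', file_mode)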
+2891       def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
+2892                      accelerator_tree=None, hardlink=False):
 2593/2893      """See WorkingTreeFormat.initialize()."""
 2594/2894      if not isinstance(a_bzrdir.transport, LocalTransport):
 2595/2895          raise errors.NotLocalUrl(a_bzrdir.transport.base)
-2596           branch = a_bzrdir.open_branch()
-2597           if revision_id is not None:
-2598               revision_id = osutils.safe_revision_id(revision_id)
-2601               revision_history = branch.revision_history()
-2603               position = revision_history.index(revision_id)
-2605               raise errors.NoSuchRevision(branch, revision_id)
-2606               branch.set_revision_history(revision_history[:position + 1])
-2609           revision = branch.last_revision()
+2896           if from_branch is not None:
+2897               branch = from_branch
+2899               branch = a_bzrdir.open_branch()
+2900           if revision_id is None:
+2901               revision_id = _mod_revision.ensure_null(branch.last_revision())
+2904           branch.generate_revision_history(revision_id)
+2907           inv = inventory.Inventory()
 2611/2908      wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
 2614/2911                        _internal=True,
 2616/2913                        _bzrdir=a_bzrdir)
-2617           basis_tree = branch.repository.revision_tree(revision)
+2914           basis_tree = branch.repository.revision_tree(revision_id)
 2618/2915      if basis_tree.inventory.root is not None:
-2619               wt.set_root_id(basis_tree.inventory.root.file_id)
+2916               wt.set_root_id(basis_tree.get_root_id())
 2620/2917      # set the parent list and cache the basis tree.
-2621           wt.set_parent_trees([(revision, basis_tree)])
+2918           if _mod_revision.is_null(revision_id):
+2921           parent_trees = [(revision_id, basis_tree)]
+2922           wt.set_parent_trees(parent_trees)
 2622/2923      transform.build_tree(basis_tree, wt)
 2678/2979  def _open_control_files(self, a_bzrdir):
 2679/2980      transport = a_bzrdir.get_workingtree_transport(None)
-2680           return LockableFiles(transport, self._lock_file_name,
+2981           return LockableFiles(transport, self._lock_file_name,
 2681/2982                           self._lock_class)
-2683       def initialize(self, a_bzrdir, revision_id=None):
+2984       def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
+2985                      accelerator_tree=None, hardlink=False):
 2684/2986      """See WorkingTreeFormat.initialize().
-2686           revision_id allows creating a working tree at a different
-2687           revision than the branch is at.
+2988           :param revision_id: if supplied, create a working tree at a different
+2989               revision than the branch is at.
+2990           :param accelerator_tree: A tree which can be used for retrieving file
+2991               contents more quickly than the revision tree, i.e. a workingtree.
+2992               The revision tree will be used for cases where accelerator_tree's
+2993               content is different.
+2994           :param hardlink: If true, hard-link files from accelerator_tree,
 2689/2997      if not isinstance(a_bzrdir.transport, LocalTransport):
 2690/2998          raise errors.NotLocalUrl(a_bzrdir.transport.base)
 2692/3000      control_files = self._open_control_files(a_bzrdir)
 2693/3001      control_files.create_lock()
 2694/3002      control_files.lock_write()
-2695           control_files.put_utf8('format', self.get_format_string())
-2696           branch = a_bzrdir.open_branch()
+3003           transport.put_bytes('format', self.get_format_string(),
+3004               mode=a_bzrdir._get_file_mode())
+3005           if from_branch is not None:
+3006               branch = from_branch
+3008               branch = a_bzrdir.open_branch()
 2697/3009      if revision_id is None:
-2698               revision_id = branch.last_revision()
-2700           revision_id = osutils.safe_revision_id(revision_id)
+3010               revision_id = _mod_revision.ensure_null(branch.last_revision())
 2701/3011      # WorkingTree3 can handle an inventory which has a unique root id.
 2702/3012      # as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
 2703/3013      # those trees. And because there isn't a format bump inbetween, we
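The new `accelerator_tree` and `hardlink` parameters documented above let tree creation copy (or hard-link) file content from an existing working tree instead of extracting it from the repository. A hedged usage sketch; it assumes the owning BzrDir forwards these parameters through `create_workingtree`, and the path and objects are invented for illustration.

    # Create a working tree for `a_bzrdir`, reusing file texts from an
    # existing checkout where they already match the requested revision.
    existing_tree = WorkingTree.open('/path/to/existing/checkout')
    wt = a_bzrdir.create_workingtree(revision_id=None,
                                     accelerator_tree=existing_tree,
                                     hardlink=False)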