        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):
                yield ie.file_id

    def all_file_ids(self):
        """See Tree.iter_all_file_ids"""
        return set(self.inventory)

    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)

    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        # at this point ?
        try:
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, its a problem, if its not
            # its a ghost.
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                       _mod_revision.NULL_REVISION)

    def _cleanup(self):
        self._flush_ignore_list_cache()

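    # Illustrative usage sketch (an assumption, not part of the original
    # module): comparing a working tree against its basis tree. 'wt' stands
    # for an already opened WorkingTree.
    #
    #   wt.lock_read()
    #   try:
    #       basis = wt.basis_tree()
    #       basis.lock_read()
    #       try:
    #           delta = wt.changes_from(basis)
    #       finally:
    #           basis.unlock()
    #   finally:
    #       wt.unlock()
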
    @deprecated_method(zero_eight)
    def create(branch, directory):
        """Create a workingtree for branch at directory.

        If existing_directory already exists it must have a .bzr directory.
        If it does not exist, it will be created.

        This returns a new WorkingTree object for the new checkout.

        TODO FIXME RBC 20060124 when we have checkout formats in place this
        should accept an optional revisionid to checkout [and reject this if
        checking out into the same dir as a pre-checkout-aware branch format.]

        XXX: When BzrDir is present, these should be created through that
        interface instead.
        """
        warnings.warn('delete WorkingTree.create', stacklevel=3)
        transport = get_transport(directory)
        if branch.bzrdir.root_transport.base == transport.base:
            # same dir
            return branch.bzrdir.create_workingtree()
        # different directory,
        # create a branch reference
        # and now a working tree.
        raise NotImplementedError

    @deprecated_method(zero_eight)
    def create_standalone(directory):
        """Create a checkout and a branch and a repo at directory.

        Directory must exist and be empty.

        please use BzrDir.create_standalone_workingtree
        """
        return bzrdir.BzrDir.create_standalone_workingtree(directory)

    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If it is a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))

    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

    def get_file_with_stat(self, file_id, path=None, filtered=True,
        _fstat=os.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        return self.get_file(file_id, path=path, filtered=filtered).read()

    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()

    def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
        """See Tree.annotate_iter

        This implementation will use the basis tree implementation if possible.
        Lines not in the basis are attributed to CURRENT_REVISION

        If there are pending merges, lines added by those merges will be
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        maybe_file_parent_keys = []
        for parent_id in self.get_parent_ids():
            try:
                parent_tree = self.revision_tree(parent_id)
            except errors.NoSuchRevisionInTree:
                parent_tree = self.branch.repository.revision_tree(parent_id)
            parent_tree.lock_read()
            try:
                if file_id not in parent_tree:
                    continue
                ie = parent_tree.inventory[file_id]
                if ie.kind != 'file':
                    # Note: this is slightly unnecessary, because symlinks and
                    # directories have a "text" which is the empty text, and we
                    # know that won't mess up annotations. But it seems cleaner
                    continue
                parent_text_key = (file_id, ie.revision)
                if parent_text_key not in maybe_file_parent_keys:
                    maybe_file_parent_keys.append(parent_text_key)
            finally:
                parent_tree.unlock()
        graph = _mod_graph.Graph(self.branch.repository.texts)
        heads = graph.heads(maybe_file_parent_keys)
        file_parent_keys = []
        for key in maybe_file_parent_keys:
            if key in heads:
                file_parent_keys.append(key)

        # Now we have the parents of this content
        annotator = self.branch.repository.texts.get_annotator()
        text = self.get_file(file_id).read()
        this_key = (file_id, default_revision)
        annotator.add_special_text(this_key, file_parent_keys, text)
        annotations = [(key[-1], line)
                       for key, line in annotator.annotate_flat(this_key)]
        return annotations

    def _get_ancestors(self, default_revision):
        ancestors = set([default_revision])
        for parent_id in self.get_parent_ids():
            ancestors.update(self.branch.repository.get_ancestry(
                             parent_id, topo_sorted=False))
        return ancestors

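    # Illustrative usage sketch (an assumption, not from the original file):
    # annotating a versioned file; uncommitted lines come back attributed to
    # CURRENT_REVISION.
    #
    #   wt.lock_read()
    #   try:
    #       file_id = wt.path2id('README')
    #       for revision_id, line in wt.annotate_iter(file_id):
    #           print revision_id, line,
    #   finally:
    #       wt.unlock()
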
    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_bytes = self._transport.get_bytes('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in osutils.split_lines(merges_bytes):
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
        return parents

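    # Illustrative sketch (assumption): the first parent id is the basis
    # revision, any further ids are pending merges.
    #
    #   parents = wt.get_parent_ids()
    #   if parents:
    #       basis_id = parents[0]
    #       pending_merge_ids = parents[1:]
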
    def get_root_id(self):
        """Return the id of this trees root"""
        return self._inventory.root.file_id

    def _get_store_filename(self, file_id):
        ## XXX: badly named; this is not in the store at all
        return self.abspath(self.id2path(file_id))

    def clone(self, to_bzrdir, revision_id=None):
        """Duplicate this working tree into to_bzr, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see bzrdir.sprout()

        revision
            If not None, the cloned tree will have its last revision set to
            revision, and difference between the source trees last revision
            and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = to_bzrdir.create_workingtree()
        self.copy_content_into(result, revision_id)
        return result

    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
        else:
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            merge.transform_tree(tree, self)
            tree.set_parent_ids([revision_id])

    def commit(self, message=None, revprops=None, *args, **kwargs):
        # avoid circular imports
        from bzrlib.commit import Commit
        if revprops is None:
            revprops = {}
        if not 'branch-nick' in revprops:
            revprops['branch-nick'] = self.branch.nick
        # args for wt.commit start at message from the Commit.commit method,
        # but with branch a kwarg now, passing in args as is results in the
        # message being used for the branch
        args = (DEPRECATED_PARAMETER, message, ) + args
        committed_id = Commit().commit(working_tree=self, revprops=revprops,
                                       *args, **kwargs)
        self._set_inventory(self.read_working_inventory())
        return committed_id

    def id2abspath(self, file_id):
        return self.abspath(self.id2path(file_id))

    def has_id(self, file_id):
        # files that have been deleted are excluded
        inv = self._inventory
        if not inv.has_id(file_id):
            return False
        path = inv.id2path(file_id)
        return osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    __contains__ = has_id

    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        # XXX: this returns the on-disk size; it should probably return the
        # canonical size
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        if file_id is None:
            # For unversioned files on win32, we just assume they are not
            # executable
            return False
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat

    def add(self, files, ids=None):
        """Make files versioned.

        Note that the command line normally calls smart_add instead,
        which can automatically recurse.

        This adds the files to the inventory, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not exist.

        TODO: Perhaps callback with the ids and paths as they're added.
        """
        if isinstance(files, basestring):
            assert(ids is None or isinstance(ids, basestring))
            files = [files]
            if ids is not None:
                ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f, file_id in zip(files, ids):
            if self.is_control_filename(f):
                raise errors.ForbiddenControlFileError(filename=f)

            if len(splitpath(f)) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = normpath(self.abspath(f))
            try:
                kind = file_kind(fullpath)
            except OSError, e:
                if e.errno == errno.ENOENT:
                    raise NoSuchFile(fullpath)
            if not InventoryEntry.versionable_kind(kind):
                raise errors.BadFileKindError(filename=f, kind=kind)
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                inv.add_path(f, kind=kind, file_id=file_id)
        self._write_inventory(inv)

    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.inventory
        for f, file_id, kind in zip(files, ids, kinds):
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                inv.add_path(f, kind=kind, file_id=file_id)
            self._inventory_is_modified = True

    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)

    @needs_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as its not the first parent to be added,
            or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available, using
        this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        updated = False
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
            updated = True
        if updated:
            self.set_parent_ids(parents, allow_leftmost_as_ghost=True)

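    # Illustrative sketch (assumption): recording a merge from another branch
    # by adding its tip as a pending (right hand) parent.
    #
    #   wt.lock_write()
    #   try:
    #       wt.add_parent_tree_id(merged_revision_id)
    #   finally:
    #       wt.unlock()
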
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file.
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            return self._file_content_summary(path, stat_result)
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            target = osutils.readlink(abspath)
            return ('symlink', None, None, target)
        else:
            return (kind, None, None, None)

    def _file_content_summary(self, path, stat_result):
        size = stat_result.st_size
        executable = self._is_executable_from_path_and_stat(path, stat_result)
        # try for a stat cache lookup
        return ('file', size, executable, self._sha_from_stat(
            path, stat_result))

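    # Illustrative sketch (assumption): the summary tuple is
    # (kind, size, executable, sha1_or_link_target), with None for fields
    # that do not apply to the kind in question.
    #
    #   kind, size, executable, sha1 = wt.path_content_summary('hello.c')
    #   if kind == 'file' and executable:
    #       pass  # e.g. preserve the execute bit on export
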
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self.bzrdir._get_file_mode())

    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        which are not ancestors of another requested parent.
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids

    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)

    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                                        leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)

@needs_tree_write_lock
658
merges_file = self._control_files.get_utf8('pending-merges')
662
for l in merges_file.readlines():
663
p.append(l.rstrip('\n'))
874
667
def set_pending_merges(self, rev_list):
875
parents = self.get_parent_ids()
876
leftmost = parents[:1]
877
new_parents = leftmost + rev_list
878
self.set_parent_ids(new_parents)
668
self._control_files.put_utf8('pending-merges', '\n'.join(rev_list))
880
@needs_tree_write_lock
881
671
def set_merge_modified(self, modified_hashes):
882
672
def iter_stanzas():
883
673
for file_id, hash in modified_hashes.iteritems():
884
yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
674
yield Stanza(file_id=file_id, hash=hash)
885
675
self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._transport.put_file(filename, my_file,
            mode=self.bzrdir._get_file_mode())

    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
                          merge_type=None, force=False):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        merger = Merger(self.branch, this_tree=self)
        # check that there are no local alterations
        if not force and self.has_changes():
            raise errors.UncommittedChanges(self)
        if to_revision is None:
            to_revision = _mod_revision.ensure_null(branch.last_revision())
        merger.other_rev_id = to_revision
        if _mod_revision.is_null(merger.other_rev_id):
            raise errors.NoCommits(branch)
        self.branch.fetch(branch, last_revision=merger.other_rev_id)
        merger.other_basis = merger.other_rev_id
        merger.other_tree = self.branch.repository.revision_tree(
            merger.other_rev_id)
        merger.other_branch = branch
        if from_revision is None:
            merger.find_base()
        else:
            merger.set_base_revision(from_revision, branch)
        if merger.base_rev_id == merger.other_rev_id:
            raise errors.PointlessMerge
        merger.backup_files = False
        if merge_type is None:
            merger.merge_type = Merge3Merger
        else:
            merger.merge_type = merge_type
        merger.set_interesting_files(None)
        merger.show_base = False
        merger.reprocess = False
        conflicts = merger.do_merge()
        merger.set_pending()
        return conflicts

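    # Illustrative sketch (assumption): merging another branch's tip into the
    # working tree, then committing the result if the merge was clean.
    #
    #   other = Branch.open('../other-branch')
    #   conflicts = wt.merge_from_branch(other)
    #   if not conflicts:
    #       wt.commit('merge other-branch')
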
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._transport.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        try:
            merge_hashes = {}
            try:
                if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                    raise errors.MergeModifiedFormatError()
            except StopIteration:
                raise errors.MergeModifiedFormatError()
            for s in RioReader(hashfile):
                # RioReader reads in Unicode, so convert file_ids back to utf8
                file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
                if file_id not in self.inventory:
                    continue
                text_hash = s.get("hash")
                if text_hash == self.get_file_sha1(file_id):
                    merge_hashes[file_id] = text_hash
            return merge_hashes
        finally:
            hashfile.close()

    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id

    def get_symlink_target(self, file_id):
        abspath = self.id2abspath(file_id)
        target = osutils.readlink(abspath)
        return target

    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                                          'Trees have the same root')
        try:
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
        try:
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        finally:
            other_tree.unlock()
        other_tree.bzrdir.retire_bzrdir()

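    # Illustrative sketch (assumption): absorbing a nested working tree that
    # lives in a versioned subdirectory of this tree.
    #
    #   child = WorkingTree.open('subdir/nested')
    #   wt.lock_write()
    #   try:
    #       wt.subsume(child)
    #   finally:
    #       wt.unlock()
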
    def _setup_directory_is_tree_reference(self):
        if self._branch.repository._format.supports_tree_reference:
            self._directory_is_tree_reference = \
                self._directory_may_be_tree_reference
        else:
            self._directory_is_tree_reference = \
                self._directory_is_never_tree_reference

    def _directory_is_never_tree_reference(self, relpath):
        return False

    def _directory_may_be_tree_reference(self, relpath):
        # as a special case, if a directory contains control files then
        # it's a tree reference, except that the root of the tree is not
        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
        # TODO: We could ask all the control formats whether they
        # recognize this directory, but at the moment there's no cheap api
        # to do that. Since we probably can only nest bzr checkouts and
        # they always use this name it's ok for now. -- mbp 20060306
        #
        # FIXME: There is an unhandled case here of a subdirectory
        # containing .bzr but not a branch; that will probably blow up
        # when you try to commit it. It might happen if there is a
        # checkout in a subdirectory. This can be avoided by not adding
        # it.

    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        self.flush()
        def mkdirs(path):
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()
            return transport

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
        if format is None:
            format = self.bzrdir.cloning_metadir()
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
        try:
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
        if not repo.supports_rich_root():
            raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = inventory.Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
        return wt

    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file,
            working=True)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)

    def flush(self):
        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._transport.put_file('inventory', sio,
            mode=self.bzrdir._get_file_mode())
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))

    def file_class(self, filename):
        if self.path2id(filename):
            return 'V'
        elif self.is_ignored(filename):
            return 'I'
        else:
            return '?'

    def list_files(self, include_root=False, from_dir=None, recursive=True):
        """List all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.
        This does not include files that have been deleted in this
        tree. Skips the control directory.

        :param include_root: if True, do not return an entry for the root
        :param from_dir: start from this directory or None for the root
        :param recursive: whether to recurse into subdirectories or not
        """
        # list_files is an iterator, so @needs_read_lock doesn't work properly
        # with it. So callers should be careful to always read_lock the tree.
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)

        inv = self.inventory
        if from_dir is None and include_root is True:
            yield ('', 'V', 'directory', inv.root.file_id, inv.root)
        # Convert these into local objects to save lookup times
        pathjoin = osutils.pathjoin
        file_kind = self._kind

        # transport.base ends in a slash, we want the piece
        # between the last two slashes
                except KeyError:
                    yield fp[1:], c, fk, None, TreeEntry()

                if fk != 'directory':
                    continue

                # But do this child first if recursing down
                if recursive:
                    new_children = os.listdir(fap)
                    new_children.sort()
                    new_children = collections.deque(new_children)
                    stack.append((f_ie.file_id, fp, fap, new_children))
                    # Break out of inner loop,
                    # so that we start outer loop with child
                    break
            else:
                # if we finished all children, pop it off the stack
                stack.pop()

    @needs_tree_write_lock
    def move(self, from_paths, to_dir=None, after=False, **kwargs):
        """Rename files.

        to_dir must exist in the inventory.

        If to_dir exists and is a directory, the files are moved into
        it, keeping their old names.

        Note that to_dir is only the last component of the new name;
        this doesn't change the directory.

        For each entry in from_paths the move mode will be determined
        independently.

        The first mode moves the file in the filesystem and updates the
        inventory. The second mode only updates the inventory without
        touching the file on the filesystem. This is the new mode introduced
        in version 0.15.

        move uses the second mode if 'after == True' and the target is not
        versioned but present in the working tree.

        move uses the second mode if 'after == False' and the source is
        versioned but no longer in the working tree, and the target is not
        versioned but present in the working tree.

        move uses the first mode if 'after == False' and the source is
        versioned and present in the working tree, and the target is not
        versioned and not present in the working tree.

        Everything else results in an error.

        This returns a list of (from_path, to_path) pairs for each
        entry that is moved.
        """
        rename_entries = []
        rename_tuples = []

        # check for deprecated use of signature
        if to_dir is None:
            to_dir = kwargs.get('to_name', None)
            if to_dir is None:
                raise TypeError('You must supply a target directory')
            else:
                symbol_versioning.warn('The parameter to_name was deprecated'
                                       ' in version 0.13. Use to_dir instead',
                                       DeprecationWarning)

        # check destination directory
        if isinstance(from_paths, basestring):
            raise ValueError()
        inv = self.inventory
        to_abs = self.abspath(to_dir)
        if not isdir(to_abs):
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotADirectory(to_abs))
        if not self.has_filename(to_dir):
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotInWorkingDirectory(to_dir))
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id is None:
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotVersionedError(path=str(to_dir)))

        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind != 'directory':
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotADirectory(to_abs))

        # create rename entries and tuples
        for from_rel in from_paths:
            from_tail = splitpath(from_rel)[-1]
            from_id = inv.path2id(from_rel)
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel, to_dir,
                    errors.NotVersionedError(path=str(from_rel)))

            from_entry = inv[from_id]
            from_parent_id = from_entry.parent_id
            to_rel = pathjoin(to_dir, from_tail)
            rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                         from_id=from_id,
                                         from_tail=from_tail,
                                         from_parent_id=from_parent_id,
                                         to_rel=to_rel, to_tail=from_tail,
                                         to_parent_id=to_dir_id)
            rename_entries.append(rename_entry)
            rename_tuples.append((from_rel, to_rel))

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        original_modified = self._inventory_is_modified
        try:
            if len(from_paths):
                self._inventory_is_modified = True
            self._move(rename_entries)
        except:
            # restore the inventory on error
            self._inventory_is_modified = original_modified
            raise
        self._write_inventory(inv)
        return rename_tuples

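    # Illustrative sketch (assumption): moving versioned files into an
    # already-versioned directory, or recording a move that was already done
    # on disk by passing after=True.
    #
    #   wt.lock_tree_write()
    #   try:
    #       wt.move(['a.txt', 'b.txt'], 'docs')
    #       wt.move(['c.txt'], 'docs', after=True)  # c.txt already moved on disk
    #   finally:
    #       wt.unlock()
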
    def _determine_mv_mode(self, rename_entries, after=False):
        """Determines for each from-to pair if both inventory and working tree
        or only the inventory has to be changed.

        Also does basic plausability tests.
        """
        inv = self.inventory

        for rename_entry in rename_entries:
            # store to local variables for easier reference
            from_rel = rename_entry.from_rel
            from_id = rename_entry.from_id
            to_rel = rename_entry.to_rel
            to_id = inv.path2id(to_rel)
            only_change_inv = False

            # check the inventory for source and destination
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel, to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            if to_id is not None:
                raise errors.BzrMoveFailedError(from_rel, to_rel,
                    errors.AlreadyVersionedError(path=str(to_rel)))

            # try to determine the mode for rename (only change inv or change
            # inv and file system)
            if after:
                if not self.has_filename(to_rel):
                    raise errors.BzrMoveFailedError(from_id, to_rel,
                        errors.NoSuchFile(path=str(to_rel),
                        extra="New file has not been created yet"))
                only_change_inv = True
            elif not self.has_filename(from_rel) and self.has_filename(to_rel):
                only_change_inv = True
            elif self.has_filename(from_rel) and not self.has_filename(to_rel):
                only_change_inv = False
            elif (not self.case_sensitive
                  and from_rel.lower() == to_rel.lower()
                  and self.has_filename(from_rel)):
                only_change_inv = False
            else:
                # something is wrong, so lets determine what exactly
                if not self.has_filename(from_rel) and \
                   not self.has_filename(to_rel):
                    raise errors.BzrRenameFailedError(from_rel, to_rel,
                        errors.PathsDoNotExist(paths=(str(from_rel),
                        str(to_rel))))
                else:
                    raise errors.RenameFailedFilesExist(from_rel, to_rel)
            rename_entry.only_change_inv = only_change_inv
        return rename_entries

    def _move(self, rename_entries):
        """Moves a list of files.

        Depending on the value of the flag 'only_change_inv', the
        file will be moved on the file system or not.
        """
        inv = self.inventory
        moved = []

        for entry in rename_entries:
            try:
                self._move_entry(entry)
            except:
                self._rollback_move(moved)
                raise
            moved.append(entry)

    def _rollback_move(self, moved):
        """Try to rollback a previous move in case of an filesystem error."""
        inv = self.inventory
        for entry in moved:
            try:
                self._move_entry(WorkingTree._RenameEntry(
                    entry.to_rel, entry.from_id,
                    entry.to_tail, entry.to_parent_id, entry.from_rel,
                    entry.from_tail, entry.from_parent_id,
                    entry.only_change_inv))
            except errors.BzrMoveFailedError, e:
                raise errors.BzrMoveFailedError('', '', "Rollback failed."
                        " The working tree is in an inconsistent state."
                        " Please consider doing a 'bzr revert'."
                        " Error message is: %s" % e)

    def _move_entry(self, entry):
        inv = self.inventory
        from_rel_abs = self.abspath(entry.from_rel)
        to_rel_abs = self.abspath(entry.to_rel)
        if from_rel_abs == to_rel_abs:
            raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
                "Source and target are identical.")

        if not entry.only_change_inv:
            try:
                osutils.rename(from_rel_abs, to_rel_abs)
            except OSError, e:
                raise errors.BzrMoveFailedError(entry.from_rel,
                    entry.to_rel, e[1])
        inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)

    @needs_tree_write_lock
    def rename_one(self, from_rel, to_rel, after=False):
        """Rename one file.

        This can change the directory or the filename or both.

        rename_one has several 'modes' to work. First, it can rename a physical
        file and change the file_id. That is the normal mode. Second, it can
        only change the file_id without touching any physical file. This is
        the new mode introduced in version 0.15.

        rename_one uses the second mode if 'after == True' and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the second mode if 'after == False' and 'from_rel' is
        versioned but no longer in the working tree, and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the first mode if 'after == False' and 'from_rel' is
        versioned and present in the working tree, and 'to_rel' is not
        versioned and not present in the working tree.

        Everything else results in an error.
        """
        inv = self.inventory
        rename_entries = []

        # create rename entries and tuples
        from_tail = splitpath(from_rel)[-1]
        from_id = inv.path2id(from_rel)
        if from_id is None:
            # if file is missing in the inventory maybe it's in the basis_tree
            basis_tree = self.branch.basis_tree()
            from_id = basis_tree.path2id(from_rel)
            if from_id is None:
                raise errors.BzrRenameFailedError(from_rel, to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            # put entry back in the inventory so we can rename it
            from_entry = basis_tree.inventory[from_id].copy()
            inv.add(from_entry)
        else:
            from_entry = inv[from_id]
        from_parent_id = from_entry.parent_id
        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                     from_id=from_id,
                                     from_tail=from_tail,
                                     from_parent_id=from_parent_id,
                                     to_rel=to_rel, to_tail=to_tail,
                                     to_parent_id=to_dir_id)
        rename_entries.append(rename_entry)

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        # check if the target changed directory and if the target directory is
        # versioned
        if to_dir_id is None:
            raise errors.BzrMoveFailedError(from_rel, to_rel,
                errors.NotVersionedError(path=str(to_dir)))

        # all checks done. now we can continue with our actual work
        mutter('rename_one:\n'
               ' from_id   {%s}\n'
               ' from_rel: %r\n'
               ' to_rel:   %r\n'
               ' to_dir    %r\n'
               ' to_dir_id {%s}\n',
               from_id, from_rel, to_rel, to_dir, to_dir_id)

        self._move(rename_entries)
        self._write_inventory(inv)

    class _RenameEntry(object):
        def __init__(self, from_rel, from_id, from_tail, from_parent_id,
                     to_rel, to_tail, to_parent_id, only_change_inv=False):
            self.from_rel = from_rel
            self.from_id = from_id
            self.from_tail = from_tail
            self.from_parent_id = from_parent_id
            self.to_rel = to_rel
            self.to_tail = to_tail
            self.to_parent_id = to_parent_id
            self.only_change_inv = only_change_inv

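    # Illustrative sketch (assumption): renaming a single versioned file,
    # including recording a rename that has already happened on disk.
    #
    #   wt.lock_tree_write()
    #   try:
    #       wt.rename_one('old-name.txt', 'new-name.txt')
    #       wt.rename_one('moved.txt', 'dir/moved.txt', after=True)
    #   finally:
    #       wt.unlock()
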
    @needs_read_lock
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """

    def _change_last_revision(self, new_revision):
        """Template method part of set_last_revision to perform the change.

        This is used to allow WorkingTree3 instances to not affect branch
        when their last revision is set.
        """
        if _mod_revision.is_null(new_revision):
            self.branch.set_revision_history([])
            return False
        try:
            self.branch.generate_revision_history(new_revision)
        except errors.NoSuchRevision:
            # not present in the repo - dont try to set it deeper than the tip
            self.branch.set_revision_history([new_revision])
        return True

    def _write_basis_inventory(self, xml):
        """Write the basis inventory XML to the basis-inventory file"""
        path = self._basis_inventory_name()
        sio = StringIO(xml)
        self._transport.put_file(path, sio,
            mode=self.bzrdir._get_file_mode())

    def _create_basis_xml_from_inventory(self, revision_id, inventory):
        """Create the text that will be saved in basis-inventory"""
        inventory.revision_id = revision_id
        return xml7.serializer_v7.write_inventory_to_string(inventory)

    def _cache_basis_inventory(self, new_revision):
        """Cache new_revision as the basis inventory."""
        # TODO: this should allow the ready-to-use inventory to be passed in,
        # as commit already has that ready-to-use [while the format is the
        # same, that is].
        try:
            # this double handles the inventory - unpack and repack -
            # but is easier to understand. We can/should put a conditional
            # in here based on whether the inventory is in the latest format
            # - perhaps we should repack all inventories on a repository
            # upgrade ?
            # the fast path is to copy the raw xml from the repository. If the
            # xml contains 'revision_id="', then we assume the right
            # revision_id is set. We must check for this full string, because a
            # root node id can legitimately look like 'revision_id' but cannot
            # contain a '"'.
            xml = self.branch.repository._get_inventory_xml(new_revision)
            firstline = xml.split('\n', 1)[0]
            if (not 'revision_id="' in firstline or
                'format="7"' not in firstline):
                inv = self.branch.repository._serializer.read_inventory_from_string(
                    xml, new_revision)
                xml = self._create_basis_xml_from_inventory(new_revision, inv)
            self._write_basis_inventory(xml)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):
            pass

    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._transport.get_bytes(path)

    @needs_read_lock
    def read_working_inventory(self):
        """Read the working inventory.

        :raises errors.InventoryModified: read_working_inventory will fail
            when the current in memory inventory has been modified.
        """
        # conceptually this should be an implementation detail of the tree.
        # XXX: Deprecate this.
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        if self._inventory_is_modified:
            raise errors.InventoryModified(self)
        f = self._transport.get('inventory')
        try:
            result = self._deserialize(f)
        finally:
            f.close()
        self._set_inventory(result, dirty=False)
        return result

    @needs_tree_write_lock
    def remove(self, files, verbose=False, to_file=None, keep_files=True,
        force=False):
        """Remove nominated files from the working inventory.

        :files: File paths relative to the basedir.
        :keep_files: If true, the files will also be kept.
        :force: Delete files and directories, even if they are changed and
            even if the directories are not empty.
        """
        if isinstance(files, basestring):
            files = [files]

        inv_delta = []

        new_files = set()
        unknown_nested_files = set()
        if to_file is None:
            to_file = sys.stdout

        def recurse_directory_to_add_files(directory):
            # Recurse directory and add all files
            # so we can check if they have changed.
            for parent_info, file_infos in\
                self.walkdirs(directory):
                for relpath, basename, kind, lstat, fileid, kind in file_infos:
                    # Is it versioned or ignored?
                    if self.path2id(relpath) or self.is_ignored(relpath):
                        # Add nested content for deletion.
                        new_files.add(relpath)
                    else:
                        # Files which are not versioned and not ignored
                        # should be treated as unknown.
                        unknown_nested_files.add((relpath, None, kind))

        for filename in files:
            # Get file name into canonical form.
            abspath = self.abspath(filename)
            filename = self.relpath(abspath)
            if len(filename) > 0:
                new_files.add(filename)
                recurse_directory_to_add_files(filename)

        files = list(new_files)

        if len(files) == 0:
            return # nothing to do

        # Sort needed to first handle directory content before the directory
        files.sort(reverse=True)

        # Bail out if we are going to delete files we shouldn't
        if not keep_files and not force:
            has_changed_files = len(unknown_nested_files) > 0
            if not has_changed_files:
                for (file_id, path, content_change, versioned, parent_id, name,
                     kind, executable) in self.iter_changes(self.basis_tree(),
                         include_unchanged=True, require_versioned=False,
                         want_unversioned=True, specific_files=files):
                    if versioned == (False, False):
                        # The record is unknown ...
                        if not self.is_ignored(path[1]):
                            # ... but not ignored
                            has_changed_files = True
                            break
                    elif content_change and (kind[1] is not None):
                        # Versioned and changed, but not deleted
                        has_changed_files = True
                        break

            if has_changed_files:
                # Make delta show ALL applicable changes in error message.
                tree_delta = self.changes_from(self.basis_tree(),
                    require_versioned=False, want_unversioned=True,
                    specific_files=files)
                for unknown_file in unknown_nested_files:
                    if unknown_file not in tree_delta.unversioned:
                        tree_delta.unversioned.extend((unknown_file,))
                raise errors.BzrRemoveChangedFilesError(tree_delta)

        # Build inv_delta and delete files where applicable,
        # do this before any modifications to inventory.
        for f in files:
            fid = self.path2id(f)
            message = None
            if not fid:
                message = "%s is not versioned." % (f,)
            else:
                if verbose:
                    # having removed it, it must be either ignored or unknown
                    if self.is_ignored(f):
                        new_status = 'I'
                    else:
                        new_status = '?'
                    # XXX: Really should be a more abstract reporter interface
                    kind_ch = osutils.kind_marker(self.kind(fid))
                    to_file.write(new_status + ' ' + f + kind_ch + '\n')
                # Unversion the file.
                inv_delta.append((f, None, fid, None))
                message = "removed %s" % (f,)

            if not keep_files:
                abs_path = self.abspath(f)
                if osutils.lexists(abs_path):
                    if (osutils.isdir(abs_path) and
                        len(os.listdir(abs_path)) > 0):
                        if force:
                            osutils.rmtree(abs_path)
                        else:
                            message = "%s is not an empty directory "\
                                "and won't be deleted." % (f,)
                    else:
                        osutils.delete_any(abs_path)
                        message = "deleted %s" % (f,)
                elif message is not None:
                    # Only care if we haven't done anything yet.
                    message = "%s does not exist." % (f,)

            # Print only one message (if any) per file.
            if message is not None:
                note(message)
        self.apply_inventory_delta(inv_delta)

    @needs_tree_write_lock
    def revert(self, filenames=None, old_tree=None, backups=True,
               pb=None, report_changes=False):
        from bzrlib.conflicts import resolve
        if filenames == []:
            filenames = None
            symbol_versioning.warn('Using [] to revert all files is deprecated'
                ' as of bzr 0.91. Please use None (the default) instead.',
                DeprecationWarning, stacklevel=2)
        if old_tree is None:
            basis_tree = self.basis_tree()
            basis_tree.lock_read()
            old_tree = basis_tree
        else:
            basis_tree = None
        try:
            conflicts = transform.revert(self, old_tree, filenames, backups, pb,
                                         report_changes)
            if filenames is None and len(self.get_parent_ids()) > 1:
                parent_trees = []
                last_revision = self.last_revision()
                if last_revision != _mod_revision.NULL_REVISION:
                    if basis_tree is None:
                        basis_tree = self.basis_tree()
                        basis_tree.lock_read()
                    parent_trees.append((last_revision, basis_tree))
                self.set_parent_trees(parent_trees)
                resolve(self)
            else:
                resolve(self, filenames, ignore_misses=True, recursive=True)
        finally:
            if basis_tree is not None:
                basis_tree.unlock()
        return conflicts

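    # Illustrative sketch (assumption): reverting selected files back to the
    # basis tree, keeping backups of local modifications.
    #
    #   wt.lock_tree_write()
    #   try:
    #       conflicts = wt.revert(['hello.c'], backups=True)
    #   finally:
    #       wt.unlock()
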
    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree can supply revision_trees for the basis revision only
        because there is only one cached inventory in the bzr directory.
        """
        if revision_id == self.last_revision():
            try:
                xml = self.read_basis_inventory()
            except errors.NoSuchFile:
                pass
            else:
                try:
                    inv = xml7.serializer_v7.read_inventory_from_string(xml)
                    # dont use the repository revision_tree api because we want
                    # to supply the inventory.
                    if inv.revision_id == revision_id:
                        return revisiontree.RevisionTree(self.branch.repository,
                            inv, revision_id)
                except errors.BadInventoryFormat:
                    pass
        # raise if there was no inventory, or if we read the wrong inventory.
        raise errors.NoSuchRevisionInTree(self, revision_id)

2115
1295
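    # Hedged example (not part of the original source): callers wanting a
    # RevisionTree for anything other than the cached basis can fall back to
    # the repository when NoSuchRevisionInTree is raised:
    #
    #     try:
    #         rev_tree = wt.revision_tree(revision_id)
    #     except errors.NoSuchRevisionInTree:
    #         rev_tree = wt.branch.repository.revision_tree(revision_id)
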
    # XXX: This method should be deprecated in favour of taking in a proper
    # new Inventory object.
    @needs_tree_write_lock
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import (Inventory,
                                      InventoryDirectory,
                                      InventoryFile,
                                      InventoryLink)
        inv = Inventory(self.get_root_id())
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            if kind == 'directory':
                inv.add(InventoryDirectory(file_id, name, parent))
            elif kind == 'file':
                inv.add(InventoryFile(file_id, name, parent))
            elif kind == 'symlink':
                inv.add(InventoryLink(file_id, name, parent))
            else:
                raise errors.BzrError("unknown kind %r" % kind)
        self._write_inventory(inv)

    @needs_tree_write_lock
    def set_root_id(self, file_id):
        """Set the root id for this tree."""
        if file_id is None:
            raise ValueError(
                'WorkingTree.set_root_id with fileid=None')
        file_id = osutils.safe_file_id(file_id)
        self._set_root_id(file_id)

    def _set_root_id(self, file_id):
        """Set the root id for this tree, in a format specific manner.

        :param file_id: The file id to assign to the root. It must not be
            present in the current inventory or an error will occur. It must
            not be None, but rather a valid file id.
        """
        inv = self._inventory
        orig_root_id = inv.root.file_id
        # TODO: it might be nice to exit early if there was nothing
        # to do, saving us from triggering a sync on unlock.
        self._inventory_is_modified = True
        # we preserve the root inventory entry object, but
        # unlink it from the byid index
        del inv._byid[inv.root.file_id]
        inv.root.file_id = file_id
        # and link it into the index with the new changed id.
        inv._byid[inv.root.file_id] = inv.root
        # and finally update all children to reference the new id.
        # XXX: this should be safe to just look at the root.children
        # list, not the WHOLE INVENTORY.
        for fid in inv:
            entry = inv[fid]
            if entry.parent_id == orig_root_id:
                entry.parent_id = inv.root.file_id

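    # Illustrative sketch (not from the original source): set_root_id() is the
    # public entry point for the re-linking done in _set_root_id() above; a
    # caller holding a tree write lock might do (the file id is hypothetical):
    #
    #     wt.lock_tree_write()
    #     try:
    #         wt.set_root_id('tree_root-new')
    #     finally:
    #         wt.unlock()
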
    def unlock(self):
        """See Branch.unlock.

        WorkingTree locking just uses the Branch locking facilities.
        This is current because all working trees have an embedded branch
        within them. If, in the future, we were to make branch data shareable
        between multiple working trees, i.e. via shared storage, then we
        would probably want to lock both the local tree, and the branch.
        """
        raise NotImplementedError(self.unlock)

    def update(self, change_reporter=None, possible_transports=None,
               revision=None, old_tip=_marker):
        """Update a working tree along its branch.

        This will update the branch if it is bound too, which means we have
        multiple trees involved:

        - The new basis tree of the master.
        - The old basis tree of the branch.
        - The old basis tree of the working tree.
        - The current working tree state.

        Pathologically, all three may be different, and non-ancestors of each
        other. Conceptually we want to:

        - Preserve the wt.basis->wt.state changes
        - Transform the wt.basis to the new master basis.
        - Apply a merge of the old branch basis to get any 'local' changes from
          it into the tree.
        - Restore the wt.basis->wt.state changes.

        There isn't a single operation at the moment to do that, so we:

        - Merge current state -> basis tree of the master w.r.t. the old tree
          basis.
        - Do a 'normal' merge of the old branch basis if it is relevant.

        :param revision: The target revision to update to. Must be in the
            revision history.
        :param old_tip: If branch.update() has already been run, the value it
            returned (old tip of the branch or None). _marker is used
            otherwise.
        """
        if self.branch.get_bound_location() is not None:
            self.lock_write()
            update_branch = (old_tip is self._marker)
        else:
            self.lock_tree_write()
            update_branch = False
        try:
            if update_branch:
                old_tip = self.branch.update(possible_transports)
            else:
                if old_tip is self._marker:
                    old_tip = None
            return self._update_tree(old_tip, change_reporter, revision)
        finally:
            self.unlock()

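    # Usage sketch (illustrative, not part of the original source): update()
    # takes its own lock, so refreshing a bound checkout reduces to:
    #
    #     wt = WorkingTree.open('.')       # hypothetical local path
    #     nb_conflicts = wt.update()
    #     if nb_conflicts:
    #         trace.note('Conflicts during update; resolve and rerun.')
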
    @needs_tree_write_lock
    def _update_tree(self, old_tip=None, change_reporter=None, revision=None):
        """Update a tree to the master branch.

        :param old_tip: if supplied, the previous tip revision of the branch,
            before it was changed to the master branch's tip.
        """
        # here if old_tip is not None, it is the old tip of the branch before
        # it was updated from the master branch. This should become a pending
        # merge in the working tree to preserve the user's existing work. We
        # can't set that until we update the working tree's last revision to be
        # one from the new branch, because it will just get absorbed by the
        # parent de-duplication logic.
        #
        # We MUST save it even if an error occurs, because otherwise the user's
        # local work is unreferenced and will appear to have been lost.
        #
        nb_conflicts = 0
        try:
            last_rev = self.get_parent_ids()[0]
        except IndexError:
            last_rev = _mod_revision.NULL_REVISION
        if revision is None:
            revision = self.branch.last_revision()
        else:
            if revision not in self.branch.revision_history():
                raise errors.NoSuchRevision(self.branch, revision)

        old_tip = old_tip or _mod_revision.NULL_REVISION

        if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
            # the branch we are bound to was updated
            # merge those changes in first
            base_tree = self.basis_tree()
            other_tree = self.branch.repository.revision_tree(old_tip)
            nb_conflicts = merge.merge_inner(self.branch, other_tree,
                                             base_tree, this_tree=self,
                                             change_reporter=change_reporter)
            if nb_conflicts:
                self.add_parent_tree((old_tip, other_tree))
                trace.note('Rerun update after fixing the conflicts.')
                return nb_conflicts

        if last_rev != _mod_revision.ensure_null(revision):
            # the working tree is up to date with the branch
            # we can merge the specified revision from master
            to_tree = self.branch.repository.revision_tree(revision)
            to_root_id = to_tree.get_root_id()

            basis = self.basis_tree()
            basis.lock_read()
            try:
                if (basis.inventory.root is None
                    or basis.inventory.root.file_id != to_root_id):
                    self.set_root_id(to_root_id)
                    self.flush()
            finally:
                basis.unlock()

            # determine the branch point
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                last_rev)
            base_tree = self.branch.repository.revision_tree(base_rev_id)

            nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
                                             this_tree=self,
                                             change_reporter=change_reporter)
            self.set_last_revision(revision)
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to, to set the basis:
            parent_trees = [(revision, to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in ram already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if not _mod_revision.is_null(old_tip):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
        return nb_conflicts

    def _write_hashcache_if_dirty(self):
        """Write out the hashcache if it is dirty."""
        if self._hashcache.needs_write:
            try:
                self._hashcache.write()
            except OSError, e:
                if e.errno not in (errno.EPERM, errno.EACCES):
                    raise
                # TODO: jam 20061219 Should this be a warning? A single line
                #       warning might be sufficient to let the user know what
                #       is going on.
                mutter('Could not write hashcache for %s\nError: %s',
                       self._hashcache.cache_file_name(), e)

    @needs_tree_write_lock
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        self._set_inventory(inv, dirty=True)
        self.flush()

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    @needs_read_lock
    def conflicts(self):
        conflicts = _mod_conflicts.ConflictList()
        for conflicted in self._iter_conflicts():
            text = True
            try:
                if file_kind(self.abspath(conflicted)) != "file":
                    text = False
            except errors.NoSuchFile:
                text = False
            if text is True:
                for suffix in ('.THIS', '.OTHER'):
                    try:
                        if file_kind(self.abspath(conflicted+suffix)) != "file":
                            text = False
                    except errors.NoSuchFile:
                        text = False
                    if text == False:
                        break
            ctype = {True: 'text conflict', False: 'contents conflict'}[text]
            conflicts.append(_mod_conflicts.Conflict.factory(ctype,
                path=conflicted,
                file_id=self.path2id(conflicted)))
        return conflicts

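    # Hedged example (not from the original source): iterating the returned
    # ConflictList; each Conflict carries a typestring and a path:
    #
    #     for conflict in wt.conflicts():
    #         print conflict.typestring, conflict.path
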
    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, (lstat), file1_id,
                   file1_kind), ... ])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
        disk_top = self.abspath(prefix)
        if disk_top.endswith('/'):
            disk_top = disk_top[:-1]
        top_strip_len = len(disk_top) + 1
        inventory_iterator = self._walkdirs(prefix)
        disk_iterator = osutils.walkdirs(disk_top, prefix)
        try:
            current_disk = disk_iterator.next()
            disk_finished = False
        except OSError, e:
            if not (e.errno == errno.ENOENT or
                (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
                raise
            current_disk = None
            disk_finished = True
        try:
            current_inv = inventory_iterator.next()
            inv_finished = False
        except StopIteration:
            current_inv = None
            inv_finished = True
        while not inv_finished or not disk_finished:
            if current_disk:
                ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
                    cur_disk_dir_content) = current_disk
            else:
                ((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
                    cur_disk_dir_content) = ((None, None), None)
            if not disk_finished:
                # strip out .bzr dirs
                if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
                    len(cur_disk_dir_content) > 0):
                    # osutils.walkdirs can be made nicer -
                    # yield the path-from-prefix rather than the pathjoined
                    # value.
                    bzrdir_loc = bisect_left(cur_disk_dir_content,
                        ('.bzr', '.bzr'))
                    if (bzrdir_loc < len(cur_disk_dir_content)
                        and self.bzrdir.is_control_filename(
                            cur_disk_dir_content[bzrdir_loc][0])):
                        # we don't yield the contents of, or, .bzr itself.
                        del cur_disk_dir_content[bzrdir_loc]
            if inv_finished:
                # everything is unknown
                direction = 1
            elif disk_finished:
                # everything is missing
                direction = -1
            else:
                direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
            if direction > 0:
                # disk is before inventory - unknown
                dirblock = [(relpath, basename, kind, stat, None, None) for
                    relpath, basename, kind, stat, top_path in
                    cur_disk_dir_content]
                yield (cur_disk_dir_relpath, None), dirblock
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True
            elif direction < 0:
                # inventory is before disk - missing.
                dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
                    for relpath, basename, dkind, stat, fileid, kind in
                    current_inv[1]]
                yield (current_inv[0][0], current_inv[0][1]), dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
            else:
                # versioned present directory
                # merge the inventory and disk data together
                dirblock = []
                for relpath, subiterator in itertools.groupby(sorted(
                    current_inv[1] + cur_disk_dir_content,
                    key=operator.itemgetter(0)), operator.itemgetter(1)):
                    path_elements = list(subiterator)
                    if len(path_elements) == 2:
                        inv_row, disk_row = path_elements
                        # versioned, present file
                        dirblock.append((inv_row[0],
                            inv_row[1], disk_row[2],
                            disk_row[3], inv_row[4],
                            inv_row[5]))
                    elif len(path_elements[0]) == 5:
                        # unknown disk file
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], path_elements[0][2],
                            path_elements[0][3], None, None))
                    elif len(path_elements[0]) == 6:
                        # versioned, absent file.
                        dirblock.append((path_elements[0][0],
                            path_elements[0][1], 'unknown', None,
                            path_elements[0][4], path_elements[0][5]))
                    else:
                        raise NotImplementedError('unreachable code')
                yield current_inv[0], dirblock
                try:
                    current_inv = inventory_iterator.next()
                except StopIteration:
                    inv_finished = True
                try:
                    current_disk = disk_iterator.next()
                except StopIteration:
                    disk_finished = True

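    # Illustrative sketch (not part of the original source): consuming the
    # walkdirs() generator under a read lock; unversioned entries carry a
    # None file id, versioned-but-missing entries a None lstat:
    #
    #     wt.lock_read()
    #     try:
    #         for (dir_relpath, dir_id), dirblock in wt.walkdirs():
    #             for relpath, name, kind, lstat, file_id, v_kind in dirblock:
    #                 print relpath, kind, file_id
    #     finally:
    #         wt.unlock()
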
    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :prefix: is used as the directory to start with.
        returns a generator which yields items in the form:
                ((current_directory_path, fileid),
                 [(file1_path, file1_name, file1_kind, None, file1_id,
                   file1_kind), ... ])
        """
        _directory = 'directory'
        # get the root in the inventory
        inv = self.inventory
        top_id = inv.path2id(prefix)
        if top_id is None:
            pending = []
        else:
            pending = [(prefix, '', _directory, None, top_id, None)]
        while pending:
            dirblock = []
            currentdir = pending.pop()
            # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
            top_id = currentdir[4]
            if currentdir[0]:
                relroot = currentdir[0] + '/'
            else:
                relroot = ""
            # FIXME: stash the node in pending
            entry = inv[top_id]
            if entry.kind == 'directory':
                for name, child in entry.sorted_children():
                    dirblock.append((relroot + name, name, child.kind, None,
                        child.file_id, child.kind))
            yield (currentdir[0], entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append(dir)

    @needs_tree_write_lock
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts. The corresponding .THIS, .BASE and
        .OTHER files are deleted, as per 'resolve'.

        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved

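    # Usage sketch (illustrative, not from the original source): auto_resolve()
    # is what marker-based automatic conflict resolution builds on:
    #
    #     un_resolved, resolved = wt.auto_resolve()
    #     for conflict in resolved:
    #         trace.note('resolved: %s', conflict.path)
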
    def _check(self, references):
        """Check the tree for consistency.

        :param references: A dict with keys matching the items returned by
            self._get_check_refs(), and values from looking those keys up in
            the repository.
        """
        tree_basis = self.basis_tree()
        tree_basis.lock_read()
        try:
            repo_basis = references[('trees', self.last_revision())]
            if len(list(repo_basis.iter_changes(tree_basis))) > 0:
                raise errors.BzrCheckError(
                    "Mismatched basis inventory content.")
            self._validate()
        finally:
            tree_basis.unlock()

    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite, to give it a chance to detect
        corruption after actions have occurred. The default implementation is
        just a no-op.

        :return: None. An exception should be raised if there is an error.
        """
        return

    def _get_rules_searcher(self, default_searcher):
        """See Tree._get_rules_searcher."""
        if self._rules_searcher is None:
            self._rules_searcher = super(WorkingTree,
                self)._get_rules_searcher(default_searcher)
        return self._rules_searcher

    def get_shelf_manager(self):
        """Return the ShelfManager for this WorkingTree."""
        from bzrlib.shelf import ShelfManager
        return ShelfManager(self, self._transport)

class WorkingTree2(WorkingTree):
    """This is the Format 2 working tree.

    This was the first weave based working tree.
     - uses os locks for locking.
     - uses the branch last-revision.
    """

    def __init__(self, *args, **kwargs):
        super(WorkingTree2, self).__init__(*args, **kwargs)
        # WorkingTree2 has more of a constraint that self._inventory must
        # exist. Because this is an older format, we don't mind the overhead
        # caused by the extra computation here.
        #
        # Newer WorkingTrees should only have self._inventory set when they
        # have a read lock.
        if self._inventory is None:
            self.read_working_inventory()

    def _get_check_refs(self):
        """Return the references needed to perform a check of this tree."""
        return [('trees', self.last_revision())]

    def lock_tree_write(self):
        """See WorkingTree.lock_tree_write().

        In Format2 WorkingTrees we have a single lock for the branch and tree
        so lock_tree_write() degrades to lock_write().
        """
        self.branch.lock_write()
        try:
            return self._control_files.lock_write()
        except:
            self.branch.unlock()
            raise

    def unlock(self):
        # do non-implementation specific cleanup
        self._cleanup()

        # we share control files:
        if self._control_files._lock_count == 3:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()

        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()


class WorkingTree3(WorkingTree):
    """This is the Format 3 working tree.