    def abspath(self, filename):
        return pathjoin(self.basedir, filename)

    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                _mod_revision.NULL_REVISION)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        # at this point ?
        try:
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, its a problem, if its not
            # its a ghost.
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                _mod_revision.NULL_REVISION)

    def _cleanup(self):
        self._flush_ignore_list_cache()

    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If it is a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    @deprecated_method(zero_eight)
    def create(branch, directory):
        """Create a workingtree for branch at directory.

        If existing_directory already exists it must have a .bzr directory.
        If it does not exist, it will be created.

        This returns a new WorkingTree object for the new checkout.

        TODO FIXME RBC 20060124 when we have checkout formats in place this
        should accept an optional revisionid to checkout [and reject this if
        checking out into the same dir as a pre-checkout-aware branch format.]

        XXX: When BzrDir is present, these should be created through that
        interface instead.
        """
        warn('delete WorkingTree.create', stacklevel=3)
        transport = get_transport(directory)
        if branch.bzrdir.root_transport.base == transport.base:
            # same dir
            return branch.bzrdir.create_workingtree()
        # different directory,
        # create a branch reference
        # and now a working tree.
        raise NotImplementedError

    @deprecated_method(zero_eight)
    def create_standalone(directory):
        """Create a checkout and a branch and a repo at directory.

        Directory must exist and be empty.

        Please use BzrDir.create_standalone_workingtree instead.
        """
        return bzrdir.BzrDir.create_standalone_workingtree(directory)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))

    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

    def get_file_with_stat(self, file_id, path=None, filtered=True,
                           _fstat=osutils.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = _mod_filters.filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        my_file = self.get_file(file_id, path=path, filtered=filtered)
        try:
            return my_file.read()
        finally:
            my_file.close()

    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return _mod_filters.filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()

    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_bytes = self._transport.get_bytes('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in osutils.split_lines(merges_bytes):
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
        return parents

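    # Illustrative sketch (not part of the class API, names hypothetical): how
    # the parents list is assembled. The leftmost parent is the tree's last
    # revision; any further entries come from the 'pending-merges' control
    # file, one revision id per line.
    #
    #   wt = WorkingTree.open('.')
    #   parents = wt.get_parent_ids()
    #   # parents[0]  -> basis revision (absent if the tree has no commits)
    #   # parents[1:] -> pending merges recorded by a previous merge
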
    def get_root_id(self):
        """Return the id of this tree's root"""
        raise NotImplementedError(self.get_root_id)

    def _get_store_filename(self, file_id):
        ## XXX: badly named; this is not in the store at all
        return self.abspath(self.id2path(file_id))

    def clone(self, to_controldir, revision_id=None):
        """Duplicate this working tree into to_controldir, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see ControlDir.sprout()

        revision
            If not None, the cloned tree will have its last revision set to
            revision, and the difference between the source tree's last
            revision and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = to_controldir.create_workingtree()
        self.copy_content_into(result, revision_id)
        return result

    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
        else:
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            try:
                other_tree = self.revision_tree(revision_id)
            except errors.NoSuchRevision:
                other_tree = self.branch.repository.revision_tree(revision_id)

            merge.transform_tree(tree, other_tree)
            if revision_id == _mod_revision.NULL_REVISION:
                new_parents = []
            else:
                new_parents = [revision_id]
            tree.set_parent_ids(new_parents)

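    # Illustrative sketch (hedged, hypothetical usage): cloning keeps modified
    # files modified and drops ignored/unknown files. To start a new line of
    # development the docstring above points at sprout(); clone() and
    # copy_content_into() are the lower-level building blocks.
    #
    #   source = WorkingTree.open('project')
    #   new_dir = source.bzrdir.sprout('project-copy')
    #   new_tree = new_dir.open_workingtree()
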
    def commit(self, message=None, revprops=None, *args, **kwargs):
        # avoid circular imports
        from bzrlib.commit import Commit
        if revprops is None:
            revprops = {}
        if not 'branch-nick' in revprops:
            revprops['branch-nick'] = self.branch.nick
        # args for wt.commit start at message from the Commit.commit method,
        # but with branch a kwarg now, passing in args as is results in the
        # message being used for the branch
        args = (DEPRECATED_PARAMETER, message, ) + args
        Commit().commit(working_tree=self, revprops=revprops, *args, **kwargs)
        self._set_inventory(self.read_working_inventory())

    def id2abspath(self, file_id):
        return self.abspath(self.id2path(file_id))

    def _check_for_tree_references(self, iterator):
        """See if directories have become tree-references."""
        blocked_parent_ids = set()
        for path, ie in iterator:
            if ie.parent_id in blocked_parent_ids:
                # This entry was pruned because one of its parents became a
                # TreeReference. If this is a directory, mark it as blocked.
                if ie.kind == 'directory':
                    blocked_parent_ids.add(ie.file_id)
                continue
            if ie.kind == 'directory' and self._directory_is_tree_reference(path):
                # This InventoryDirectory needs to be a TreeReference
                ie = inventory.TreeReference(ie.file_id, ie.name, ie.parent_id)
                blocked_parent_ids.add(ie.file_id)
            yield path, ie

    def iter_entries_by_dir(self, specific_file_ids=None, yield_parents=False):
        """See Tree.iter_entries_by_dir()"""
        # The only trick here is that if we supports_tree_reference then we
        # need to detect if a directory becomes a tree-reference.
        iterator = super(WorkingTree, self).iter_entries_by_dir(
                specific_file_ids=specific_file_ids,
                yield_parents=yield_parents)
        if not self.supports_tree_reference():
            return iterator
        else:
            return self._check_for_tree_references(iterator)

    def has_id(self, file_id):
        # files that have been deleted are excluded
        inv = self._inventory
        if not inv.has_id(file_id):
            return False
        path = inv.id2path(file_id)
        return bzrlib.osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    __contains__ = has_id

    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        # XXX: this returns the on-disk size; it should probably return the
        # canonical size
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None

    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)

    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it is not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)

    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available, using
        this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

    def get_file_sha1(self, file_id):
        path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path)

    def is_executable(self, file_id):
        if not supports_executable():
            return self._inventory[file_id].executable
        else:
            path = self._inventory.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    def add(self, files, ids=None):
        """Make files versioned.

        Note that the command line normally calls smart_add instead,
        which can automatically recurse.

        This adds the files to the inventory, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not (yet) exist.

        TODO: Perhaps callback with the ids and paths as they're added.
        """
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, basestring):
            assert(ids is None or isinstance(ids, basestring))
            files = [files]
            if ids is not None:
                ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f, file_id in zip(files, ids):
            if self.is_control_filename(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            if len(splitpath(f)) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = normpath(self.abspath(f))

            try:
                kind = file_kind(fullpath)
            except OSError, e:
                if e.errno == errno.ENOENT:
                    raise NoSuchFile(fullpath)
                # maybe something better?
                raise BzrError('cannot add: not a regular file, symlink or directory: %s' % quotefn(f))

            if not InventoryEntry.versionable_kind(kind):
                raise BzrError('cannot add: not a versionable file ('
                               'i.e. regular file, symlink or directory): %s' % quotefn(f))

            if file_id is None:
                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))
        self._write_inventory(inv)

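    # Illustrative sketch of add() (hedged, not a doctest): paths are relative
    # to the tree root, and file ids may be supplied or autogenerated.
    #
    #   wt = WorkingTree.open('.')
    #   wt.add(['README'])                        # id generated by gen_file_id
    #   wt.add(['doc', 'doc/guide.txt'],
    #          ids=[None, 'guide-file-id'])       # mix generated and fixed ids
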
    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
        self.set_parent_ids(parents, allow_leftmost_as_ghost=True)

    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file.
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            return self._file_content_summary(path, stat_result)
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            target = osutils.readlink(abspath)
            return ('symlink', None, None, target)
        else:
            return (kind, None, None, None)

    def _file_content_summary(self, path, stat_result):
        size = stat_result.st_size
        executable = self._is_executable_from_path_and_stat(path, stat_result)
        # try for a stat cache lookup
        return ('file', size, executable, self._sha_from_stat(
            path, stat_result))

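    # Illustrative sketch of the 4-tuple returned by path_content_summary():
    # (kind, size, executable, sha1_or_link_target), with None for fields that
    # do not apply. Values below are hypothetical.
    #
    #   wt.path_content_summary('setup.py')   # ('file', 1234, False, None)
    #   wt.path_content_summary('bin')        # ('directory', None, None, None)
    #   wt.path_content_summary('missing')    # ('missing', None, None, None)
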
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self.bzrdir._get_file_mode())

    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        which are not ancestors of the first revision_id.
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            mutter('requested to set revision_ids = %s,'
                   ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids

    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)

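    # Illustrative sketch of the ancestry filtering above (hypothetical
    # revision ids): a requested merge parent that is already an ancestor of
    # another requested parent is dropped, because graph.heads() only keeps
    # revisions not dominated by another entry in the list.
    #
    #   wt.set_parent_ids(['rev-B', 'rev-A'])
    #   # if rev-A is an ancestor of rev-B, the recorded parents are ['rev-B']
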
    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.
        """
        p = []
        try:
            merges_file = self._control_files.get_utf8('pending-merges')
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            return p
        for l in merges_file.readlines():
            p.append(l.rstrip('\n'))
        return p

    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)

    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        """Set the merge modified hashes."""
        raise NotImplementedError(self.set_merge_modified)

    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None

    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
                          merge_type=None, force=False):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        merger = Merger(self.branch, this_tree=self)
        # check that there are no local alterations
        if not force and self.has_changes():
            raise errors.UncommittedChanges(self)
        if to_revision is None:
            to_revision = _mod_revision.ensure_null(branch.last_revision())
        merger.other_rev_id = to_revision
        if _mod_revision.is_null(merger.other_rev_id):
            raise errors.NoCommits(branch)
        self.branch.fetch(branch, last_revision=merger.other_rev_id)
        merger.other_basis = merger.other_rev_id
        merger.other_tree = self.branch.repository.revision_tree(
            merger.other_rev_id)
        merger.other_branch = branch
        if from_revision is None:
            merger.find_base()
        else:
            merger.set_base_revision(from_revision, branch)
        if merger.base_rev_id == merger.other_rev_id:
            raise errors.PointlessMerge
        merger.backup_files = False
        if merge_type is None:
            merger.merge_type = Merge3Merger
        else:
            merger.merge_type = merge_type
        merger.set_interesting_files(None)
        merger.show_base = False
        merger.reprocess = False
        conflicts = merger.do_merge()
        merger.set_pending()
        return conflicts

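    # Illustrative sketch (hedged, not a doctest) of merge_from_branch():
    #
    #   other = Branch.open('../feature-branch')
    #   conflicts = wt.merge_from_branch(other)    # merge up to other's tip
    #   if conflicts:
    #       pass  # resolve, then commit; the merged tip then appears as a
    #             # pending merge via wt.get_parent_ids()[1:]
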
    def _put_rio(self, filename, stanzas, header):
        my_file = rio_file(stanzas, header)
        self._control_files.put(filename, my_file)

    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        raise NotImplementedError(self.merge_modified)

    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id

    def get_symlink_target(self, file_id, path=None):
        if path is not None:
            abspath = self.abspath(path)
        else:
            abspath = self.id2abspath(file_id)
        target = osutils.readlink(abspath)
        return target

    def subsume(self, other_tree):
        raise NotImplementedError(self.subsume)

    def _setup_directory_is_tree_reference(self):
        if self._branch.repository._format.supports_tree_reference:
            self._directory_is_tree_reference = \
                self._directory_may_be_tree_reference
        else:
            self._directory_is_tree_reference = \
                self._directory_is_never_tree_reference

    def _directory_is_never_tree_reference(self, relpath):
        return False

    def _directory_may_be_tree_reference(self, relpath):
        # as a special case, if a directory contains control files then
        # it's a tree reference, except that the root of the tree is not
        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
        # TODO: We could ask all the control formats whether they
        # recognize this directory, but at the moment there's no cheap api
        # to do that.  Since we probably can only nest bzr checkouts and
        # they always use this name it's ok for now.  -- mbp 20060306
        #
        # FIXME: There is an unhandled case here of a subdirectory
        # containing .bzr but not a branch; that will probably blow up
        # when you try to commit it.  It might happen if there is a
        # checkout in a subdirectory.  This can be avoided by not adding it.

    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        raise NotImplementedError(self.extract)

    def flush(self):
        """Write the in memory meta data to disk."""
        raise NotImplementedError(self.flush)

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))

    def file_class(self, filename):
        if self.path2id(filename):
            return 'V'
        elif self.is_ignored(filename):
            return 'I'
        else:
            return '?'

    def list_files(self, include_root=False, from_dir=None, recursive=True):
        """List all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.
        This does not include files that have been deleted in this
        tree.  Skips the control directory.

        :param include_root: if True, return an entry for the root
        :param from_dir: start from this directory or None for the root
        :param recursive: whether to recurse into subdirectories or not
        """
        raise NotImplementedError(self.list_files)

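    # Illustrative sketch of the list_files() contract: each item is
    # (path, class, kind, file_id, entry) where class is 'V' (versioned),
    # '?' (unknown) or 'I' (ignored). Hypothetical loop, not a doctest.
    #
    #   for path, klass, kind, file_id, entry in wt.list_files():
    #       if klass == '?':
    #           print 'unknown:', path
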
    def move(self, from_paths, to_dir=None, after=False):
        """Rename files.

        to_dir must be known to the working tree.

        If to_dir exists and is a directory, the files are moved into
        it, keeping their old names.

        Note that to_dir is only the last component of the new name;
        this doesn't change the directory.

        For each entry in from_paths the move mode will be determined
        independently.

        The first mode moves the file in the filesystem and updates the
        working tree metadata. The second mode only updates the working tree
        metadata without touching the file on the filesystem.

        move uses the second mode if 'after == True' and the target is not
        versioned but present in the working tree.

        move uses the second mode if 'after == False' and the source is
        versioned but no longer in the working tree, and the target is not
        versioned but present in the working tree.

        move uses the first mode if 'after == False' and the source is
        versioned and present in the working tree, and the target is not
        versioned and not present in the working tree.

        Everything else results in an error.

        This returns a list of (from_path, to_path) pairs for each
        entry that is moved.
        """
        raise NotImplementedError(self.move)

    @needs_tree_write_lock
    def rename_one(self, from_rel, to_rel, after=False):
        """Rename one file.

        This can change the directory or the filename or both.

        rename_one has several 'modes' to work. First, it can rename a physical
        file and change the file_id. That is the normal mode. Second, it can
        only change the file_id without touching any physical file.

        rename_one uses the second mode if 'after == True' and 'to_rel' is
        either not versioned or newly added, and present in the working tree.

        rename_one uses the second mode if 'after == False' and 'from_rel' is
        versioned but no longer in the working tree, and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the first mode if 'after == False' and 'from_rel' is
        versioned and present in the working tree, and 'to_rel' is not
        versioned and not present in the working tree.

        Everything else results in an error.
        """
        raise NotImplementedError(self.rename_one)

    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
        return iter(
            [subp for subp in self.extras() if not self.is_ignored(subp)])

    def unversion(self, file_ids):
        """Remove the file ids in file_ids from the current versioned set.

        When a file_id is unversioned, all of its children are automatically
        unversioned.

        :param file_ids: The file ids to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        raise NotImplementedError(self.unversion)

    @deprecated_method(zero_eight)
    def iter_conflicts(self):
        """List all files in the tree that have text or content conflicts.
        DEPRECATED.  Use conflicts instead."""
        return self._iter_conflicts()

    def _iter_conflicts(self):
        conflicted = set()
        for path in (s[0] for s in self.list_files()):
            stem = get_conflicted_stem(path)
            if stem is None:
                continue
            if stem not in conflicted:
                conflicted.add(stem)
                yield stem

    @needs_write_lock
    def pull(self, source, overwrite=False, stop_revision=None,
             change_reporter=None, possible_transports=None, local=False,
             show_base=False):
        source.lock_read()
        try:
            old_revision_info = self.branch.last_revision_info()
            basis_tree = self.basis_tree()
            count = self.branch.pull(source, overwrite, stop_revision,
                                     possible_transports=possible_transports,
                                     local=local)
            new_revision_info = self.branch.last_revision_info()
            if new_revision_info != old_revision_info:
                repository = self.branch.repository
                if repository._format.fast_deltas:
                    parent_ids = self.get_parent_ids()
                    if parent_ids:
                        basis_id = parent_ids[0]
                        basis_tree = repository.revision_tree(basis_id)
                basis_tree.lock_read()
                try:
                    new_basis_tree = self.branch.basis_tree()
                    merge.merge_inner(
                                self.branch,
                                new_basis_tree,
                                basis_tree,
                                this_tree=self,
                                change_reporter=change_reporter,
                                show_base=show_base)
                    basis_root_id = basis_tree.get_root_id()
                    new_root_id = new_basis_tree.get_root_id()
                    if new_root_id is not None and basis_root_id != new_root_id:
                        self.set_root_id(new_root_id)
                finally:
                    basis_tree.unlock()
                # TODO - dedup parents list with things merged by pull ?
                # reuse the revisiontree we merged against to set the new
                # tree data.
                parent_trees = []
                if self.branch.last_revision() != _mod_revision.NULL_REVISION:
                    parent_trees.append(
                        (self.branch.last_revision(), new_basis_tree))
                # we have to pull the merge trees out again, because
                # merge_inner has set the ids. - this corner is not yet
                # layered well enough to prevent double handling.
                # XXX TODO: Fix the double handling: telling the tree about
                # the already known parent data is wasteful.
                merges = self.get_parent_ids()[1:]
                parent_trees.extend([
                    (parent, repository.revision_tree(parent)) for
                     parent in merges])
                self.set_parent_trees(parent_trees)
            return count
        finally:
            source.unlock()

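    # Illustrative sketch (hedged) of pull(): the branch is pulled first, then
    # the working tree is brought up to date by merging the old basis into the
    # new basis, so uncommitted local changes are preserved.
    #
    #   parent = Branch.open(wt.branch.get_parent())
    #   count = wt.pull(parent, overwrite=False)
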
    def put_file_bytes_non_atomic(self, file_id, bytes):
        """See MutableTree.put_file_bytes_non_atomic."""
        stream = file(self.id2abspath(file_id), 'wb')
        try:
            stream.write(bytes)
        finally:
            stream.close()

    def extras(self):
        """Yield all unversioned files in this WorkingTree.

        If there are any unversioned directories then only the directory is
        returned, not all its children. But if there are unversioned files
        under a versioned subdirectory, they are returned.

        Currently returned depth-first, sorted by name within directories.
        This is the same order used by 'osutils.walkdirs'.
        """
        raise NotImplementedError(self.extras)

    def ignored_files(self):
        """Yield list of PATH, IGNORE_PATTERN"""
        for subp in self.extras():
            pat = self.is_ignored(subp)
            if pat is not None:
                yield subp, pat

    def get_ignore_list(self):
        """Return list of ignore patterns.

        Cached in the Tree object after the first call.
        """
        ignoreset = getattr(self, '_ignoreset', None)
        if ignoreset is not None:
            return ignoreset

        ignore_globs = set()
        ignore_globs.update(ignores.get_runtime_ignores())
        ignore_globs.update(ignores.get_user_ignores())
        if self.has_filename(bzrlib.IGNORE_FILENAME):
            f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
            try:
                ignore_globs.update(ignores.parse_ignore_file(f))
            finally:
                f.close()
        self._ignoreset = ignore_globs
        return ignore_globs

    def _flush_ignore_list_cache(self):
        """Resets the cached ignore list to force a cache rebuild."""
        self._ignoreset = None
        self._ignoreglobster = None

    def is_ignored(self, filename):
        r"""Check whether the filename matches an ignore pattern.

        Patterns containing '/' or '\' need to match the whole path;
        others match against only the last component.  Patterns starting
        with '!' are ignore exceptions.  Exceptions take precedence
        over regular patterns and cause the filename to not be ignored.

        If the file is ignored, returns the pattern which caused it to
        be ignored, otherwise None.  So this can simply be used as a
        boolean if desired."""
        if getattr(self, '_ignoreglobster', None) is None:
            self._ignoreglobster = globbing.ExceptionGlobster(self.get_ignore_list())
        return self._ignoreglobster.match(filename)

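    # Illustrative sketch of the pattern semantics described above, using
    # hypothetical patterns in a .bzrignore file:
    #
    #   *.pyc            matches any basename ending in .pyc
    #   ./build          a leading ./ anchors the pattern at the tree root
    #   doc/*.html       contains '/': must match the whole relative path
    #   !doc/index.html  exception: overrides the doc/*.html pattern
    #
    #   wt.is_ignored('foo.pyc')         ->  '*.pyc'
    #   wt.is_ignored('doc/index.html')  ->  None   (the exception wins)
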
    def kind(self, file_id):
        return file_kind(self.id2abspath(file_id))

    def stored_kind(self, file_id):
        """See Tree.stored_kind"""
        raise NotImplementedError(self.stored_kind)

    def _comparison_data(self, entry, path):
        abspath = self.abspath(path)
        try:
            stat_value = os.lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                stat_value = None
                kind = None
                executable = False
            else:
                raise
        else:
            mode = stat_value.st_mode
            kind = osutils.file_kind_from_stat_mode(mode)
            if not self._supports_executable():
                executable = entry is not None and entry.executable
            else:
                executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
        return kind, executable, stat_value

    def _file_size(self, entry, stat_value):
        return stat_value.st_size

    def last_revision(self):
        """Return the last revision of the branch for this tree.

        This format tree does not support a separate marker for last-revision
        compared to the branch.

        See MutableTree.last_revision
        """
        return self._last_revision()

    def _last_revision(self):
        """helper for get_parent_ids."""
        return _mod_revision.ensure_null(self.branch.last_revision())

    def is_locked(self):
        """Check if this tree is locked."""
        raise NotImplementedError(self.is_locked)

    def lock_read(self):
        """Lock the tree for reading.

        This also locks the branch, and can be unlocked via self.unlock().

        :return: A bzrlib.lock.LogicalLockResult.
        """
        raise NotImplementedError(self.lock_read)

    def lock_tree_write(self):
        """See MutableTree.lock_tree_write, and WorkingTree.unlock.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        raise NotImplementedError(self.lock_tree_write)

    def lock_write(self):
        """See MutableTree.lock_write, and WorkingTree.unlock.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        raise NotImplementedError(self.lock_write)

    def get_physical_lock_status(self):
        raise NotImplementedError(self.get_physical_lock_status)

    def _basis_inventory_name(self):
        return 'basis-inventory'

    def set_last_revision(self, new_revision):
        """Change the last revision in the working tree."""
        raise NotImplementedError(self.set_last_revision)

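    # Illustrative sketch of the locking protocol implied by the methods above:
    # every lock_read()/lock_write()/lock_tree_write() call must be paired with
    # unlock(), typically via try/finally.
    #
    #   wt.lock_read()
    #   try:
    #       basis = wt.basis_tree()
    #       # ... read-only work ...
    #   finally:
    #       wt.unlock()
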
    def _change_last_revision(self, new_revision):
        """Template method part of set_last_revision to perform the change.

        This is used to allow WorkingTree3 instances to not affect branch
        when their last revision is set.
        """
        if _mod_revision.is_null(new_revision):
            self.branch.set_last_revision_info(0, new_revision)
            return False
        _mod_revision.check_not_reserved_id(new_revision)
        try:
            self.branch.generate_revision_history(new_revision)
        except errors.NoSuchRevision:
            # not present in the repo - dont try to set it deeper than the tip
            self.branch._set_revision_history([new_revision])
        return True

    def _cache_basis_inventory(self, new_revision):
        """Cache new_revision as the basis inventory."""
        try:
            # this double handles the inventory - unpack and repack -
            # but is easier to understand. We can/should put a conditional
            # in here based on whether the inventory is in the latest format
            # - perhaps we should repack all inventories on a repository
            # upgrade ?
            inv = self.branch.repository.get_inventory(new_revision)
            inv.revision_id = new_revision
            xml = bzrlib.xml5.serializer_v5.write_inventory_to_string(inv)

            path = self._basis_inventory_name()
            self._control_files.put_utf8(path, xml)
        except WeaveRevisionNotPresent:
            pass

    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._control_files.get_utf8(path).read()

    def read_working_inventory(self):
        """Read the working inventory."""
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        result = bzrlib.xml5.serializer_v5.read_inventory(
            self._control_files.get('inventory'))
        self._set_inventory(result)
        return result

    @needs_tree_write_lock
    def remove(self, files, verbose=False, to_file=None, keep_files=True,
        force=False):
        """Remove nominated files from the working tree metadata.

        :files: File paths relative to the basedir.
        :keep_files: If true, the files will also be kept.
        :force: Delete files and directories, even if they are changed and
            even if the directories are not empty.
        """
        if isinstance(files, basestring):
            files = [files]

        inv_delta = []

        all_files = set() # specified and nested files
        unknown_nested_files=set()
        if to_file is None:
            to_file = sys.stdout

        files_to_backup = []

        def recurse_directory_to_add_files(directory):
            # Recurse directory and add all files
            # so we can check if they have changed.
            for parent_info, file_infos in self.walkdirs(directory):
                for relpath, basename, kind, lstat, fileid, kind in file_infos:
                    # Is it versioned or ignored?
                    if self.path2id(relpath):
                        # Add nested content for deletion.
                        all_files.add(relpath)
                    else:
                        # Files which are not versioned
                        # should be treated as unknown.
                        files_to_backup.append(relpath)

        for filename in files:
            # Get file name into canonical form.
            abspath = self.abspath(filename)
            filename = self.relpath(abspath)
            if len(filename) > 0:
                all_files.add(filename)
                recurse_directory_to_add_files(filename)

        files = list(all_files)

        if len(files) == 0:
            return # nothing to do

        # Sort needed to first handle directory content before the directory
        files.sort(reverse=True)

        # Bail out if we are going to delete files we shouldn't
        if not keep_files and not force:
            for (file_id, path, content_change, versioned, parent_id, name,
                 kind, executable) in self.iter_changes(self.basis_tree(),
                     include_unchanged=True, require_versioned=False,
                     want_unversioned=True, specific_files=files):
                if versioned[0] == False:
                    # The record is unknown or newly added
                    files_to_backup.append(path[1])
                elif (content_change and (kind[1] is not None) and
                        osutils.is_inside_any(files, path[1])):
                    # Versioned and changed, but not deleted, and still
                    # in one of the dirs to be deleted.
                    files_to_backup.append(path[1])

        def backup(file_to_backup):
            backup_name = self.bzrdir._available_backup_name(file_to_backup)
            osutils.rename(abs_path, self.abspath(backup_name))
            return "removed %s (but kept a copy: %s)" % (file_to_backup,
                                                         backup_name)

        # Build inv_delta and delete files where applicable,
        # do this before any modifications to meta data.
        for f in files:
            fid = self.path2id(f)
            message = None
            if not fid:
                message = "%s is not versioned." % (f,)
            else:
                if verbose:
                    # having removed it, it must be either ignored or unknown
                    if self.is_ignored(f):
                        new_status = 'I'
                    else:
                        new_status = '?'
                    # XXX: Really should be a more abstract reporter interface
                    kind_ch = osutils.kind_marker(self.kind(fid))
                    to_file.write(new_status + '       ' + f + kind_ch + '\n')
                # Unversion file
                inv_delta.append((f, None, fid, None))
                message = "removed %s" % (f,)

            if not keep_files:
                abs_path = self.abspath(f)
                if osutils.lexists(abs_path):
                    if (osutils.isdir(abs_path) and
                        len(os.listdir(abs_path)) > 0):
                        if force:
                            osutils.rmtree(abs_path)
                            message = "deleted %s" % (f,)
                        else:
                            message = backup(f)
                    else:
                        if f in files_to_backup:
                            message = backup(f)
                        else:
                            osutils.delete_any(abs_path)
                            message = "deleted %s" % (f,)
                elif message is not None:
                    # Only care if we haven't done anything yet.
                    message = "%s does not exist." % (f,)

            # Print only one message (if any) per file.
            if message is not None:
                note(message)
        self.apply_inventory_delta(inv_delta)

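    # Illustrative sketch of remove() behaviour (hedged): unversioning always
    # happens; deletion depends on keep_files/force, and changed or unknown
    # files are backed up rather than deleted.
    #
    #   wt.remove(['old.txt'])                         # unversion, keep file
    #   wt.remove(['old.txt'], keep_files=False)       # delete (or back up)
    #   wt.remove(['build'], keep_files=False,
    #             force=True)                          # delete even if changed
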
    @needs_tree_write_lock
    def revert(self, filenames=None, old_tree=None, backups=True,
               pb=None, report_changes=False):
        from bzrlib.conflicts import resolve
        if old_tree is None:
            basis_tree = self.basis_tree()
            basis_tree.lock_read()
            old_tree = basis_tree
        else:
            basis_tree = None
        try:
            conflicts = transform.revert(self, old_tree, filenames, backups, pb,
                                         report_changes)
            if filenames is None and len(self.get_parent_ids()) > 1:
                parent_trees = []
                last_revision = self.last_revision()
                if last_revision != _mod_revision.NULL_REVISION:
                    if basis_tree is None:
                        basis_tree = self.basis_tree()
                        basis_tree.lock_read()
                    parent_trees.append((last_revision, basis_tree))
                self.set_parent_trees(parent_trees)
                resolve(self)
            else:
                resolve(self, filenames, ignore_misses=True, recursive=True)
        finally:
            if basis_tree is not None:
                basis_tree.unlock()
        return conflicts

    def store_uncommitted(self):
        """Store uncommitted changes from the tree in the branch."""
        target_tree = self.basis_tree()
        shelf_creator = shelf.ShelfCreator(self, target_tree)
        try:
            if not shelf_creator.shelve_all():
                return
            self.branch.store_uncommitted(shelf_creator)
            shelf_creator.transform()
        finally:
            shelf_creator.finalize()
        note('Uncommitted changes stored in branch "%s".', self.branch.nick)

    def restore_uncommitted(self):
        """Restore uncommitted changes from the branch into the tree."""
        unshelver = self.branch.get_unshelver(self)
        if unshelver is None:
            return
        try:
            merger = unshelver.make_merger()
            merger.ignore_zero = True
            merger.do_merge()
            self.branch.store_uncommitted(None)
        finally:
            unshelver.finalize()

    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree can supply revision_trees for the basis revision only
        because there is only one cached inventory in the bzr directory.
        """
        raise NotImplementedError(self.revision_tree)

    @needs_tree_write_lock
    def set_root_id(self, file_id):
        """Set the root id for this tree."""
        # for compatability
        if file_id is None:
            raise ValueError(
                'WorkingTree.set_root_id with fileid=None')
        file_id = osutils.safe_file_id(file_id)
        self._set_root_id(file_id)

    def _set_root_id(self, file_id):
        """Set the root id for this tree, in a format specific manner.

        :param file_id: The file id to assign to the root. It must not be
            present in the current inventory or an error will occur. It must
            not be None, but rather a valid file id.
        """
        raise NotImplementedError(self._set_root_id)

    def unlock(self):
        """See Branch.unlock.

        WorkingTree locking just uses the Branch locking facilities.
        This is current because all working trees have an embedded branch
        within them. IF in the future, we were to make branch data shareable
        between multiple working trees, i.e. via shared storage, then we
        would probably want to lock both the local tree, and the branch.
        """
        raise NotImplementedError(self.unlock)

    _marker = object()

    def update(self, change_reporter=None, possible_transports=None,
               revision=None, old_tip=_marker, show_base=False):
        """Update a working tree along its branch.

        This will update the branch if its bound too, which means we have
        multiple trees involved:

        - The new basis tree of the master.
        - The old basis tree of the branch.
        - The old basis tree of the working tree.
        - The current working tree state.

        Pathologically, all three may be different, and non-ancestors of each
        other.  Conceptually we want to:

        - Preserve the wt.basis->wt.state changes
        - Transform the wt.basis to the new master basis.
        - Apply a merge of the old branch basis to get any 'local' changes from
          it into the tree.
        - Restore the wt.basis->wt.state changes.

        There isn't a single operation at the moment to do that, so we:

        - Merge current state -> basis tree of the master w.r.t. the old tree
          basis.
        - Do a 'normal' merge of the old branch basis if it is relevant.

        :param revision: The target revision to update to. Must be in the
            revision history.
        :param old_tip: If branch.update() has already been run, the value it
            returned (old tip of the branch or None). _marker is used
            otherwise.
        """
        if self.branch.get_bound_location() is not None:
            self.lock_write()
            update_branch = (old_tip is self._marker)
        else:
            self.lock_tree_write()
            update_branch = False
        try:
            if update_branch:
                old_tip = self.branch.update(possible_transports)
            else:
                if old_tip is self._marker:
                    old_tip = None
            return self._update_tree(old_tip, change_reporter, revision, show_base)
        finally:
            self.unlock()

    @needs_tree_write_lock
    def _update_tree(self, old_tip=None, change_reporter=None, revision=None,
                     show_base=False):
        """Update a tree to the master branch.

        :param old_tip: if supplied, the previous tip revision the branch,
            before it was changed to the master branch's tip.
        """
        # here if old_tip is not None, it is the old tip of the branch before
        # it was updated from the master branch. This should become a pending
        # merge in the working tree to preserve the user existing work.  we
        # cant set that until we update the working trees last revision to be
        # one from the new branch, because it will just get absorbed by the
        # parent de-duplication logic.
        #
        # We MUST save it even if an error occurs, because otherwise the users
        # local work is unreferenced and will appear to have been lost.
        #
        nb_conflicts = 0
        try:
            last_rev = self.get_parent_ids()[0]
        except IndexError:
            last_rev = _mod_revision.NULL_REVISION
        if revision is None:
            revision = self.branch.last_revision()

        old_tip = old_tip or _mod_revision.NULL_REVISION

        if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
            # the branch we are bound to was updated
            # merge those changes in first
            base_tree  = self.basis_tree()
            other_tree = self.branch.repository.revision_tree(old_tip)
            nb_conflicts = merge.merge_inner(self.branch, other_tree,
                                             base_tree, this_tree=self,
                                             change_reporter=change_reporter,
                                             show_base=show_base)
            if nb_conflicts:
                self.add_parent_tree((old_tip, other_tree))
                note(gettext('Rerun update after fixing the conflicts.'))
                return nb_conflicts

        if last_rev != _mod_revision.ensure_null(revision):
            # the working tree is up to date with the branch
            # we can merge the specified revision from master
            to_tree = self.branch.repository.revision_tree(revision)
            to_root_id = to_tree.get_root_id()

            basis = self.basis_tree()
            basis.lock_read()
            try:
                if (basis.get_root_id() is None or basis.get_root_id() != to_root_id):
                    self.set_root_id(to_root_id)
                    self.flush()
            finally:
                basis.unlock()

            # determine the branch point
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                last_rev)
            base_tree = self.branch.repository.revision_tree(base_rev_id)

            nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
                                             this_tree=self,
                                             change_reporter=change_reporter,
                                             show_base=show_base)
            self.set_last_revision(revision)
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to to set the basis:
            parent_trees = [(revision, to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in ram already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if not _mod_revision.is_null(old_tip):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
        return nb_conflicts

    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    def conflicts(self):
        raise NotImplementedError(self.conflicts)

1573
    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        Returns a generator which yields items in the form:
        ((current_directory_path, fileid),
         [(file1_path, file1_name, file1_kind, (lstat), file1_id,
           file1_kind), ... ])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
1587
disk_top = self.abspath(prefix)
1588
if disk_top.endswith('/'):
1589
disk_top = disk_top[:-1]
1590
top_strip_len = len(disk_top) + 1
1591
inventory_iterator = self._walkdirs(prefix)
1592
disk_iterator = osutils.walkdirs(disk_top, prefix)
1594
current_disk = disk_iterator.next()
1595
disk_finished = False
1597
if not (e.errno == errno.ENOENT or
1598
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
1601
disk_finished = True
1603
current_inv = inventory_iterator.next()
1604
inv_finished = False
1605
except StopIteration:
1608
while not inv_finished or not disk_finished:
1610
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
1611
cur_disk_dir_content) = current_disk
1613
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
1614
cur_disk_dir_content) = ((None, None), None)
1615
if not disk_finished:
1616
# strip out .bzr dirs
1617
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
1618
len(cur_disk_dir_content) > 0):
1619
# osutils.walkdirs can be made nicer -
1620
# yield the path-from-prefix rather than the pathjoined
1622
bzrdir_loc = bisect_left(cur_disk_dir_content,
1624
if (bzrdir_loc < len(cur_disk_dir_content)
1625
and self.bzrdir.is_control_filename(
1626
cur_disk_dir_content[bzrdir_loc][0])):
1627
# we don't yield the contents of, or, .bzr itself.
1628
del cur_disk_dir_content[bzrdir_loc]
1630
# everything is unknown
1633
# everything is missing
1636
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
1638
# disk is before inventory - unknown
1639
dirblock = [(relpath, basename, kind, stat, None, None) for
1640
relpath, basename, kind, stat, top_path in
1641
cur_disk_dir_content]
1642
yield (cur_disk_dir_relpath, None), dirblock
1644
current_disk = disk_iterator.next()
1645
except StopIteration:
1646
disk_finished = True
1648
# inventory is before disk - missing.
1649
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
1650
for relpath, basename, dkind, stat, fileid, kind in
1652
yield (current_inv[0][0], current_inv[0][1]), dirblock
1654
current_inv = inventory_iterator.next()
1655
except StopIteration:
1658
# versioned present directory
1659
# merge the inventory and disk data together
1661
for relpath, subiterator in itertools.groupby(sorted(
1662
current_inv[1] + cur_disk_dir_content,
1663
key=operator.itemgetter(0)), operator.itemgetter(1)):
1664
path_elements = list(subiterator)
1665
if len(path_elements) == 2:
1666
inv_row, disk_row = path_elements
1667
# versioned, present file
1668
dirblock.append((inv_row[0],
1669
inv_row[1], disk_row[2],
1670
disk_row[3], inv_row[4],
1672
elif len(path_elements[0]) == 5:
1674
dirblock.append((path_elements[0][0],
1675
path_elements[0][1], path_elements[0][2],
1676
path_elements[0][3], None, None))
1677
elif len(path_elements[0]) == 6:
1678
# versioned, absent file.
1679
dirblock.append((path_elements[0][0],
1680
path_elements[0][1], 'unknown', None,
1681
path_elements[0][4], path_elements[0][5]))
1683
raise NotImplementedError('unreachable code')
1684
yield current_inv[0], dirblock
1686
current_inv = inventory_iterator.next()
1687
except StopIteration:
1690
current_disk = disk_iterator.next()
1691
except StopIteration:
1692
disk_finished = True
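    # Illustrative sketch (not part of the original module): consuming
    # walkdirs() while the tree is read-locked. WorkingTree.open() and the
    # path '.' are assumptions used only for this example.
    #
    #   tree = WorkingTree.open('.')
    #   tree.lock_read()
    #   try:
    #       for (dir_relpath, dir_file_id), dirblock in tree.walkdirs():
    #           for relpath, name, kind, lstat, file_id, vkind in dirblock:
    #               print relpath, kind, file_id
    #   finally:
    #       tree.unlock()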
1694
    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :param prefix: is used as the directory to start with.
        :returns: a generator which yields items in the form::

            ((current_directory_path, fileid),
             [(file1_path, file1_name, file1_kind, None, file1_id,
               file1_kind), ... ])
        """
        raise NotImplementedError(self._walkdirs)
1706
@needs_tree_write_lock
1707
def auto_resolve(self):
1708
"""Automatically resolve text conflicts according to contents.
1710
Only text conflicts are auto_resolvable. Files with no conflict markers
1711
are considered 'resolved', because bzr always puts conflict markers
1712
into files that have text conflicts. The corresponding .THIS .BASE and
1713
.OTHER files are deleted, as per 'resolve'.
1715
:return: a tuple of ConflictLists: (un_resolved, resolved).
1717
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
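    # Illustrative sketch (not part of the original module): letting the tree
    # resolve whatever text conflicts no longer contain conflict markers.
    # `tree` is assumed to be an open WorkingTree.
    #
    #   tree.lock_tree_write()
    #   try:
    #       un_resolved, resolved = tree.auto_resolve()
    #       print '%d resolved, %d still conflicted' % (len(resolved),
    #                                                   len(un_resolved))
    #   finally:
    #       tree.unlock()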
1739
    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite. To give it a chance to detect
        corruption after actions have occurred. The default implementation is
        just a no-op.

        :return: None. An exception should be raised if there is an error.
        """
        return
1750
def check_state(self):
1751
"""Check that the working state is/isn't valid."""
1752
raise NotImplementedError(self.check_state)
1754
def reset_state(self, revision_ids=None):
1755
"""Reset the state of the working tree.
1757
This does a hard-reset to a last-known-good state. This is a way to
1758
fix if something got corrupted (like the .bzr/checkout/dirstate file)
1760
raise NotImplementedError(self.reset_state)
1762
def _get_rules_searcher(self, default_searcher):
1763
"""See Tree._get_rules_searcher."""
1764
if self._rules_searcher is None:
1765
self._rules_searcher = super(WorkingTree,
1766
self)._get_rules_searcher(default_searcher)
1767
return self._rules_searcher
1769
def get_shelf_manager(self):
1770
"""Return the ShelfManager for this WorkingTree."""
1771
from bzrlib.shelf import ShelfManager
1772
return ShelfManager(self, self._transport)
1775
class InventoryWorkingTree(WorkingTree,
1776
bzrlib.mutabletree.MutableInventoryTree):
1777
"""Base class for working trees that are inventory-oriented.
1779
The inventory is held in the `Branch` working-inventory, and the
1780
files are in a directory on disk.
1782
It is possible for a `WorkingTree` to have a filename which is
1783
not listed in the Inventory and vice versa.
1786
def __init__(self, basedir='.',
1787
branch=DEPRECATED_PARAMETER,
1789
_control_files=None,
1793
"""Construct a InventoryWorkingTree instance. This is not a public API.
1795
:param branch: A branch to override probing for the branch.
1797
super(InventoryWorkingTree, self).__init__(basedir=basedir,
1798
branch=branch, _transport=_control_files._transport,
1799
_internal=_internal, _format=_format, _bzrdir=_bzrdir)
1801
self._control_files = _control_files
1802
self._detect_case_handling()
1804
if _inventory is None:
1805
# This will be acquired on lock_read() or lock_write()
1806
self._inventory_is_modified = False
1807
self._inventory = None
1809
# the caller of __init__ has provided an inventory,
1810
# we assume they know what they are doing - as it's only
1811
# the Format factory and creation methods that are
1812
# permitted to do this.
1813
self._set_inventory(_inventory, dirty=False)
1815
def _set_inventory(self, inv, dirty):
1816
"""Set the internal cached inventory.
1818
:param inv: The inventory to set.
1819
:param dirty: A boolean indicating whether the inventory is the same
1820
logical inventory as what's on disk. If True the inventory is not
1821
the same and should be written to disk or data will be lost, if
1822
False then the inventory is the same as that on disk and any
1823
serialisation would be unneeded overhead.
1825
self._inventory = inv
1826
self._inventory_is_modified = dirty
1828
def _detect_case_handling(self):
1829
wt_trans = self.bzrdir.get_workingtree_transport(None)
1831
wt_trans.stat(self._format.case_sensitive_filename)
1832
except errors.NoSuchFile:
1833
self.case_sensitive = True
1835
self.case_sensitive = False
1837
self._setup_directory_is_tree_reference()
1839
    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file,
            working=True)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)
1846
def break_lock(self):
1847
"""Break a lock if one is present from another instance.
1849
Uses the ui factory to ask for confirmation if the lock may be from
1852
This will probe the repository for its lock as well.
1854
self._control_files.break_lock()
1855
self.branch.break_lock()
1857
def is_locked(self):
1858
return self._control_files.is_locked()
1860
def _must_be_locked(self):
1861
if not self.is_locked():
1862
raise errors.ObjectNotLocked(self)
1864
def lock_read(self):
1865
"""Lock the tree for reading.
1867
This also locks the branch, and can be unlocked via self.unlock().
1869
:return: A bzrlib.lock.LogicalLockResult.
1871
if not self.is_locked():
1873
self.branch.lock_read()
1875
self._control_files.lock_read()
1876
return LogicalLockResult(self.unlock)
1878
self.branch.unlock()
1881
def lock_tree_write(self):
1882
"""See MutableTree.lock_tree_write, and WorkingTree.unlock.
1884
:return: A bzrlib.lock.LogicalLockResult.
1886
if not self.is_locked():
1888
self.branch.lock_read()
1890
self._control_files.lock_write()
1891
return LogicalLockResult(self.unlock)
1893
self.branch.unlock()
1896
def lock_write(self):
1897
"""See MutableTree.lock_write, and WorkingTree.unlock.
1899
:return: A bzrlib.lock.LogicalLockResult.
1901
if not self.is_locked():
1903
self.branch.lock_write()
1905
self._control_files.lock_write()
1906
return LogicalLockResult(self.unlock)
1908
self.branch.unlock()
1911
def get_physical_lock_status(self):
1912
return self._control_files.get_physical_lock_status()
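    # Illustrative sketch (not part of the original module): the usual
    # locking pattern. lock_read() locks the branch as well and returns a
    # LogicalLockResult whose unlock attribute releases both again.
    # `tree` is assumed to be an open working tree.
    #
    #   lock = tree.lock_read()
    #   try:
    #       pass  # read-only operations against the tree
    #   finally:
    #       lock.unlock()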
1914
@needs_tree_write_lock
1915
def _write_inventory(self, inv):
1916
"""Write inventory as the current inventory."""
1917
self._set_inventory(inv, dirty=True)
1920
1184
# XXX: This method should be deprecated in favour of taking in a proper
1921
1185
# new Inventory object.
1922
@needs_tree_write_lock
1923
1187
def set_inventory(self, new_inventory_list):
1924
1188
from bzrlib.inventory import (Inventory,
1925
1189
InventoryDirectory,
1928
1193
inv = Inventory(self.get_root_id())
1938
1203
elif kind == 'symlink':
1939
1204
inv.add(InventoryLink(file_id, name, parent))
1941
                raise errors.BzrError("unknown kind %r" % kind)
1942
1207
self._write_inventory(inv)
1944
def _write_basis_inventory(self, xml):
1945
"""Write the basis inventory XML to the basis-inventory file"""
1946
path = self._basis_inventory_name()
        sio = StringIO(xml)
        self._transport.put_file(path, sio,
1949
mode=self.bzrdir._get_file_mode())
1951
def _reset_data(self):
1952
"""Reset transient data that cannot be revalidated."""
1953
self._inventory_is_modified = False
1954
f = self._transport.get('inventory')
1956
result = self._deserialize(f)
1959
self._set_inventory(result, dirty=False)
1961
def _set_root_id(self, file_id):
1962
"""Set the root id for this tree, in a format specific manner.
1964
:param file_id: The file id to assign to the root. It must not be
1965
present in the current inventory or an error will occur. It must
1966
not be None, but rather a valid file id.
1968
inv = self._inventory
1210
def set_root_id(self, file_id):
1211
"""Set the root id for this tree."""
1212
inv = self.read_working_inventory()
1969
1213
orig_root_id = inv.root.file_id
1970
# TODO: it might be nice to exit early if there was nothing
1971
# to do, saving us from triggering a sync on unlock.
1972
self._inventory_is_modified = True
1973
# we preserve the root inventory entry object, but
# unlink it from the byid index
1975
1214
del inv._byid[inv.root.file_id]
1976
1215
inv.root.file_id = file_id
1977
# and link it into the index with the new changed id.
1978
1216
inv._byid[inv.root.file_id] = inv.root
1979
# and finally update all children to reference the new id.
1980
# XXX: this should be safe to just look at the root.children
1981
# list, not the WHOLE INVENTORY.
1982
1217
for fid in inv:
1983
1218
entry = inv[fid]
1984
1219
if entry.parent_id == orig_root_id:
1985
1220
entry.parent_id = inv.root.file_id
1987
@needs_tree_write_lock
1988
def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
1989
"""See MutableTree.set_parent_trees."""
1990
parent_ids = [rev for (rev, tree) in parents_list]
1991
for revision_id in parent_ids:
1992
_mod_revision.check_not_reserved_id(revision_id)
1994
self._check_parents_for_ghosts(parent_ids,
1995
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
1997
parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)
1999
if len(parent_ids) == 0:
2000
leftmost_parent_id = _mod_revision.NULL_REVISION
2001
leftmost_parent_tree = None
1221
self._write_inventory(inv)
1224
"""See Branch.unlock.
1226
WorkingTree locking just uses the Branch locking facilities.
1227
This is current because all working trees have an embedded branch
1228
within them. If, in the future, we were to make branch data shareable
1229
between multiple working trees, i.e. via shared storage, then we
1230
would probably want to lock both the local tree, and the branch.
1232
# FIXME: We want to write out the hashcache only when the last lock on
1233
# this working copy is released. Peeking at the lock count is a bit
1234
# of a nasty hack; probably it's better to have a transaction object,
1235
# which can do some finalization when it's either successfully or
1236
# unsuccessfully completed. (Denys's original patch did that.)
1237
# RBC 20060206 hooking into transaction will couple lock and transaction
# wrongly. Hooking into unlock on the control files object is fine though.
1240
# TODO: split this per format so there is no ugly if block
1241
if self._hashcache.needs_write and (
1242
# dedicated lock files
1243
self._control_files._lock_count==1 or
1245
(self._control_files is self.branch.control_files and
1246
self._control_files._lock_count==3)):
1247
self._hashcache.write()
1248
# reverse order of locking.
1250
return self._control_files.unlock()
1252
self.branch.unlock()
1256
"""Update a working tree along its branch.
1258
This will update the branch if it is bound too, which means we have multiple trees involved:
1259
The new basis tree of the master.
1260
The old basis tree of the branch.
1261
The old basis tree of the working tree.
1262
The current working tree state.
1263
Pathologically, all three may be different, and non-ancestors of each other.
1264
Conceptually we want to:
1265
Preserve the wt.basis->wt.state changes
1266
Transform the wt.basis to the new master basis.
1267
Apply a merge of the old branch basis to get any 'local' changes from it into the tree.
1268
Restore the wt.basis->wt.state changes.
1270
There isn't a single operation at the moment to do that, so we:
1271
Merge current state -> basis tree of the master w.r.t. the old tree basis.
1272
Do a 'normal' merge of the old branch basis if it is relevant.
1274
old_tip = self.branch.update()
1275
if old_tip is not None:
1276
self.add_pending_merge(old_tip)
1277
self.branch.lock_read()
1280
if self.last_revision() != self.branch.last_revision():
1281
# merge tree state up to new branch tip.
1282
basis = self.basis_tree()
1283
to_tree = self.branch.basis_tree()
1284
result += merge_inner(self.branch,
1288
self.set_last_revision(self.branch.last_revision())
1289
if old_tip and old_tip != self.last_revision():
1290
# our last revision was not the prior branch's last revision
1291
# and we have converted that last revision to a pending merge.
1292
# base is somewhere between the branch tip now
1293
# and the now pending merge
1294
from bzrlib.revision import common_ancestor
1296
base_rev_id = common_ancestor(self.branch.last_revision(),
1298
self.branch.repository)
1299
except errors.NoCommonAncestor:
1301
base_tree = self.branch.repository.revision_tree(base_rev_id)
1302
other_tree = self.branch.repository.revision_tree(old_tip)
1303
result += merge_inner(self.branch,
1309
self.branch.unlock()
1312
def _write_inventory(self, inv):
1313
"""Write inventory as the current inventory."""
1315
bzrlib.xml5.serializer_v5.write_inventory(inv, sio)
1317
self._control_files.put('inventory', sio)
1318
self._set_inventory(inv)
1319
mutter('wrote working inventory')
1321
def set_conflicts(self, arg):
1322
raise UnsupportedOperation(self.set_conflicts, self)
1325
def conflicts(self):
1326
conflicts = ConflictList()
1327
for conflicted in self._iter_conflicts():
1330
if file_kind(self.abspath(conflicted)) != "file":
1333
if e.errno == errno.ENOENT:
1338
for suffix in ('.THIS', '.OTHER'):
1340
kind = file_kind(self.abspath(conflicted+suffix))
1342
if e.errno == errno.ENOENT:
1350
ctype = {True: 'text conflict', False: 'contents conflict'}[text]
1351
conflicts.append(Conflict.factory(ctype, path=conflicted,
1352
file_id=self.path2id(conflicted)))
1356
class WorkingTree3(WorkingTree):
1357
"""This is the Format 3 working tree.
1359
This differs from the base WorkingTree by:
1360
- having its own file lock
1361
- having its own last-revision property.
1363
This is new in bzr 0.8
1367
def last_revision(self):
1368
"""See WorkingTree.last_revision."""
1370
return self._control_files.get_utf8('last-revision').read()
1374
def _change_last_revision(self, revision_id):
1375
"""See WorkingTree._change_last_revision."""
1376
if revision_id is None or revision_id == NULL_REVISION:
1378
self._control_files._transport.delete('last-revision')
1379
except errors.NoSuchFile:
2003
leftmost_parent_id, leftmost_parent_tree = parents_list[0]
2005
if self._change_last_revision(leftmost_parent_id):
2006
if leftmost_parent_tree is None:
2007
# If we don't have a tree, fall back to reading the
2008
# parent tree from the repository.
2009
self._cache_basis_inventory(leftmost_parent_id)
2011
inv = leftmost_parent_tree.root_inventory
2012
xml = self._create_basis_xml_from_inventory(
2013
leftmost_parent_id, inv)
2014
self._write_basis_inventory(xml)
2015
self._set_merges_from_parent_ids(parent_ids)
2017
def _cache_basis_inventory(self, new_revision):
2018
"""Cache new_revision as the basis inventory."""
2019
# TODO: this should allow the ready-to-use inventory to be passed in,
2020
# as commit already has that ready-to-use [while the format is the
2023
# this double handles the inventory - unpack and repack -
2024
# but is easier to understand. We can/should put a conditional
2025
# in here based on whether the inventory is in the latest format
2026
# - perhaps we should repack all inventories on a repository
2028
# the fast path is to copy the raw xml from the repository. If the
2029
# xml contains 'revision_id="', then we assume the right
2030
# revision_id is set. We must check for this full string, because a
2031
# root node id can legitimately look like 'revision_id' but cannot
2033
xml = self.branch.repository._get_inventory_xml(new_revision)
2034
firstline = xml.split('\n', 1)[0]
2035
if (not 'revision_id="' in firstline or
2036
'format="7"' not in firstline):
2037
inv = self.branch.repository._serializer.read_inventory_from_string(
2039
xml = self._create_basis_xml_from_inventory(new_revision, inv)
2040
self._write_basis_inventory(xml)
2041
except (errors.NoSuchRevision, errors.RevisionNotPresent):
2044
def _basis_inventory_name(self):
2045
return 'basis-inventory-cache'
2047
def _create_basis_xml_from_inventory(self, revision_id, inventory):
2048
"""Create the text that will be saved in basis-inventory"""
2049
inventory.revision_id = revision_id
2050
return xml7.serializer_v7.write_inventory_to_string(inventory)
2052
@needs_tree_write_lock
1384
self.branch.revision_history().index(revision_id)
1386
raise errors.NoSuchRevision(self.branch, revision_id)
1387
self._control_files.put_utf8('last-revision', revision_id)
2053
1391
    def set_conflicts(self, conflicts):
        self._put_rio('conflicts', conflicts.to_stanzas(),
                      CONFLICT_HEADER_1)
2057
@needs_tree_write_lock
2058
def add_conflicts(self, new_conflicts):
2059
conflict_set = set(self.conflicts())
2060
conflict_set.update(set(list(new_conflicts)))
2061
self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
2062
key=_mod_conflicts.Conflict.sort_key)))
2064
1395
@needs_read_lock
2065
1396
def conflicts(self):
2067
confile = self._transport.get('conflicts')
2068
except errors.NoSuchFile:
2069
return _mod_conflicts.ConflictList()
2072
if confile.next() != CONFLICT_HEADER_1 + '\n':
2073
raise errors.ConflictFormatError()
2074
except StopIteration:
2075
raise errors.ConflictFormatError()
2076
reader = _mod_rio.RioReader(confile)
2077
return _mod_conflicts.ConflictList.from_stanzas(reader)
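    # Illustrative sketch (not part of the original module): conflicts() above
    # reads RIO stanzas from the 'conflicts' control file, so listing them is
    # just iteration. `tree` is assumed to be an open working tree.
    #
    #   tree.lock_read()
    #   try:
    #       for conflict in tree.conflicts():
    #           print conflict.typestring, conflict.path
    #   finally:
    #       tree.unlock()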
2081
def read_basis_inventory(self):
2082
"""Read the cached basis inventory."""
2083
path = self._basis_inventory_name()
2084
return self._transport.get_bytes(path)
2087
def read_working_inventory(self):
2088
"""Read the working inventory.
2090
:raises errors.InventoryModified: read_working_inventory will fail
2091
when the current in memory inventory has been modified.
2093
# conceptually this should be an implementation detail of the tree.
2094
# XXX: Deprecate this.
2095
# ElementTree does its own conversion from UTF-8, so open in
2097
if self._inventory_is_modified:
2098
raise errors.InventoryModified(self)
2099
f = self._transport.get('inventory')
2101
result = self._deserialize(f)
2104
self._set_inventory(result, dirty=False)
2108
def get_root_id(self):
2109
"""Return the id of this trees root"""
2110
return self._inventory.root.file_id
2112
def has_id(self, file_id):
2113
# files that have been deleted are excluded
2114
inv, inv_file_id = self._unpack_file_id(file_id)
2115
if not inv.has_id(inv_file_id):
2117
path = inv.id2path(inv_file_id)
2118
return osutils.lexists(self.abspath(path))
2120
def has_or_had_id(self, file_id):
2121
if file_id == self.get_root_id():
1398
confile = self._control_files.get('conflicts')
1400
return ConflictList()
1402
if confile.next() != CONFLICT_HEADER_1 + '\n':
1403
raise ConflictFormatError()
1404
except StopIteration:
1405
raise ConflictFormatError()
1406
return ConflictList.from_stanzas(RioReader(confile))
1409
def get_conflicted_stem(path):
1410
for suffix in CONFLICT_SUFFIXES:
1411
if path.endswith(suffix):
1412
return path[:-len(suffix)]
1414
@deprecated_function(zero_eight)
1415
def is_control_file(filename):
1416
"""See WorkingTree.is_control_filename(filename)."""
1417
## FIXME: better check
1418
filename = normpath(filename)
1419
while filename != '':
1420
head, tail = os.path.split(filename)
1421
## mutter('check %r for control file' % ((head, tail),))
2123
inv, inv_file_id = self._unpack_file_id(file_id)
2124
return inv.has_id(inv_file_id)
2126
def all_file_ids(self):
2127
"""Iterate through file_ids for this tree.
2129
file_ids are in a WorkingTree if they are in the working inventory
2130
and the working file exists.
2133
for path, ie in self.iter_entries_by_dir():
2137
@needs_tree_write_lock
2138
def set_last_revision(self, new_revision):
2139
"""Change the last revision in the working tree."""
2140
if self._change_last_revision(new_revision):
2141
self._cache_basis_inventory(new_revision)
2143
def _get_check_refs(self):
2144
"""Return the references needed to perform a check of this tree.
2146
The default implementation returns no refs, and is only suitable for
2147
trees that have no local caching and can commit on ghosts at any time.
2149
:seealso: bzrlib.check for details about check_refs.
2154
def _check(self, references):
2155
"""Check the tree for consistency.
2157
:param references: A dict with keys matching the items returned by
2158
self._get_check_refs(), and values from looking those keys up in
2161
tree_basis = self.basis_tree()
2162
tree_basis.lock_read()
2164
repo_basis = references[('trees', self.last_revision())]
2165
if len(list(repo_basis.iter_changes(tree_basis))) > 0:
2166
raise errors.BzrCheckError(
2167
"Mismatched basis inventory content.")
2173
def check_state(self):
2174
"""Check that the working state is/isn't valid."""
2175
check_refs = self._get_check_refs()
2177
for ref in check_refs:
2180
refs[ref] = self.branch.repository.revision_tree(value)
2183
@needs_tree_write_lock
2184
def reset_state(self, revision_ids=None):
2185
"""Reset the state of the working tree.
2187
This does a hard-reset to a last-known-good state. This is a way to
2188
fix if something got corrupted (like the .bzr/checkout/dirstate file)
2190
if revision_ids is None:
2191
revision_ids = self.get_parent_ids()
2192
if not revision_ids:
2193
rt = self.branch.repository.revision_tree(
2194
_mod_revision.NULL_REVISION)
2196
rt = self.branch.repository.revision_tree(revision_ids[0])
2197
self._write_inventory(rt.root_inventory)
2198
self.set_parent_ids(revision_ids)
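    # Illustrative sketch (not part of the original module): hard-resetting a
    # tree whose inventory got corrupted back to its recorded parents.
    # `tree` is assumed to be an open working tree of this format.
    #
    #   tree.lock_tree_write()
    #   try:
    #       tree.reset_state()  # defaults to the current parent ids
    #   finally:
    #       tree.unlock()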

    def flush(self):
        """Write the in memory inventory to disk."""
2202
# TODO: Maybe this should only write on dirty ?
2203
if self._control_files._lock_mode != 'w':
2204
raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
2208
self._transport.put_file('inventory', sio,
2209
mode=self.bzrdir._get_file_mode())
2210
self._inventory_is_modified = False
2212
def get_file_mtime(self, file_id, path=None):
2213
"""See Tree.get_file_mtime."""
2215
path = self.id2path(file_id)
2217
return os.lstat(self.abspath(path)).st_mtime
2219
if e.errno == errno.ENOENT:
2220
raise errors.FileTimestampUnavailable(path)
2223
def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
2224
inv, file_id = self._path2inv_file_id(path)
2226
# For unversioned files on win32, we just assume they are not
2229
return inv[file_id].executable
2231
def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
2232
mode = stat_result.st_mode
2233
return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
2235
def is_executable(self, file_id, path=None):
2236
if not self._supports_executable():
2237
inv, inv_file_id = self._unpack_file_id(file_id)
2238
return inv[inv_file_id].executable
2241
path = self.id2path(file_id)
2242
mode = os.lstat(self.abspath(path)).st_mode
2243
return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
2245
def _is_executable_from_path_and_stat(self, path, stat_result):
2246
if not self._supports_executable():
2247
return self._is_executable_from_path_and_stat_from_basis(path, stat_result)
2249
return self._is_executable_from_path_and_stat_from_stat(path, stat_result)
2251
@needs_tree_write_lock
2252
def _add(self, files, ids, kinds):
2253
"""See MutableTree._add."""
2254
# TODO: Re-adding a file that is removed in the working copy
2255
# should probably put it back with the previous ID.
2256
# the read and write working inventory should not occur in this
2257
# function - they should be part of lock_write and unlock.
2258
# FIXME: nested trees
2259
inv = self.root_inventory
2260
for f, file_id, kind in zip(files, ids, kinds):
2262
inv.add_path(f, kind=kind)
2264
inv.add_path(f, kind=kind, file_id=file_id)
2265
self._inventory_is_modified = True
2267
def revision_tree(self, revision_id):
2268
"""See WorkingTree.revision_id."""
2269
if revision_id == self.last_revision():
2271
xml = self.read_basis_inventory()
2272
except errors.NoSuchFile:
2276
inv = xml7.serializer_v7.read_inventory_from_string(xml)
2277
# don't use the repository revision_tree api because we want
2278
# to supply the inventory.
2279
if inv.revision_id == revision_id:
2280
return revisiontree.InventoryRevisionTree(
2281
self.branch.repository, inv, revision_id)
2282
except errors.BadInventoryFormat:
2284
# raise if there was no inventory, or if we read the wrong inventory.
2285
raise errors.NoSuchRevisionInTree(self, revision_id)
2288
def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
2289
"""See Tree.annotate_iter
2291
This implementation will use the basis tree implementation if possible.
2292
Lines not in the basis are attributed to CURRENT_REVISION
2294
If there are pending merges, lines added by those merges will be
2295
incorrectly attributed to CURRENT_REVISION (but after committing, the
2296
attribution will be correct).
2298
maybe_file_parent_keys = []
2299
for parent_id in self.get_parent_ids():
2301
parent_tree = self.revision_tree(parent_id)
2302
except errors.NoSuchRevisionInTree:
2303
parent_tree = self.branch.repository.revision_tree(parent_id)
2304
parent_tree.lock_read()
2307
kind = parent_tree.kind(file_id)
2308
except errors.NoSuchId:
2311
# Note: this is slightly unnecessary, because symlinks and
2312
# directories have a "text" which is the empty text, and we
2313
# know that won't mess up annotations. But it seems cleaner
2316
file_id, parent_tree.get_file_revision(file_id))
2317
if parent_text_key not in maybe_file_parent_keys:
2318
maybe_file_parent_keys.append(parent_text_key)
2320
parent_tree.unlock()
2321
graph = _mod_graph.Graph(self.branch.repository.texts)
2322
heads = graph.heads(maybe_file_parent_keys)
2323
file_parent_keys = []
2324
for key in maybe_file_parent_keys:
2326
file_parent_keys.append(key)
2328
# Now we have the parents of this content
2329
annotator = self.branch.repository.texts.get_annotator()
2330
text = self.get_file_text(file_id)
2331
this_key =(file_id, default_revision)
2332
annotator.add_special_text(this_key, file_parent_keys, text)
2333
        annotations = [(key[-1], line)
                       for key, line in annotator.annotate_flat(this_key)]
        return annotations
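    # Illustrative sketch (not part of the original module): annotating a
    # working tree file; uncommitted lines come back attributed to
    # CURRENT_REVISION. `tree` and `file_id` are assumptions.
    #
    #   tree.lock_read()
    #   try:
    #       for revision_id, line in tree.annotate_iter(file_id):
    #           print revision_id, line,
    #   finally:
    #       tree.unlock()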
2337
def _put_rio(self, filename, stanzas, header):
2338
self._must_be_locked()
2339
my_file = _mod_rio.rio_file(stanzas, header)
2340
self._transport.put_file(filename, my_file,
2341
mode=self.bzrdir._get_file_mode())
2343
@needs_tree_write_lock
2344
def set_merge_modified(self, modified_hashes):
2346
for file_id, hash in modified_hashes.iteritems():
2347
yield _mod_rio.Stanza(file_id=file_id.decode('utf8'),
2349
self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
2352
def merge_modified(self):
2353
"""Return a dictionary of files modified by a merge.
2355
The list is initialized by WorkingTree.set_merge_modified, which is
2356
typically called after we make some automatic updates to the tree
2359
This returns a map of file_id->sha1, containing only files which are
2360
still in the working inventory and have that text hash.
2363
hashfile = self._transport.get('merge-hashes')
2364
except errors.NoSuchFile:
2369
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
2370
raise errors.MergeModifiedFormatError()
2371
except StopIteration:
2372
raise errors.MergeModifiedFormatError()
2373
for s in _mod_rio.RioReader(hashfile):
2374
# RioReader reads in Unicode, so convert file_ids back to utf8
2375
file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
2376
if not self.has_id(file_id):
2378
text_hash = s.get("hash")
2379
                if text_hash == self.get_file_sha1(file_id):
                    merge_hashes[file_id] = text_hash
            return merge_hashes
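    # Illustrative sketch (not part of the original module): recording and
    # reading back merge-modified hashes. `tree` and `file_id` are
    # assumptions for the example only.
    #
    #   tree.lock_tree_write()
    #   try:
    #       tree.set_merge_modified({file_id: tree.get_file_sha1(file_id)})
    #       hashes = tree.merge_modified()  # {file_id: sha1, ...}
    #   finally:
    #       tree.unlock()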
2386
def subsume(self, other_tree):
2387
def add_children(inventory, entry):
2388
for child_entry in entry.children.values():
2389
inventory._byid[child_entry.file_id] = child_entry
2390
if child_entry.kind == 'directory':
2391
add_children(inventory, child_entry)
2392
if other_tree.get_root_id() == self.get_root_id():
2393
raise errors.BadSubsumeSource(self, other_tree,
2394
'Trees have the same root')
2396
other_tree_path = self.relpath(other_tree.basedir)
2397
except errors.PathNotChild:
2398
raise errors.BadSubsumeSource(self, other_tree,
2399
'Tree is not contained by the other')
2400
new_root_parent = self.path2id(osutils.dirname(other_tree_path))
2401
if new_root_parent is None:
2402
raise errors.BadSubsumeSource(self, other_tree,
2403
'Parent directory is not versioned.')
2404
# We need to ensure that the result of a fetch will have a
2405
# versionedfile for the other_tree root, and only fetching into
2406
# RepositoryKnit2 guarantees that.
2407
if not self.branch.repository.supports_rich_root():
2408
raise errors.SubsumeTargetNeedsUpgrade(other_tree)
2409
other_tree.lock_tree_write()
2411
new_parents = other_tree.get_parent_ids()
2412
other_root = other_tree.root_inventory.root
2413
other_root.parent_id = new_root_parent
2414
other_root.name = osutils.basename(other_tree_path)
2415
self.root_inventory.add(other_root)
2416
add_children(self.root_inventory, other_root)
2417
self._write_inventory(self.root_inventory)
2418
# normally we don't want to fetch whole repositories, but i think
2419
# here we really do want to consolidate the whole thing.
2420
for parent_id in other_tree.get_parent_ids():
2421
self.branch.fetch(other_tree.branch, parent_id)
2422
self.add_parent_tree_id(parent_id)
2425
other_tree.bzrdir.retire_bzrdir()
2427
@needs_tree_write_lock
2428
def extract(self, file_id, format=None):
2429
"""Extract a subtree from this tree.
2431
A new branch will be created, relative to the path for this tree.
2435
segments = osutils.splitpath(path)
2436
transport = self.branch.bzrdir.root_transport
2437
for name in segments:
2438
transport = transport.clone(name)
2439
transport.ensure_base()
2442
sub_path = self.id2path(file_id)
2443
branch_transport = mkdirs(sub_path)
2445
format = self.bzrdir.cloning_metadir()
2446
branch_transport.ensure_base()
2447
branch_bzrdir = format.initialize_on_transport(branch_transport)
2449
repo = branch_bzrdir.find_repository()
2450
except errors.NoRepositoryPresent:
2451
repo = branch_bzrdir.create_repository()
2452
if not repo.supports_rich_root():
2453
raise errors.RootNotRich()
2454
new_branch = branch_bzrdir.create_branch()
2455
new_branch.pull(self.branch)
2456
for parent_id in self.get_parent_ids():
2457
new_branch.fetch(self.branch, parent_id)
2458
tree_transport = self.bzrdir.root_transport.clone(sub_path)
2459
if tree_transport.base != branch_transport.base:
2460
tree_bzrdir = format.initialize_on_transport(tree_transport)
2461
tree_bzrdir.set_branch_reference(new_branch)
2463
tree_bzrdir = branch_bzrdir
2464
wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
2465
wt.set_parent_ids(self.get_parent_ids())
2466
# FIXME: Support nested trees
2467
my_inv = self.root_inventory
2468
child_inv = inventory.Inventory(root_id=None)
2469
new_root = my_inv[file_id]
2470
my_inv.remove_recursive_id(file_id)
2471
new_root.parent_id = None
2472
child_inv.add(new_root)
2473
self._write_inventory(my_inv)
2474
wt._write_inventory(child_inv)
2477
def list_files(self, include_root=False, from_dir=None, recursive=True):
2478
"""List all files as (path, class, kind, id, entry).
2480
Lists, but does not descend into unversioned directories.
2481
This does not include files that have been deleted in this
2482
tree. Skips the control directory.
2484
:param include_root: if True, return an entry for the root
2485
:param from_dir: start from this directory or None for the root
2486
:param recursive: whether to recurse into subdirectories or not
2488
# list_files is an iterator, so @needs_read_lock doesn't work properly
2489
# with it. So callers should be careful to always read_lock the tree.
2490
if not self.is_locked():
2491
raise errors.ObjectNotLocked(self)
2493
if from_dir is None and include_root is True:
2494
yield ('', 'V', 'directory', self.get_root_id(), self.root_inventory.root)
2495
# Convert these into local objects to save lookup times
2496
pathjoin = osutils.pathjoin
2497
file_kind = self._kind
2499
# transport.base ends in a slash, we want the piece
2500
# between the last two slashes
2501
transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]
2503
fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
2505
# directory file_id, relative path, absolute path, reverse sorted children
2506
if from_dir is not None:
2507
inv, from_dir_id = self._path2inv_file_id(from_dir)
2508
if from_dir_id is None:
2509
# Directory not versioned
2511
from_dir_abspath = pathjoin(self.basedir, from_dir)
2513
inv = self.root_inventory
2514
from_dir_id = inv.root.file_id
2515
from_dir_abspath = self.basedir
2516
children = os.listdir(from_dir_abspath)
2518
# jam 20060527 The kernel sized tree seems equivalent whether we
2519
# use a deque and popleft to keep them sorted, or if we use a plain
2520
# list and just reverse() them.
2521
children = collections.deque(children)
2522
stack = [(from_dir_id, u'', from_dir_abspath, children)]
2524
from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
2527
f = children.popleft()
2528
## TODO: If we find a subdirectory with its own .bzr
2529
## directory, then that is a separate tree and we
2530
## should exclude it.
2532
# the bzrdir for this tree
2533
if transport_base_dir == f:
2536
# we know that from_dir_relpath and from_dir_abspath never end in a slash
2537
# and 'f' doesn't begin with one, we can do a string op, rather
2538
# than the checks of pathjoin(), all relative paths will have an extra slash
2540
fp = from_dir_relpath + '/' + f
2543
fap = from_dir_abspath + '/' + f
2545
dir_ie = inv[from_dir_id]
2546
if dir_ie.kind == 'directory':
2547
f_ie = dir_ie.children.get(f)
2552
elif self.is_ignored(fp[1:]):
2555
# we may not have found this file, because of a unicode
2556
# issue, or because the directory was actually a symlink.
2557
f_norm, can_access = osutils.normalized_filename(f)
2558
if f == f_norm or not can_access:
2559
# No change, so treat this file normally
2562
# this file can be accessed by a normalized path
2563
# check again if it is versioned
2564
# these lines are repeated here for performance
2566
fp = from_dir_relpath + '/' + f
2567
fap = from_dir_abspath + '/' + f
2568
f_ie = inv.get_child(from_dir_id, f)
2571
elif self.is_ignored(fp[1:]):
2578
# make a last minute entry
2580
yield fp[1:], c, fk, f_ie.file_id, f_ie
2583
yield fp[1:], c, fk, None, fk_entries[fk]()
2585
yield fp[1:], c, fk, None, TreeEntry()
2588
if fk != 'directory':
2591
# But do this child first if recursing down
2593
new_children = os.listdir(fap)
2595
new_children = collections.deque(new_children)
2596
stack.append((f_ie.file_id, fp, fap, new_children))
2597
# Break out of inner loop,
2598
# so that we start outer loop with child
2601
# if we finished all children, pop it off the stack
2604
@needs_tree_write_lock
2605
def move(self, from_paths, to_dir=None, after=False):
2608
to_dir must exist in the inventory.
2610
If to_dir exists and is a directory, the files are moved into
2611
it, keeping their old names.
2613
Note that to_dir is only the last component of the new name;
2614
this doesn't change the directory.
2616
For each entry in from_paths the move mode will be determined
2619
The first mode moves the file in the filesystem and updates the
2620
inventory. The second mode only updates the inventory without
2621
touching the file on the filesystem.
2623
move uses the second mode if 'after == True' and the target is
2624
either not versioned or newly added, and present in the working tree.
2626
move uses the second mode if 'after == False' and the source is
2627
versioned but no longer in the working tree, and the target is not
2628
versioned but present in the working tree.
2630
move uses the first mode if 'after == False' and the source is
2631
versioned and present in the working tree, and the target is not
2632
versioned and not present in the working tree.
2634
Everything else results in an error.
2636
This returns a list of (from_path, to_path) pairs for each
2637
entry that is moved.
2642
invs_to_write = set()
2644
# check for deprecated use of signature
2646
raise TypeError('You must supply a target directory')
2647
# check destination directory
2648
if isinstance(from_paths, basestring):
2650
to_abs = self.abspath(to_dir)
2651
if not isdir(to_abs):
2652
raise errors.BzrMoveFailedError('',to_dir,
2653
errors.NotADirectory(to_abs))
2654
if not self.has_filename(to_dir):
2655
raise errors.BzrMoveFailedError('',to_dir,
2656
errors.NotInWorkingDirectory(to_dir))
2657
to_inv, to_dir_id = self._path2inv_file_id(to_dir)
2658
if to_dir_id is None:
2659
raise errors.BzrMoveFailedError('',to_dir,
2660
errors.NotVersionedError(path=to_dir))
2662
to_dir_ie = to_inv[to_dir_id]
2663
if to_dir_ie.kind != 'directory':
2664
raise errors.BzrMoveFailedError('',to_dir,
2665
errors.NotADirectory(to_abs))
2667
# create rename entries and tuples
2668
for from_rel in from_paths:
2669
from_tail = splitpath(from_rel)[-1]
2670
from_inv, from_id = self._path2inv_file_id(from_rel)
2672
raise errors.BzrMoveFailedError(from_rel,to_dir,
2673
errors.NotVersionedError(path=from_rel))
2675
from_entry = from_inv[from_id]
2676
from_parent_id = from_entry.parent_id
2677
to_rel = pathjoin(to_dir, from_tail)
2678
rename_entry = InventoryWorkingTree._RenameEntry(
2681
from_tail=from_tail,
2682
from_parent_id=from_parent_id,
2683
to_rel=to_rel, to_tail=from_tail,
2684
to_parent_id=to_dir_id)
2685
rename_entries.append(rename_entry)
2686
rename_tuples.append((from_rel, to_rel))
2688
# determine which move mode to use. checks also for movability
2689
rename_entries = self._determine_mv_mode(rename_entries, after)
2691
original_modified = self._inventory_is_modified
2694
self._inventory_is_modified = True
2695
self._move(rename_entries)
2697
# restore the inventory on error
2698
self._inventory_is_modified = original_modified
2700
#FIXME: Should potentially also write the from_invs
2701
self._write_inventory(to_inv)
2702
return rename_tuples
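    # Illustrative sketch (not part of the original module): moving two
    # versioned files into an already versioned directory. The file and
    # directory names are assumptions for the example only.
    #
    #   tree.lock_tree_write()
    #   try:
    #       moved = tree.move(['a.txt', 'b.txt'], to_dir='subdir')
    #       # moved == [('a.txt', 'subdir/a.txt'), ('b.txt', 'subdir/b.txt')]
    #   finally:
    #       tree.unlock()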
2704
@needs_tree_write_lock
2705
def rename_one(self, from_rel, to_rel, after=False):
2708
This can change the directory or the filename or both.
2710
rename_one has several 'modes' to work. First, it can rename a physical
2711
file and change the file_id. That is the normal mode. Second, it can
2712
only change the file_id without touching any physical file.
2714
rename_one uses the second mode if 'after == True' and 'to_rel' is not
2715
versioned but present in the working tree.
2717
rename_one uses the second mode if 'after == False' and 'from_rel' is
2718
versioned but no longer in the working tree, and 'to_rel' is not
2719
versioned but present in the working tree.
2721
rename_one uses the first mode if 'after == False' and 'from_rel' is
2722
versioned and present in the working tree, and 'to_rel' is not
2723
versioned and not present in the working tree.
2725
Everything else results in an error.
2729
# create rename entries and tuples
2730
from_tail = splitpath(from_rel)[-1]
2731
from_inv, from_id = self._path2inv_file_id(from_rel)
2733
# if file is missing in the inventory maybe it's in the basis_tree
2734
basis_tree = self.branch.basis_tree()
2735
from_id = basis_tree.path2id(from_rel)
2737
raise errors.BzrRenameFailedError(from_rel,to_rel,
2738
errors.NotVersionedError(path=from_rel))
2739
# put entry back in the inventory so we can rename it
2740
from_entry = basis_tree.root_inventory[from_id].copy()
2741
from_inv.add(from_entry)
2743
from_inv, from_inv_id = self._unpack_file_id(from_id)
2744
from_entry = from_inv[from_inv_id]
2745
from_parent_id = from_entry.parent_id
2746
to_dir, to_tail = os.path.split(to_rel)
2747
to_inv, to_dir_id = self._path2inv_file_id(to_dir)
2748
rename_entry = InventoryWorkingTree._RenameEntry(from_rel=from_rel,
2750
from_tail=from_tail,
2751
from_parent_id=from_parent_id,
2752
to_rel=to_rel, to_tail=to_tail,
2753
to_parent_id=to_dir_id)
2754
rename_entries.append(rename_entry)
2756
# determine which move mode to use. checks also for movability
2757
rename_entries = self._determine_mv_mode(rename_entries, after)
2759
# check if the target changed directory and if the target directory is
2761
if to_dir_id is None:
2762
raise errors.BzrMoveFailedError(from_rel,to_rel,
2763
errors.NotVersionedError(path=to_dir))
2765
# all checks done. now we can continue with our actual work
2766
mutter('rename_one:\n'
2771
' to_dir_id {%s}\n',
2772
from_id, from_rel, to_rel, to_dir, to_dir_id)
2774
self._move(rename_entries)
2775
self._write_inventory(to_inv)
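    # Illustrative sketch (not part of the original module): recording a
    # rename that has already happened on disk by passing after=True. The
    # file names are assumptions for the example only.
    #
    #   tree.lock_tree_write()
    #   try:
    #       tree.rename_one('old_name.txt', 'new_name.txt', after=True)
    #   finally:
    #       tree.unlock()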
2777
class _RenameEntry(object):
2778
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
2779
to_rel, to_tail, to_parent_id, only_change_inv=False,
2781
self.from_rel = from_rel
2782
self.from_id = from_id
2783
self.from_tail = from_tail
2784
self.from_parent_id = from_parent_id
2785
self.to_rel = to_rel
2786
self.to_tail = to_tail
2787
self.to_parent_id = to_parent_id
2788
self.change_id = change_id
2789
self.only_change_inv = only_change_inv
2791
def _determine_mv_mode(self, rename_entries, after=False):
2792
"""Determines for each from-to pair if both inventory and working tree
2793
or only the inventory has to be changed.
2795
Also does basic plausibility tests.
2797
# FIXME: Handling of nested trees
2798
inv = self.root_inventory
2800
for rename_entry in rename_entries:
2801
# store to local variables for easier reference
2802
from_rel = rename_entry.from_rel
2803
from_id = rename_entry.from_id
2804
to_rel = rename_entry.to_rel
2805
to_id = inv.path2id(to_rel)
2806
only_change_inv = False
2809
# check the inventory for source and destination
2811
raise errors.BzrMoveFailedError(from_rel,to_rel,
2812
errors.NotVersionedError(path=from_rel))
2813
if to_id is not None:
2815
# allow it with --after but only if dest is newly added
2817
basis = self.basis_tree()
2820
if not basis.has_id(to_id):
2821
rename_entry.change_id = True
2826
raise errors.BzrMoveFailedError(from_rel,to_rel,
2827
errors.AlreadyVersionedError(path=to_rel))
2829
# try to determine the mode for rename (only change inv or change
2830
# inv and file system)
2832
if not self.has_filename(to_rel):
2833
raise errors.BzrMoveFailedError(from_id,to_rel,
2834
errors.NoSuchFile(path=to_rel,
2835
extra="New file has not been created yet"))
2836
only_change_inv = True
2837
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
2838
only_change_inv = True
2839
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
2840
only_change_inv = False
2841
elif (not self.case_sensitive
2842
and from_rel.lower() == to_rel.lower()
2843
and self.has_filename(from_rel)):
2844
only_change_inv = False
2846
# something is wrong, so lets determine what exactly
2847
if not self.has_filename(from_rel) and \
2848
not self.has_filename(to_rel):
2849
raise errors.BzrRenameFailedError(from_rel, to_rel,
2850
errors.PathsDoNotExist(paths=(from_rel, to_rel)))
2852
raise errors.RenameFailedFilesExist(from_rel, to_rel)
2853
rename_entry.only_change_inv = only_change_inv
2854
return rename_entries
2856
def _move(self, rename_entries):
2857
"""Moves a list of files.
2859
Depending on the value of the flag 'only_change_inv', the
2860
file will be moved on the file system or not.
2864
for entry in rename_entries:
2866
self._move_entry(entry)
2868
self._rollback_move(moved)
2872
def _rollback_move(self, moved):
2873
"""Try to rollback a previous move in case of an filesystem error."""
2876
self._move_entry(WorkingTree._RenameEntry(
2877
entry.to_rel, entry.from_id,
2878
entry.to_tail, entry.to_parent_id, entry.from_rel,
2879
entry.from_tail, entry.from_parent_id,
2880
entry.only_change_inv))
2881
except errors.BzrMoveFailedError, e:
2882
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
2883
" The working tree is in an inconsistent state."
2884
" Please consider doing a 'bzr revert'."
2885
" Error message is: %s" % e)
2887
def _move_entry(self, entry):
2888
inv = self.root_inventory
2889
from_rel_abs = self.abspath(entry.from_rel)
2890
to_rel_abs = self.abspath(entry.to_rel)
2891
if from_rel_abs == to_rel_abs:
2892
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
2893
"Source and target are identical.")
2895
if not entry.only_change_inv:
2897
osutils.rename(from_rel_abs, to_rel_abs)
2899
raise errors.BzrMoveFailedError(entry.from_rel,
2902
to_id = inv.path2id(entry.to_rel)
2903
inv.remove_recursive_id(to_id)
2904
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
2906
@needs_tree_write_lock
2907
def unversion(self, file_ids):
2908
"""Remove the file ids in file_ids from the current versioned set.
2910
When a file_id is unversioned, all of its children are automatically
2913
:param file_ids: The file ids to stop versioning.
2914
:raises: NoSuchId if any fileid is not currently versioned.
2916
for file_id in file_ids:
2917
if not self._inventory.has_id(file_id):
2918
raise errors.NoSuchId(self, file_id)
2919
for file_id in file_ids:
2920
if self._inventory.has_id(file_id):
2921
self._inventory.remove_recursive_id(file_id)
2923
# in the future this should just set a dirty bit to wait for the
2924
# final unlock. However, until all methods of workingtree start
2925
# with the current in-memory inventory rather than triggering
2926
# a read, it is more complex - we need to teach read_inventory
2927
# to know when to read, and when to not read first... and possibly
2928
# to save first when the in memory one may be corrupted.
2929
# so for now, we just only write it if it is indeed dirty.
2931
self._write_inventory(self._inventory)
2933
def stored_kind(self, file_id):
2934
"""See Tree.stored_kind"""
2935
inv, inv_file_id = self._unpack_file_id(file_id)
2936
return inv[inv_file_id].kind

    def extras(self):
        """Yield all unversioned files in this WorkingTree.
2941
If there are any unversioned directories then only the directory is
2942
returned, not all its children. But if there are unversioned files
2943
under a versioned subdirectory, they are returned.
2945
Currently returned depth-first, sorted by name within directories.
2946
This is the same order used by 'osutils.walkdirs'.
2948
## TODO: Work from given directory downwards
2949
for path, dir_entry in self.iter_entries_by_dir():
2950
if dir_entry.kind != 'directory':
2952
# mutter("search for unknowns in %r", path)
2953
dirabs = self.abspath(path)
2954
if not isdir(dirabs):
2955
# e.g. directory deleted
2959
for subf in os.listdir(dirabs):
2960
if self.bzrdir.is_control_filename(subf):
2962
if subf not in dir_entry.children:
2965
can_access) = osutils.normalized_filename(subf)
2966
except UnicodeDecodeError:
2967
path_os_enc = path.encode(osutils._fs_enc)
2968
relpath = path_os_enc + '/' + subf
2969
raise errors.BadFilenameEncoding(relpath,
2971
if subf_norm != subf and can_access:
2972
if subf_norm not in dir_entry.children:
2973
fl.append(subf_norm)
2979
subp = pathjoin(path, subf)
2982
    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :param prefix: is used as the directory to start with.
        :returns: a generator which yields items in the form::

            ((current_directory_path, fileid),
             [(file1_path, file1_name, file1_kind, None, file1_id,
               file1_kind), ... ])
        """
2992
_directory = 'directory'
2993
# get the root in the inventory
2994
inv, top_id = self._path2inv_file_id(prefix)
2998
pending = [(prefix, '', _directory, None, top_id, None)]
3001
currentdir = pending.pop()
3002
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
3003
top_id = currentdir[4]
3005
relroot = currentdir[0] + '/'
3008
# FIXME: stash the node in pending
3010
if entry.kind == 'directory':
3011
for name, child in entry.sorted_children():
3012
dirblock.append((relroot + name, name, child.kind, None,
3013
child.file_id, child.kind
3015
yield (currentdir[0], entry.file_id), dirblock
3016
# push the user specified dirs from dirblock
3017
for dir in reversed(dirblock):
3018
if dir[2] == _directory:
3022
def update_feature_flags(self, updated_flags):
3023
"""Update the feature flags for this branch.
3025
:param updated_flags: Dictionary mapping feature names to necessities
3026
A necessity can be None to indicate the feature should be removed
3028
self._format._update_feature_flags(updated_flags)
3029
self.control_transport.put_bytes('format', self._format.as_string())
3032
class WorkingTreeFormatRegistry(controldir.ControlComponentFormatRegistry):
3033
"""Registry for working tree formats."""
3035
def __init__(self, other_registry=None):
3036
super(WorkingTreeFormatRegistry, self).__init__(other_registry)
3037
self._default_format = None
3038
self._default_format_key = None
3040
def get_default(self):
3041
"""Return the current default format."""
3042
if (self._default_format_key is not None and
3043
self._default_format is None):
3044
self._default_format = self.get(self._default_format_key)
3045
return self._default_format
3047
def set_default(self, format):
3048
"""Set the default format."""
3049
self._default_format = format
3050
self._default_format_key = None
3052
def set_default_key(self, format_string):
3053
"""Set the default format by its format string."""
3054
self._default_format_key = format_string
3055
self._default_format = None
3058
format_registry = WorkingTreeFormatRegistry()
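# Illustrative sketch (not part of the original module): how a third-party
# format could be registered lazily and retrieved again. The format string
# and module path are assumptions for the example only.
#
#   format_registry.register_lazy("My Working Tree Format 1\n",
#       "myplugin.workingtree", "MyWorkingTreeFormat1")
#   fmt = format_registry.get("My Working Tree Format 1\n")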
3061
class WorkingTreeFormat(controldir.ControlComponentFormat):
1424
if filename == head:
1430
class WorkingTreeFormat(object):
3062
1431
"""An encapsulation of the initialization and open routines for a format.
3064
1433
Formats provide three things:
3120
1480
"""Is this format supported?
3122
1482
Supported formats can be initialized and opened.
3123
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True
3128
def supports_content_filtering(self):
3129
"""True if this format supports content filtering."""
3132
def supports_views(self):
3133
"""True if this format supports stored views."""
3136
def get_controldir_for_branch(self):
3137
"""Get the control directory format for creating branches.
3139
This is to support testing of working tree formats that can not exist
3140
in the same control directory as a branch.
3142
return self._matchingbzrdir
3145
class WorkingTreeFormatMetaDir(bzrdir.BzrFormat, WorkingTreeFormat):
3146
"""Base class for working trees that live in bzr meta directories."""
3149
WorkingTreeFormat.__init__(self)
3150
bzrdir.BzrFormat.__init__(self)
3153
def find_format_string(klass, controldir):
3154
"""Return format name for the working tree object in controldir."""
3156
transport = controldir.get_workingtree_transport(None)
3157
return transport.get_bytes("format")
3158
except errors.NoSuchFile:
3159
raise errors.NoWorkingTree(base=transport.base)
3162
def find_format(klass, controldir):
3163
"""Return the format for the working tree object in controldir."""
3164
format_string = klass.find_format_string(controldir)
3165
return klass._find_format(format_registry, 'working tree',
3168
def check_support_status(self, allow_unsupported, recommend_upgrade=True,
3170
WorkingTreeFormat.check_support_status(self,
3171
allow_unsupported=allow_unsupported, recommend_upgrade=recommend_upgrade,
3173
bzrdir.BzrFormat.check_support_status(self, allow_unsupported=allow_unsupported,
3174
recommend_upgrade=recommend_upgrade, basedir=basedir)
3217
format_registry.register_lazy("Bazaar Working Tree Format 4 (bzr 0.15)\n",
3218
"bzrlib.workingtree_4", "WorkingTreeFormat4")
3219
format_registry.register_lazy("Bazaar Working Tree Format 5 (bzr 1.11)\n",
3220
"bzrlib.workingtree_4", "WorkingTreeFormat5")
3221
format_registry.register_lazy("Bazaar Working Tree Format 6 (bzr 1.14)\n",
3222
"bzrlib.workingtree_4", "WorkingTreeFormat6")
3223
format_registry.register_lazy("Bazaar-NG Working Tree format 3",
3224
"bzrlib.workingtree_3", "WorkingTreeFormat3")
3225
format_registry.set_default_key("Bazaar Working Tree Format 6 (bzr 1.14)\n")
1489
def register_format(klass, format):
1490
klass._formats[format.get_format_string()] = format
1493
def set_default_format(klass, format):
1494
klass._default_format = format
1497
def unregister_format(klass, format):
1498
assert klass._formats[format.get_format_string()] is format
1499
del klass._formats[format.get_format_string()]

class WorkingTreeFormat2(WorkingTreeFormat):
    """The second working tree format.

    This format modified the hash cache from the format 1 hash cache.
    """

    def get_format_description(self):
        """See WorkingTreeFormat.get_format_description()."""
        return "Working tree format 2"

    def stub_initialize_remote(self, control_files):
        """As a special workaround create critical control files for a remote working tree.

        This ensures that it can later be updated and dealt with locally,
        since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
        no working tree.  (See bug #43064).
        """
        # Write an empty inventory and an empty pending-merges list so the
        # remote directory can later be treated as a working tree locally.
        sio = StringIO()
        inv = Inventory()
        bzrlib.xml5.serializer_v5.write_inventory(inv, sio)
        sio.seek(0)
        control_files.put('inventory', sio)
        control_files.put_utf8('pending-merges', '')
    def initialize(self, a_bzrdir, revision_id=None):
        """See WorkingTreeFormat.initialize()."""
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        branch = a_bzrdir.open_branch()
        if revision_id is not None:
            branch.lock_write()
            try:
                revision_history = branch.revision_history()
                try:
                    position = revision_history.index(revision_id)
                except ValueError:
                    raise errors.NoSuchRevision(branch, revision_id)
                branch.set_revision_history(revision_history[:position + 1])
            finally:
                branch.unlock()
        revision = branch.last_revision()
        inv = Inventory()
        wt = WorkingTree(a_bzrdir.root_transport.base,
                         branch,
                         inv,
                         _internal=True,
                         _format=self,
                         _bzrdir=a_bzrdir)
        wt._write_inventory(inv)
        wt.set_root_id(inv.root.file_id)
        wt.set_last_revision(revision)
        wt.set_pending_merges([])
        build_tree(wt.basis_tree(), wt)
        return wt

    def __init__(self):
        super(WorkingTreeFormat2, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat6()
    def open(self, a_bzrdir, _found=False):
        """Return the WorkingTree object for a_bzrdir.

        _found is a private parameter, do not use it. It is used to indicate
        if format probing has already been done.
        """
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        return WorkingTree(a_bzrdir.root_transport.base,
                           _internal=True,
                           _format=self,
                           _bzrdir=a_bzrdir)
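
# Illustrative sketch (not part of the original module): the usual way to get
# a format-2 tree is to initialize() it into an existing control directory
# and then open() it. `a_bzrdir` is a hypothetical, already-created
# bzrdir.BzrDir on a local transport that contains a branch; the helper name
# is hypothetical as well.
def _example_make_format2_tree(a_bzrdir):
    format = WorkingTreeFormat2()
    format.initialize(a_bzrdir)
    # _found=True skips probing, mirroring how callers use open() once the
    # format is already known.
    return format.open(a_bzrdir, _found=True)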

class WorkingTreeFormat3(WorkingTreeFormat):
    """The second working tree format updated to record a format marker.

    This format:
        - exists within a metadir controlling .bzr
        - includes an explicit version marker for the workingtree control
          files, separate from the BzrDir format
        - modifies the hash cache format
        - uses a LockDir to guard access to the repository
    """

    def get_format_string(self):
        """See WorkingTreeFormat.get_format_string()."""
        return "Bazaar-NG Working Tree format 3"

    def get_format_description(self):
        """See WorkingTreeFormat.get_format_description()."""
        return "Working tree format 3"

    _lock_file_name = 'lock'
    _lock_class = LockDir

    def _open_control_files(self, a_bzrdir):
        transport = a_bzrdir.get_workingtree_transport(None)
        return LockableFiles(transport, self._lock_file_name,
                             self._lock_class)
    def initialize(self, a_bzrdir, revision_id=None):
        """See WorkingTreeFormat.initialize().

        revision_id allows creating a working tree at a different
        revision than the branch is at.
        """
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        transport = a_bzrdir.get_workingtree_transport(self)
        control_files = self._open_control_files(a_bzrdir)
        control_files.create_lock()
        control_files.lock_write()
        control_files.put_utf8('format', self.get_format_string())
        branch = a_bzrdir.open_branch()
        if revision_id is None:
            revision_id = branch.last_revision()
        inv = Inventory()
        wt = WorkingTree3(a_bzrdir.root_transport.base,
                          branch,
                          inv,
                          _internal=True,
                          _format=self,
                          _bzrdir=a_bzrdir,
                          _control_files=control_files)
        wt.lock_write()
        try:
            wt._write_inventory(inv)
            wt.set_root_id(inv.root.file_id)
            wt.set_last_revision(revision_id)
            wt.set_pending_merges([])
            build_tree(wt.basis_tree(), wt)
        finally:
            wt.unlock()
            control_files.unlock()
        return wt

    def __init__(self):
        super(WorkingTreeFormat3, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirMetaFormat1()
    def open(self, a_bzrdir, _found=False):
        """Return the WorkingTree object for a_bzrdir.

        _found is a private parameter, do not use it. It is used to indicate
        if format probing has already been done.
        """
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        control_files = self._open_control_files(a_bzrdir)
        return WorkingTree3(a_bzrdir.root_transport.base,
                            _internal=True,
                            _format=self,
                            _bzrdir=a_bzrdir,
                            _control_files=control_files)

    def __str__(self):
        return self.get_format_string()
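
# Illustrative sketch (not part of the original module): creating and
# reopening a format-3 tree. Unlike format 2, initialize() writes an explicit
# 'format' marker and guards the control files with a LockDir. `a_bzrdir` is
# a hypothetical metadir-based BzrDir that already contains a branch; the
# helper name is hypothetical.
def _example_make_format3_tree(a_bzrdir, revision_id=None):
    format = WorkingTreeFormat3()
    format.initialize(a_bzrdir, revision_id=revision_id)
    return format.open(a_bzrdir, _found=True)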

# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
__default_format = WorkingTreeFormat3()
WorkingTreeFormat.register_format(__default_format)
WorkingTreeFormat.set_default_format(__default_format)
_legacy_formats = [WorkingTreeFormat2(),
                   ]
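
# Illustrative sketch (not part of the original module): with the module-level
# registration just above, the WorkingTreeFormat3 instance is reachable by its
# format string through the class-level _formats dict, while WorkingTreeFormat2
# (which has no format marker on disk) only appears in _legacy_formats. The
# helper name is hypothetical.
def _example_registered_format(format_string="Bazaar-NG Working Tree format 3"):
    return WorkingTreeFormat._formats[format_string]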

class WorkingTreeTestProviderAdapter(object):
    """A tool to generate a suite testing multiple workingtree formats at once.

    This is done by copying the test once for each transport and injecting
    the transport_server, transport_readonly_server, and workingtree_format
    classes into each copy. Each copy is also given a new id() to make it
    easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        from bzrlib.tests import TestSuite
        result = TestSuite()
        for workingtree_format, bzrdir_format in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.bzrdir_format = bzrdir_format
            new_test.workingtree_format = workingtree_format
            def make_new_test_id():
                new_id = "%s(%s)" % (new_test.id(), workingtree_format.__class__.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)