from inventory import Inventory, InventoryEntry
from trace import mutter, note
from tree import Tree, EmptyTree, RevisionTree, WorkingTree
from osutils import isdir, quotefn, isfile, uuid, sha_file, username, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from store import ImmutableStore
from revision import Revision
from errors import bailout, BzrError
from textui import show_status
from diff import diff_trees

BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"

## TODO: Maybe include checks for common corruption of newlines, etc?


def find_branch(f, **args):
    if f and (f.startswith('http://') or f.startswith('https://')):
        import remotebranch
        return remotebranch.RemoteBranch(f, **args)
    else:
        return Branch(f, **args)

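# Usage sketch (illustrative only; the path and URL below are made up):
#
#     b = find_branch('/home/user/work/proj')       # local path -> Branch
#     rb = find_branch('http://example.com/proj')   # http(s) URL -> RemoteBranch
#
# Extra keyword arguments are passed straight through to whichever branch
# constructor is selected.
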
def _relpath(base, path):
    """Return path relative to base, or raise exception.

    The path may be either an absolute path or a path relative to the
    current working directory.

    Lifted out of Branch.relpath for ease of testing.

    os.path.commonprefix (python2.4) has a bad bug in that it works just
    on string prefixes, assuming that '/u' is a prefix of '/u2'.  This
    avoids that problem."""
    rp = os.path.abspath(path)

    s = []
    head = rp
    while len(head) >= len(base):
        if head == base:
            break
        head, tail = os.path.split(head)
        if tail:
            s.insert(0, tail)
    else:
        from errors import NotBranchError
        raise NotBranchError("path %r is not within branch %r" % (rp, base))

    return os.sep.join(s)

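# Example behaviour (illustrative, assuming a branch rooted at '/u'):
#
#     _relpath('/u', '/u/foo/bar')   # -> 'foo/bar'
#     _relpath('/u', '/u2/foo')      # -> raises NotBranchError: '/u' is a
#                                    #    string prefix of '/u2/foo' but not
#                                    #    a path prefix of it
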
def find_branch_root(f=None):
    """Find the branch root enclosing f, or pwd.

    f may be a filename or a URL.

    It is not necessary that f exists.

    Basically we keep looking up until we find the control directory or
    run into the root."""

    __repr__ = __str__


    def __del__(self):
        if self._lock_mode or self._lock:
            from warnings import warn
            warn("branch %r was not explicitly unlocked" % self)
            self._lock.unlock()

    def lock_write(self):
        if self._lock_mode:
            if self._lock_mode != 'w':
                from errors import LockError
                raise LockError("can't upgrade to a write lock from %r" %
                                self._lock_mode)
            self._lock_count += 1
        else:
            from bzrlib.lock import WriteLock

            self._lock = WriteLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'w'
            self._lock_count = 1


    def lock_read(self):
        if self._lock_mode:
            assert self._lock_mode in ('r', 'w'), \
                   "invalid lock mode %r" % self._lock_mode
            self._lock_count += 1
        else:
            from bzrlib.lock import ReadLock

            self._lock = ReadLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'r'
            self._lock_count = 1


    def unlock(self):
        if not self._lock_mode:
            from errors import LockError
            raise LockError('branch %r is not locked' % (self))

        if self._lock_count > 1:
            self._lock_count -= 1
        else:
            self._lock.unlock()
            self._lock = None
            self._lock_mode = self._lock_count = None

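    # Locking usage sketch (illustrative; assumes an existing branch object b):
    #
    #     b.lock_read()
    #     try:
    #         pass             # read-only operations on b
    #     finally:
    #         b.unlock()
    #
    # Locks are counted, so nested lock_read()/unlock() pairs are cheap; only
    # the outermost unlock() releases the OS-level lock on 'branch-lock'.
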
    def lock(self, mode='w'):
        """Lock the on-disk branch, excluding other processes."""
        try:
            import fcntl

            if mode == 'w':
                lm = fcntl.LOCK_EX
                om = os.O_WRONLY | os.O_CREAT
            elif mode == 'r':
                lm = fcntl.LOCK_SH
                om = os.O_RDONLY
            else:
                raise BzrError("invalid locking mode %r" % mode)

            # XXX: Old branches might not have the lock file, and
            # won't get one until someone does a write-mode command on
            # them or creates it by hand.

            lockfile = os.open(self.controlfilename('branch-lock'), om)
            fcntl.lockf(lockfile, lm)
            def unlock():
                fcntl.lockf(lockfile, fcntl.LOCK_UN)
                os.close(lockfile)
                self._lockmode = None
            self.unlock = unlock
            self._lockmode = mode
        except ImportError:
            warning("please write a locking method for platform %r" % sys.platform)
            def unlock():
                self._lockmode = None
            self.unlock = unlock
            self._lockmode = mode


    def _need_readlock(self):
        if self._lockmode not in ['r', 'w']:
            raise BzrError('need read lock on branch, only have %r' % self._lockmode)

    def _need_writelock(self):
        if self._lockmode not in ['w']:
            raise BzrError('need write lock on branch, only have %r' % self._lockmode)

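    # Minimal standalone sketch of the fcntl scheme used above (illustrative;
    # POSIX only, and the lock file name is made up):
    #
    #     import fcntl, os
    #     fd = os.open('/tmp/example-lock', os.O_WRONLY | os.O_CREAT)
    #     fcntl.lockf(fd, fcntl.LOCK_EX)      # exclusive (write) lock
    #     try:
    #         pass                            # ... critical section ...
    #     finally:
    #         fcntl.lockf(fd, fcntl.LOCK_UN)
    #         os.close(fd)
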
    def abspath(self, name):
        """Return absolute filename for something in the branch."""
        return os.path.join(self.base, name)

    def add(self, files, verbose=False, ids=None):
        """Make files versioned.

        This puts the files in the Added state, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not (yet) exist.

        TODO: Perhaps return the ids of the files?  But then again it
              is easy to retrieve them if they're needed.

        TODO: Adding a directory should optionally recurse down and
              add all non-ignored children.  Perhaps do that in a
              higher-level method.

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add('foo')
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True
        >>> b.add('foo')
        Traceback (most recent call last):
        ...
        BzrError: ('foo is already versioned', [])

        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        self._need_writelock()

        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, types.StringTypes):
            assert(ids is None or isinstance(ids, types.StringTypes))
            files = [files]
            if ids is not None:
                ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f,file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            fp = splitpath(f)

            if len(fp) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))

            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if file_id is None:
                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

            if verbose:
                print 'added', quotefn(f)

            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)

    def print_file(self, file, revno):
        """Print `file` to stdout."""
        self._need_readlock()
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            raise BzrError("%r is not present in revision %d" % (file, revno))
        tree.print_file(file_id)

    def remove(self, files, verbose=False):
        """Mark nominated files for removal from the inventory."""
        ## TODO: Normalize names
        ## TODO: Remove nested loops; better scalability
        self._need_writelock()

        if isinstance(files, types.StringTypes):
            files = [files]

        tree = self.working_tree()
        inv = tree.inventory

        # do this before any modifications
        for f in files:
            fid = inv.path2id(f)
            if not fid:
                raise BzrError("cannot remove unversioned file %s" % quotefn(f))
            mutter("remove inventory entry %s {%s}" % (quotefn(f), fid))
            if verbose:
                # having removed it, it must be either ignored or unknown
                if tree.is_ignored(f):
                    new_status = 'I'
                else:
                    new_status = '?'
                show_status(new_status, inv[fid].kind, quotefn(f))
            del inv[fid]

        self._write_inventory(inv)

    # FIXME: this doesn't need to be a branch method
    def set_inventory(self, new_inventory_list):
        inv = Inventory()
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            if name == "":
                continue
            inv.add(InventoryEntry(file_id, name, kind, parent))
        self._write_inventory(inv)

    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        return self.working_tree().unknowns()

    def commit(self, message, timestamp=None, timezone=None,
               committer=None,
               verbose=False):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed.  A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory.  This should mean at least that there are no
        broken hash pointers.  There is no way we can get a snapshot
        of the whole directory at an instant.  This would also have to
        be robust against files disappearing, moving, etc.  So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
        """
        self._need_writelock()

        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory.  The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision.  (These are
        # not present in the working inventory.)  We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        inv = Inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []

        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            entry = entry.copy()

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter("    file is missing, removing from inventory")
                if verbose:
                    show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files?  Seems a
            # waste to store them many times.

            inv.add(entry)

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    bailout("entry %r changed kind from %r to %r"
                            % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    bailout("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    bailout("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter('    unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter('    stored with text_id {%s}' % entry.text_id)
                    if verbose:
                        if not old_ie:
                            state = 'A'
                        elif (old_ie.name == entry.name
                              and old_ie.parent_id == entry.parent_id):
                            state = 'M'
                        else:
                            state = 'R'

                        show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # have to test.

            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        inv_tmp.seek(0)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

        if timezone == None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       message=message,
                       inventory_id=inv_id,
                       revision_id=rev_id,
                       precursor = self.last_patch())

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        rev_tmp.seek(0)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        note("committed r%d" % self.revno())

    def append_revision(self, revision_id):
        from bzrlib.atomicfile import AtomicFile

        mutter("add {%s} to revision-history" % revision_id)
        rev_history = self.revision_history() + [revision_id]

        f = AtomicFile(self.controlfilename('revision-history'))
        try:
            for rev_id in rev_history:
                print >>f, rev_id
            f.commit()
        finally:
            f.close()

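    # An earlier version of append_revision spelled out the same atomic
    # replacement by hand: write the whole history to a temporary file and
    # rename it over 'revision-history' (removing the target first on win32,
    # where rename does not overwrite).  Sketch of that pattern:
    #
    #     tmprhname = self.controlfilename('revision-history.tmp')
    #     rhname = self.controlfilename('revision-history')
    #     f = file(tmprhname, 'wt')
    #     rev_history.append(revision_id)
    #     f.write('\n'.join(rev_history))
    #     f.write('\n')
    #     f.close()
    #     if sys.platform == 'win32':
    #         os.remove(rhname)
    #     os.rename(tmprhname, rhname)
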
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision"""
        self._need_readlock()
        if not revision_id or not isinstance(revision_id, basestring):
            raise ValueError('invalid revision-id: %r' % revision_id)
        r = Revision.read_xml(self.revision_store[revision_id])
        assert r.revision_id == revision_id
        return r

    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed. At that
        # point, it is probably best *not* to include the signature
        # in the revision hash.  Because that lets you re-sign
        # the revision (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return sha_file(self.revision_store[revision_id])

    def get_inventory(self, inventory_id):
        """Get Inventory object by hash."""

    def revision_history(self):
        """Return sequence of revision hashes on this branch.

        >>> ScratchBranch().revision_history()
        []
        """
        self._need_readlock()
        return [l.rstrip('\r\n') for l in
                self.controlfile('revision-history', 'r').readlines()]

    def common_ancestor(self, other, self_revno=None, other_revno=None):
        """
        >>> import commit
        >>> sb = ScratchBranch(files=['foo', 'foo~'])
        >>> sb.common_ancestor(sb) == (None, None)
        True
        >>> commit.commit(sb, "Committing first revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        1
        >>> clone = sb.clone()
        >>> commit.commit(sb, "Committing second revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        2
        >>> sb.common_ancestor(clone)[0]
        1
        >>> commit.commit(clone, "Committing divergent second revision",
        ...               verbose=False)
        >>> sb.common_ancestor(clone)[0]
        1
        >>> sb.common_ancestor(clone) == clone.common_ancestor(sb)
        True
        >>> sb.common_ancestor(sb) != clone.common_ancestor(clone)
        True
        >>> clone2 = sb.clone()
        >>> sb.common_ancestor(clone2)[0]
        2
        >>> sb.common_ancestor(clone2, self_revno=1)[0]
        1
        >>> sb.common_ancestor(clone2, other_revno=1)[0]
        1
        """
        my_history = self.revision_history()
        other_history = other.revision_history()
        if self_revno is None:
            self_revno = len(my_history)
        if other_revno is None:
            other_revno = len(other_history)
        indices = range(min((self_revno, other_revno)))
        indices.reverse()
        for r in indices:
            if my_history[r] == other_history[r]:
                return r+1, my_history[r]
        return None, None

    def enum_history(self, direction):
        """Return (revno, revision_id) for history of branch.

        direction
            'forward' is from earliest to latest
            'reverse' is from latest to earliest
        """
        rh = self.revision_history()
        if direction == 'forward':
            i = 1
            for rid in rh:
                yield i, rid
                i += 1
        elif direction == 'reverse':
            i = len(rh)
            while i > 0:
                yield i, rh[i-1]
                i -= 1
        else:
            raise ValueError('invalid history direction', direction)

    def revno(self):
        """Return current revision number for this branch.

        That is equivalent to the number of revisions committed to
        this branch.

        >>> b = ScratchBranch()
        >>> b.revno()
        0
        >>> b.commit('no foo')
        >>> b.revno()
        1
        """
        return len(self.revision_history())

    def last_patch(self):
        """Return last patch hash, or None if no history.

        >>> ScratchBranch().last_patch() == None
        True
        """
        ph = self.revision_history()
        if ph:
            return ph[-1]
        else:
            return None

    def missing_revisions(self, other, stop_revision=None):
        """
        If self and other have not diverged, return a list of the revisions
        present in other, but missing from self.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch()
        >>> br2 = ScratchBranch()
        >>> br1.missing_revisions(br2)
        []
        >>> commit(br2, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        ['REVISION-ID-1']
        >>> br2.missing_revisions(br1)
        []
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        []
        >>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
        >>> br1.missing_revisions(br2)
        ['REVISION-ID-2A']
        >>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
        >>> br1.missing_revisions(br2)
        Traceback (most recent call last):
        DivergedBranches: These branches have diverged.
        """
        self_history = self.revision_history()
        self_len = len(self_history)
        other_history = other.revision_history()
        other_len = len(other_history)
        common_index = min(self_len, other_len) - 1
        if common_index >= 0 and \
           self_history[common_index] != other_history[common_index]:
            raise DivergedBranches(self, other)

        if stop_revision is None:
            stop_revision = other_len
        elif stop_revision > other_len:
            raise NoSuchRevision(self, stop_revision)

        return other_history[self_len:stop_revision]

    def update_revisions(self, other, stop_revision=None):
        """Pull in all new revisions from other branch.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch(files=['foo', 'bar'])
        >>> br1.add('foo')
        >>> br1.add('bar')
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1", verbose=False)
        >>> br2 = ScratchBranch()
        >>> br2.update_revisions(br1)
        Added 2 texts.
        Added 1 inventories.
        Added 1 revisions.
        >>> br2.revision_history()
        ['REVISION-ID-1']
        >>> br2.update_revisions(br1)
        Added 0 texts.
        Added 0 inventories.
        Added 0 revisions.
        >>> br1.text_store.total_size() == br2.text_store.total_size()
        True
        """
        from bzrlib.progress import ProgressBar

        pb = ProgressBar()

        pb.update('comparing histories')
        revision_ids = self.missing_revisions(other, stop_revision)

        needed_texts = sets.Set()
        revisions = []
        i = 0
        for rev_id in revision_ids:
            i += 1
            pb.update('fetching revision', i, len(revision_ids))
            rev = other.get_revision(rev_id)
            revisions.append(rev)
            inv = other.get_inventory(str(rev.inventory_id))
            for key, entry in inv.iter_entries():
                if entry.text_id is None:
                    continue
                if entry.text_id not in self.text_store:
                    needed_texts.add(entry.text_id)

        pb.clear()

        count = self.text_store.copy_multi(other.text_store, needed_texts)
        print "Added %d texts." % count
        inventory_ids = [ f.inventory_id for f in revisions ]
        count = self.inventory_store.copy_multi(other.inventory_store,
                                                inventory_ids)
        print "Added %d inventories." % count
        revision_ids = [ f.revision_id for f in revisions]
        count = self.revision_store.copy_multi(other.revision_store,
                                               revision_ids)
        for revision_id in revision_ids:
            self.append_revision(revision_id)
        print "Added %d revisions." % count

    def commit(self, *args, **kw):
        from bzrlib.commit import commit
        commit(self, *args, **kw)

    def lookup_revision(self, revno):
        """Return revision hash for revision number."""
        if revno == 0:
            return None

        try:
            # list is 0-based; revisions are 1-based
            return self.revision_history()[revno-1]
        except IndexError:
            raise BzrError("no such revision %s" % revno)

    def write_log(self, show_timezone='original', verbose=False):
        """Write out human-readable log of commits to this branch.

        show_timezone -- 'original', 'utc' or 'local': how dates are shown."""
        self._need_readlock()
        ## TODO: Option to choose either original, utc or local timezone
        revno = 1
        precursor = None
        for p in self.revision_history():
            print '-' * 40
            print 'revno:', revno
            ## TODO: Show hash if --id is given.
            ##print 'revision-hash:', p
            rev = self.get_revision(p)
            print 'committer:', rev.committer
            print 'timestamp: %s' % (format_date(rev.timestamp, rev.timezone or 0,
                                                 show_timezone))

            ## opportunistic consistency check, same as check_patch_chaining
            if rev.precursor != precursor:
                bailout("mismatched precursor!")

            print 'message:'
            if not rev.message:
                print '  (no message)'
            else:
                for l in rev.message.split('\n'):
                    print '  ' + l

            if verbose == True and precursor != None:
                print 'changed files:'
                tree = self.revision_tree(p)
                prevtree = self.revision_tree(precursor)

                for file_state, fid, old_name, new_name, kind in \
                                        diff_trees(prevtree, tree):
                    if file_state == 'A' or file_state == 'M':
                        show_status(file_state, kind, new_name)
                    elif file_state == 'D':
                        show_status(file_state, kind, old_name)
                    elif file_state == 'R':
                        show_status(file_state, kind,
                                    old_name + ' => ' + new_name)

            revno += 1
            precursor = p

    def rename_one(self, from_rel, to_rel):
        """Rename one file.

        This can change the directory or the filename or both.
        """
        self._need_writelock()
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter("  file_id    {%s}" % file_id)
        mutter("  from_rel   %r" % from_rel)
        mutter("  to_rel     %r" % to_rel)
        mutter("  to_dir     %r" % to_dir)
        mutter("  to_dir_id  {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
        try:
            os.rename(from_abs, to_abs)
        except OSError, e:
            raise BzrError("failed to rename %r to %r: %s"
                    % (from_abs, to_abs, e[1]),
                    ["rename rolled back"])

        self._write_inventory(inv)

    def move(self, from_paths, to_name):
        """Rename files.

        Note that to_name is only the last component of the new name;
        this doesn't change the directory.
        """
        self._need_writelock()
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done.  But we should have caught most problems.

        for f in from_paths:
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                        ["rename rolled back"])

        self._write_inventory(inv)

    def revert(self, filenames, old_tree=None, backups=True):
        """Restore selected files to the versions from a previous tree.

        backups
            If true (default) backups are made of files before
            they're changed.
        """
        from bzrlib.errors import NotVersionedError, BzrError
        from bzrlib.atomicfile import AtomicFile
        from bzrlib.osutils import backup_file

        inv = self.read_working_inventory()
        if old_tree is None:
            old_tree = self.basis_tree()
        old_inv = old_tree.inventory

        nids = []
        for fn in filenames:
            file_id = inv.path2id(fn)
            if not file_id:
                raise NotVersionedError("not a versioned file", fn)
            if not old_inv.has_id(file_id):
                raise BzrError("file not present in old tree", fn, file_id)
            nids.append((fn, file_id))

        # TODO: Rename back if it was previously at a different location

        # TODO: If given a directory, restore the entire contents from
        # the previous version.

        # TODO: Make a backup to a temporary file.

        # TODO: If the file previously didn't exist, delete it?
        for fn, file_id in nids:
            if backups:
                backup_file(fn)

            f = AtomicFile(fn, 'wb')
            try:
                f.write(old_tree.get_file(file_id).read())
                f.commit()
            finally:
                f.close()

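    # AtomicFile write pattern assumed above (sketch; the exact AtomicFile
    # API is taken on trust from bzrlib.atomicfile):
    #
    #     f = AtomicFile(path, 'wb')
    #     try:
    #         f.write(new_bytes)
    #         f.commit()   # atomically replaces the target file
    #     finally:
    #         f.close()    # a close without commit abandons the new content
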
    def show_status(self, show_all=False):
        """Display single-line status for non-ignored working files.

        The list is shown sorted in order by file name.

        >>> b = ScratchBranch(files=['foo', 'foo~'])
        >>> b.add('foo')
        >>> b.commit("add foo")
        >>> os.unlink(b.abspath('foo'))

        TODO: Get state for single files.
        """
        self._need_readlock()

        # We have to build everything into a list first so that it can
        # be sorted by name, incorporating all the different sources.

        # FIXME: Rather than getting things in random order and then sorting,
        # just step through in order.

        # Interesting case: the old ID for a file has been removed,
        # but a new file has been created under that name.

        old = self.basis_tree()
        new = self.working_tree()

        for fs, fid, oldname, newname, kind in diff_trees(old, new):
            if fs == 'R':
                show_status(fs, kind,
                            oldname + ' => ' + newname)
            elif fs == 'A' or fs == 'M':
                show_status(fs, kind, newname)
            elif fs == 'D':
                show_status(fs, kind, oldname)
            elif fs == 'I':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == '?':
                show_status(fs, kind, newname)
            else:
                bailout("weird file state %r" % ((fs, fid),))


class ScratchBranch(Branch):