from inventory import InventoryEntry, Inventory
from osutils import isdir, quotefn, isfile, uuid, sha_file, username, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_file, sha_string, file_kind, local_time_offset, appendpath
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from store import ImmutableStore
from revision import Revision
from errors import BzrError
from errors import bailout, BzrError
from textui import show_status
from diff import diff_trees

BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"

## TODO: Maybe include checks for common corruption of newlines, etc?
        raise BzrError('%r is not in a branch' % orig_f)


class DivergedBranches(Exception):
    def __init__(self, branch1, branch2):
        self.branch1 = branch1
        self.branch2 = branch2
        Exception.__init__(self, "These branches have diverged.")


class NoSuchRevision(BzrError):
    def __init__(self, branch, revision):
        self.revision = revision
        msg = "Branch %s has no revision %d" % (branch, revision)
        BzrError.__init__(self, msg)


######################################################################
class Branch(object):
    """Branch holding a history of revisions.

        Base directory of the branch.

        If _lock_mode is true, a positive count of the number of times the

        Lock object from bzrlib.lock.

    def __init__(self, base, init=False, find_root=True):
    def __init__(self, base, init=False, find_root=True, lock_mode='w'):
        """Create new branch object at a particular location.

        base -- Base directory for the branch.

    __repr__ = __str__

        if self._lock_mode or self._lock:
            from warnings import warn
            warn("branch %r was not explicitly unlocked" % self)

    def lock_write(self):
            if self._lock_mode != 'w':
                from errors import LockError
                raise LockError("can't upgrade to a write lock from %r" %
                                self._lock_mode)
            self._lock_count += 1
            from bzrlib.lock import WriteLock
            self._lock = WriteLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'w'

            assert self._lock_mode in ('r', 'w'), \
                   "invalid lock mode %r" % self._lock_mode
            self._lock_count += 1
            from bzrlib.lock import ReadLock
            self._lock = ReadLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'r'

    def lock(self, mode='w'):
        """Lock the on-disk branch, excluding other processes."""
            om = os.O_WRONLY | os.O_CREAT
            raise BzrError("invalid locking mode %r" % mode)
            lockfile = os.open(self.controlfilename('branch-lock'), om)
            if e.errno == errno.ENOENT:
                # might not exist on branches from <0.0.4
                self.controlfile('branch-lock', 'w').close()
                lockfile = os.open(self.controlfilename('branch-lock'), om)

        if not self._lock_mode:
            from errors import LockError
            raise LockError('branch %r is not locked' % (self))
        if self._lock_count > 1:
            self._lock_count -= 1
            self._lock_mode = self._lock_count = None

            fcntl.lockf(lockfile, lm)
                fcntl.lockf(lockfile, fcntl.LOCK_UN)
                self._lockmode = None
            self._lockmode = mode
            warning("please write a locking method for platform %r" % sys.platform)
                self._lockmode = None
            self._lockmode = mode

    def _need_readlock(self):
        if self._lockmode not in ['r', 'w']:
            raise BzrError('need read lock on branch, only have %r' % self._lockmode)

    def _need_writelock(self):
        if self._lockmode not in ['w']:
            raise BzrError('need write lock on branch, only have %r' % self._lockmode)
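    # --- Illustrative sketch (not part of the original source) ---------------
    # lock_write/lock_read/unlock above implement a counted, re-entrant lock:
    # the first call takes the on-disk lock on 'branch-lock', nested calls only
    # bump _lock_count, a read lock cannot be upgraded to a write lock, and
    # unlock releases the on-disk lock only when the count drops back to zero.
    # A minimal stand-alone model of that counting behaviour (class and method
    # names here are hypothetical):
    #
    # class CountedLock(object):
    #     def __init__(self):
    #         self._mode = None
    #         self._count = 0
    #
    #     def lock(self, mode):
    #         if self._mode:
    #             if mode == 'w' and self._mode != 'w':
    #                 raise RuntimeError("can't upgrade to a write lock")
    #             self._count += 1
    #         else:
    #             # here the real code takes a WriteLock/ReadLock on disk
    #             self._mode, self._count = mode, 1
    #
    #     def unlock(self):
    #         if not self._mode:
    #             raise RuntimeError("not locked")
    #         if self._count > 1:
    #             self._count -= 1
    #         else:
    #             # here the real code releases the on-disk lock
    #             self._mode, self._count = None, 0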
    def abspath(self, name):

        fmt = self.controlfile('branch-format', 'r').read()
        fmt = fmt.replace('\r\n', '')
        if fmt != BZR_BRANCH_FORMAT:
            raise BzrError('sorry, branch format %r not supported' % fmt,
                           ['use a different bzr version',
                            'or remove the .bzr directory and "bzr init" again'])
            bailout('sorry, branch format %r not supported' % fmt,
                    ['use a different bzr version',
                     'or remove the .bzr directory and "bzr init" again'])

    def read_working_inventory(self):
        """Read the working inventory."""
        self._need_readlock()
        before = time.time()
        # ElementTree does its own conversion from UTF-8, so open in
        inv = Inventory.read_xml(self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time.time() - before))
        inv = Inventory.read_xml(self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time.time() - before))

    def _write_inventory(self, inv):
        """Update the working inventory.
        This puts the files in the Added state, so that they will be
        recorded by the next commit.

            List of paths to add, relative to the base of the tree.

            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do

        TODO: Perhaps return the ids of the files? But then again it
        is easy to retrieve them if they're needed.
        is easy to retrieve them if they're needed.

        TODO: Option to specify file id.

        TODO: Adding a directory should optionally recurse down and
        add all non-ignored children. Perhaps do that in a
        add all non-ignored children. Perhaps do that in a

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        >>> 'foo' in b.unknowns()
        >>> bool(b.inventory.path2id('foo'))
        Traceback (most recent call last):
        BzrError: ('foo is already versioned', [])
        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])

        self._need_writelock()

        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, types.StringTypes):
            assert(ids is None or isinstance(ids, types.StringTypes))
            ids = [None] * len(files)
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f,file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))
                kind = file_kind(fullpath)
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))
            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

                show_status('A', kind, quotefn(f))
            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)

        inv = self.read_working_inventory()
            if is_control_file(f):
                bailout("cannot add control file %s" % quotefn(f))
                bailout("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))
                kind = file_kind(fullpath)
                # maybe something better?
                bailout('cannot add: not a regular file or directory: %s' % quotefn(f))
            if kind != 'file' and kind != 'directory':
                bailout('cannot add: not a regular file or directory: %s' % quotefn(f))

                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

                show_status('A', kind, quotefn(f))
            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)
    def print_file(self, file, revno):
        """Print `file` to stdout."""
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
            raise BzrError("%r is not present in revision %d" % (file, revno))
        tree.print_file(file_id)

        self._need_readlock()
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
            bailout("%r is not present in revision %d" % (file, revno))
        tree.print_file(file_id)
    def remove(self, files, verbose=False):
        """Mark nominated files for removal from the inventory.

        ## TODO: Normalize names
        ## TODO: Remove nested loops; better scalability
        self._need_writelock()

        if isinstance(files, types.StringTypes):

        tree = self.working_tree()
        # do this before any modifications
                raise BzrError("cannot remove unversioned file %s" % quotefn(f))
            mutter("remove inventory entry %s {%s}" % (quotefn(f), fid))
            # having removed it, it must be either ignored or unknown
            if tree.is_ignored(f):
                show_status(new_status, inv[fid].kind, quotefn(f))

        self._write_inventory(inv)

    # FIXME: this doesn't need to be a branch method
    def set_inventory(self, new_inventory_list):
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            inv.add(InventoryEntry(file_id, name, kind, parent))

        tree = self.working_tree()
        # do this before any modifications
                bailout("cannot remove unversioned file %s" % quotefn(f))
            mutter("remove inventory entry %s {%s}" % (quotefn(f), fid))
            # having removed it, it must be either ignored or unknown
            if tree.is_ignored(f):
                show_status(new_status, inv[fid].kind, quotefn(f))

        self._write_inventory(inv)

        return self.working_tree().unknowns()
    def commit(self, message, timestamp=None, timezone=None,
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed. A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory. This should mean at least that there are no
        broken hash pointers. There is no way we can get a snapshot
        of the whole directory at an instant. This would also have to
        be robust against files disappearing, moving, etc. So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.

        self._need_writelock()
        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory. The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision. (These are
        # not present in the working inventory.) We also need to
        # detect missing/deleted files, and remove them from the

        work_inv = self.read_working_inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter(" file is missing, removing from inventory")
                show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files? Seems a
            # waste to store them many times.

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    bailout("entry %r changed kind from %r to %r"
                            % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                    bailout("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                    bailout("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter(' unchanged from previous text_id {%s}' %
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter(' stored with text_id {%s}' % entry.text_id)

                elif (old_ie.name == entry.name
                      and old_ie.parent_id == entry.parent_id):
                show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())
        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       precursor = self.last_patch(),

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        note("committed r%d" % self.revno())
    def append_revision(self, revision_id):
        mutter("add {%s} to revision-history" % revision_id)
        rev_history = self.revision_history()

    def get_revision(self, revision_id):
        """Return the Revision object for a named revision"""
        if not revision_id or not isinstance(revision_id, basestring):
            raise ValueError('invalid revision-id: %r' % revision_id)
        self._need_readlock()
        r = Revision.read_xml(self.revision_store[revision_id])
        assert r.revision_id == revision_id

    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed. At that
        # point, it is probably best *not* to include the signature
        # in the revision hash. Because that lets you re-sign
        # the revision, (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return sha_file(self.revision_store[revision_id])
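    # --- Illustrative sketch (not part of the original source) ---------------
    # sha_file is assumed here to digest the contents of a file-like object; a
    # minimal stand-alone equivalent using hashlib could look like this (the
    # chunked read keeps memory bounded for large stored files):
    #
    # import hashlib
    #
    # def sha_file_sketch(f, chunk_size=65536):
    #     digest = hashlib.sha1()
    #     for chunk in iter(lambda: f.read(chunk_size), b''):
    #         digest.update(chunk)
    #     return digest.hexdigest()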
    def get_inventory(self, inventory_id):
        """Get Inventory object by hash.

        >>> ScratchBranch().revision_history()

        return [l.rstrip('\r\n') for l in
                self.controlfile('revision-history', 'r').readlines()]

    def common_ancestor(self, other, self_revno=None, other_revno=None):

        >>> sb = ScratchBranch(files=['foo', 'foo~'])
        >>> sb.common_ancestor(sb) == (None, None)
        >>> commit.commit(sb, "Committing first revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        >>> clone = sb.clone()
        >>> commit.commit(sb, "Committing second revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        >>> sb.common_ancestor(clone)[0]
        >>> commit.commit(clone, "Committing divergent second revision",
        >>> sb.common_ancestor(clone)[0]
        >>> sb.common_ancestor(clone) == clone.common_ancestor(sb)
        >>> sb.common_ancestor(sb) != clone.common_ancestor(clone)
        >>> clone2 = sb.clone()
        >>> sb.common_ancestor(clone2)[0]
        >>> sb.common_ancestor(clone2, self_revno=1)[0]
        >>> sb.common_ancestor(clone2, other_revno=1)[0]

        my_history = self.revision_history()
        other_history = other.revision_history()
        if self_revno is None:
            self_revno = len(my_history)
        if other_revno is None:
            other_revno = len(other_history)
        indices = range(min((self_revno, other_revno)))
            if my_history[r] == other_history[r]:
                return r+1, my_history[r]

        self._need_readlock()
        return [l.rstrip('\r\n') for l in self.controlfile('revision-history', 'r').readlines()]

    def enum_history(self, direction):
        """Return (revno, revision_id) for history of branch.

        That is equivalent to the number of revisions committed to
        >>> b = ScratchBranch()
        >>> b.commit('no foo')
        return len(self.revision_history())

    def last_patch(self):
        """Return last patch hash, or None if no history.

        >>> ScratchBranch().last_patch() == None
        ph = self.revision_history()
    def missing_revisions(self, other, stop_revision=None):
        If self and other have not diverged, return a list of the revisions
        present in other, but missing from self.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch()
        >>> br2 = ScratchBranch()
        >>> br1.missing_revisions(br2)
        >>> commit(br2, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        >>> br2.missing_revisions(br1)
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        >>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
        >>> br1.missing_revisions(br2)
        >>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
        >>> br1.missing_revisions(br2)
        Traceback (most recent call last):
        DivergedBranches: These branches have diverged.

        self_history = self.revision_history()
        self_len = len(self_history)
        other_history = other.revision_history()
        other_len = len(other_history)
        common_index = min(self_len, other_len) -1
        if common_index >= 0 and \
           self_history[common_index] != other_history[common_index]:
            raise DivergedBranches(self, other)

        if stop_revision is None:
            stop_revision = other_len
        elif stop_revision > other_len:
            raise NoSuchRevision(self, stop_revision)

        return other_history[self_len:stop_revision]
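    # --- Illustrative sketch (not part of the original source) ---------------
    # The divergence test above only needs the two revision-history lists: if
    # the entry at the last index both histories share differs, the branches
    # have diverged; otherwise the missing revisions are simply a tail slice
    # of the other history.  A stand-alone model over plain lists (the name
    # missing_from is hypothetical):
    #
    # def missing_from(self_history, other_history, stop_revision=None):
    #     common_index = min(len(self_history), len(other_history)) - 1
    #     if common_index >= 0 and \
    #        self_history[common_index] != other_history[common_index]:
    #         raise ValueError("histories have diverged")
    #     if stop_revision is None:
    #         stop_revision = len(other_history)
    #     return other_history[len(self_history):stop_revision]
    #
    # # missing_from(['r1'], ['r1', 'r2']) == ['r2']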
    def update_revisions(self, other, stop_revision=None):
        """Pull in all new revisions from other branch.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch(files=['foo', 'bar'])
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1", verbose=False)
        >>> br2 = ScratchBranch()
        >>> br2.update_revisions(br1)
        >>> br2.revision_history()
        >>> br2.update_revisions(br1)
        >>> br1.text_store.total_size() == br2.text_store.total_size()

        from bzrlib.progress import ProgressBar

        pb.update('comparing histories')
        revision_ids = self.missing_revisions(other, stop_revision)
        needed_texts = sets.Set()
        for rev_id in revision_ids:
            pb.update('fetching revision', i, len(revision_ids))
            rev = other.get_revision(rev_id)
            revisions.append(rev)
            inv = other.get_inventory(str(rev.inventory_id))
            for key, entry in inv.iter_entries():
                if entry.text_id is None:
                if entry.text_id not in self.text_store:
                    needed_texts.add(entry.text_id)

        count = self.text_store.copy_multi(other.text_store, needed_texts)
        print "Added %d texts." % count
        inventory_ids = [ f.inventory_id for f in revisions ]
        count = self.inventory_store.copy_multi(other.inventory_store,
        print "Added %d inventories." % count
        revision_ids = [ f.revision_id for f in revisions]
        count = self.revision_store.copy_multi(other.revision_store,
        for revision_id in revision_ids:
            self.append_revision(revision_id)
        print "Added %d revisions." % count

    def commit(self, *args, **kw):
        from bzrlib.commit import commit
        commit(self, *args, **kw)

    def lookup_revision(self, revno):

        This can change the directory or the filename or both.
        self._need_writelock()
        tree = self.working_tree()
        if not tree.has_filename(from_rel):
            bailout("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            bailout("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
            bailout("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            bailout("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            bailout("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter(" file_id {%s}" % file_id)
        mutter(" from_rel %r" % from_rel)
        mutter(" to_rel %r" % to_rel)
        mutter(" to_dir %r" % to_dir)
        mutter(" to_dir_id {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)

        tree = self.working_tree()
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter(" file_id {%s}" % file_id)
        mutter(" from_rel %r" % from_rel)
        mutter(" to_rel %r" % to_rel)
        mutter(" to_dir %r" % to_dir)
        mutter(" to_dir_id {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
            os.rename(from_abs, to_abs)
            raise BzrError("failed to rename %r to %r: %s"
                           % (from_abs, to_abs, e[1]),
                           ["rename rolled back"])

        self._write_inventory(inv)

            os.rename(from_abs, to_abs)
            bailout("failed to rename %r to %r: %s"
                    % (from_abs, to_abs, e[1]),
                    ["rename rolled back"])

        self._write_inventory(inv)
    def move(self, from_paths, to_name):

        Note that to_name is only the last component of the new name;
        this doesn't change the directory.

        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done. But we should have caught most problems.

            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
                os.rename(self.abspath(f), self.abspath(dest_path))
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                               ["rename rolled back"])

        self._write_inventory(inv)
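    # --- Illustrative sketch (not part of the original source) ---------------
    # The comment above notes a race between the existence checks and the
    # actual os.rename calls.  A minimal stand-alone sketch of the pattern used
    # here, re-raising a failed rename with both paths and the OS error text
    # attached (the function name and hint string are illustrative):
    #
    # import os
    #
    # def rename_with_report(from_abs, to_abs):
    #     try:
    #         os.rename(from_abs, to_abs)
    #     except OSError as e:
    #         raise RuntimeError("failed to rename %r to %r: %s"
    #                            % (from_abs, to_abs, e.strerror))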
        self._need_writelock()
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            bailout("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            bailout("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            bailout("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            bailout("destination %r is not a directory" % to_abs)

        to_idpath = Set(inv.get_idpath(to_dir_id))

            if not tree.has_filename(f):
                bailout("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
                bailout("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                bailout("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                bailout("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done. But we should have caught most problems.

            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
                os.rename(self.abspath(f), self.abspath(dest_path))
                bailout("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                        ["rename rolled back"])

        self._write_inventory(inv)