# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA


import sys, os, os.path, random, time, sha, sets, types, re, shutil, tempfile
import traceback, socket, fnmatch, difflib
from binascii import hexlify

from bzrlib.store import ImmutableStore
from bzrlib.trace import mutter, note
from bzrlib.tree import Tree, EmptyTree, RevisionTree
from bzrlib.inventory import InventoryEntry, Inventory
from bzrlib.osutils import isdir, quotefn, isfile, uuid, sha_file, username, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from bzrlib.revision import Revision
from bzrlib.errors import bailout, BzrError
from bzrlib.textui import show_status
from bzrlib.diff import diff_trees

BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"


## TODO: Maybe include checks for common corruption of newlines, etc?
        return remotebranch.RemoteBranch(f, **args)
    else:
        return Branch(f, **args)


def find_cached_branch(f, cache_root, **args):
    from remotebranch import RemoteBranch
    br = find_branch(f, **args)
    def cacheify(br, store_name):
        from meta_store import CachedStore
        cache_path = os.path.join(cache_root, store_name)
        new_store = CachedStore(getattr(br, store_name), cache_path)
        setattr(br, store_name, new_store)
    if isinstance(br, RemoteBranch):
        cacheify(br, 'inventory_store')
        cacheify(br, 'text_store')
        cacheify(br, 'revision_store')
    return br

def _relpath(base, path):
    """Return path relative to base, or raise exception.

    The path may be either an absolute path or a path relative to the
    current working directory.

    Lifted out of Branch.relpath for ease of testing.

    os.path.commonprefix (python2.4) has a bad bug that it works just
    on string prefixes, assuming that '/u' is a prefix of '/u2'.  This
    avoids that problem."""
    rp = os.path.abspath(path)

    while len(head) >= len(base):
        head, tail = os.path.split(head)

    from errors import NotBranchError
    raise NotBranchError("path %r is not within branch %r" % (rp, base))
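

# Illustrative sketch, not from the original source: the _relpath fragment
# above has lost its loop bookkeeping.  A self-contained version of the same
# walk (assuming `base` is an absolute branch-root path) could look like:
#
#     def _relpath_sketch(base, path):
#         rp = os.path.abspath(path)
#         parts = []
#         head = rp
#         while len(head) >= len(base):
#             if head == base:
#                 return os.sep.join(parts)
#             head, tail = os.path.split(head)
#             if tail:
#                 parts.insert(0, tail)
#         raise ValueError("path %r is not within branch %r" % (rp, base))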

def find_branch_root(f=None):


    __repr__ = __str__

    def lock(self, mode='w'):
        """Lock the on-disk branch, excluding other processes."""
        om = os.O_WRONLY | os.O_CREAT
        raise BzrError("invalid locking mode %r" % mode)

        lockfile = os.open(self.controlfilename('branch-lock'), om)
        if e.errno == errno.ENOENT:
            # might not exist on branches from <0.0.4
            self.controlfile('branch-lock', 'w').close()
            lockfile = os.open(self.controlfilename('branch-lock'), om)
        if self._lock_mode or self._lock:
            from warnings import warn
            warn("branch %r was not explicitly unlocked" % self)

    def lock_write(self):
        if self._lock_mode != 'w':
            from errors import LockError
            raise LockError("can't upgrade to a write lock from %r" %
                            self._lock_mode)
        self._lock_count += 1

        from bzrlib.lock import WriteLock
        self._lock = WriteLock(self.controlfilename('branch-lock'))
        self._lock_mode = 'w'

    def lock_read(self):
        assert self._lock_mode in ('r', 'w'), \
               "invalid lock mode %r" % self._lock_mode
        self._lock_count += 1

        from bzrlib.lock import ReadLock
        self._lock = ReadLock(self.controlfilename('branch-lock'))
        self._lock_mode = 'r'
            fcntl.lockf(lockfile, lm)

            fcntl.lockf(lockfile, fcntl.LOCK_UN)
            self._lockmode = None

            self._lockmode = mode

        warning("please write a locking method for platform %r" % sys.platform)

        self._lockmode = None

        self._lockmode = mode


    def _need_readlock(self):
        if self._lockmode not in ['r', 'w']:
            raise BzrError('need read lock on branch, only have %r' % self._lockmode)

    def _need_writelock(self):
        if self._lockmode not in ['w']:
            raise BzrError('need write lock on branch, only have %r' % self._lockmode)
        if not self._lock_mode:
            from errors import LockError
            raise LockError('branch %r is not locked' % (self))

        if self._lock_count > 1:
            self._lock_count -= 1
        else:
            self._lock_mode = self._lock_count = None
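
    # Illustrative usage sketch, not from the original source: the lock
    # methods above are reference-counted, so nested acquisitions only bump
    # a counter and every lock_read()/lock_write() must be paired with a
    # matching unlock():
    #
    #     b = find_branch('.')
    #     b.lock_write()
    #     try:
    #         b.lock_read()          # nested: just increments the count
    #         try:
    #             pass               # read and modify the branch here
    #         finally:
    #             b.unlock()
    #     finally:
    #         b.unlock()             # count reaches zero; lock released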

    def abspath(self, name):

        fmt = self.controlfile('branch-format', 'r').read()
        fmt = fmt.replace('\r\n', '')
        if fmt != BZR_BRANCH_FORMAT:
            raise BzrError('sorry, branch format %r not supported' % fmt,
                           ['use a different bzr version',
                            'or remove the .bzr directory and "bzr init" again'])

    def read_working_inventory(self):
        """Read the working inventory."""
        from bzrlib.inventory import Inventory
        from bzrlib.xml import unpack_xml
        from time import time

        self._need_readlock()
        before = time()
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        inv = unpack_xml(Inventory,
                         self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time() - before))

    def _write_inventory(self, inv):
        """Update the working inventory."""

    def add(self, files, verbose=False, ids=None):
        """Make files versioned.

        This puts the files in the Added state, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not (yet) exist.

        TODO: Perhaps return the ids of the files?  But then again it
              is easy to retrieve them if they're needed.

        TODO: Option to specify file id.

        TODO: Adding a directory should optionally recurse down and
              add all non-ignored children.  Perhaps do that in a
              higher-level method.

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add('foo')
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True
        >>> b.add('foo')
        Traceback (most recent call last):
        BzrError: ('foo is already versioned', [])
        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        self._need_writelock()
        from bzrlib.textui import show_status

        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, basestring):
            assert(ids is None or isinstance(ids, basestring))
            files = [files]
            ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f,file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            fp = splitpath(f)

            if len(fp) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))

            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if file_id is None:
                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

            if verbose:
                show_status('A', kind, quotefn(f))

            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)

    def print_file(self, file, revno):
        """Print `file` to stdout."""
        self._need_readlock()
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            raise BzrError("%r is not present in revision %d" % (file, revno))
        tree.print_file(file_id)

    def remove(self, files, verbose=False):
        """Mark nominated files for removal from the inventory.

        is the opposite of add.  Removing it is consistent with most
        other tools.  Maybe an option.
        """
        from bzrlib.textui import show_status
        ## TODO: Normalize names
        ## TODO: Remove nested loops; better scalability
        self._need_writelock()

        if isinstance(files, basestring):
            files = [files]

        tree = self.working_tree()
        inv = tree.inventory

        # do this before any modifications
        for f in files:
            fid = inv.path2id(f)
            if not fid:
                raise BzrError("cannot remove unversioned file %s" % quotefn(f))
            mutter("remove inventory entry %s {%s}" % (quotefn(f), fid))
            if verbose:
                # having removed it, it must be either ignored or unknown
                if tree.is_ignored(f):
                    new_status = 'I'
                else:
                    new_status = '?'
                show_status(new_status, inv[fid].kind, quotefn(f))
            del inv[fid]

        self._write_inventory(inv)

    # FIXME: this doesn't need to be a branch method
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import Inventory, InventoryEntry
        inv = Inventory()
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            inv.add(InventoryEntry(file_id, name, kind, parent))
        self._write_inventory(inv)

        return self.working_tree().unknowns()

    def commit(self, message, timestamp=None, timezone=None,
               committer=None, verbose=False):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed.  A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory.  This should mean at least that there are no
        broken hash pointers.  There is no way we can get a snapshot
        of the whole directory at an instant.  This would also have to
        be robust against files disappearing, moving, etc.  So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
        """
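
        # Usage sketch, not from the original source: a caller typically
        # just does
        #
        #     b = find_branch('.')
        #     b.add(['hello.c'])
        #     b.commit("add hello.c", verbose=True)
        #
        # and the steps below turn that working-tree state into a new
        # revision: file texts go into text_store, the new inventory XML
        # into inventory_store, and the Revision XML into revision_store,
        # after which the new revision id is appended to revision-history.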
        self._need_writelock()

        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory.  The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision.  (These are
        # not present in the working inventory.)  We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        inv = Inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []

        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter("    file is missing, removing from inventory")
                show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files?  Seems a
            # waste to store them many times.

            inv.add(entry)

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    bailout("entry %r changed kind from %r to %r"
                            % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    bailout("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    bailout("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()
                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter('    unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter('    stored with text_id {%s}' % entry.text_id)

                    if not old_ie:
                        state = 'A'
                    elif (old_ie.name == entry.name
                          and old_ie.parent_id == entry.parent_id):
                        state = 'M'
                    else:
                        state = 'R'

                    show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

        if timezone == None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       message=message,
                       inventory_id=inv_id,
                       precursor = self.last_patch(),
                       revision_id=rev_id)

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        note("committed r%d" % self.revno())

    def append_revision(self, revision_id):
        from bzrlib.atomicfile import AtomicFile

        mutter("add {%s} to revision-history" % revision_id)
        rev_history = self.revision_history() + [revision_id]

        f = AtomicFile(self.controlfilename('revision-history'))
        try:
            for rev_id in rev_history:
                f.write(rev_id + '\n')
            f.commit()
        finally:
            f.close()
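
    # Sketch of the underlying idea, not from the original source: an
    # "atomic file" writes into a temporary sibling and only renames it over
    # the real file on commit, so readers never see a half-written
    # revision-history.  Ignoring the real AtomicFile's details, the pattern
    # is roughly:
    #
    #     tmp = path + '.tmp'
    #     f = file(tmp, 'wt')
    #     try:
    #         for rev_id in rev_history:
    #             f.write(rev_id + '\n')
    #         f.close()
    #         os.rename(tmp, path)        # the atomic step
    #     except:
    #         f.close()
    #         os.remove(tmp)              # abort: leave the old file alone
    #         raise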

    def get_revision(self, revision_id):
        """Return the Revision object for a named revision"""
        from bzrlib.revision import Revision
        from bzrlib.xml import unpack_xml

        self._need_readlock()
        if not revision_id or not isinstance(revision_id, basestring):
            raise ValueError('invalid revision-id: %r' % revision_id)
        r = unpack_xml(Revision, self.revision_store[revision_id])
        assert r.revision_id == revision_id
        return r

    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed. At that
        # point, it is probably best *not* to include the signature
        # in the revision hash. Because that lets you re-sign
        # the revision, (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return sha_file(self.revision_store[revision_id])
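
    # Illustrative sketch, not from the original source: sha_file() above
    # just hashes whatever byte stream the store hands back, so the same
    # value could be computed directly from the stored revision XML, e.g.:
    #
    #     import sha
    #     def revision_sha1_sketch(branch, revision_id):
    #         text = branch.revision_store[revision_id].read()
    #         return sha.new(text).hexdigest()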

    def get_inventory(self, inventory_id):

    def revision_history(self):
        """Return sequence of revision hashes on this branch.

        >>> ScratchBranch().revision_history()
        []
        """
        self._need_readlock()
        return [l.rstrip('\r\n') for l in
                self.controlfile('revision-history', 'r').readlines()]

    def common_ancestor(self, other, self_revno=None, other_revno=None):
        """
        >>> sb = ScratchBranch(files=['foo', 'foo~'])
        >>> sb.common_ancestor(sb) == (None, None)
        True
        >>> commit.commit(sb, "Committing first revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        1
        >>> clone = sb.clone()
        >>> commit.commit(sb, "Committing second revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        2
        >>> sb.common_ancestor(clone)[0]
        1
        >>> commit.commit(clone, "Committing divergent second revision",
        ...               verbose=False)
        >>> sb.common_ancestor(clone)[0]
        1
        >>> sb.common_ancestor(clone) == clone.common_ancestor(sb)
        True
        >>> sb.common_ancestor(sb) != clone.common_ancestor(clone)
        True
        >>> clone2 = sb.clone()
        >>> sb.common_ancestor(clone2)[0]
        2
        >>> sb.common_ancestor(clone2, self_revno=1)[0]
        1
        >>> sb.common_ancestor(clone2, other_revno=1)[0]
        1
        """
        my_history = self.revision_history()
        other_history = other.revision_history()
        if self_revno is None:
            self_revno = len(my_history)
        if other_revno is None:
            other_revno = len(other_history)
        indices = range(min((self_revno, other_revno)))
        indices.reverse()
        for r in indices:
            if my_history[r] == other_history[r]:
                return r+1, my_history[r]
        return None, None

    def enum_history(self, direction):
        """Return (revno, revision_id) for history of branch."""

    def revno(self):
        """Return current revision number for this branch.

        That is equivalent to the number of revisions committed to
        this branch.

        >>> b = ScratchBranch()
        >>> b.revno()
        0
        >>> b.commit('no foo')
        >>> b.revno()
        1
        """
        return len(self.revision_history())

    def last_patch(self):
        """Return last patch hash, or None if no history.

        >>> ScratchBranch().last_patch() == None
        True
        """
        ph = self.revision_history()
        if ph:
            return ph[-1]
        else:
            return None

    def missing_revisions(self, other, stop_revision=None):
        """
        If self and other have not diverged, return a list of the revisions
        present in other, but missing from self.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch()
        >>> br2 = ScratchBranch()
        >>> br1.missing_revisions(br2)
        >>> commit(br2, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        >>> br2.missing_revisions(br1)
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        >>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
        >>> br1.missing_revisions(br2)
        >>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
        >>> br1.missing_revisions(br2)
        Traceback (most recent call last):
        DivergedBranches: These branches have diverged.
        """
        self_history = self.revision_history()
        self_len = len(self_history)
        other_history = other.revision_history()
        other_len = len(other_history)
        common_index = min(self_len, other_len) - 1
        if common_index >= 0 and \
           self_history[common_index] != other_history[common_index]:
            raise DivergedBranches(self, other)

        if stop_revision is None:
            stop_revision = other_len
        elif stop_revision > other_len:
            raise NoSuchRevision(self, stop_revision)

        return other_history[self_len:stop_revision]

    def update_revisions(self, other, stop_revision=None):
        """Pull in all new revisions from other branch.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch(files=['foo', 'bar'])
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1", verbose=False)
        >>> br2 = ScratchBranch()
        >>> br2.update_revisions(br1)
        >>> br2.revision_history()
        >>> br2.update_revisions(br1)
        >>> br1.text_store.total_size() == br2.text_store.total_size()
        """
        from bzrlib.progress import ProgressBar

        from sets import Set as set

        pb = ProgressBar()
        pb.update('comparing histories')
        revision_ids = self.missing_revisions(other, stop_revision)

        if hasattr(other.revision_store, "prefetch"):
            other.revision_store.prefetch(revision_ids)
        if hasattr(other.inventory_store, "prefetch"):
            inventory_ids = [other.get_revision(r).inventory_id
                             for r in revision_ids]
            other.inventory_store.prefetch(inventory_ids)

        revisions = []
        needed_texts = set()
        i = 0
        for rev_id in revision_ids:
            i += 1
            pb.update('fetching revision', i, len(revision_ids))
            rev = other.get_revision(rev_id)
            revisions.append(rev)
            inv = other.get_inventory(str(rev.inventory_id))
            for key, entry in inv.iter_entries():
                if entry.text_id is None:
                    continue
                if entry.text_id not in self.text_store:
                    needed_texts.add(entry.text_id)

        count = self.text_store.copy_multi(other.text_store, needed_texts)
        print "Added %d texts." % count
        inventory_ids = [ f.inventory_id for f in revisions ]
        count = self.inventory_store.copy_multi(other.inventory_store,
                                                inventory_ids)
        print "Added %d inventories." % count
        revision_ids = [ f.revision_id for f in revisions]
        count = self.revision_store.copy_multi(other.revision_store,
                                               revision_ids)
        for revision_id in revision_ids:
            self.append_revision(revision_id)
        print "Added %d revisions." % count

    def commit(self, *args, **kw):
        from bzrlib.commit import commit
        commit(self, *args, **kw)

    def lookup_revision(self, revno):

    def rename_one(self, from_rel, to_rel):
        """Rename one file.

        This can change the directory or the filename or both.
        """
        self._need_writelock()
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter("  file_id    {%s}" % file_id)
        mutter("  from_rel   %r" % from_rel)
        mutter("  to_rel     %r" % to_rel)
        mutter("  to_dir     %r" % to_dir)
        mutter("  to_dir_id  {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
        try:
            os.rename(from_abs, to_abs)
        except OSError, e:
            raise BzrError("failed to rename %r to %r: %s"
                           % (from_abs, to_abs, e[1]),
                           ["rename rolled back"])

        self._write_inventory(inv)

    def move(self, from_paths, to_name):
        """Rename files.

        Note that to_name is only the last component of the new name;
        this doesn't change the directory.
        """
        self._need_writelock()
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done.  But we should have caught most problems.

        for f in from_paths:
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                               ["rename rolled back"])

        self._write_inventory(inv)

    def revert(self, filenames, old_tree=None, backups=True):
        """Restore selected files to the versions from a previous tree.

        backups
            If true (default) backups are made of files before
            they are changed.
        """
        from bzrlib.errors import NotVersionedError, BzrError
        from bzrlib.atomicfile import AtomicFile
        from bzrlib.osutils import backup_file

        inv = self.read_working_inventory()
        if old_tree is None:
            old_tree = self.basis_tree()
        old_inv = old_tree.inventory

        nids = []
        for fn in filenames:
            file_id = inv.path2id(fn)
            if not file_id:
                raise NotVersionedError("not a versioned file", fn)
            if not old_inv.has_id(file_id):
                raise BzrError("file not present in old tree", fn, file_id)
            nids.append((fn, file_id))

        # TODO: Rename back if it was previously at a different location

        # TODO: If given a directory, restore the entire contents from
        # the previous version.

        # TODO: Make a backup to a temporary file.

        # TODO: If the file previously didn't exist, delete it?
        for fn, file_id in nids:
            if backups:
                backup_file(fn)

            f = AtomicFile(fn, 'wb')
            try:
                f.write(old_tree.get_file(file_id).read())
                f.commit()
            finally:
                f.close()
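
    # Usage sketch, not from the original source: revert() restores working
    # files from a previous tree (the basis tree unless another is given),
    # optionally keeping a backup copy of the overwritten file:
    #
    #     b = find_branch('.')
    #     b.revert(['hello.c'])                  # back to the last commit
    #     b.revert(['hello.c'],
    #              old_tree=b.revision_tree(b.lookup_revision(1)),
    #              backups=False)                # back to revision 1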

    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.
        """
        cfn = self.controlfilename('pending-merges')
        if not os.path.exists(cfn):
            return []
        p = []
        for l in self.controlfile('pending-merges', 'r').readlines():
            p.append(l.rstrip('\n'))
        return p
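
    # Illustrative sketch, not from the original source: pending merges are
    # stored one revision id per line in the 'pending-merges' control file,
    # so recording one by hand would amount to:
    #
    #     b = find_branch('.')
    #     merged = b.pending_merges()
    #     merged.append('some-revision-id')      # hypothetical id
    #     b.set_pending_merges(merged)
    #
    # which is what add_pending_merge() below does after validating the id.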

    def add_pending_merge(self, revision_id):
        from bzrlib.revision import validate_revision_id

        validate_revision_id(revision_id)

        p = self.pending_merges()
        if revision_id in p:
            return
        p.append(revision_id)
        self.set_pending_merges(p)

    def set_pending_merges(self, rev_list):
        from bzrlib.atomicfile import AtomicFile

        f = AtomicFile(self.controlfilename('pending-merges'))