# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

import sys, os, os.path, random, time, sha, sets, types, re, shutil, tempfile
import traceback, socket, fnmatch, difflib
from binascii import hexlify

from bzrlib.inventory import Inventory, InventoryEntry
from bzrlib.tree import Tree, EmptyTree, RevisionTree, WorkingTree
from bzrlib.store import ImmutableStore
from bzrlib.revision import Revision
from bzrlib.textui import show_status
from bzrlib.diff import diff_trees
from bzrlib.trace import mutter, note
from bzrlib.osutils import isdir, quotefn, isfile, uuid, sha_file, username, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from bzrlib.errors import bailout, BzrError


BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"
## TODO: Maybe include checks for common corruption of newlines, etc?


def find_branch(f, **args):
    if f and (f.startswith('http://') or f.startswith('https://')):
        import remotebranch
        return remotebranch.RemoteBranch(f, **args)
    else:
        return Branch(f, **args)
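
# Illustrative usage (not part of the original module).  find_branch()
# dispatches on the URL scheme: http/https locations get a RemoteBranch,
# anything else is treated as a local branch directory.  The paths below
# are hypothetical.
#
#   br = find_branch('/home/user/project')          # local  -> Branch
#   br = find_branch('http://example.com/project')  # remote -> RemoteBranch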


def find_cached_branch(f, cache_root, **args):
    from remotebranch import RemoteBranch
    br = find_branch(f, **args)

    def cacheify(br, store_name):
        from meta_store import CachedStore
        cache_path = os.path.join(cache_root, store_name)
        new_store = CachedStore(getattr(br, store_name), cache_path)
        setattr(br, store_name, new_store)

    if isinstance(br, RemoteBranch):
        cacheify(br, 'inventory_store')
        cacheify(br, 'text_store')
        cacheify(br, 'revision_store')
    return br
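
# Sketch of what the wrapping above does (added for illustration, not in
# the original source; the URL and cache directory are hypothetical).
# Each store attribute of a RemoteBranch is replaced by a CachedStore that
# keeps local copies of fetched objects under cache_root/<store_name>:
#
#   br = find_cached_branch('http://example.com/project', '/tmp/bzr-cache')
#   # br.inventory_store, br.text_store and br.revision_store now read
#   # through the cache instead of hitting the remote location every time.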


def _relpath(base, path):
    """Return path relative to base, or raise exception.

    The path may be either an absolute path or a path relative to the
    current working directory.

    Lifted out of Branch.relpath for ease of testing.

    os.path.commonprefix (python2.4) has a bad bug that it works just
    on string prefixes, assuming that '/u' is a prefix of '/u2'.  This
    avoids that problem."""
    rp = os.path.abspath(path)

    s = []
    head = rp
    while len(head) >= len(base):
        if head == base:
            break
        head, tail = os.path.split(head)
        if tail:
            s.insert(0, tail)
    else:
        from errors import NotBranchError
        raise NotBranchError("path %r is not within branch %r" % (rp, base))

    return os.sep.join(s)
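
# Example of the commonprefix pitfall mentioned in the docstring above
# (added for illustration; not in the original source):
#
#   >>> os.path.commonprefix(['/u2/file', '/u/other'])
#   '/u'
#
# '/u' is not a parent directory of '/u2/file', it is only a common string
# prefix, which is why _relpath compares whole path components instead.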


def find_branch_root(f=None):
    """Find the branch root enclosing f, or pwd.

    f may be a filename or a URL.

    It is not necessary that f exists.

    Basically we keep looking up until we find the control directory or
    run into the root."""


class Branch:
    """Branch holding a history of revisions."""

    def __str__(self):
        return '%s(%r)' % (self.__class__.__name__, self.base)

    __repr__ = __str__


    def lock(self, mode='w'):
        """Lock the on-disk branch, excluding other processes."""
        try:
            import fcntl, errno

            if mode == 'w':
                lm = fcntl.LOCK_EX
                om = os.O_WRONLY | os.O_CREAT
            elif mode == 'r':
                lm = fcntl.LOCK_SH
                om = os.O_RDONLY
            else:
                raise BzrError("invalid locking mode %r" % mode)

            try:
                lockfile = os.open(self.controlfilename('branch-lock'), om)
            except OSError, e:
                if e.errno == errno.ENOENT:
                    # might not exist on branches from <0.0.4
                    self.controlfile('branch-lock', 'w').close()
                    lockfile = os.open(self.controlfilename('branch-lock'), om)
                else:
                    raise

            fcntl.lockf(lockfile, lm)

            def unlock():
                fcntl.lockf(lockfile, fcntl.LOCK_UN)
                os.close(lockfile)
                self._lockmode = None
            self.unlock = unlock
            self._lockmode = mode
        except ImportError:
            warning("please write a locking method for platform %r" % sys.platform)

            def unlock():
                self._lockmode = None
            self.unlock = unlock
            self._lockmode = mode


    def __del__(self):
        if self._lock_mode or self._lock:
            from warnings import warn
            warn("branch %r was not explicitly unlocked" % self)
            self._lock.unlock()


    def lock_write(self):
        if self._lock_mode:
            if self._lock_mode != 'w':
                from errors import LockError
                raise LockError("can't upgrade to a write lock from %r" %
                                self._lock_mode)
            self._lock_count += 1
        else:
            from bzrlib.lock import WriteLock

            self._lock = WriteLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'w'
            self._lock_count = 1


    def lock_read(self):
        if self._lock_mode:
            assert self._lock_mode in ('r', 'w'), \
                   "invalid lock mode %r" % self._lock_mode
            self._lock_count += 1
        else:
            from bzrlib.lock import ReadLock

            self._lock = ReadLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'r'
            self._lock_count = 1


    def _need_readlock(self):
        if self._lockmode not in ['r', 'w']:
            raise BzrError('need read lock on branch, only have %r' % self._lockmode)

    def _need_writelock(self):
        if self._lockmode not in ['w']:
            raise BzrError('need write lock on branch, only have %r' % self._lockmode)


    def unlock(self):
        if not self._lock_mode:
            from errors import LockError
            raise LockError('branch %r is not locked' % (self))

        if self._lock_count > 1:
            self._lock_count -= 1
        else:
            self._lock.unlock()
            self._lock = None
            self._lock_mode = self._lock_count = None
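
    # Illustrative locking pattern (added comment, not in the original
    # source).  lock_read()/lock_write() are re-entrant via _lock_count,
    # so nested calls only take the on-disk lock once:
    #
    #   branch.lock_read()
    #   try:
    #       branch.lock_read()      # nested call just bumps _lock_count
    #       try:
    #           pass                # read-only operations here
    #       finally:
    #           branch.unlock()     # decrements the count
    #   finally:
    #       branch.unlock()         # count reaches zero, lock is released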


    def abspath(self, name):
        """Return absolute filename for something in the branch."""
        return os.path.join(self.base, name)


    def _check_format(self):
        """Check this branch format is supported."""
        fmt = self.controlfile('branch-format', 'r').read()
        fmt = fmt.replace('\r\n', '')
        if fmt != BZR_BRANCH_FORMAT:
            raise BzrError('sorry, branch format %r not supported' % fmt,
                           ['use a different bzr version',
                            'or remove the .bzr directory and "bzr init" again'])


    def read_working_inventory(self):
        """Read the working inventory."""
        from bzrlib.inventory import Inventory
        from bzrlib.xml import unpack_xml
        from time import time

        before = time()
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        inv = unpack_xml(Inventory,
                         self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time() - before))
        return inv
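
    # Note (added for illustration, not in the original source): the working
    # inventory is stored as XML in the branch's control directory, so a
    # hypothetical round trip looks like:
    #
    #   inv = branch.read_working_inventory()
    #   inv.add_path('hello.txt', kind='file', file_id=gen_file_id('hello.txt'))
    #   branch._write_inventory(inv)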


    def _write_inventory(self, inv):
        """Update the working inventory.

        That is to say, the inventory describing changes underway, that
        will be committed to the next revision.
        """


    def add(self, files, verbose=False, ids=None):
        """Make files versioned.

        This puts the files in the Added state, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not (yet) exist.

        TODO: Perhaps return the ids of the files?  But then again it
              is easy to retrieve them if they're needed.

        TODO: Option to specify file id.

        TODO: Adding a directory should optionally recurse down and
              add all non-ignored children.  Perhaps do that in a
              higher-level method.

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add('foo')
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True

        >>> b.add('foo')
        Traceback (most recent call last):
        BzrError: ('foo is already versioned', [])

        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        from bzrlib.textui import show_status

        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, basestring):
            assert(ids is None or isinstance(ids, basestring))
            files = [files]
            if ids is not None:
                ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f,file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            fp = splitpath(f)

            if len(fp) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))

            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if file_id is None:
                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

            if verbose:
                print 'added', quotefn(f)

            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)


    def print_file(self, file, revno):
        """Print `file` to stdout."""
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            raise BzrError("%r is not present in revision %d" % (file, revno))
        tree.print_file(file_id)


    def remove(self, files, verbose=False):
        """Mark nominated files for removal from the inventory.

        This does not remove their text.

        TODO: Should this remove the text or not?  Removing the text
        is the opposite of add.  Removing it is consistent with most
        other tools.  Maybe an option.
        """
        from bzrlib.textui import show_status
        ## TODO: Normalize names
        ## TODO: Remove nested loops; better scalability

        if isinstance(files, basestring):
            files = [files]

        tree = self.working_tree()
        inv = tree.inventory

        # do this before any modifications
        for f in files:
            fid = inv.path2id(f)
            if not fid:
                raise BzrError("cannot remove unversioned file %s" % quotefn(f))
            mutter("remove inventory entry %s {%s}" % (quotefn(f), fid))
            if verbose:
                # having removed it, it must be either ignored or unknown
                if tree.is_ignored(f):
                    new_status = 'I'
                else:
                    new_status = '?'
                show_status(new_status, inv[fid].kind, quotefn(f))
            del inv[fid]

        self._write_inventory(inv)


    # FIXME: this doesn't need to be a branch method
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import Inventory, InventoryEntry
        inv = Inventory()
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            if name == "":
                continue
            inv.add(InventoryEntry(file_id, name, kind, parent))
        self._write_inventory(inv)


    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned
        or control files or ignored.
        """
        return self.working_tree().unknowns()
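
    # Example of the list shape set_inventory() expects (illustrative only;
    # the ids shown are hypothetical, and ROOT_PARENT_ID stands for the id
    # of the tree root).  Each element is a (path, file_id, parent_id, kind)
    # tuple, matching the InventoryEntry constructor used above:
    #
    #   branch.set_inventory([
    #       ('src',        'src-id',  ROOT_PARENT_ID, 'directory'),
    #       ('src/main.c', 'main-id', 'src-id',       'file'),
    #   ])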


    def commit(self, message, timestamp=None, timezone=None,
               committer=None,
               verbose=False):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed.  A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory.  This should mean at least that there are no
        broken hash pointers.  There is no way we can get a snapshot
        of the whole directory at an instant.  This would also have to
        be robust against files disappearing, moving, etc.  So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
        """
        self._need_writelock()

        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory.  The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision.  (These are
        # not present in the working inventory.)  We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        inv = Inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []

        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter(" file is missing, removing from inventory")
                if verbose:
                    show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files?  Seems a
            # waste to store them many times.

            inv.add(entry)

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    bailout("entry %r changed kind from %r to %r"
                            % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    bailout("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    bailout("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter(' unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter(' stored with text_id {%s}' % entry.text_id)
                    if verbose:
                        if not old_ie:
                            state = 'A'
                        elif (old_ie.name == entry.name
                              and old_ie.parent_id == entry.parent_id):
                            state = 'M'
                        else:
                            state = 'R'
                        show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # have to check they are still present.
            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        inv_tmp.seek(0)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

        if timezone == None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       precursor = self.last_patch(),
                       message=message,
                       inventory_id=inv_id,
                       revision_id=rev_id)

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        rev_tmp.seek(0)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        if verbose:
            note("committed r%d" % self.revno())


    def append_revision(self, revision_id):
        from bzrlib.atomicfile import AtomicFile

        mutter("add {%s} to revision-history" % revision_id)
        rev_history = self.revision_history() + [revision_id]

        f = AtomicFile(self.controlfilename('revision-history'))
        try:
            for rev_id in rev_history:
                f.write(rev_id + '\n')
            f.commit()
        finally:
            f.close()
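
    # Note on AtomicFile (added for illustration, not in the original
    # source): the revision-history file is replaced in one step, roughly:
    #
    #   f = AtomicFile(path)   # writes go to a temporary file beside path
    #   f.write(data)
    #   f.commit()             # the temporary file is renamed over path
    #   f.close()              # without commit(), the temporary file is discarded
    #
    # so readers never see a half-written revision-history.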


    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        from bzrlib.revision import Revision
        from bzrlib.xml import unpack_xml

        if not revision_id or not isinstance(revision_id, basestring):
            raise ValueError('invalid revision-id: %r' % revision_id)
        r = unpack_xml(Revision, self.revision_store[revision_id])
        assert r.revision_id == revision_id
        return r


    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed.  At that
        # point, it is probably best *not* to include the signature
        # in the revision hash.  Because that lets you re-sign
        # the revision, (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return sha_file(self.revision_store[revision_id])


    def get_inventory(self, inventory_id):
        """Get Inventory object by hash."""
        from bzrlib.inventory import Inventory
        from bzrlib.xml import unpack_xml

        return unpack_xml(Inventory, self.inventory_store[inventory_id])


    def revision_history(self):
        """Return sequence of revision ids on this branch.

        >>> ScratchBranch().revision_history()
        []
        """
        return [l.rstrip('\r\n') for l in
                self.controlfile('revision-history', 'r').readlines()]


    def common_ancestor(self, other, self_revno=None, other_revno=None):
        """Return (revno, revision_id) of the latest revision shared by
        self and other, or (None, None) if there is none.

        >>> import commit
        >>> sb = ScratchBranch(files=['foo', 'foo~'])
        >>> sb.common_ancestor(sb) == (None, None)
        True
        >>> commit.commit(sb, "Committing first revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        1
        >>> clone = sb.clone()
        >>> commit.commit(sb, "Committing second revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        2
        >>> sb.common_ancestor(clone)[0]
        1
        >>> commit.commit(clone, "Committing divergent second revision",
        ...               verbose=False)
        >>> sb.common_ancestor(clone)[0]
        1
        >>> sb.common_ancestor(clone) == clone.common_ancestor(sb)
        True
        >>> sb.common_ancestor(sb) != clone.common_ancestor(clone)
        True
        >>> clone2 = sb.clone()
        >>> sb.common_ancestor(clone2)[0]
        2
        >>> sb.common_ancestor(clone2, self_revno=1)[0]
        1
        >>> sb.common_ancestor(clone2, other_revno=1)[0]
        1
        """
        my_history = self.revision_history()
        other_history = other.revision_history()
        if self_revno is None:
            self_revno = len(my_history)
        if other_revno is None:
            other_revno = len(other_history)
        indices = range(min((self_revno, other_revno)))
        indices.reverse()
        for r in indices:
            if my_history[r] == other_history[r]:
                return r+1, my_history[r]
        return None, None


    def enum_history(self, direction):
        """Return (revno, revision_id) for history of branch."""


    def revno(self):
        """Return current revision number for this branch.

        That is equivalent to the number of revisions committed to
        this branch.

        >>> b = ScratchBranch()
        >>> b.revno()
        0
        >>> b.commit('no foo')
        >>> b.revno()
        1
        """
        return len(self.revision_history())


    def last_patch(self):
        """Return last patch hash, or None if no history.

        >>> ScratchBranch().last_patch() == None
        True
        """
        ph = self.revision_history()
        if ph:
            return ph[-1]
        else:
            return None


    def missing_revisions(self, other, stop_revision=None):
        """Return the revisions in other that are missing from self.

        If self and other have not diverged, return a list of the revisions
        present in other, but missing from self.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch()
        >>> br2 = ScratchBranch()
        >>> br1.missing_revisions(br2)
        []
        >>> commit(br2, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        ['REVISION-ID-1']
        >>> br2.missing_revisions(br1)
        []
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        []
        >>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
        >>> br1.missing_revisions(br2)
        ['REVISION-ID-2A']
        >>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
        >>> br1.missing_revisions(br2)
        Traceback (most recent call last):
        DivergedBranches: These branches have diverged.
        """
        self_history = self.revision_history()
        self_len = len(self_history)
        other_history = other.revision_history()
        other_len = len(other_history)
        common_index = min(self_len, other_len) - 1
        if common_index >= 0 and \
           self_history[common_index] != other_history[common_index]:
            raise DivergedBranches(self, other)

        if stop_revision is None:
            stop_revision = other_len
        elif stop_revision > other_len:
            raise NoSuchRevision(self, stop_revision)

        return other_history[self_len:stop_revision]


    def update_revisions(self, other, stop_revision=None):
        """Pull in all new revisions from other branch.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch(files=['foo', 'bar'])
        >>> br1.add('foo')
        >>> br1.add('bar')
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1", verbose=False)
        >>> br2 = ScratchBranch()
        >>> br2.update_revisions(br1)
        Added 2 texts.
        Added 1 inventories.
        Added 1 revisions.
        >>> br2.revision_history()
        ['REVISION-ID-1']
        >>> br2.update_revisions(br1)
        Added 0 texts.
        Added 0 inventories.
        Added 0 revisions.
        >>> br1.text_store.total_size() == br2.text_store.total_size()
        True
        """
        from bzrlib.progress import ProgressBar
        from sets import Set as set

        pb = ProgressBar()
        pb.update('comparing histories')
        revision_ids = self.missing_revisions(other, stop_revision)

        if hasattr(other.revision_store, "prefetch"):
            other.revision_store.prefetch(revision_ids)
        if hasattr(other.inventory_store, "prefetch"):
            inventory_ids = [other.get_revision(r).inventory_id
                             for r in revision_ids]
            other.inventory_store.prefetch(inventory_ids)

        revisions = []
        needed_texts = set()
        i = 0
        for rev_id in revision_ids:
            i += 1
            pb.update('fetching revision', i, len(revision_ids))
            rev = other.get_revision(rev_id)
            revisions.append(rev)
            inv = other.get_inventory(str(rev.inventory_id))
            for key, entry in inv.iter_entries():
                if entry.text_id is None:
                    continue
                if entry.text_id not in self.text_store:
                    needed_texts.add(entry.text_id)

        pb.clear()

        count = self.text_store.copy_multi(other.text_store, needed_texts)
        print "Added %d texts." % count
        inventory_ids = [ f.inventory_id for f in revisions ]
        count = self.inventory_store.copy_multi(other.inventory_store,
                                                inventory_ids)
        print "Added %d inventories." % count
        revision_ids = [ f.revision_id for f in revisions]
        count = self.revision_store.copy_multi(other.revision_store,
                                               revision_ids)
        for revision_id in revision_ids:
            self.append_revision(revision_id)
        print "Added %d revisions." % count


    def commit(self, *args, **kw):
        from bzrlib.commit import commit
        commit(self, *args, **kw)
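
    # Rough shape of a pull (illustrative, not in the original source):
    # update_revisions() first asks missing_revisions() which revision ids
    # the other branch has that this one lacks, copies the needed texts,
    # inventories and revisions between the stores, and finally appends
    # the new ids to revision-history, e.g.
    #
    #   local = find_branch('.')
    #   remote = find_branch('http://example.com/project')  # hypothetical URL
    #   local.update_revisions(remote)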


    def lookup_revision(self, revno):
        """Return revision hash for revision number."""
        if revno == 0:
            return None
        try:
            # list is 0-based; revisions are 1-based
            return self.revision_history()[revno-1]
        except IndexError:
            raise BzrError("no such revision %s" % revno)


    def rename_one(self, from_rel, to_rel):
        """Rename one file.

        This can change the directory or the filename or both.
        """
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter("  file_id    {%s}" % file_id)
        mutter("  from_rel   %r" % from_rel)
        mutter("  to_rel     %r" % to_rel)
        mutter("  to_dir     %r" % to_dir)
        mutter("  to_dir_id  {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
        try:
            os.rename(from_abs, to_abs)
        except OSError, e:
            raise BzrError("failed to rename %r to %r: %s"
                           % (from_abs, to_abs, e[1]),
                           ["rename rolled back"])

        self._write_inventory(inv)


    def move(self, from_paths, to_name):
        """Rename files to be under to_name, a versioned directory.

        Note that to_name is only the last component of the new name;
        this doesn't change the directory.
        """
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done.  But we should have caught most problems.

        for f in from_paths:
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                               ["rename rolled back"])

        self._write_inventory(inv)


    def show_status(self, show_all=False, file_list=None):
        """Display single-line status for non-ignored working files.

        The list is shown sorted in order by file name.

        >>> b = ScratchBranch(files=['foo', 'foo~'])
        >>> b.show_status()
        ?       foo
        >>> b.add('foo')
        >>> b.show_status()
        A       foo
        >>> b.commit("add foo")
        >>> b.show_status()
        >>> os.unlink(b.abspath('foo'))
        >>> b.show_status()
        D       foo
        """
        self._need_readlock()

        # We have to build everything into a list first so that it can be
        # sorted by name, incorporating all the different sources.

        # FIXME: Rather than getting things in random order and then sorting,
        # just step through in order.

        # Interesting case: the old ID for a file has been removed,
        # but a new file has been created under that name.

        old = self.basis_tree()
        new = self.working_tree()

        items = diff_trees(old, new)
        # We want to filter out only if any file was provided in the file_list.
        if isinstance(file_list, list) and len(file_list):
            items = [item for item in items if item[3] in file_list]

        for fs, fid, oldname, newname, kind in items:
            if fs == 'R':
                show_status(fs, kind,
                            oldname + ' => ' + newname)
            elif fs == 'A' or fs == 'M':
                show_status(fs, kind, newname)
            elif fs == 'D':
                show_status(fs, kind, oldname)
            elif fs == '.':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == 'I':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == '?':
                show_status(fs, kind, newname)
            else:
                bailout("weird file state %r" % ((fs, fid),))


    def revert(self, filenames, old_tree=None, backups=True):
        """Restore selected files to the versions from a previous tree.

        backups
            If true (default) backups are made of files before
            they're overwritten.
        """
        from bzrlib.errors import NotVersionedError, BzrError
        from bzrlib.atomicfile import AtomicFile
        from bzrlib.osutils import backup_file

        inv = self.read_working_inventory()
        if old_tree is None:
            old_tree = self.basis_tree()
        old_inv = old_tree.inventory

        nids = []
        for fn in filenames:
            file_id = inv.path2id(fn)
            if file_id is None:
                raise NotVersionedError("not a versioned file", fn)
            if not old_inv.has_id(file_id):
                raise BzrError("file not present in old tree", fn, file_id)
            nids.append((fn, file_id))

        # TODO: Rename back if it was previously at a different location

        # TODO: If given a directory, restore the entire contents from
        # the previous version.

        # TODO: Make a backup to a temporary file.

        # TODO: If the file previously didn't exist, delete it?
        for fn, file_id in nids:
            if backups:
                backup_file(fn)

            f = AtomicFile(fn, 'wb')
            try:
                f.write(old_tree.get_file(file_id).read())
                f.commit()
            finally:
                f.close()
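
    # Illustrative use of revert() (added comment, not in the original
    # source; the file name is hypothetical):
    #
    #   branch.revert(['hello.txt'])
    #
    # restores hello.txt to its content in the basis tree, taking a backup
    # of the working copy first because backups defaults to True.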


    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.
        """
        cfn = self.controlfilename('pending-merges')
        if not os.path.exists(cfn):
            return []
        p = []
        for l in self.controlfile('pending-merges', 'r').readlines():
            p.append(l.rstrip('\n'))
        return p


    def add_pending_merge(self, revision_id):
        from bzrlib.revision import validate_revision_id

        validate_revision_id(revision_id)

        p = self.pending_merges()
        if revision_id in p:
            return
        p.append(revision_id)
        self.set_pending_merges(p)


    def set_pending_merges(self, rev_list):
        from bzrlib.atomicfile import AtomicFile

        f = AtomicFile(self.controlfilename('pending-merges'))
        try:
            for l in rev_list:
                f.write(l + '\n')
            f.commit()
        finally:
            f.close()
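
    # Format note (added for illustration): the pending-merges control file
    # read and written above holds one revision id per line, so after
    #
    #   branch.add_pending_merge('some-revision-id')   # hypothetical id
    #
    # the file contains that id on its own line and pending_merges()
    # returns ['some-revision-id'].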


class ScratchBranch(Branch):