# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

import sys, os, os.path, random, time, sha, sets, types, re, shutil, tempfile
import traceback, socket, fnmatch, difflib
from binascii import hexlify

from bzrlib.trace import mutter, note
from bzrlib.osutils import isdir, quotefn, isfile, uuid, sha_file, username, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from bzrlib.errors import bailout, BzrError
from bzrlib.inventory import InventoryEntry, Inventory
from bzrlib.tree import Tree, EmptyTree, RevisionTree, WorkingTree
from bzrlib.store import ImmutableStore
from bzrlib.revision import Revision
from bzrlib.textui import show_status
from bzrlib.diff import diff_trees

BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"

## TODO: Maybe include checks for common corruption of newlines, etc?
def find_branch(f, **args):
    if f and (f.startswith('http://') or f.startswith('https://')):
        import remotebranch
        return remotebranch.RemoteBranch(f, **args)
    else:
        return Branch(f, **args)


def find_cached_branch(f, cache_root, **args):
    from remotebranch import RemoteBranch
    br = find_branch(f, **args)

    def cacheify(br, store_name):
        from meta_store import CachedStore
        cache_path = os.path.join(cache_root, store_name)
        new_store = CachedStore(getattr(br, store_name), cache_path)
        setattr(br, store_name, new_store)

    if isinstance(br, RemoteBranch):
        cacheify(br, 'inventory_store')
        cacheify(br, 'text_store')
        cacheify(br, 'revision_store')
    return br
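
# Usage sketch: how a cached remote branch might be opened (the URL and
# cache directory here are made-up examples, not taken from bzrlib):
#
#   br = find_cached_branch('http://example.com/branch', '/tmp/bzr-cache')
#   # for a RemoteBranch, inventory_store, text_store and revision_store
#   # are now CachedStore wrappers backed by files under /tmp/bzr-cache,
#   # so repeated reads do not refetch over HTTP.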
def _relpath(base, path):
    """Return path relative to base, or raise exception.

    The path may be either an absolute path or a path relative to the
    current working directory.

    Lifted out of Branch.relpath for ease of testing.

    os.path.commonprefix (python2.4) has a bad bug that it works just
    on string prefixes, assuming that '/u' is a prefix of '/u2'.  This
    avoids that problem."""
    rp = os.path.abspath(path)

    s = []
    head = rp
    while len(head) >= len(base):
        if head == base:
            break
        head, tail = os.path.split(head)
        if tail:
            s.insert(0, tail)
    else:
        from errors import NotBranchError
        raise NotBranchError("path %r is not within branch %r" % (rp, base))

    return os.sep.join(s)
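
# Worked example of the commonprefix pitfall mentioned in the docstring
# (illustrative only; the paths are made up):
#
#   >>> os.path.commonprefix(['/u2/foo', '/u'])
#   '/u'
#
# '/u2/foo' would therefore wrongly appear to live under a branch rooted at
# '/u'; _relpath compares whole path components instead and raises
# NotBranchError in that case.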
def find_branch_root(f=None):
    """Find the branch root enclosing f, or pwd.

    f may be a filename or a URL.

    It is not necessary that f exists.

    Basically we keep looking up until we find the control directory or
    run into the root."""
        fmt = self.controlfile('branch-format', 'r').read()
        fmt = fmt.replace('\r\n', '\n')
        if fmt != BZR_BRANCH_FORMAT:
            raise BzrError('sorry, branch format %r not supported' % fmt,
                           ['use a different bzr version',
                            'or remove the .bzr directory and "bzr init" again'])
    def read_working_inventory(self):
        """Read the working inventory."""
        from bzrlib.inventory import Inventory
        from bzrlib.xml import unpack_xml
        from time import time
        before = time()
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        inv = unpack_xml(Inventory,
                         self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time() - before))
        return inv
    def _write_inventory(self, inv):
        """Update the working inventory."""


    def add(self, files, ids=None):
        """Make files versioned.

        This puts the files in the Added state, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not (yet) exist.

        TODO: Perhaps return the ids of the files?  But then again it
              is easy to retrieve them if they're needed.

        TODO: Option to specify file id.

        TODO: Adding a directory should optionally recurse down and
              add all non-ignored children.  Perhaps do that in a
              higher-level method.

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add('foo')
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True

        >>> b.add('foo')
        Traceback (most recent call last):
        ...
        BzrError: ('foo is already versioned', [])

        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        from bzrlib.textui import show_status
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, basestring):
            assert(ids is None or isinstance(ids, basestring))
            files = [files]
            if ids is not None:
                ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f, file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            if len(splitpath(f)) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))

            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if file_id is None:
                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

            print 'added', quotefn(f)

            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)
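
    # Usage sketch: explicit ids pair up positionally with files, and None
    # entries fall back to gen_file_id() (the file names here are made up):
    #
    #   b.add(['hello.txt', 'docs'], ids=['hello-id-1', None])
    #   # 'hello.txt' is added with file id 'hello-id-1'; 'docs' gets a
    #   # generated id.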
    def print_file(self, file, revno):
        """Print `file` to stdout."""
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            raise BzrError("%r is not present in revision %d" % (file, revno))
        tree.print_file(file_id)
    def remove(self, files, verbose=False):
        """Mark nominated files for removal from the inventory.

        This is the opposite of add.  Removing it is consistent with most
        other tools.  Maybe an option.
        """
        from bzrlib.textui import show_status
        ## TODO: Normalize names
        ## TODO: Remove nested loops; better scalability
        if isinstance(files, basestring):
            files = [files]

        tree = self.working_tree()
        inv = tree.inventory

        # do this before any modifications
        for f in files:
            fid = inv.path2id(f)
            if not fid:
                raise BzrError("cannot remove unversioned file %s" % quotefn(f))
            mutter("remove inventory entry %s {%s}" % (quotefn(f), fid))
            if verbose:
                # having removed it, it must be either ignored or unknown
                if tree.is_ignored(f):
                    new_status = 'I'
                else:
                    new_status = '?'
                show_status(new_status, inv[fid].kind, quotefn(f))
            del inv[fid]

        self._write_inventory(inv)


    # FIXME: this doesn't need to be a branch method
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import Inventory, InventoryEntry
        inv = Inventory()
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            if name == "":
                continue
            inv.add(InventoryEntry(file_id, name, kind, parent))
        self._write_inventory(inv)
    def unknowns(self):
        return self.working_tree().unknowns()
    def commit(self, message, timestamp=None, timezone=None,
               committer=None,
               verbose=False):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed.  A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory.  This should mean at least that there are no
        broken hash pointers.  There is no way we can get a snapshot
        of the whole directory at an instant.  This would also have to
        be robust against files disappearing, moving, etc.  So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
        """

        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory.  The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision.  (These are
        # not present in the working inventory.)  We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        inv = Inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []

        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            entry = entry.copy()

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter("    file is missing, removing from inventory")
                if verbose:
                    show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files?  Seems a
            # waste to store them many times.

            inv.add(entry)

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    bailout("entry %r changed kind from %r to %r"
                            % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    bailout("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    bailout("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter('    unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter('    stored with text_id {%s}' % entry.text_id)
                    if verbose:
                        if not old_ie:
                            state = 'A'
                        elif (old_ie.name == entry.name
                              and old_ie.parent_id == entry.parent_id):
                            state = 'M'
                        else:
                            state = 'R'
                        show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # have to test.

            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        inv_tmp.seek(0)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

        if timezone == None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       message=message,
                       precursor=self.last_patch(),
                       inventory_id=inv_id,
                       revision_id=rev_id)

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        rev_tmp.seek(0)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        if verbose:
            note("committed r%d" % self.revno())
    def append_revision(self, revision_id):
        from bzrlib.atomicfile import AtomicFile

        mutter("add {%s} to revision-history" % revision_id)
        rev_history = self.revision_history() + [revision_id]

        f = AtomicFile(self.controlfilename('revision-history'))
        try:
            for rev_id in rev_history:
                print >>f, rev_id
            f.commit()
        finally:
            f.close()
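
    # The AtomicFile used above follows the usual write-to-temp-then-rename
    # pattern (sketch of the idea; the exact temp naming is an implementation
    # detail of bzrlib.atomicfile):
    #
    #   f = AtomicFile(path)    # writes go to a temporary file beside path
    #   f.write(data)
    #   f.commit()              # rename the temporary file over path
    #
    # so readers never observe a half-written revision-history file.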
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision"""
        from bzrlib.revision import Revision
        from bzrlib.xml import unpack_xml

        if not revision_id or not isinstance(revision_id, basestring):
            raise ValueError('invalid revision-id: %r' % revision_id)
        r = unpack_xml(Revision, self.revision_store[revision_id])
        assert r.revision_id == revision_id
        return r
    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed.  At that
        # point, it is probably best *not* to include the signature
        # in the revision hash.  Because that lets you re-sign
        # the revision, (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return sha_file(self.revision_store[revision_id])
    def get_inventory(self, inventory_id):
        """Get Inventory object by hash."""


    def revision_history(self):
        """
        >>> ScratchBranch().revision_history()
        []
        """
        return [l.rstrip('\r\n') for l in
                self.controlfile('revision-history', 'r').readlines()]
    def common_ancestor(self, other, self_revno=None, other_revno=None):
        """
        >>> import commit
        >>> sb = ScratchBranch(files=['foo', 'foo~'])
        >>> sb.common_ancestor(sb) == (None, None)
        True
        >>> commit.commit(sb, "Committing first revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        1
        >>> clone = sb.clone()
        >>> commit.commit(sb, "Committing second revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        2
        >>> sb.common_ancestor(clone)[0]
        1
        >>> commit.commit(clone, "Committing divergent second revision",
        ...               verbose=False)
        >>> sb.common_ancestor(clone)[0]
        1
        >>> sb.common_ancestor(clone) == clone.common_ancestor(sb)
        True
        >>> sb.common_ancestor(sb) != clone.common_ancestor(clone)
        True
        >>> clone2 = sb.clone()
        >>> sb.common_ancestor(clone2)[0]
        2
        >>> sb.common_ancestor(clone2, self_revno=1)[0]
        1
        >>> sb.common_ancestor(clone2, other_revno=1)[0]
        1
        """
        my_history = self.revision_history()
        other_history = other.revision_history()
        if self_revno is None:
            self_revno = len(my_history)
        if other_revno is None:
            other_revno = len(other_history)
        indices = range(min((self_revno, other_revno)))
        indices.reverse()
        for r in indices:
            if my_history[r] == other_history[r]:
                return r+1, my_history[r]
        return None, None
    def enum_history(self, direction):
        """Return (revno, revision_id) for history of branch.

        direction
            'forward' is from earliest to latest
            'reverse' is from latest to earliest
        """
        rh = self.revision_history()
        if direction == 'forward':
            i = 1
            for rid in rh:
                yield i, rid
                i += 1
        elif direction == 'reverse':
            i = len(rh)
            while i > 0:
                yield i, rh[i-1]
                i -= 1
        else:
            raise ValueError('invalid history direction', direction)
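
    # Illustration: with three revisions 'A', 'B', 'C' in revision-history,
    #
    #   enum_history('forward')  yields (1, 'A'), (2, 'B'), (3, 'C')
    #   enum_history('reverse')  yields (3, 'C'), (2, 'B'), (1, 'A')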
    def revno(self):
        """Return current revision number for this branch.

        That is equivalent to the number of revisions committed to
        this branch.

        >>> b = ScratchBranch()
        >>> b.revno()
        0
        >>> b.commit('no foo')
        >>> b.revno()
        1
        """
        return len(self.revision_history())
    def last_patch(self):
        """Return last patch hash, or None if no history.

        >>> ScratchBranch().last_patch() == None
        True
        """
        ph = self.revision_history()
        if ph:
            return ph[-1]
        else:
            return None
    def missing_revisions(self, other, stop_revision=None):
        """
        If self and other have not diverged, return a list of the revisions
        present in other, but missing from self.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch()
        >>> br2 = ScratchBranch()
        >>> br1.missing_revisions(br2)
        >>> commit(br2, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        >>> br2.missing_revisions(br1)
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        >>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
        >>> br1.missing_revisions(br2)
        >>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
        >>> br1.missing_revisions(br2)
        Traceback (most recent call last):
        DivergedBranches: These branches have diverged.
        """
        self_history = self.revision_history()
        self_len = len(self_history)
        other_history = other.revision_history()
        other_len = len(other_history)
        common_index = min(self_len, other_len) - 1
        if common_index >= 0 and \
            self_history[common_index] != other_history[common_index]:
            raise DivergedBranches(self, other)

        if stop_revision is None:
            stop_revision = other_len
        elif stop_revision > other_len:
            raise NoSuchRevision(self, stop_revision)

        return other_history[self_len:stop_revision]
    def update_revisions(self, other, stop_revision=None):
        """Pull in all new revisions from other branch.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch(files=['foo', 'bar'])
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1", verbose=False)
        >>> br2 = ScratchBranch()
        >>> br2.update_revisions(br1)
        >>> br2.revision_history()
        >>> br2.update_revisions(br1)
        >>> br1.text_store.total_size() == br2.text_store.total_size()
        """
        from bzrlib.progress import ProgressBar
        from sets import Set as set

        pb = ProgressBar()

        pb.update('comparing histories')
        revision_ids = self.missing_revisions(other, stop_revision)

        if hasattr(other.revision_store, "prefetch"):
            other.revision_store.prefetch(revision_ids)
        if hasattr(other.inventory_store, "prefetch"):
            inventory_ids = [other.get_revision(r).inventory_id
                             for r in revision_ids]
            other.inventory_store.prefetch(inventory_ids)

        revisions = []
        needed_texts = set()
        i = 0
        for rev_id in revision_ids:
            i += 1
            pb.update('fetching revision', i, len(revision_ids))
            rev = other.get_revision(rev_id)
            revisions.append(rev)
            inv = other.get_inventory(str(rev.inventory_id))
            for key, entry in inv.iter_entries():
                if entry.text_id is None:
                    continue
                if entry.text_id not in self.text_store:
                    needed_texts.add(entry.text_id)

        pb.clear()

        count = self.text_store.copy_multi(other.text_store, needed_texts)
        print "Added %d texts." % count
        inventory_ids = [ f.inventory_id for f in revisions ]
        count = self.inventory_store.copy_multi(other.inventory_store,
                                                inventory_ids)
        print "Added %d inventories." % count
        revision_ids = [ f.revision_id for f in revisions]
        count = self.revision_store.copy_multi(other.revision_store,
                                               revision_ids)
        for revision_id in revision_ids:
            self.append_revision(revision_id)
        print "Added %d revisions." % count
    def commit(self, *args, **kw):
        from bzrlib.commit import commit
        commit(self, *args, **kw)


    def lookup_revision(self, revno):
    def write_log(self, show_timezone='original', verbose=False):
        """Write out human-readable log of commits to this branch

        utc -- If true, show dates in universal time, not local time."""
        ## TODO: Option to choose either original, utc or local timezone
        revno = 1
        precursor = None
        for p in self.revision_history():
            print 'revno:', revno
            ## TODO: Show hash if --id is given.
            ##print 'revision-hash:', p
            rev = self.get_revision(p)
            print 'committer:', rev.committer
            print 'timestamp: %s' % (format_date(rev.timestamp, rev.timezone or 0,
                                                 show_timezone))

            ## opportunistic consistency check, same as check_patch_chaining
            if rev.precursor != precursor:
                bailout("mismatched precursor!")

            print 'message:'
            if not rev.message:
                print '  (no message)'
            else:
                for l in rev.message.split('\n'):
                    print '  ' + l

            if verbose == True and precursor != None:
                print 'changed files:'
                tree = self.revision_tree(p)
                prevtree = self.revision_tree(precursor)

                for file_state, fid, old_name, new_name, kind in \
                                        diff_trees(prevtree, tree, ):
                    if file_state == 'A' or file_state == 'M':
                        show_status(file_state, kind, new_name)
                    elif file_state == 'D':
                        show_status(file_state, kind, old_name)
                    elif file_state == 'R':
                        show_status(file_state, kind,
                                    old_name + ' => ' + new_name)

            revno += 1
            precursor = p
    def rename_one(self, from_rel, to_rel):
        """Rename one file.

        This can change the directory or the filename or both.
        """
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter("  file_id    {%s}" % file_id)
        mutter("  from_rel   %r" % from_rel)
        mutter("  to_rel     %r" % to_rel)
        mutter("  to_dir     %r" % to_dir)
        mutter("  to_dir_id  {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
        try:
            os.rename(from_abs, to_abs)
        except OSError, e:
            raise BzrError("failed to rename %r to %r: %s"
                           % (from_abs, to_abs, e[1]),
                           ["rename rolled back"])

        self._write_inventory(inv)
    def move(self, from_paths, to_name):
        """Move files to an existing versioned directory.

        Note that to_name is only the last component of the new name;
        this doesn't change the directory.
        """
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done.  But we should have caught most problems.

        for f in from_paths:
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                               ["rename rolled back"])

        self._write_inventory(inv)
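
    # Example of the calling convention (the file names are made up):
    #
    #   b.move(['hello.txt', 'notes.txt'], 'docs')
    #   # both files are moved into the already-versioned directory 'docs',
    #   # keeping their file ids; "hello.txt => docs/hello.txt" etc. is
    #   # printed for each.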
    def revert(self, filenames, old_tree=None, backups=True):
        """Restore selected files to the versions from a previous tree.

        backups
            If true (default) backups are made of files before
            they're changed.
        """
        from bzrlib.errors import NotVersionedError, BzrError
        from bzrlib.atomicfile import AtomicFile
        from bzrlib.osutils import backup_file

        inv = self.read_working_inventory()
        if old_tree is None:
            old_tree = self.basis_tree()
        old_inv = old_tree.inventory

        nids = []
        for fn in filenames:
            file_id = inv.path2id(fn)
            if not file_id:
                raise NotVersionedError("not a versioned file", fn)
            if not old_inv.has_id(file_id):
                raise BzrError("file not present in old tree", fn, file_id)
            nids.append((fn, file_id))

        # TODO: Rename back if it was previously at a different location

        # TODO: If given a directory, restore the entire contents from
        # the previous version.

        # TODO: Make a backup to a temporary file.

        # TODO: If the file previously didn't exist, delete it?
        for fn, file_id in nids:
            if backups:
                backup_file(fn)

            f = AtomicFile(fn, 'wb')
            try:
                f.write(old_tree.get_file(file_id).read())
                f.commit()
            finally:
                f.close()


    def show_status(self, show_all=False):
        """Display single-line status for non-ignored working files.

        The list is shown sorted in order by file name.

        >>> b = ScratchBranch(files=['foo', 'foo~'])
        >>> b.commit("add foo")
        >>> os.unlink(b.abspath('foo'))

        TODO: Get state for single files.
        """

        # We have to build everything into a list first so that it can
        # be sorted by name, incorporating all the different sources.

        # FIXME: Rather than getting things in random order and then sorting,
        # just step through in order.

        # Interesting case: the old ID for a file has been removed,
        # but a new file has been created under that name.

        old = self.basis_tree()
        new = self.working_tree()

        for fs, fid, oldname, newname, kind in diff_trees(old, new):
            if fs == 'R':
                show_status(fs, kind,
                            oldname + ' => ' + newname)
            elif fs == 'A' or fs == 'M':
                show_status(fs, kind, newname)
            elif fs == 'D':
                show_status(fs, kind, oldname)
            elif fs == 'I':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == '?':
                show_status(fs, kind, newname)
            else:
                bailout("weird file state %r" % ((fs, fid),))
class ScratchBranch(Branch):