from inventory import Inventory
from trace import mutter, note
from tree import Tree, EmptyTree, RevisionTree, WorkingTree
from inventory import InventoryEntry, Inventory
from osutils import isdir, quotefn, isfile, uuid, sha_file, username, chomp, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from store import ImmutableStore
from revision import Revision
from errors import bailout, BzrError
from textui import show_status
from diff import diff_trees

BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"
## TODO: Maybe include checks for common corruption of newlines, etc?


def find_branch(f, **args):
    if f and (f.startswith('http://') or f.startswith('https://')):
        import remotebranch
        return remotebranch.RemoteBranch(f, **args)
    else:
        return Branch(f, **args)


def with_writelock(method):
    """Method decorator for functions run with the branch locked."""
    # called with self set to the branch
    return method(self, *a, **k)


def with_readlock(method):
    return method(self, *a, **k)
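

# A sketch only (not the original implementation), assuming the branch gains
# lock()/unlock() methods that are not shown in this fragment: the decorators
# above presumably wrap a method so the branch is locked for the duration of
# the call and is always unlocked afterwards, along these lines:
def _with_lock_sketch(method, mode):
    def locked(self, *a, **k):
        self.lock(mode)                  # hypothetical: take the branch lock
        try:
            return method(self, *a, **k)
        finally:
            self.unlock()                # hypothetical: always release it
    return locked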


def find_branch_root(f=None):
    """Find the branch root enclosing f, or pwd.

    f may be a filename or a URL.

    It is not necessary that f exists.

    Basically we keep looking up until we find the control directory or
    run into the root."""
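

# A minimal sketch of the upward search described above, not the original
# body: walk from f (or the current directory) toward the filesystem root
# until a '.bzr' control directory is found.  Handling of URLs is omitted.
def _find_branch_root_sketch(f=None):
    f = os.path.abspath(f or '.')        # f need not exist; normalise it
    while True:
        if os.path.isdir(os.path.join(f, '.bzr')):
            return f
        parent = os.path.dirname(f)
        if parent == f:                  # reached the filesystem root
            raise BzrError("%r is not inside a branch" % f)
        f = parent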


    def add(self, files, ids=None):
        """Make files versioned.

        TODO: Adding a directory should optionally recurse down and
        add all non-ignored children.  Perhaps do that in a
        higher-level method (see the sketch after this method).

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add('foo')
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True
        >>> b.add('foo')
        Traceback (most recent call last):
        BzrError: ('foo is already versioned', [])

        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, types.StringTypes):
            assert(ids is None or isinstance(ids, types.StringTypes))
            files = [files]
            ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f, file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            fp = splitpath(f)
            if len(fp) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))

            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if file_id is None:
                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

            show_status('A', kind, quotefn(f))

            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)
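
    # Sketch only, not part of the original source: the TODO in add()'s
    # docstring asks for a higher-level recursive add.  This version walks a
    # directory and feeds branch-relative paths to add(); it assumes
    # abspath('') is the branch root and skips the ignore handling entirely.
    def _add_recursive_sketch(self, dirname):
        root = self.abspath('')
        paths = [dirname]
        for dirpath, dirnames, filenames in os.walk(self.abspath(dirname)):
            for name in dirnames + filenames:
                full = os.path.join(dirpath, name)
                paths.append(os.path.relpath(full, root))
        self.add(paths)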


    def print_file(self, file, revno):
        """Print `file` to stdout."""


    def unknowns(self):
        return self.working_tree().unknowns()


    def commit(self, message, timestamp=None, timezone=None,
               committer=None):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed.  A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory.  This should mean at least that there are no
        broken hash pointers.  There is no way we can get a snapshot
        of the whole directory at an instant.  This would also have to
        be robust against files disappearing, moving, etc.  So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
        """
        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory. The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision. (These are
        # not present in the working inventory.) We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        inv = Inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []
        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            entry = entry.copy()

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter(" file is missing, removing from inventory")
                show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files? Seems a
            # waste to store them many times.

            inv.add(entry)

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    bailout("entry %r changed kind from %r to %r"
                            % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    bailout("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    bailout("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                # reuse the previous text_id when the content is unchanged
                # (same size and sha1); otherwise store a new text
                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter(' unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter(' stored with text_id {%s}' % entry.text_id)
                if not old_ie:
                    state = 'A'
                elif (old_ie.name == entry.name
                      and old_ie.parent_id == entry.parent_id):
                    state = 'M'
                else:
                    state = 'R'

                show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # have to test.
            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        inv_tmp.seek(0)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

        if timezone == None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       precursor = self.last_patch(),
                       message=message,
                       inventory_id=inv_id,
                       revision_id=rev_id)

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        rev_tmp.seek(0)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        note("committed r%d" % self.revno())
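
    # Sketch only, relating to the "temporary copy" idea in commit()'s
    # docstring above; this is an assumption, not the original code.  Hashing
    # a private snapshot of each file rather than the live working file keeps
    # the stored text, text_sha1 and text_size consistent with each other
    # even if the working copy changes mid-commit.
    def _snapshot_and_hash_sketch(self, path):
        import shutil
        fd, tmpname = tempfile.mkstemp(prefix='bzr-commit-')
        os.close(fd)
        shutil.copyfile(self.abspath(path), tmpname)   # snapshot first
        content = file(tmpname, 'rb').read()           # hash the snapshot
        os.unlink(tmpname)
        return content, sha_string(content), len(content)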


    def append_revision(self, revision_id):
        mutter("add {%s} to revision-history" % revision_id)
        rev_history = self.revision_history()


    def write_log(self, show_timezone='original', verbose=False):
        """Write out human-readable log of commits to this branch

        utc -- If true, show dates in universal time, not local time."""
        ## TODO: Option to choose either original, utc or local timezone
        revno = 1
        precursor = None
        for p in self.revision_history():
            print 'revno:', revno
            ## TODO: Show hash if --id is given.
            ##print 'revision-hash:', p
            rev = self.get_revision(p)
            print 'committer:', rev.committer
            print 'timestamp: %s' % (format_date(rev.timestamp, rev.timezone or 0,
                                                 show_timezone))

            ## opportunistic consistency check, same as check_patch_chaining
            if rev.precursor != precursor:
                bailout("mismatched precursor!")

            print 'message:'
            if not rev.message:
                print ' (no message)'
            else:
                for l in rev.message.split('\n'):
                    print ' ' + l

            if verbose == True and precursor != None:
                print 'changed files:'
                tree = self.revision_tree(p)
                prevtree = self.revision_tree(precursor)

                for file_state, fid, old_name, new_name, kind in \
                        diff_trees(prevtree, tree):
                    if file_state == 'A' or file_state == 'M':
                        show_status(file_state, kind, new_name)
                    elif file_state == 'D':
                        show_status(file_state, kind, old_name)
                    elif file_state == 'R':
                        show_status(file_state, kind,
                                    old_name + ' => ' + new_name)

            revno += 1
            precursor = p


    def rename_one(self, from_rel, to_rel):
        """Rename one file.

        This can change the directory or the filename or both.
        """
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter(" file_id {%s}" % file_id)


    def move(self, from_paths, to_name):
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

            # OK, so there's a race here, it's possible that someone will
            # create a file in this interval and then the rename might be
            # wrong.
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                               ["rename rolled back"])

        self._write_inventory(inv)
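
    # Illustration only, not part of the original source: why the
    # "f_id in to_idpath" test in move() refuses to move something into a
    # subdirectory of itself.  get_idpath(dir_id) is the chain of file-ids
    # from the root down to dir_id, so if the id being moved appears in the
    # destination's chain, the destination lives inside it.  The ids here
    # are made up.
    def _would_nest_inside_itself_sketch(self):
        to_idpath = ['root-id', 'a-id', 'b-id']   # pretend destination is a/b
        f_id = 'a-id'                             # pretend we are moving a
        return f_id in to_idpath                  # True -> refuse the move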


    def show_status(self, show_all=False):
        """Display single-line status for non-ignored working files.

        The list is shown sorted in order by file name.

        >>> b = ScratchBranch(files=['foo', 'foo~'])
        >>> b.commit("add foo")
        >>> os.unlink(b.abspath('foo'))

        TODO: Get state for single files.

        TODO: Perhaps show a slash at the end of directory names.
        """
        # We have to build everything into a list first so that it can
        # be sorted by name, incorporating all the different sources.

        # FIXME: Rather than getting things in random order and then sorting,
        # just step through in order.

        # Interesting case: the old ID for a file has been removed,
        # but a new file has been created under that name.

        old = self.basis_tree()
        new = self.working_tree()

        for fs, fid, oldname, newname, kind in diff_trees(old, new):
            if fs == 'R':
                show_status(fs, kind,
                            oldname + ' => ' + newname)
            elif fs == 'A' or fs == 'M':
                show_status(fs, kind, newname)
            elif fs == 'D':
                show_status(fs, kind, oldname)
            elif fs == '.':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == 'I':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == '?':
                show_status(fs, kind, newname)
            else:
                bailout("weird file state %r" % ((fs, fid),))
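
    # Sketch only, not the original implementation: what the "build everything
    # into a list first so that it can be sorted by name" comment above is
    # aiming at.  Collect one (name, state, kind) tuple per change, sort on
    # the name, then display in order.
    def _sorted_status_sketch(self):
        old = self.basis_tree()
        new = self.working_tree()
        entries = []
        for fs, fid, oldname, newname, kind in diff_trees(old, new):
            entries.append((newname or oldname, fs, kind))
        entries.sort()
        for name, fs, kind in entries:
            show_status(fs, kind, name)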


class ScratchBranch(Branch):
    """Special test class: a branch that cleans up after itself.