from inventory import InventoryEntry, Inventory
from trace import mutter, note
from tree import Tree, EmptyTree, RevisionTree, WorkingTree
from osutils import isdir, quotefn, isfile, uuid, sha_file, username, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from store import ImmutableStore
from revision import Revision
from errors import BzrError
from textui import show_status
from diff import diff_trees

BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"

## TODO: Maybe include checks for common corruption of newlines, etc?


def find_branch(f, **args):
    if f and (f.startswith('http://') or f.startswith('https://')):
        import remotebranch
        return remotebranch.RemoteBranch(f, **args)
    else:
        return Branch(f, **args)
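
# Illustrative sketch only; the URL and path below are hypothetical.
# find_branch() simply dispatches on the form of the location given:
#
#     find_branch('http://example.com/some-branch')   # remote -> RemoteBranch
#     find_branch('/home/user/some-branch')           # local  -> Branch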


def find_branch_root(f=None):
    """Find the branch root enclosing f, or pwd.

    f may be a filename or a URL.

    It is not necessary that f exists.

    Basically we keep looking up until we find the control directory or
    run into the root.
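
    A rough sketch of the search, assuming the control directory is
    named '.bzr' (illustrative only, not the actual body):

        f = f or os.getcwd()
        while not os.path.exists(os.path.join(f, '.bzr')):
            head, tail = os.path.split(f)
            if head == f:
                raise BzrError('%r is not in a branch' % f)
            f = head
        return f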


    __repr__ = __str__


    def __del__(self):
        if self._lock_mode:
            from warnings import warn
            warn("branch %r was not explicitly unlocked" % self)
            self.unlock()


    def lock(self, mode):
        """Lock the on-disk branch, excluding other processes."""
        if self._lock_mode:
            raise BzrError('branch %r is already locked: %r' % (self, self._lock_mode))

        from bzrlib.lock import lock, LOCK_SH, LOCK_EX
        if mode == 'r':
            m = LOCK_SH
        elif mode == 'w':
            m = LOCK_EX
        else:
            raise ValueError('invalid lock mode %r' % mode)
        lock(self._lockfile, m)
        self._lock_mode = (mode, 1)


    def unlock(self):
        if not self._lock_mode:
            raise BzrError('branch %r is not locked' % (self))
        from bzrlib.lock import unlock
        unlock(self._lockfile)
        self._lock_mode = None


    def _need_readlock(self):
        if not self._lock_mode:
            raise BzrError('need read lock on branch, only have %r' % self._lock_mode)


    def _need_writelock(self):
        if (self._lock_mode == None) or (self._lock_mode[0] != 'w'):
            raise BzrError('need write lock on branch, only have %r' % self._lock_mode)
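
    # Hedged usage sketch (assuming 'r' and 'w' are the accepted modes):
    #
    #     b = Branch('.')
    #     b.lock('w')
    #     try:
    #         ...          # operations that call _need_writelock()
    #     finally:
    #         b.unlock()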

        TODO: Adding a directory should optionally recurse down and
        add all non-ignored children. Perhaps do that in a
        higher-level method.

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add('foo')
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True
        >>> b.add('foo')
        Traceback (most recent call last):
        ...
        BzrError: ('foo is already versioned', [])
        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        self._need_writelock()

        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, types.StringTypes):
            assert(ids is None or isinstance(ids, types.StringTypes))
            files = [files]
            ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f,file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            fp = splitpath(f)
            if len(fp) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))

        return self.working_tree().unknowns()


    def commit(self, message, timestamp=None, timezone=None,
               committer=None,
               verbose=False):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed. A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory. This should mean at least that there are no
        broken hash pointers. There is no way we can get a snapshot
        of the whole directory at an instant. This would also have to
        be robust against files disappearing, moving, etc. So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
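
        A minimal usage sketch, for illustration only (the file name and
        committer address are hypothetical):

        >>> b = ScratchBranch(files=['hello'])
        >>> b.add('hello')
        >>> b.commit('add hello', committer='illustration@example.com')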
        """
        self._need_writelock()

        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory. The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision. (These are
        # not present in the working inventory.) We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        inv = Inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []
        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter(" file is missing, removing from inventory")
                if verbose:
                    show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files? Seems a
            # waste to store them many times.
            inv.add(entry)

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    raise BzrError("entry %r changed kind from %r to %r"
                                   % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    raise BzrError("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    raise BzrError("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter(' unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter(' stored with text_id {%s}' % entry.text_id)

                if verbose:
                    if not old_ie:
                        state = 'A'
                    elif (old_ie.name == entry.name
                          and old_ie.parent_id == entry.parent_id):
                        state = '.'
                    else:
                        state = 'R'

                    show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # have to test.

            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        inv_tmp.seek(0)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

        if timezone == None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       precursor = self.last_patch(),
                       message=message,
                       inventory_id=inv_id,
                       revision_id=rev_id)

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        rev_tmp.seek(0)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        if verbose:
            note("committed r%d" % self.revno())


    def append_revision(self, revision_id):
        mutter("add {%s} to revision-history" % revision_id)
        rev_history = self.revision_history()
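        # A rough sketch of the remaining step, assuming revision-history
        # is a simple newline-separated control file (illustrative only):
        #
        #     rev_history.append(revision_id)
        #     f = self.controlfile('revision-history', 'w')
        #     f.write('\n'.join(rev_history) + '\n')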

        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter(" file_id {%s}" % file_id)

        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

            # OK, so there's a race here, it's possible that someone will
            # create a file in this interval and then the rename might be

            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                        ["rename rolled back"])

        self._write_inventory(inv)
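
    # Hedged usage sketch (the file and directory names are illustrative):
    # move several versioned files into an already-versioned directory,
    # or rename a single versioned entry.
    #
    #     b.move(['foo.c', 'bar.c'], 'src')
    #     b.rename_one('README', 'README.txt')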


    def show_status(self, show_all=False, file_list=None):
        """Display single-line status for non-ignored working files.

        The list is shown sorted in order by file name.

        >>> b = ScratchBranch(files=['foo', 'foo~'])
        >>> b.add('foo')
        >>> b.commit("add foo")
        >>> os.unlink(b.abspath('foo'))
        """
        self._need_readlock()

        # We have to build everything into a list first so that it can
        # be sorted by name, incorporating all the different sources.

        # FIXME: Rather than getting things in random order and then sorting,
        # just step through in order.

        # Interesting case: the old ID for a file has been removed,
        # but a new file has been created under that name.

        old = self.basis_tree()
        new = self.working_tree()

        items = diff_trees(old, new)
        # We want to filter out only if any file was provided in the file_list.
        if isinstance(file_list, list) and len(file_list):
            items = [item for item in items if item[3] in file_list]

        for fs, fid, oldname, newname, kind in items:
            if fs == 'R':
                show_status(fs, kind,
                            oldname + ' => ' + newname)
            elif fs == 'A' or fs == 'M':
                show_status(fs, kind, newname)
            elif fs == 'D':
                show_status(fs, kind, oldname)
            elif fs == '.':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == 'I':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == '?':
                show_status(fs, kind, newname)
            else:
                raise BzrError("weird file state %r" % ((fs, fid),))


class ScratchBranch(Branch):
    """Special test class: a branch that cleans up after itself.