    __repr__ = __str__


    def __del__(self):
        if self._lock_mode or self._lock:
            from warnings import warn
            warn("branch %r was not explicitly unlocked" % self)
            self._lock.unlock()

    def lock_write(self):
        if self._lock_mode:
            if self._lock_mode != 'w':
                from errors import LockError
                raise LockError("can't upgrade to a write lock from %r" %
                                self._lock_mode)
            self._lock_count += 1
        else:
            from bzrlib.lock import WriteLock

            self._lock = WriteLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'w'
            self._lock_count = 1

    def lock_read(self):
        if self._lock_mode:
            assert self._lock_mode in ('r', 'w'), \
                   "invalid lock mode %r" % self._lock_mode
            self._lock_count += 1
        else:
            from bzrlib.lock import ReadLock

            self._lock = ReadLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'r'
            self._lock_count = 1

    def unlock(self):
        if not self._lock_mode:
            from errors import LockError
            raise LockError('branch %r is not locked' % (self))

        if self._lock_count > 1:
            self._lock_count -= 1
        else:
            self._lock.unlock()
            self._lock = None
            self._lock_mode = self._lock_count = None

    def lock(self, mode='w'):
        """Lock the on-disk branch, excluding other processes."""
        try:
            import fcntl, errno

            if mode == 'w':
                lm = fcntl.LOCK_EX
                om = os.O_WRONLY | os.O_CREAT
            elif mode == 'r':
                lm = fcntl.LOCK_SH
                om = os.O_RDONLY
            else:
                raise BzrError("invalid locking mode %r" % mode)

            try:
                lockfile = os.open(self.controlfilename('branch-lock'), om)
            except OSError, e:
                if e.errno == errno.ENOENT:
                    # might not exist on branches from <0.0.4
                    self.controlfile('branch-lock', 'w').close()
                    lockfile = os.open(self.controlfilename('branch-lock'), om)
                else:
                    raise e

            fcntl.lockf(lockfile, lm)
            def unlock():
                fcntl.lockf(lockfile, fcntl.LOCK_UN)
                os.close(lockfile)
                self._lockmode = None
            self.unlock = unlock
            self._lockmode = mode
        except ImportError:
            warning("please write a locking method for platform %r" % sys.platform)
            def unlock():
                self._lockmode = None
            self.unlock = unlock
            self._lockmode = mode

    def _need_readlock(self):
        if self._lockmode not in ['r', 'w']:
            raise BzrError('need read lock on branch, only have %r' % self._lockmode)

    def _need_writelock(self):
        if self._lockmode not in ['w']:
            raise BzrError('need write lock on branch, only have %r' % self._lockmode)
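
# Usage sketch (illustrative, not from the original file): lock_write/lock_read/
# unlock above implement a re-entrant lock with a counter, so nested
# acquisitions only touch the OS-level branch-lock once.  Assumes a Branch-like
# object `b`; the helper name below is made up.
def _demo_nested_locks(b):
    b.lock_read()          # first acquisition takes the ReadLock on branch-lock
    try:
        b.lock_read()      # nested acquisition only increments _lock_count
        try:
            pass           # ... read-only operations on the branch go here ...
        finally:
            b.unlock()     # count drops from 2 back to 1; file stays locked
    finally:
        b.unlock()         # count reaches 0; the OS-level lock is released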

    def abspath(self, name):
        """Return absolute filename for something in the branch."""
        return os.path.join(self.base, name)

    def _check_format(self):
        """Check this branch format is supported."""
        # ignore CRLF differences so branches created on other platforms
        # can still be opened
        fmt = self.controlfile('branch-format', 'r').read()
        fmt = fmt.replace('\r\n', '')
        if fmt != BZR_BRANCH_FORMAT:
            raise BzrError('sorry, branch format %r not supported' % fmt,
                           ['use a different bzr version',
                            'or remove the .bzr directory and "bzr init" again'])

    def read_working_inventory(self):
        """Read the working inventory."""
        self._need_readlock()
        before = time.time()
        # ElementTree does its own conversion from UTF-8, so open in
        # binary mode.
        inv = Inventory.read_xml(self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time.time() - before))
        return inv
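
# Usage sketch (illustrative): the working inventory returned above can be
# walked with iter_entries(), exactly as commit() does further down.  Assumes a
# Branch-like object `b`; the helper name is made up.
def _demo_list_inventory(b):
    inv = b.read_working_inventory()
    for path, entry in inv.iter_entries():
        # each entry carries the file_id and kind recorded for that path
        print "%-10s %-20s %s" % (entry.kind, entry.file_id, path)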

    def _write_inventory(self, inv):
        """Update the working inventory."""

    def add(self, files, ids=None):
        """Make files versioned.

        This puts the files in the Added state, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not (yet) exist.

        TODO: Perhaps return the ids of the files?  But then again it
              is easy to retrieve them if they're needed.

        TODO: Adding a directory should optionally recurse down and
              add all non-ignored children.  Perhaps do that in a
              higher-level method.

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add('foo')
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True

        >>> b.add('foo')
        Traceback (most recent call last):
        ...
        BzrError: ('foo is already versioned', [])

        >>> b.add(['nothere'])
        Traceback (most recent call last):
        ...
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        self._need_writelock()

        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, types.StringTypes):
            assert(ids is None or isinstance(ids, types.StringTypes))
            files = [files]
            ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f, file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            fp = splitpath(f)
            if len(fp) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))

            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if file_id is None:
                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

            show_status('A', kind, quotefn(f))

            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)
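
# Usage sketch (illustrative): add() accepts either a single path or a list,
# and the optional ids list may mix explicit ids with None for autogenerated
# ones, as the docstring above describes.  Paths and ids here are made up.
def _demo_add(b):
    b.add('hello.txt')                                     # single path, id generated
    b.add(['README', 'docs'], ids=['readme-id-1', None])   # explicit and auto ids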

    def print_file(self, file, revno):
        """Print `file` to stdout."""
        self._need_readlock()
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            raise BzrError("%r is not present in revision %d" % (file, revno))
        tree.print_file(file_id)
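
# Usage sketch (illustrative): print_file() looks the path up in the inventory
# of the requested revision, so it works even if the file has since been
# removed from the working tree.  Assumes a Branch-like object `b`.
def _demo_print_file(b):
    b.print_file('hello.txt', 1)    # contents of hello.txt as of revision 1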

    def remove(self, files, verbose=False):
        """Mark nominated files for removal from the inventory.

        This does not remove their text.
        """
        ## TODO: Normalize names
        ## TODO: Remove nested loops; better scalability
        self._need_writelock()

        if isinstance(files, types.StringTypes):
            files = [files]

        tree = self.working_tree()
        inv = tree.inventory

        # do this before any modifications
        for f in files:
            fid = inv.path2id(f)
            if not fid:
                raise BzrError("cannot remove unversioned file %s" % quotefn(f))
            mutter("remove inventory entry %s {%s}" % (quotefn(f), fid))
            if verbose:
                # having removed it, it must be either ignored or unknown
                if tree.is_ignored(f):
                    new_status = 'I'
                else:
                    new_status = '?'
                show_status(new_status, inv[fid].kind, quotefn(f))
            del inv[fid]

        self._write_inventory(inv)

    # FIXME: this doesn't need to be a branch method
    def set_inventory(self, new_inventory_list):
        inv = Inventory()
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            if name == "":
                continue
            inv.add(InventoryEntry(file_id, name, kind, parent))
        self._write_inventory(inv)
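
# Usage sketch (illustrative): remove() only drops the entries from the working
# inventory; the files themselves stay on disk.  With verbose=True the new
# status is shown: 'I' if the path is now ignored, '?' if it is unknown.
def _demo_remove(b):
    b.remove(['hello.txt', 'docs'], verbose=True)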

    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned
        or control files or ignored.
        """
        return self.working_tree().unknowns()

    def commit(self, message, timestamp=None, timezone=None,
               committer=None):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed.  A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory.  This should mean at least that there are no
        broken hash pointers.  There is no way we can get a snapshot
        of the whole directory at an instant.  This would also have to
        be robust against files disappearing, moving, etc.  So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
        """
        self._need_writelock()

        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory.  The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision.  (These are
        # not present in the working inventory.)  We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        inv = Inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []

        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            entry = entry.copy()

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter("    file is missing, removing from inventory")
                show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files?  Seems a
            # waste to store them many times.

            inv.add(entry)

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    bailout("entry %r changed kind from %r to %r"
                            % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    bailout("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    bailout("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter('    unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter('    stored with text_id {%s}' % entry.text_id)

                    if not old_ie:
                        state = 'A'
                    elif (old_ie.name == entry.name
                          and old_ie.parent_id == entry.parent_id):
                        state = 'M'
                    else:
                        state = 'R'

                    show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # have to test.
            ## FIXME: There's probably a better way to do this; perhaps
            ## the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        inv_tmp.seek(0)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp is None:
            timestamp = time.time()

        if committer is None:
            committer = username()

        if timezone is None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       message=message,
                       precursor=self.last_patch(),
                       inventory_id=inv_id,
                       revision_id=rev_id)

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        rev_tmp.seek(0)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        note("committed r%d" % self.revno())

    def append_revision(self, revision_id):
        mutter("add {%s} to revision-history" % revision_id)
        rev_history = self.revision_history()

    def revision_history(self):
        """Return sequence of revision hashes on this branch.

        >>> ScratchBranch().revision_history()
        []
        """
        self._need_readlock()
        return [l.rstrip('\r\n') for l in
                self.controlfile('revision-history', 'r').readlines()]

    def common_ancestor(self, other, self_revno=None, other_revno=None):
        """
        >>> from bzrlib import commit
        >>> sb = ScratchBranch(files=['foo', 'foo~'])
        >>> sb.common_ancestor(sb) == (None, None)
        True
        >>> commit.commit(sb, "Committing first revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        1
        >>> clone = sb.clone()
        >>> commit.commit(sb, "Committing second revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        2
        >>> sb.common_ancestor(clone)[0]
        1
        >>> commit.commit(clone, "Committing divergent second revision",
        ...               verbose=False)
        >>> sb.common_ancestor(clone)[0]
        1
        >>> sb.common_ancestor(clone) == clone.common_ancestor(sb)
        True
        >>> sb.common_ancestor(sb) != clone.common_ancestor(clone)
        True
        >>> clone2 = sb.clone()
        >>> sb.common_ancestor(clone2)[0]
        2
        >>> sb.common_ancestor(clone2, self_revno=1)[0]
        1
        >>> sb.common_ancestor(clone2, other_revno=1)[0]
        1
        """
        my_history = self.revision_history()
        other_history = other.revision_history()
        if self_revno is None:
            self_revno = len(my_history)
        if other_revno is None:
            other_revno = len(other_history)
        indices = range(min((self_revno, other_revno)))
        indices.reverse()
        for r in indices:
            if my_history[r] == other_history[r]:
                return r+1, my_history[r]
        return None, None

    def enum_history(self, direction):
        """Return (revno, revision_id) for history of branch."""

    def revno(self):
        """Return current revision number for this branch.

        That is equivalent to the number of revisions committed to
        this branch.

        >>> b = ScratchBranch()
        >>> b.revno()
        0
        >>> b.commit('no foo')
        >>> b.revno()
        1
        """
        return len(self.revision_history())

    def last_patch(self):
        """Return last patch hash, or None if no history.

        >>> ScratchBranch().last_patch() == None
        True
        """
        ph = self.revision_history()
        if ph:
            return ph[-1]
        else:
            return None
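
# Illustrative relationship between the history accessors above: revno() is
# just the length of revision_history(), and last_patch() is its final entry
# (or None on a branch with no commits).  Assumes a Branch-like object `b`.
def _demo_history(b):
    history = b.revision_history()
    assert b.revno() == len(history)
    if history:
        assert b.last_patch() == history[-1]
    else:
        assert b.last_patch() is None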

    def missing_revisions(self, other):
        """If self and other have not diverged, return a list of the revisions
        present in other, but missing from self.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch()
        >>> br2 = ScratchBranch()
        >>> br1.missing_revisions(br2)
        []
        >>> commit(br2, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        [u'REVISION-ID-1']
        >>> br2.missing_revisions(br1)
        []
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        []
        >>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
        >>> br1.missing_revisions(br2)
        [u'REVISION-ID-2A']
        >>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
        >>> br1.missing_revisions(br2)
        Traceback (most recent call last):
        ...
        DivergedBranches: These branches have diverged.
        """
        self_history = self.revision_history()
        self_len = len(self_history)
        other_history = other.revision_history()
        other_len = len(other_history)
        common_index = min(self_len, other_len) - 1
        if common_index >= 0 and \
           self_history[common_index] != other_history[common_index]:
            raise DivergedBranches(self, other)

        if self_len < other_len:
            return other_history[self_len:]
        return []

    def update_revisions(self, other):
        """If self and other have not diverged, ensure self has all the
        revisions in other.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch(files=['foo', 'bar'])
        >>> br1.add('foo')
        >>> br1.add('bar')
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1", verbose=False)
        >>> br2 = ScratchBranch()
        >>> br2.update_revisions(br1)
        Added 2 texts.
        Added 1 inventories.
        Added 1 revisions.
        >>> br2.revision_history()
        [u'REVISION-ID-1']
        >>> br2.update_revisions(br1)
        Added 0 texts.
        Added 0 inventories.
        Added 0 revisions.
        >>> br1.text_store.total_size() == br2.text_store.total_size()
        True
        """
        revision_ids = self.missing_revisions(other)
        revisions = [other.get_revision(f) for f in revision_ids]
        needed_texts = sets.Set()
        for rev in revisions:
            inv = other.get_inventory(str(rev.inventory_id))
            for key, entry in inv.iter_entries():
                if entry.text_id is None:
                    continue
                if entry.text_id not in self.text_store:
                    needed_texts.add(entry.text_id)
        count = self.text_store.copy_multi(other.text_store, needed_texts)
        print "Added %d texts." % count
        inventory_ids = [ f.inventory_id for f in revisions ]
        count = self.inventory_store.copy_multi(other.inventory_store,
                                                inventory_ids)
        print "Added %d inventories." % count
        revision_ids = [ f.revision_id for f in revisions]
        count = self.revision_store.copy_multi(other.revision_store,
                                               revision_ids)
        for revision_id in revision_ids:
            self.append_revision(revision_id)
        print "Added %d revisions." % count

    def commit(self, *args, **kw):
        from bzrlib.commit import commit
        commit(self, *args, **kw)

    def lookup_revision(self, revno):
        """Return revision hash for revision number."""
        if revno == 0:
            return None
        try:
            # list is 0-based; revisions are 1-based
            return self.revision_history()[revno - 1]
        except IndexError:
            raise BzrError("no such revision %s" % revno)

    def rename_one(self, from_rel, to_rel):
        """Rename one file.

        This can change the directory or the filename or both.
        """
        self._need_writelock()
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id is None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id is None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter("  file_id    {%s}" % file_id)
        mutter("  from_rel   %r" % from_rel)
        mutter("  to_rel     %r" % to_rel)
        mutter("  to_dir     %r" % to_dir)
        mutter("  to_dir_id  {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
        try:
            os.rename(from_abs, to_abs)
        except OSError, e:
            raise BzrError("failed to rename %r to %r: %s"
                           % (from_abs, to_abs, e[1]),
                           ["rename rolled back"])

        self._write_inventory(inv)
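
# Usage sketch (illustrative): rename_one() can change the name, the containing
# directory, or both, provided the destination directory is already versioned.
# Path names here are made up.
def _demo_rename_one(b):
    b.rename_one('hello.txt', 'docs/hello.txt')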

    def move(self, from_paths, to_name):
        """Move files to be under the directory to_name.

        Note that to_name is only the last component of the new name;
        this doesn't change the directory.
        """
        self._need_writelock()
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id is None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id is None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done.  But we should have caught most problems.

        for f in from_paths:
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                               ["rename rolled back"])

        self._write_inventory(inv)
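
# Usage sketch (illustrative): move() takes a list of versioned paths and a
# versioned destination directory; each file keeps its name and is reparented
# under to_name.  Path names here are made up.
def _demo_move(b):
    b.move(['hello.txt', 'README'], 'docs')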