# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

import sys, os, os.path, random, time, sha, sets, types, re, shutil, tempfile
import traceback, socket, fnmatch, difflib, time
from binascii import hexlify

from bzrlib.trace import mutter, note
from bzrlib.osutils import isdir, quotefn, compact_date, rand_bytes, \
     sha_file, appendpath, file_kind
from bzrlib.errors import BzrError, InvalidRevisionNumber, InvalidRevisionId, \
     DivergedBranches, NotBranchError
from bzrlib.textui import show_status
from bzrlib.revision import Revision
from bzrlib.delta import compare_trees
from bzrlib.tree import EmptyTree, RevisionTree

from inventory import Inventory
from trace import mutter, note
from tree import Tree, EmptyTree, RevisionTree, WorkingTree
from inventory import InventoryEntry, Inventory
from osutils import isdir, quotefn, isfile, uuid, sha_file, username, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from store import ImmutableStore
from revision import Revision
from errors import bailout, BzrError
from textui import show_status
from diff import diff_trees


BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"
## TODO: Maybe include checks for common corruption of newlines, etc?

# TODO: Some operations like log might retrieve the same revisions
# repeatedly to calculate deltas.  We could perhaps have a weakref
# cache in memory to make this faster.

# TODO: please move the revision-string syntax stuff out of the branch
# object; it's clutter


def find_branch(f, **args):
    if f and (f.startswith('http://') or f.startswith('https://')):
        from bzrlib.remotebranch import RemoteBranch
        return RemoteBranch(f, **args)
    return Branch(f, **args)
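
# Example usage (a sketch; the paths and URL below are hypothetical):
#
#     br = find_branch('/home/user/project')           # local Branch
#     remote = find_branch('http://example.com/proj')  # RemoteBranch
#     print br.revno()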


def find_cached_branch(f, cache_root, **args):
    from bzrlib.remotebranch import RemoteBranch
    br = find_branch(f, **args)

    def cacheify(br, store_name):
        from bzrlib.meta_store import CachedStore
        cache_path = os.path.join(cache_root, store_name)
        new_store = CachedStore(getattr(br, store_name), cache_path)
        setattr(br, store_name, new_store)

    if isinstance(br, RemoteBranch):
        cacheify(br, 'inventory_store')
        cacheify(br, 'text_store')
        cacheify(br, 'revision_store')
    return br
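
# Example usage (sketch; the URL and cache directory are hypothetical):
#
#     br = find_cached_branch('http://example.com/proj', '/tmp/bzr-cache')
#     # store reads on the remote branch now go through local CachedStores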


def _relpath(base, path):
    """Return path relative to base, or raise exception.

    The path may be either an absolute path or a path relative to the
    current working directory.

    Lifted out of Branch.relpath for ease of testing.

    os.path.commonprefix (python2.4) has a bad bug that it works just
    on string prefixes, assuming that '/u' is a prefix of '/u2'.  This
    avoids that problem."""
    rp = os.path.abspath(path)

    s = []
    head = rp
    while len(head) >= len(base):
        if head == base:
            break
        head, tail = os.path.split(head)
        if tail:
            s.insert(0, tail)
    else:
        raise NotBranchError("path %r is not within branch %r" % (rp, base))

    return os.sep.join(s)
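
# Example (sketch; paths are hypothetical, POSIX separators assumed):
#
#     _relpath('/srv/branch', '/srv/branch/src/main.c')  => 'src/main.c'
#     _relpath('/srv/branch', '/srv/elsewhere/main.c')   => NotBranchError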


def find_branch_root(f=None):
    """Find the branch root enclosing f, or pwd.

    f may be a filename or a URL.

    It is not necessary that f exists.

    Basically we keep looking up until we find the control directory or
    run into the root.  If there isn't one, raises NotBranchError.
    """
    if f == None:
        f = os.getcwd()
    elif hasattr(os.path, 'realpath'):
        f = os.path.realpath(f)
    else:
        f = os.path.abspath(f)
    if not os.path.exists(f):
        raise BzrError('%r does not exist' % f)
    __repr__ = __str__


    def __del__(self):
        if self._lock_mode or self._lock:
            from bzrlib.warnings import warn
            warn("branch %r was not explicitly unlocked" % self)
            self._lock.unlock()

    def lock_write(self):
        if self._lock_mode:
            if self._lock_mode != 'w':
                from bzrlib.errors import LockError
                raise LockError("can't upgrade to a write lock from %r" %
                                self._lock_mode)
            self._lock_count += 1
        else:
            from bzrlib.lock import WriteLock

            self._lock = WriteLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'w'
            self._lock_count = 1

    def lock_read(self):
        if self._lock_mode:
            assert self._lock_mode in ('r', 'w'), \
                   "invalid lock mode %r" % self._lock_mode
            self._lock_count += 1
        else:
            from bzrlib.lock import ReadLock

            self._lock = ReadLock(self.controlfilename('branch-lock'))
            self._lock_mode = 'r'
            self._lock_count = 1

    def unlock(self):
        if not self._lock_mode:
            from bzrlib.errors import LockError
            raise LockError('branch %r is not locked' % (self))

        if self._lock_count > 1:
            self._lock_count -= 1
        else:
            self._lock.unlock()
            self._lock = None
            self._lock_mode = self._lock_count = None
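
    # The intended locking pattern (sketch; 'br' is any Branch instance):
    #
    #     br.lock_read()
    #     try:
    #         ...read revisions, inventories, trees...
    #     finally:
    #         br.unlock()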
123
def lock(self, mode='w'):
124
"""Lock the on-disk branch, excluding other processes."""
130
om = os.O_WRONLY | os.O_CREAT
135
raise BzrError("invalid locking mode %r" % mode)
137
# XXX: Old branches might not have the lock file, and
138
# won't get one until someone does a write-mode command on
139
# them or creates it by hand.
141
lockfile = os.open(self.controlfilename('branch-lock'), om)
142
fcntl.lockf(lockfile, lm)
144
fcntl.lockf(lockfile, fcntl.LOCK_UN)
146
self._lockmode = None
148
self._lockmode = mode
150
warning("please write a locking method for platform %r" % sys.platform)
152
self._lockmode = None
154
self._lockmode = mode
157
def _need_readlock(self):
158
if self._lockmode not in ['r', 'w']:
159
raise BzrError('need read lock on branch, only have %r' % self._lockmode)
161
def _need_writelock(self):
162
if self._lockmode not in ['w']:
163
raise BzrError('need write lock on branch, only have %r' % self._lockmode)
    def abspath(self, name):
        """Return absolute filename for something in the branch"""
        return os.path.join(self.base, name)


    def relpath(self, path):
        """Return path relative to this branch of something inside it.

        Raises an error if path is not in this branch."""
        return _relpath(self.base, path)


    def controlfilename(self, file_or_path):
        """Return location relative to branch."""
        if isinstance(file_or_path, basestring):
            file_or_path = [file_or_path]
        return os.path.join(self.base, bzrlib.BZRDIR, *file_or_path)
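
    # Example (sketch; assumes a branch based at '/srv/proj' and that
    # bzrlib.BZRDIR is the '.bzr' control directory):
    #
    #     self.controlfilename('branch-lock')
    #         => '/srv/proj/.bzr/branch-lock'
    #     self.controlfilename(['revision-store', 'REV-ID'])
    #         => '/srv/proj/.bzr/revision-store/REV-ID'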
    def _write_inventory(self, inv):
        """Update the working inventory.

        That is to say, the inventory describing changes underway, that
        will be committed to the next revision.
        """
        from bzrlib.atomicfile import AtomicFile

        f = AtomicFile(self.controlfilename('inventory'), 'wb')
        try:
            bzrlib.xml.serializer_v4.write_inventory(inv, f)
            f.commit()
        finally:
            f.close()

        mutter('wrote working inventory')

    inventory = property(read_working_inventory, _write_inventory, None,
                         """Inventory for the working copy.""")
393
def add(self, files, ids=None):
286
def add(self, files, verbose=False):
394
287
"""Make files versioned.
396
Note that the command line normally calls smart_add instead,
397
which can automatically recurse.
289
Note that the command line normally calls smart_add instead.
399
291
This puts the files in the Added state, so that they will be
400
292
recorded by the next commit.
403
List of paths to add, relative to the base of the tree.
406
If set, use these instead of automatically generated ids.
407
Must be the same length as the list of files, but may
408
contain None for ids that are to be autogenerated.
410
294
TODO: Perhaps have an option to add the ids even if the files do
413
TODO: Perhaps yield the ids and paths as they're added.
297
TODO: Perhaps return the ids of the files? But then again it
298
is easy to retrieve them if they're needed.
300
TODO: Option to specify file id.
302
TODO: Adding a directory should optionally recurse down and
303
add all non-ignored children. Perhaps do that in a
306
>>> b = ScratchBranch(files=['foo'])
307
>>> 'foo' in b.unknowns()
312
>>> 'foo' in b.unknowns()
314
>>> bool(b.inventory.path2id('foo'))
320
Traceback (most recent call last):
322
BzrError: ('foo is already versioned', [])
324
>>> b.add(['nothere'])
325
Traceback (most recent call last):
326
BzrError: ('cannot add: not a regular file or directory: nothere', [])
328
self._need_writelock()
415
330
# TODO: Re-adding a file that is removed in the working copy
416
331
# should probably put it back with the previous ID.
417
if isinstance(files, basestring):
418
assert(ids is None or isinstance(ids, basestring))
332
if isinstance(files, types.StringTypes):
424
ids = [None] * len(files)
426
assert(len(ids) == len(files))
430
inv = self.read_working_inventory()
431
for f,file_id in zip(files, ids):
432
if is_control_file(f):
433
raise BzrError("cannot add control file %s" % quotefn(f))
438
raise BzrError("cannot add top-level %r" % f)
440
fullpath = os.path.normpath(self.abspath(f))
443
kind = file_kind(fullpath)
445
# maybe something better?
446
raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))
448
if kind != 'file' and kind != 'directory':
449
raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))
452
file_id = gen_file_id(f)
453
inv.add_path(f, kind=kind, file_id=file_id)
455
mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))
457
self._write_inventory(inv)
335
inv = self.read_working_inventory()
337
if is_control_file(f):
338
bailout("cannot add control file %s" % quotefn(f))
343
bailout("cannot add top-level %r" % f)
345
fullpath = os.path.normpath(self.abspath(f))
348
kind = file_kind(fullpath)
350
# maybe something better?
351
bailout('cannot add: not a regular file or directory: %s' % quotefn(f))
353
if kind != 'file' and kind != 'directory':
354
bailout('cannot add: not a regular file or directory: %s' % quotefn(f))
356
file_id = gen_file_id(f)
357
inv.add_path(f, kind=kind, file_id=file_id)
360
show_status('A', kind, quotefn(f))
362
mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))
364
self._write_inventory(inv)
462
367
def print_file(self, file, revno):
463
368
"""Print `file` to stdout."""
466
tree = self.revision_tree(self.lookup_revision(revno))
467
# use inventory as it was in that revision
468
file_id = tree.inventory.path2id(file)
470
raise BzrError("%r is not present in revision %s" % (file, revno))
471
tree.print_file(file_id)
369
self._need_readlock()
370
tree = self.revision_tree(self.lookup_revision(revno))
371
# use inventory as it was in that revision
372
file_id = tree.inventory.path2id(file)
374
bailout("%r is not present in revision %d" % (file, revno))
375
tree.print_file(file_id)
476
378
def remove(self, files, verbose=False):
477
379
"""Mark nominated files for removal from the inventory.
549
458
return self.working_tree().unknowns()
552
def append_revision(self, *revision_ids):
553
from bzrlib.atomicfile import AtomicFile
555
for revision_id in revision_ids:
556
mutter("add {%s} to revision-history" % revision_id)
461
def commit(self, message, timestamp=None, timezone=None,
464
"""Commit working copy as a new revision.
466
The basic approach is to add all the file texts into the
467
store, then the inventory, then make a new revision pointing
468
to that inventory and store that.
470
This is not quite safe if the working copy changes during the
471
commit; for the moment that is simply not allowed. A better
472
approach is to make a temporary copy of the files before
473
computing their hashes, and then add those hashes in turn to
474
the inventory. This should mean at least that there are no
475
broken hash pointers. There is no way we can get a snapshot
476
of the whole directory at an instant. This would also have to
477
be robust against files disappearing, moving, etc. So the
478
whole thing is a bit hard.
480
timestamp -- if not None, seconds-since-epoch for a
481
postdated/predated commit.
483
self._need_writelock()
485
## TODO: Show branch names
487
# TODO: Don't commit if there are no changes, unless forced?
489
# First walk over the working inventory; and both update that
490
# and also build a new revision inventory. The revision
491
# inventory needs to hold the text-id, sha1 and size of the
492
# actual file versions committed in the revision. (These are
493
# not present in the working inventory.) We also need to
494
# detect missing/deleted files, and remove them from the
497
work_inv = self.read_working_inventory()
499
basis = self.basis_tree()
500
basis_inv = basis.inventory
502
for path, entry in work_inv.iter_entries():
503
## TODO: Cope with files that have gone missing.
505
## TODO: Check that the file kind has not changed from the previous
506
## revision of this file (if any).
510
p = self.abspath(path)
511
file_id = entry.file_id
512
mutter('commit prep file %s, id %r ' % (p, file_id))
514
if not os.path.exists(p):
515
mutter(" file is missing, removing from inventory")
517
show_status('D', entry.kind, quotefn(path))
518
missing_ids.append(file_id)
521
# TODO: Handle files that have been deleted
523
# TODO: Maybe a special case for empty files? Seems a
524
# waste to store them many times.
528
if basis_inv.has_id(file_id):
529
old_kind = basis_inv[file_id].kind
530
if old_kind != entry.kind:
531
bailout("entry %r changed kind from %r to %r"
532
% (file_id, old_kind, entry.kind))
534
if entry.kind == 'directory':
536
bailout("%s is entered as directory but not a directory" % quotefn(p))
537
elif entry.kind == 'file':
539
bailout("%s is entered as file but is not a file" % quotefn(p))
541
content = file(p, 'rb').read()
543
entry.text_sha1 = sha_string(content)
544
entry.text_size = len(content)
546
old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
548
and (old_ie.text_size == entry.text_size)
549
and (old_ie.text_sha1 == entry.text_sha1)):
550
## assert content == basis.get_file(file_id).read()
551
entry.text_id = basis_inv[file_id].text_id
552
mutter(' unchanged from previous text_id {%s}' %
556
entry.text_id = gen_file_id(entry.name)
557
self.text_store.add(content, entry.text_id)
558
mutter(' stored with text_id {%s}' % entry.text_id)
562
elif (old_ie.name == entry.name
563
and old_ie.parent_id == entry.parent_id):
568
show_status(state, entry.kind, quotefn(path))
570
for file_id in missing_ids:
571
# have to do this later so we don't mess up the iterator.
572
# since parents may be removed before their children we
575
# FIXME: There's probably a better way to do this; perhaps
576
# the workingtree should know how to filter itself.
577
if work_inv.has_id(file_id):
578
del work_inv[file_id]
581
inv_id = rev_id = _gen_revision_id(time.time())
583
inv_tmp = tempfile.TemporaryFile()
584
inv.write_xml(inv_tmp)
586
self.inventory_store.add(inv_tmp, inv_id)
587
mutter('new inventory_id is {%s}' % inv_id)
589
self._write_inventory(work_inv)
591
if timestamp == None:
592
timestamp = time.time()
594
if committer == None:
595
committer = username()
598
timezone = local_time_offset()
600
mutter("building commit log message")
601
rev = Revision(timestamp=timestamp,
604
precursor = self.last_patch(),
609
rev_tmp = tempfile.TemporaryFile()
610
rev.write_xml(rev_tmp)
612
self.revision_store.add(rev_tmp, rev_id)
613
mutter("new revision_id is {%s}" % rev_id)
615
## XXX: Everything up to here can simply be orphaned if we abort
616
## the commit; it will leave junk files behind but that doesn't
619
## TODO: Read back the just-generated changeset, and make sure it
620
## applies and recreates the right state.
622
## TODO: Also calculate and store the inventory SHA1
623
mutter("committing patch r%d" % (self.revno() + 1))
626
self.append_revision(rev_id)
629
note("commited r%d" % self.revno())
632
def append_revision(self, revision_id):
633
mutter("add {%s} to revision-history" % revision_id)
558
634
rev_history = self.revision_history()
559
rev_history.extend(revision_ids)
561
f = AtomicFile(self.controlfilename('revision-history'))
563
for rev_id in rev_history:
570
def get_revision_xml_file(self, revision_id):
571
"""Return XML file object for revision object."""
572
if not revision_id or not isinstance(revision_id, basestring):
573
raise InvalidRevisionId(revision_id)
578
return self.revision_store[revision_id]
580
raise bzrlib.errors.NoSuchRevision(self, revision_id)
586
get_revision_xml = get_revision_xml_file
636
tmprhname = self.controlfilename('revision-history.tmp')
637
rhname = self.controlfilename('revision-history')
639
f = file(tmprhname, 'wt')
640
rev_history.append(revision_id)
641
f.write('\n'.join(rev_history))
645
if sys.platform == 'win32':
647
os.rename(tmprhname, rhname)
589
651
def get_revision(self, revision_id):
590
652
"""Return the Revision object for a named revision"""
591
xml_file = self.get_revision_xml_file(revision_id)
594
r = bzrlib.xml.serializer_v4.read_revision(xml_file)
595
except SyntaxError, e:
596
raise bzrlib.errors.BzrError('failed to unpack revision_xml',
653
self._need_readlock()
654
r = Revision.read_xml(self.revision_store[revision_id])
600
655
assert r.revision_id == revision_id

    def get_revision_delta(self, revno):
        """Return the delta for one revision.

        The delta is relative to its mainline predecessor, or the
        empty tree for revision 1.
        """
        assert isinstance(revno, int)
        rh = self.revision_history()
        if not (1 <= revno <= len(rh)):
            raise InvalidRevisionNumber(revno)

        # revno is 1-based; list is 0-based

        new_tree = self.revision_tree(rh[revno-1])
        if revno == 1:
            old_tree = EmptyTree()
        else:
            old_tree = self.revision_tree(rh[revno-2])

        return compare_trees(old_tree, new_tree)
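
    # Example (sketch; assumes the compare_trees() result exposes 'added',
    # 'removed' and 'modified' lists as in bzrlib.delta):
    #
    #     delta = br.get_revision_delta(5)
    #     for path, file_id, kind in delta.added:
    #         print 'added', path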
627
def get_revision_sha1(self, revision_id):
628
"""Hash the stored value of a revision, and return it."""
629
# In the future, revision entries will be signed. At that
630
# point, it is probably best *not* to include the signature
631
# in the revision hash. Because that lets you re-sign
632
# the revision, (add signatures/remove signatures) and still
633
# have all hash pointers stay consistent.
634
# But for now, just hash the contents.
635
return bzrlib.osutils.sha_file(self.get_revision_xml(revision_id))
638
659
def get_inventory(self, inventory_id):
639
660
"""Get Inventory object by hash.
641
662
TODO: Perhaps for this and similar methods, take a revision
642
663
parameter which can be either an integer revno or a
644
from bzrlib.inventory import Inventory
646
f = self.get_inventory_xml_file(inventory_id)
647
return bzrlib.xml.serializer_v4.read_inventory(f)
650
def get_inventory_xml(self, inventory_id):
651
"""Get inventory XML as a file object."""
652
return self.inventory_store[inventory_id]
654
get_inventory_xml_file = get_inventory_xml
657
def get_inventory_sha1(self, inventory_id):
658
"""Return the sha1 hash of the inventory entry
660
return sha_file(self.get_inventory_xml(inventory_id))
665
self._need_readlock()
666
i = Inventory.read_xml(self.inventory_store[inventory_id])
663
670
def get_revision_inventory(self, revision_id):
664
671
"""Return inventory of a past revision."""
665
# bzr 0.0.6 imposes the constraint that the inventory_id
666
# must be the same as its revision, so this is trivial.
672
self._need_readlock()
667
673
if revision_id == None:
668
from bzrlib.inventory import Inventory
669
return Inventory(self.get_root_id())
671
return self.get_inventory(revision_id)
676
return self.get_inventory(self.get_revision(revision_id).inventory_id)
674
679
def revision_history(self):
736
692
That is equivalent to the number of revisions committed to
695
>>> b = ScratchBranch()
698
>>> b.commit('no foo')
739
702
return len(self.revision_history())
742
705
def last_patch(self):
743
706
"""Return last patch hash, or None if no history.
708
>>> ScratchBranch().last_patch() == None
745
711
ph = self.revision_history()
752
def missing_revisions(self, other, stop_revision=None, diverged_ok=False):
754
If self and other have not diverged, return a list of the revisions
755
present in other, but missing from self.
757
>>> from bzrlib.commit import commit
758
>>> bzrlib.trace.silent = True
759
>>> br1 = ScratchBranch()
760
>>> br2 = ScratchBranch()
761
>>> br1.missing_revisions(br2)
763
>>> commit(br2, "lala!", rev_id="REVISION-ID-1")
764
>>> br1.missing_revisions(br2)
766
>>> br2.missing_revisions(br1)
768
>>> commit(br1, "lala!", rev_id="REVISION-ID-1")
769
>>> br1.missing_revisions(br2)
771
>>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
772
>>> br1.missing_revisions(br2)
774
>>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
775
>>> br1.missing_revisions(br2)
776
Traceback (most recent call last):
777
DivergedBranches: These branches have diverged.
779
self_history = self.revision_history()
780
self_len = len(self_history)
781
other_history = other.revision_history()
782
other_len = len(other_history)
783
common_index = min(self_len, other_len) -1
784
if common_index >= 0 and \
785
self_history[common_index] != other_history[common_index]:
786
raise DivergedBranches(self, other)
788
if stop_revision is None:
789
stop_revision = other_len
790
elif stop_revision > other_len:
791
raise bzrlib.errors.NoSuchRevision(self, stop_revision)
793
return other_history[self_len:stop_revision]
796
def update_revisions(self, other, stop_revision=None):
797
"""Pull in all new revisions from other branch.
799
from bzrlib.fetch import greedy_fetch
800
from bzrlib.revision import get_intervening_revisions
802
pb = bzrlib.ui.ui_factory.progress_bar()
803
pb.update('comparing histories')
806
revision_ids = self.missing_revisions(other, stop_revision)
807
except DivergedBranches, e:
809
if stop_revision is None:
810
end_revision = other.last_patch()
811
revision_ids = get_intervening_revisions(self.last_patch(),
813
assert self.last_patch() not in revision_ids
814
except bzrlib.errors.NotAncestor:
817
if len(revision_ids) > 0:
818
count = greedy_fetch(self, other, revision_ids[-1], pb)[0]
821
self.append_revision(*revision_ids)
822
## note("Added %d revisions." % count)
825
def install_revisions(self, other, revision_ids, pb):
826
if hasattr(other.revision_store, "prefetch"):
827
other.revision_store.prefetch(revision_ids)
828
if hasattr(other.inventory_store, "prefetch"):
829
inventory_ids = [other.get_revision(r).inventory_id
830
for r in revision_ids]
831
other.inventory_store.prefetch(inventory_ids)
834
pb = bzrlib.ui.ui_factory.progress_bar()
841
for i, rev_id in enumerate(revision_ids):
842
pb.update('fetching revision', i+1, len(revision_ids))
844
rev = other.get_revision(rev_id)
845
except bzrlib.errors.NoSuchRevision:
849
revisions.append(rev)
850
inv = other.get_inventory(str(rev.inventory_id))
851
for key, entry in inv.iter_entries():
852
if entry.text_id is None:
854
if entry.text_id not in self.text_store:
855
needed_texts.add(entry.text_id)
859
count, cp_fail = self.text_store.copy_multi(other.text_store,
861
#print "Added %d texts." % count
862
inventory_ids = [ f.inventory_id for f in revisions ]
863
count, cp_fail = self.inventory_store.copy_multi(other.inventory_store,
865
#print "Added %d inventories." % count
866
revision_ids = [ f.revision_id for f in revisions]
868
count, cp_fail = self.revision_store.copy_multi(other.revision_store,
871
assert len(cp_fail) == 0
872
return count, failures
    def commit(self, *args, **kw):
        from bzrlib.commit import commit
        commit(self, *args, **kw)
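
    # Example (sketch): the real work happens in bzrlib.commit.commit();
    # this wrapper keeps existing Branch.commit() call sites working.
    # The file name and revision id below are hypothetical.
    #
    #     br.add(['hello.c'])
    #     br.commit('add hello.c', rev_id='HYPOTHETICAL-REV-ID-1')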
880
def lookup_revision(self, revision):
881
"""Return the revision identifier for a given revision information."""
882
revno, info = self._get_revision_info(revision)
886
    def revision_id_to_revno(self, revision_id):
        """Given a revision id, return its revno"""
        history = self.revision_history()
        try:
            return history.index(revision_id) + 1
        except ValueError:
            raise bzrlib.errors.NoSuchRevision(self, revision_id)
895
def get_revision_info(self, revision):
896
"""Return (revno, revision id) for revision identifier.
898
revision can be an integer, in which case it is assumed to be revno (though
899
this will translate negative values into positive ones)
900
revision can also be a string, in which case it is parsed for something like
901
'date:' or 'revid:' etc.
903
revno, rev_id = self._get_revision_info(revision)
905
raise bzrlib.errors.NoSuchRevision(self, revision)
908
    def get_rev_id(self, revno, history=None):
        """Find the revision id of the specified revno."""
        if revno == 0:
            return None
        if history is None:
            history = self.revision_history()
        elif revno <= 0 or revno > len(history):
            raise bzrlib.errors.NoSuchRevision(self, revno)
        return history[revno - 1]
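
    # Example (sketch): revnos and revision ids are interchangeable handles.
    #
    #     rev_id = br.get_rev_id(4)                  # 4th mainline revision
    #     assert br.revision_id_to_revno(rev_id) == 4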
918
def _get_revision_info(self, revision):
919
"""Return (revno, revision id) for revision specifier.
921
revision can be an integer, in which case it is assumed to be revno
922
(though this will translate negative values into positive ones)
923
revision can also be a string, in which case it is parsed for something
924
like 'date:' or 'revid:' etc.
926
A revid is always returned. If it is None, the specifier referred to
927
the null revision. If the revid does not occur in the revision
928
history, revno will be None.
934
try:# Convert to int if possible
935
revision = int(revision)
938
revs = self.revision_history()
939
if isinstance(revision, int):
941
revno = len(revs) + revision + 1
944
rev_id = self.get_rev_id(revno, revs)
945
elif isinstance(revision, basestring):
946
for prefix, func in Branch.REVISION_NAMESPACES.iteritems():
947
if revision.startswith(prefix):
948
result = func(self, revs, revision)
950
revno, rev_id = result
953
rev_id = self.get_rev_id(revno, revs)
956
raise BzrError('No namespace registered for string: %r' %
959
raise TypeError('Unhandled revision type %s' % revision)
963
raise bzrlib.errors.NoSuchRevision(self, revision)
966
def _namespace_revno(self, revs, revision):
967
"""Lookup a revision by revision number"""
968
assert revision.startswith('revno:')
970
return (int(revision[6:]),)
973
REVISION_NAMESPACES['revno:'] = _namespace_revno
975
def _namespace_revid(self, revs, revision):
976
assert revision.startswith('revid:')
977
rev_id = revision[len('revid:'):]
979
return revs.index(rev_id) + 1, rev_id
982
REVISION_NAMESPACES['revid:'] = _namespace_revid
984
def _namespace_last(self, revs, revision):
985
assert revision.startswith('last:')
987
offset = int(revision[5:])
992
raise BzrError('You must supply a positive value for --revision last:XXX')
993
return (len(revs) - offset + 1,)
994
REVISION_NAMESPACES['last:'] = _namespace_last
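
    # Example (sketch): the registered namespaces drive get_revision_info(),
    # which returns a (revno, revision_id) pair.  The revid below is
    # hypothetical.
    #
    #     br.get_revision_info(7)               # plain revno
    #     br.get_revision_info('last:1')        # most recent revision
    #     br.get_revision_info('revid:HYPOTHETICAL-REV-ID-1')
    #     br.get_revision_info('date:yesterday')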
996
def _namespace_tag(self, revs, revision):
997
assert revision.startswith('tag:')
998
raise BzrError('tag: namespace registered, but not implemented.')
999
REVISION_NAMESPACES['tag:'] = _namespace_tag
1001
def _namespace_date(self, revs, revision):
1002
assert revision.startswith('date:')
1004
# Spec for date revisions:
1006
# value can be 'yesterday', 'today', 'tomorrow' or a YYYY-MM-DD string.
1007
        # it can also start with a '+/-/='.  '+' says match the first
        # entry after the given date.  '-' says match the first entry
        # before the date.  '=' says match the first entry after, but
        # still on the given date.
1011
# +2005-05-12 says find the first matching entry after May 12th, 2005 at 0:00
1012
# -2005-05-12 says find the first matching entry before May 12th, 2005 at 0:00
1013
# =2005-05-12 says find the first match after May 12th, 2005 at 0:00 but before
1014
# May 13th, 2005 at 0:00
1016
# So the proper way of saying 'give me all entries for today' is:
1017
# -r {date:+today}:{date:-tomorrow}
1018
# The default is '=' when not supplied
1021
if val[:1] in ('+', '-', '='):
1022
match_style = val[:1]
1025
today = datetime.datetime.today().replace(hour=0,minute=0,second=0,microsecond=0)
1026
if val.lower() == 'yesterday':
1027
dt = today - datetime.timedelta(days=1)
1028
elif val.lower() == 'today':
1030
elif val.lower() == 'tomorrow':
1031
dt = today + datetime.timedelta(days=1)
1034
# This should be done outside the function to avoid recompiling it.
1035
_date_re = re.compile(
1036
r'(?P<date>(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d))?'
1038
r'(?P<time>(?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d))?)?'
1040
m = _date_re.match(val)
1041
if not m or (not m.group('date') and not m.group('time')):
1042
raise BzrError('Invalid revision date %r' % revision)
1045
year, month, day = int(m.group('year')), int(m.group('month')), int(m.group('day'))
1047
year, month, day = today.year, today.month, today.day
1049
hour = int(m.group('hour'))
1050
minute = int(m.group('minute'))
1051
if m.group('second'):
1052
second = int(m.group('second'))
1056
hour, minute, second = 0,0,0
1058
dt = datetime.datetime(year=year, month=month, day=day,
1059
hour=hour, minute=minute, second=second)
1063
if match_style == '-':
1065
elif match_style == '=':
1066
last = dt + datetime.timedelta(days=1)
1069
for i in range(len(revs)-1, -1, -1):
1070
r = self.get_revision(revs[i])
1071
# TODO: Handle timezone.
1072
dt = datetime.datetime.fromtimestamp(r.timestamp)
1073
if first >= dt and (last is None or dt >= last):
1076
for i in range(len(revs)):
1077
r = self.get_revision(revs[i])
1078
# TODO: Handle timezone.
1079
dt = datetime.datetime.fromtimestamp(r.timestamp)
1080
if first <= dt and (last is None or dt <= last):
1082
REVISION_NAMESPACES['date:'] = _namespace_date
724
# list is 0-based; revisions are 1-based
725
return self.revision_history()[revno-1]
727
raise BzrError("no such revision %s" % revno)
1084
730
def revision_tree(self, revision_id):
1085
731
"""Return Tree for a revision on this branch.
1087
733
`revision_id` may be None for the null revision, in which case
1088
734
an `EmptyTree` is returned."""
1089
# TODO: refactor this to use an existing revision object
1090
# so we don't need to read it in twice.
735
self._need_readlock()
1091
736
if revision_id == None:
1092
737
return EmptyTree()
771
def write_log(self, show_timezone='original', verbose=False):
772
"""Write out human-readable log of commits to this branch
774
utc -- If true, show dates in universal time, not local time."""
775
self._need_readlock()
776
## TODO: Option to choose either original, utc or local timezone
779
for p in self.revision_history():
781
print 'revno:', revno
782
## TODO: Show hash if --id is given.
783
##print 'revision-hash:', p
784
rev = self.get_revision(p)
785
print 'committer:', rev.committer
786
print 'timestamp: %s' % (format_date(rev.timestamp, rev.timezone or 0,
789
## opportunistic consistency check, same as check_patch_chaining
790
if rev.precursor != precursor:
791
bailout("mismatched precursor!")
795
print ' (no message)'
797
for l in rev.message.split('\n'):
800
if verbose == True and precursor != None:
801
print 'changed files:'
802
tree = self.revision_tree(p)
803
prevtree = self.revision_tree(precursor)
805
for file_state, fid, old_name, new_name, kind in \
806
diff_trees(prevtree, tree, ):
807
if file_state == 'A' or file_state == 'M':
808
show_status(file_state, kind, new_name)
809
elif file_state == 'D':
810
show_status(file_state, kind, old_name)
811
elif file_state == 'R':
812
show_status(file_state, kind,
813
old_name + ' => ' + new_name)
1117
819
def rename_one(self, from_rel, to_rel):
1118
820
"""Rename one file.
1120
822
This can change the directory or the filename or both.
824
self._need_writelock()
825
tree = self.working_tree()
827
if not tree.has_filename(from_rel):
828
bailout("can't rename: old working file %r does not exist" % from_rel)
829
if tree.has_filename(to_rel):
830
bailout("can't rename: new working file %r already exists" % to_rel)
832
file_id = inv.path2id(from_rel)
834
bailout("can't rename: old name %r is not versioned" % from_rel)
836
if inv.path2id(to_rel):
837
bailout("can't rename: new name %r is already versioned" % to_rel)
839
to_dir, to_tail = os.path.split(to_rel)
840
to_dir_id = inv.path2id(to_dir)
841
if to_dir_id == None and to_dir != '':
842
bailout("can't determine destination directory id for %r" % to_dir)
844
mutter("rename_one:")
845
mutter(" file_id {%s}" % file_id)
846
mutter(" from_rel %r" % from_rel)
847
mutter(" to_rel %r" % to_rel)
848
mutter(" to_dir %r" % to_dir)
849
mutter(" to_dir_id {%s}" % to_dir_id)
851
inv.rename(file_id, to_dir_id, to_tail)
853
print "%s => %s" % (from_rel, to_rel)
855
from_abs = self.abspath(from_rel)
856
to_abs = self.abspath(to_rel)
1124
tree = self.working_tree()
1125
inv = tree.inventory
1126
if not tree.has_filename(from_rel):
1127
raise BzrError("can't rename: old working file %r does not exist" % from_rel)
1128
if tree.has_filename(to_rel):
1129
raise BzrError("can't rename: new working file %r already exists" % to_rel)
1131
file_id = inv.path2id(from_rel)
1133
raise BzrError("can't rename: old name %r is not versioned" % from_rel)
1135
if inv.path2id(to_rel):
1136
raise BzrError("can't rename: new name %r is already versioned" % to_rel)
1138
to_dir, to_tail = os.path.split(to_rel)
1139
to_dir_id = inv.path2id(to_dir)
1140
if to_dir_id == None and to_dir != '':
1141
raise BzrError("can't determine destination directory id for %r" % to_dir)
1143
mutter("rename_one:")
1144
mutter(" file_id {%s}" % file_id)
1145
mutter(" from_rel %r" % from_rel)
1146
mutter(" to_rel %r" % to_rel)
1147
mutter(" to_dir %r" % to_dir)
1148
mutter(" to_dir_id {%s}" % to_dir_id)
1150
inv.rename(file_id, to_dir_id, to_tail)
1152
from_abs = self.abspath(from_rel)
1153
to_abs = self.abspath(to_rel)
1155
os.rename(from_abs, to_abs)
1157
raise BzrError("failed to rename %r to %r: %s"
1158
% (from_abs, to_abs, e[1]),
1159
["rename rolled back"])
1161
self._write_inventory(inv)
858
os.rename(from_abs, to_abs)
860
bailout("failed to rename %r to %r: %s"
861
% (from_abs, to_abs, e[1]),
862
["rename rolled back"])
864
self._write_inventory(inv)
1166
868
def move(self, from_paths, to_name):
1174
876
Note that to_name is only the last component of the new name;
1175
877
this doesn't change the directory.
1177
This returns a list of (from_path, to_path) pairs for each
1178
entry that is moved.
1183
## TODO: Option to move IDs only
1184
assert not isinstance(from_paths, basestring)
1185
tree = self.working_tree()
1186
inv = tree.inventory
1187
to_abs = self.abspath(to_name)
1188
if not isdir(to_abs):
1189
raise BzrError("destination %r is not a directory" % to_abs)
1190
if not tree.has_filename(to_name):
1191
raise BzrError("destination %r not in working directory" % to_abs)
1192
to_dir_id = inv.path2id(to_name)
1193
if to_dir_id == None and to_name != '':
1194
raise BzrError("destination %r is not a versioned directory" % to_name)
1195
to_dir_ie = inv[to_dir_id]
1196
if to_dir_ie.kind not in ('directory', 'root_directory'):
1197
raise BzrError("destination %r is not a directory" % to_abs)
1199
to_idpath = inv.get_idpath(to_dir_id)
1201
for f in from_paths:
1202
if not tree.has_filename(f):
1203
raise BzrError("%r does not exist in working tree" % f)
1204
f_id = inv.path2id(f)
1206
raise BzrError("%r is not versioned" % f)
1207
name_tail = splitpath(f)[-1]
1208
dest_path = appendpath(to_name, name_tail)
1209
if tree.has_filename(dest_path):
1210
raise BzrError("destination %r already exists" % dest_path)
1211
if f_id in to_idpath:
1212
raise BzrError("can't move %r to a subdirectory of itself" % f)
1214
# OK, so there's a race here, it's possible that someone will
1215
# create a file in this interval and then the rename might be
1216
# left half-done. But we should have caught most problems.
1218
for f in from_paths:
1219
name_tail = splitpath(f)[-1]
1220
dest_path = appendpath(to_name, name_tail)
1221
result.append((f, dest_path))
1222
inv.rename(inv.path2id(f), to_dir_id, name_tail)
1224
os.rename(self.abspath(f), self.abspath(dest_path))
1226
raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
1227
["rename rolled back"])
1229
self._write_inventory(inv)
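
    # Example (sketch; file names are hypothetical):
    #
    #     br.move(['hello.c', 'hello.h'], 'src')
    #         => [('hello.c', 'src/hello.c'), ('hello.h', 'src/hello.h')]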
1236
def revert(self, filenames, old_tree=None, backups=True):
1237
"""Restore selected files to the versions from a previous tree.
1240
If true (default) backups are made of files before
1243
from bzrlib.errors import NotVersionedError, BzrError
1244
from bzrlib.atomicfile import AtomicFile
1245
from bzrlib.osutils import backup_file
1247
inv = self.read_working_inventory()
1248
if old_tree is None:
1249
old_tree = self.basis_tree()
1250
old_inv = old_tree.inventory
1253
for fn in filenames:
1254
file_id = inv.path2id(fn)
1256
raise NotVersionedError("not a versioned file", fn)
1257
if not old_inv.has_id(file_id):
1258
raise BzrError("file not present in old tree", fn, file_id)
1259
nids.append((fn, file_id))
1261
# TODO: Rename back if it was previously at a different location
1263
# TODO: If given a directory, restore the entire contents from
1264
# the previous version.
1266
# TODO: Make a backup to a temporary file.
1268
# TODO: If the file previously didn't exist, delete it?
1269
for fn, file_id in nids:
1272
f = AtomicFile(fn, 'wb')
1274
f.write(old_tree.get_file(file_id).read())
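
    # Example (sketch; 'hello.c' is hypothetical): restore a file to its
    # last-committed text, keeping a backup of the working copy.
    #
    #     br.revert(['hello.c'], backups=True)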
1280
def pending_merges(self):
1281
"""Return a list of pending merges.
1283
These are revisions that have been merged into the working
1284
directory but not yet committed.
1286
cfn = self.controlfilename('pending-merges')
1287
if not os.path.exists(cfn):
1290
for l in self.controlfile('pending-merges', 'r').readlines():
1291
p.append(l.rstrip('\n'))
1295
def add_pending_merge(self, revision_id):
1296
from bzrlib.revision import validate_revision_id
1298
validate_revision_id(revision_id)
1300
p = self.pending_merges()
1301
if revision_id in p:
1303
p.append(revision_id)
1304
self.set_pending_merges(p)
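
    # Example (sketch; the merged revision id is hypothetical):
    #
    #     br.add_pending_merge('HYPOTHETICAL-MERGED-REV-ID')
    #     print br.pending_merges()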
1307
def set_pending_merges(self, rev_list):
1308
from bzrlib.atomicfile import AtomicFile
1311
f = AtomicFile(self.controlfilename('pending-merges'))
1322
def get_parent(self):
1323
"""Return the parent location of the branch.
1325
This is the default location for push/pull/missing. The usual
1326
pattern is that the user can override it by specifying a
1330
_locs = ['parent', 'pull', 'x-pull']
1333
return self.controlfile(l, 'r').read().strip('\n')
1335
if e.errno != errno.ENOENT:
1340
def set_parent(self, url):
1341
# TODO: Maybe delete old location files?
1342
from bzrlib.atomicfile import AtomicFile
1345
f = AtomicFile(self.controlfilename('parent'))
1354
def check_revno(self, revno):
1356
Check whether a revno corresponds to any revision.
1357
Zero (the NULL revision) is considered valid.
1360
self.check_real_revno(revno)
1362
def check_real_revno(self, revno):
1364
Check whether a revno corresponds to a real revision.
1365
Zero (the NULL revision) is considered invalid
1367
if revno < 1 or revno > self.revno():
1368
raise InvalidRevisionNumber(revno)
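
    # Example (sketch): revno 0 names the null revision, so it passes
    # check_revno() but not check_real_revno().
    #
    #     br.check_revno(0)          # accepted
    #     br.check_real_revno(0)     # raises InvalidRevisionNumber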
879
self._need_writelock()
880
## TODO: Option to move IDs only
881
assert not isinstance(from_paths, basestring)
882
tree = self.working_tree()
884
to_abs = self.abspath(to_name)
885
if not isdir(to_abs):
886
bailout("destination %r is not a directory" % to_abs)
887
if not tree.has_filename(to_name):
888
bailout("destination %r not in working directory" % to_abs)
889
to_dir_id = inv.path2id(to_name)
890
if to_dir_id == None and to_name != '':
891
bailout("destination %r is not a versioned directory" % to_name)
892
to_dir_ie = inv[to_dir_id]
893
if to_dir_ie.kind not in ('directory', 'root_directory'):
894
bailout("destination %r is not a directory" % to_abs)
896
to_idpath = Set(inv.get_idpath(to_dir_id))
899
if not tree.has_filename(f):
900
bailout("%r does not exist in working tree" % f)
901
f_id = inv.path2id(f)
903
bailout("%r is not versioned" % f)
904
name_tail = splitpath(f)[-1]
905
dest_path = appendpath(to_name, name_tail)
906
if tree.has_filename(dest_path):
907
bailout("destination %r already exists" % dest_path)
908
if f_id in to_idpath:
909
bailout("can't move %r to a subdirectory of itself" % f)
911
# OK, so there's a race here, it's possible that someone will
912
# create a file in this interval and then the rename might be
913
# left half-done. But we should have caught most problems.
916
name_tail = splitpath(f)[-1]
917
dest_path = appendpath(to_name, name_tail)
918
print "%s => %s" % (f, dest_path)
919
inv.rename(inv.path2id(f), to_dir_id, name_tail)
921
os.rename(self.abspath(f), self.abspath(dest_path))
923
bailout("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
924
["rename rolled back"])
926
self._write_inventory(inv)
930
    def show_status(self, show_all=False):
        """Display single-line status for non-ignored working files.

        The list is shown sorted in order by file name.
935
>>> b = ScratchBranch(files=['foo', 'foo~'])
941
>>> b.commit("add foo")
943
>>> os.unlink(b.abspath('foo'))
947
TODO: Get state for single files.
949
self._need_readlock()
951
# We have to build everything into a list first so that it can
952
# sorted by name, incorporating all the different sources.
954
# FIXME: Rather than getting things in random order and then sorting,
955
# just step through in order.
957
# Interesting case: the old ID for a file has been removed,
958
# but a new file has been created under that name.
960
old = self.basis_tree()
961
new = self.working_tree()
963
for fs, fid, oldname, newname, kind in diff_trees(old, new):
965
show_status(fs, kind,
966
oldname + ' => ' + newname)
967
elif fs == 'A' or fs == 'M':
968
show_status(fs, kind, newname)
970
show_status(fs, kind, oldname)
973
show_status(fs, kind, newname)
976
show_status(fs, kind, newname)
978
show_status(fs, kind, newname)
980
bailout("weird file state %r" % ((fs, fid),))
1373
984
class ScratchBranch(Branch):