# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

import sys, os, os.path, random, time, sha, sets, types, re, shutil, tempfile
import traceback, socket, fnmatch, difflib
from binascii import hexlify

from inventory import Inventory
from trace import mutter, note
from tree import Tree, EmptyTree, RevisionTree, WorkingTree
from inventory import InventoryEntry, Inventory
from osutils import isdir, quotefn, isfile, uuid, sha_file, username, chomp, \
     format_date, compact_date, pumpfile, user_email, rand_bytes, splitpath, \
     joinpath, sha_string, file_kind, local_time_offset, appendpath
from store import ImmutableStore
from revision import Revision
from errors import bailout, BzrError
from textui import show_status
from diff import diff_trees

from bzrlib.trace import mutter, note
from bzrlib.osutils import isdir, quotefn, compact_date, rand_bytes, \
     sha_file, appendpath, file_kind
from bzrlib.errors import BzrError, InvalidRevisionNumber, InvalidRevisionId
from bzrlib.textui import show_status
from bzrlib.revision import Revision
from bzrlib.xml import unpack_xml
from bzrlib.delta import compare_trees
from bzrlib.tree import EmptyTree, RevisionTree

BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"


## TODO: Maybe include checks for common corruption of newlines, etc?

# TODO: Some operations like log might retrieve the same revisions
# repeatedly to calculate deltas.  We could perhaps have a weakref
# cache in memory to make this faster.

# TODO: please move the revision-string syntax stuff out of the branch
# object; it's clutter


def find_branch(f, **args):
    if f and (f.startswith('http://') or f.startswith('https://')):
        import remotebranch
        return remotebranch.RemoteBranch(f, **args)
    return Branch(f, **args)
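

# Illustrative usage (a sketch, not part of the original module): find_branch
# dispatches on the location string, so a plain path yields a local Branch and
# an http/https URL yields a RemoteBranch.  The URL below is hypothetical.
#
#   b = find_branch('.')                           # local working branch
#   rb = find_branch('http://example.com/branch')  # read-only remote branch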


def find_cached_branch(f, cache_root, **args):
    from remotebranch import RemoteBranch
    br = find_branch(f, **args)

    def cacheify(br, store_name):
        from meta_store import CachedStore
        cache_path = os.path.join(cache_root, store_name)
        new_store = CachedStore(getattr(br, store_name), cache_path)
        setattr(br, store_name, new_store)

    if isinstance(br, RemoteBranch):
        cacheify(br, 'inventory_store')
        cacheify(br, 'text_store')
        cacheify(br, 'revision_store')
    return br
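

# Sketch of how this might be called (the URL and cache directory are
# illustrative only): each remote store is wrapped in a CachedStore so that
# repeated reads hit the local cache instead of the network.
#
#   br = find_cached_branch('http://example.com/branch', '/tmp/bzr-cache')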


def _relpath(base, path):
    """Return path relative to base, or raise exception.

    The path may be either an absolute path or a path relative to the
    current working directory.

    Lifted out of Branch.relpath for ease of testing.

    os.path.commonprefix (python2.4) has a bad bug that it works just
    on string prefixes, assuming that '/u' is a prefix of '/u2'.  This
    avoids that problem."""
    rp = os.path.abspath(path)

    s = []
    head = rp
    while len(head) >= len(base):
        if head == base:
            break
        head, tail = os.path.split(head)
        if tail:
            s.insert(0, tail)
    else:
        from errors import NotBranchError
        raise NotBranchError("path %r is not within branch %r" % (rp, base))

    return os.sep.join(s)
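
# For example (illustrative paths only), with base '/home/user/branch':
#   _relpath('/home/user/branch', '/home/user/branch/foo/bar')  ->  'foo/bar'
#   _relpath('/home/user/branch', '/home/user/other')           ->  NotBranchError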


def find_branch_root(f=None):
    """Find the branch root enclosing f, or pwd.

    f may be a filename or a URL.

    It is not necessary that f exists.

    Basically we keep looking up until we find the control directory or
    run into the root.  If there isn't one, raises NotBranchError.
    """
    if f == None:
        f = os.getcwd()
    elif hasattr(os.path, 'realpath'):
        f = os.path.realpath(f)
    else:
        f = os.path.abspath(f)
    if not os.path.exists(f):
        raise BzrError('%r does not exist' % f)
        fmt = self.controlfile('branch-format', 'r').read()
        fmt = fmt.replace('\r\n', '\n')
        if fmt != BZR_BRANCH_FORMAT:
            bailout('sorry, branch format %r not supported' % fmt,
                    ['use a different bzr version',
                     'or remove the .bzr directory and "bzr init" again'])
            raise BzrError('sorry, branch format %r not supported' % fmt,
                           ['use a different bzr version',
                            'or remove the .bzr directory and "bzr init" again'])

    def get_root_id(self):
        """Return the id of this branch's root"""
        inv = self.read_working_inventory()
        return inv.root.file_id

    def set_root_id(self, file_id):
        inv = self.read_working_inventory()
        orig_root_id = inv.root.file_id
        del inv._byid[inv.root.file_id]
        inv.root.file_id = file_id
        inv._byid[inv.root.file_id] = inv.root
        # Re-parent every entry that hung off the old root.
        for fid in inv:
            entry = inv[fid]
            if entry.parent_id in (None, orig_root_id):
                entry.parent_id = inv.root.file_id
        self._write_inventory(inv)

    def read_working_inventory(self):
        """Read the working inventory."""
        before = time.time()
        # ElementTree does its own conversion from UTF-8, so open in
        # binary mode.
        inv = Inventory.read_xml(self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time.time() - before))

        from bzrlib.inventory import Inventory
        from bzrlib.xml import unpack_xml
        from time import time

        before = time()
        # ElementTree does its own conversion from UTF-8, so open in
        # binary mode.
        inv = unpack_xml(Inventory,
                         self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time() - before))

    def _write_inventory(self, inv):
        """Update the working inventory."""

    def add(self, files, ids=None):
        """Make files versioned.

        This puts the files in the Added state, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not (yet) exist.

        TODO: Perhaps return the ids of the files?  But then again it
              is easy to retrieve them if they're needed.

        TODO: Option to specify file id.

        TODO: Adding a directory should optionally recurse down and
              add all non-ignored children.  Perhaps do that in a
              higher-level method.

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add(['foo'])
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True

        >>> b.add(['foo'])
        Traceback (most recent call last):
        ...
        BzrError: ('foo is already versioned', [])

        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, types.StringTypes):
            files = [files]
        if isinstance(files, basestring):
            files = [files]
            assert(ids is None or isinstance(ids, basestring))

        inv = self.read_working_inventory()
        for f in files:
            if is_control_file(f):
                bailout("cannot add control file %s" % quotefn(f))
            if not splitpath(f):
                bailout("cannot add top-level %r" % f)
            fullpath = os.path.normpath(self.abspath(f))
            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                bailout('cannot add: not a regular file or directory: %s' % quotefn(f))
            if kind != 'file' and kind != 'directory':
                bailout('cannot add: not a regular file or directory: %s' % quotefn(f))
            file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)
            show_status('A', kind, quotefn(f))
            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))
        self._write_inventory(inv)

        if ids is None:
            ids = [None] * len(files)
        assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f, file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))
            if not splitpath(f):
                raise BzrError("cannot add top-level %r" % f)
            fullpath = os.path.normpath(self.abspath(f))
            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))
            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))
            file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)
            print 'added', quotefn(f)
            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))
        self._write_inventory(inv)

    def print_file(self, file, revno):
        """Print `file` to stdout."""
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            bailout("%r is not present in revision %d" % (file, revno))
        tree.print_file(file_id)

        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            raise BzrError("%r is not present in revision %s" % (file, revno))
        tree.print_file(file_id)

    def remove(self, files, verbose=False):
        """Mark nominated files for removal from the inventory."""

    def unknowns(self):
        return self.working_tree().unknowns()

    def commit(self, message, timestamp=None, timezone=None,
               committer=None, verbose=False):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed.  A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory.  This should mean at least that there are no
        broken hash pointers.  There is no way we can get a snapshot
        of the whole directory at an instant.  This would also have to
        be robust against files disappearing, moving, etc.  So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
        """

        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory.  The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision.  (These are
        # not present in the working inventory.)  We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []

        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter("    file is missing, removing from inventory")
                show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files?  Seems a
            # waste to store them many times.

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    bailout("entry %r changed kind from %r to %r"
                            % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    bailout("%s is entered as directory but not a directory" % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    bailout("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter('    unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter('    stored with text_id {%s}' % entry.text_id)

            elif (old_ie.name == entry.name
                  and old_ie.parent_id == entry.parent_id):
                show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # have to test.
            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

        if timezone == None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       precursor = self.last_patch(),
                       message=message,
                       inventory_id=inv_id,
                       revision_id=rev_id)

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        note("committed r%d" % self.revno())

    def append_revision(self, revision_id):
        mutter("add {%s} to revision-history" % revision_id)

    def append_revision(self, *revision_ids):
        from bzrlib.atomicfile import AtomicFile

        for revision_id in revision_ids:
            mutter("add {%s} to revision-history" % revision_id)

        rev_history = self.revision_history()

        tmprhname = self.controlfilename('revision-history.tmp')
        rhname = self.controlfilename('revision-history')
        f = file(tmprhname, 'wt')
        rev_history.append(revision_id)
        f.write('\n'.join(rev_history))
        f.close()
        if sys.platform == 'win32':
            os.remove(rhname)
        os.rename(tmprhname, rhname)

        rev_history.extend(revision_ids)

        f = AtomicFile(self.controlfilename('revision-history'))
        try:
            for rev_id in rev_history:
                f.write(rev_id + '\n')
            f.commit()
        finally:
            f.close()
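
    # The AtomicFile variant above relies on the write-to-temp-then-rename
    # pattern.  A minimal sketch of that idea (illustrative only, not the
    # bzrlib.atomicfile implementation):
    #
    #   tmp = path + '.tmp'
    #   f = open(tmp, 'wt')
    #   f.write(text)
    #   f.close()
    #   os.rename(tmp, path)   # atomic replacement on POSIX filesystems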

    def get_revision_xml(self, revision_id):
        """Return XML file object for revision object."""
        if not revision_id or not isinstance(revision_id, basestring):
            raise InvalidRevisionId(revision_id)
        try:
            return self.revision_store[revision_id]
        except IndexError:
            raise bzrlib.errors.NoSuchRevision(self, revision_id)

    def get_revision(self, revision_id):
        """Return the Revision object for a named revision"""
        r = Revision.read_xml(self.revision_store[revision_id])

        xml_file = self.get_revision_xml(revision_id)
        try:
            r = unpack_xml(Revision, xml_file)
        except SyntaxError, e:
            raise bzrlib.errors.BzrError('failed to unpack revision_xml',
                                         [revision_id, str(e)])

        assert r.revision_id == revision_id
        return r

    def get_revision_delta(self, revno):
        """Return the delta for one revision.

        The delta is relative to its mainline predecessor, or the
        empty tree for revision 1.
        """
        assert isinstance(revno, int)
        rh = self.revision_history()
        if not (1 <= revno <= len(rh)):
            raise InvalidRevisionNumber(revno)

        # revno is 1-based; list is 0-based
        new_tree = self.revision_tree(rh[revno-1])
        if revno == 1:
            old_tree = EmptyTree()
        else:
            old_tree = self.revision_tree(rh[revno-2])

        return compare_trees(old_tree, new_tree)
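
    # For instance (a sketch): get_revision_delta(1) compares revision 1
    # against the empty tree, so every file in that revision shows up as
    # added; the returned object comes from bzrlib.delta.compare_trees.
    #
    #   delta = b.get_revision_delta(1)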

    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed. At that
        # point, it is probably best *not* to include the signature
        # in the revision hash. Because that lets you re-sign
        # the revision, (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return bzrlib.osutils.sha_file(self.get_revision_xml(revision_id))

    def get_inventory(self, inventory_id):
        """Get Inventory object by hash.

        TODO: Perhaps for this and similar methods, take a revision
              parameter which can be either an integer revno or a
              string hash.
        """
        i = Inventory.read_xml(self.inventory_store[inventory_id])

        from bzrlib.inventory import Inventory
        from bzrlib.xml import unpack_xml

        return unpack_xml(Inventory, self.get_inventory_xml(inventory_id))

    def get_inventory_xml(self, inventory_id):
        """Get inventory XML as a file object."""
        return self.inventory_store[inventory_id]

    def get_inventory_sha1(self, inventory_id):
        """Return the sha1 hash of the inventory entry."""
        return sha_file(self.get_inventory_xml(inventory_id))

    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # bzr 0.0.6 imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id == None:
            from bzrlib.inventory import Inventory
            return Inventory(self.get_root_id())
        else:
            return self.get_inventory(self.get_revision(revision_id).inventory_id)
            return self.get_inventory(revision_id)

    def revision_history(self):
        """Return sequence of revision hashes on this branch."""

    def revno(self):
        """Return current revision number for this branch.

        That is equivalent to the number of revisions committed to
        this branch.

        >>> b = ScratchBranch()
        >>> b.revno()
        0
        >>> b.commit('no foo')
        >>> b.revno()
        1
        """
        return len(self.revision_history())

    def last_patch(self):
        """Return last patch hash, or None if no history.

        >>> ScratchBranch().last_patch() == None
        True
        """
        ph = self.revision_history()
        if ph:
            return ph[-1]
        else:
            return None

    def lookup_revision(self, revno):
        """Return revision hash for revision number."""
        if revno == 0:
            return None
        try:
            # list is 0-based; revisions are 1-based
            return self.revision_history()[revno-1]
        except IndexError:
            raise BzrError("no such revision %s" % revno)

    def missing_revisions(self, other, stop_revision=None, diverged_ok=False):
        """If self and other have not diverged, return a list of the revisions
        present in other, but missing from self.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch()
        >>> br2 = ScratchBranch()
        >>> br1.missing_revisions(br2)
        >>> commit(br2, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        >>> br2.missing_revisions(br1)
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        >>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
        >>> br1.missing_revisions(br2)
        >>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
        >>> br1.missing_revisions(br2)
        Traceback (most recent call last):
        DivergedBranches: These branches have diverged.
        """
        self_history = self.revision_history()
        self_len = len(self_history)
        other_history = other.revision_history()
        other_len = len(other_history)
        common_index = min(self_len, other_len) - 1
        if common_index >= 0 and \
           self_history[common_index] != other_history[common_index]:
            raise DivergedBranches(self, other)

        if stop_revision is None:
            stop_revision = other_len
        elif stop_revision > other_len:
            raise bzrlib.errors.NoSuchRevision(self, stop_revision)

        return other_history[self_len:stop_revision]

    def update_revisions(self, other, stop_revision=None):
        """Pull in all new revisions from other branch."""
        from bzrlib.fetch import greedy_fetch

        pb = bzrlib.ui.ui_factory.progress_bar()
        pb.update('comparing histories')

        revision_ids = self.missing_revisions(other, stop_revision)

        if len(revision_ids) > 0:
            count = greedy_fetch(self, other, revision_ids[-1], pb)[0]
        self.append_revision(*revision_ids)
        ## note("Added %d revisions." % count)

    def install_revisions(self, other, revision_ids, pb):
        if hasattr(other.revision_store, "prefetch"):
            other.revision_store.prefetch(revision_ids)
        if hasattr(other.inventory_store, "prefetch"):
            inventory_ids = [other.get_revision(r).inventory_id
                             for r in revision_ids]
            other.inventory_store.prefetch(inventory_ids)

        pb = bzrlib.ui.ui_factory.progress_bar()

        revisions = []
        needed_texts = set()
        failures = set()

        for i, rev_id in enumerate(revision_ids):
            pb.update('fetching revision', i+1, len(revision_ids))
            try:
                rev = other.get_revision(rev_id)
            except bzrlib.errors.NoSuchRevision:
                failures.add(rev_id)
                continue

            revisions.append(rev)
            inv = other.get_inventory(str(rev.inventory_id))
            for key, entry in inv.iter_entries():
                if entry.text_id is None:
                    continue
                if entry.text_id not in self.text_store:
                    needed_texts.add(entry.text_id)

        count, cp_fail = self.text_store.copy_multi(other.text_store,
                                                    needed_texts)
        print "Added %d texts." % count
        inventory_ids = [ f.inventory_id for f in revisions ]
        count, cp_fail = self.inventory_store.copy_multi(other.inventory_store,
                                                         inventory_ids)
        print "Added %d inventories." % count
        revision_ids = [ f.revision_id for f in revisions]

        count, cp_fail = self.revision_store.copy_multi(other.revision_store,
                                                        revision_ids)
        assert len(cp_fail) == 0
        return count, failures

    def commit(self, *args, **kw):
        from bzrlib.commit import commit
        commit(self, *args, **kw)

    def lookup_revision(self, revision):
        """Return the revision identifier for a given revision information."""
        revno, info = self.get_revision_info(revision)
        return info

    def revision_id_to_revno(self, revision_id):
        """Given a revision id, return its revno"""
        history = self.revision_history()
        try:
            return history.index(revision_id) + 1
        except ValueError:
            raise bzrlib.errors.NoSuchRevision(self, revision_id)

    def get_revision_info(self, revision):
        """Return (revno, revision id) for revision identifier.

        revision can be an integer, in which case it is assumed to be revno
        (though this will translate negative values into positive ones).
        revision can also be a string, in which case it is parsed for something
        like 'date:' or 'revid:' etc.
        """
        revno = None
        try:  # Convert to int if possible
            revision = int(revision)
        except ValueError:
            pass
        revs = self.revision_history()
        if isinstance(revision, int):
            # Maybe we should do this first, but we don't need it if revision == 0
            if revision < 0:
                revno = len(revs) + revision + 1
            else:
                revno = revision
        elif isinstance(revision, basestring):
            for prefix, func in Branch.REVISION_NAMESPACES.iteritems():
                if revision.startswith(prefix):
                    revno = func(self, revs, revision)
                    break
            else:
                raise BzrError('No namespace registered for string: %r' % revision)

        if revno is None or revno <= 0 or revno > len(revs):
            raise BzrError("no such revision %s" % revision)
        return revno, revs[revno-1]
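
    # Illustrative examples of the specifier strings this accepts (the values
    # are hypothetical): 'revno:3', 'revid:some-revision-id', 'last:1' for the
    # most recent revision, and 'date:yesterday'.  A bare integer such as 3 or
    # -1 is treated as a revno, counting from the end when negative.
    #
    #   revno, rev_id = b.get_revision_info('last:1')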

    def _namespace_revno(self, revs, revision):
        """Lookup a revision by revision number"""
        assert revision.startswith('revno:')
        try:
            return int(revision[6:])
        except ValueError:
            return None
    REVISION_NAMESPACES['revno:'] = _namespace_revno

    def _namespace_revid(self, revs, revision):
        assert revision.startswith('revid:')
        try:
            return revs.index(revision[6:]) + 1
        except ValueError:
            return None
    REVISION_NAMESPACES['revid:'] = _namespace_revid

    def _namespace_last(self, revs, revision):
        assert revision.startswith('last:')
        offset = int(revision[5:])
        if offset <= 0:
            raise BzrError('You must supply a positive value for --revision last:XXX')
        return len(revs) - offset + 1
    REVISION_NAMESPACES['last:'] = _namespace_last

    def _namespace_tag(self, revs, revision):
        assert revision.startswith('tag:')
        raise BzrError('tag: namespace registered, but not implemented.')
    REVISION_NAMESPACES['tag:'] = _namespace_tag

    def _namespace_date(self, revs, revision):
        assert revision.startswith('date:')
        import datetime
        # Spec for date revisions:
        #   date:value
        #   value can be 'yesterday', 'today', 'tomorrow' or a YYYY-MM-DD string.
        #   it can also start with a '+/-/='. '+' says match the first
        #   entry after the given date. '-' is match the first entry before the date
        #   '=' is match the first entry after, but still on the given date.
        #
        #   +2005-05-12 says find the first matching entry after May 12th, 2005 at 0:00
        #   -2005-05-12 says find the first matching entry before May 12th, 2005 at 0:00
        #   =2005-05-12 says find the first match after May 12th, 2005 at 0:00 but before
        #       May 13th, 2005 at 0:00
        #
        #   So the proper way of saying 'give me all entries for today' is:
        #       -r {date:+today}:{date:-tomorrow}
        #   The default is '=' when not supplied
        val = revision[5:]
        match_style = '='
        if val[:1] in ('+', '-', '='):
            match_style = val[:1]
            val = val[1:]

        today = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
        if val.lower() == 'yesterday':
            dt = today - datetime.timedelta(days=1)
        elif val.lower() == 'today':
            dt = today
        elif val.lower() == 'tomorrow':
            dt = today + datetime.timedelta(days=1)
        else:
            # This should be done outside the function to avoid recompiling it.
            _date_re = re.compile(
                    r'(?P<date>(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d))?'
                    r'(?P<time>(?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d))?)?'
                )
            m = _date_re.match(val)
            if not m or (not m.group('date') and not m.group('time')):
                raise BzrError('Invalid revision date %r' % revision)

            if m.group('date'):
                year, month, day = int(m.group('year')), int(m.group('month')), int(m.group('day'))
            else:
                year, month, day = today.year, today.month, today.day
            if m.group('time'):
                hour = int(m.group('hour'))
                minute = int(m.group('minute'))
                if m.group('second'):
                    second = int(m.group('second'))
                else:
                    second = 0
            else:
                hour, minute, second = 0, 0, 0

            dt = datetime.datetime(year=year, month=month, day=day,
                                   hour=hour, minute=minute, second=second)

        first = dt
        last = None
        reversed = False
        if match_style == '-':
            reversed = True
        elif match_style == '=':
            last = dt + datetime.timedelta(days=1)

        if reversed:
            for i in range(len(revs)-1, -1, -1):
                r = self.get_revision(revs[i])
                # TODO: Handle timezone.
                dt = datetime.datetime.fromtimestamp(r.timestamp)
                if first >= dt and (last is None or dt >= last):
                    return i+1
        else:
            for i in range(len(revs)):
                r = self.get_revision(revs[i])
                # TODO: Handle timezone.
                dt = datetime.datetime.fromtimestamp(r.timestamp)
                if first <= dt and (last is None or dt <= last):
                    return i+1
    REVISION_NAMESPACES['date:'] = _namespace_date
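
    # The namespace table is just a prefix -> parser mapping, so additional
    # specifiers could be registered the same way.  A minimal sketch (the
    # 'head:' prefix here is hypothetical, not part of bzr):
    #
    #   def _namespace_head(self, revs, revision):
    #       assert revision.startswith('head:')
    #       return len(revs)
    #   REVISION_NAMESPACES['head:'] = _namespace_head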

    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be None for the null revision, in which case
        an `EmptyTree` is returned."""
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == None:
            return EmptyTree()

    def write_log(self, show_timezone='original', verbose=False):
        """Write out human-readable log of commits to this branch

        utc -- If true, show dates in universal time, not local time."""
        ## TODO: Option to choose either original, utc or local timezone
        revno = 1
        precursor = None
        for p in self.revision_history():
            print 'revno:', revno
            ## TODO: Show hash if --id is given.
            ##print 'revision-hash:', p
            rev = self.get_revision(p)
            print 'committer:', rev.committer
            print 'timestamp: %s' % (format_date(rev.timestamp, rev.timezone or 0,
                                                 show_timezone))

            ## opportunistic consistency check, same as check_patch_chaining
            if rev.precursor != precursor:
                bailout("mismatched precursor!")

            if not rev.message:
                print '  (no message)'
            else:
                for l in rev.message.split('\n'):
                    print '  ' + l

            if verbose == True and precursor != None:
                print 'changed files:'
                tree = self.revision_tree(p)
                prevtree = self.revision_tree(precursor)

                for file_state, fid, old_name, new_name, kind in \
                                        diff_trees(prevtree, tree, ):
                    if file_state == 'A' or file_state == 'M':
                        show_status(file_state, kind, new_name)
                    elif file_state == 'D':
                        show_status(file_state, kind, old_name)
                    elif file_state == 'R':
                        show_status(file_state, kind,
                                    old_name + ' => ' + new_name)

            revno += 1
            precursor = p

    def rename_one(self, from_rel, to_rel):
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            bailout("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            bailout("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            bailout("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            bailout("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            bailout("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter("  file_id    {%s}" % file_id)
        mutter("  from_rel   %r" % from_rel)
        mutter("  to_rel     %r" % to_rel)
        mutter("  to_dir     %r" % to_dir)
        mutter("  to_dir_id  {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
        try:
            os.rename(from_abs, to_abs)
        except OSError, e:
            bailout("failed to rename %r to %r: %s"
                    % (from_abs, to_abs, e[1]),
                    ["rename rolled back"])

        self._write_inventory(inv)

        # This can change the directory or the filename or both.
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter("  file_id    {%s}" % file_id)
        mutter("  from_rel   %r" % from_rel)
        mutter("  to_rel     %r" % to_rel)
        mutter("  to_dir     %r" % to_dir)
        mutter("  to_dir_id  {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
        try:
            os.rename(from_abs, to_abs)
        except OSError, e:
            raise BzrError("failed to rename %r to %r: %s"
                           % (from_abs, to_abs, e[1]),
                           ["rename rolled back"])

        self._write_inventory(inv)

    def move(self, from_paths, to_name):
        """Rename files.

        Note that to_name is only the last component of the new name;
        this doesn't change the directory.
        """
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            bailout("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            bailout("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            bailout("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            bailout("destination %r is not a directory" % to_abs)

        to_idpath = Set(inv.get_idpath(to_dir_id))

        for f in from_paths:
            if not tree.has_filename(f):
                bailout("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                bailout("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                bailout("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                bailout("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done.  But we should have caught most problems.

        for f in from_paths:
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                bailout("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                        ["rename rolled back"])

        self._write_inventory(inv)

    def show_status(self, show_all=False):
        """Display single-line status for non-ignored working files.

        The list is shown sorted in order by file name.

        >>> b = ScratchBranch(files=['foo', 'foo~'])
        >>> b.commit("add foo")
        >>> os.unlink(b.abspath('foo'))
        """
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done.  But we should have caught most problems.

        for f in from_paths:
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                               ["rename rolled back"])

        self._write_inventory(inv)

    def revert(self, filenames, old_tree=None, backups=True):
        """Restore selected files to the versions from a previous tree.

        backups
            If true (default) backups are made of files before
            they are overwritten.
        """
        from bzrlib.errors import NotVersionedError, BzrError
        from bzrlib.atomicfile import AtomicFile
        from bzrlib.osutils import backup_file
        # TODO: Get state for single files.

        inv = self.read_working_inventory()
        if old_tree is None:
            old_tree = self.basis_tree()
        old_inv = old_tree.inventory

        nids = []
        for fn in filenames:
            file_id = inv.path2id(fn)
            if file_id is None:
                raise NotVersionedError("not a versioned file", fn)
            if not old_inv.has_id(file_id):
                raise BzrError("file not present in old tree", fn, file_id)
            nids.append((fn, file_id))

        # TODO: Rename back if it was previously at a different location

        # TODO: If given a directory, restore the entire contents from
        # the previous version.

        # TODO: Make a backup to a temporary file.

        # TODO: If the file previously didn't exist, delete it?
        for fn, file_id in nids:
            if backups:
                backup_file(fn)
            f = AtomicFile(fn, 'wb')
            try:
                f.write(old_tree.get_file(file_id).read())
                f.commit()
            finally:
                f.close()

    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.
        """
        # We have to build everything into a list first so that it can
        # sorted by name, incorporating all the different sources.
        #
        # FIXME: Rather than getting things in random order and then sorting,
        # just step through in order.

        # Interesting case: the old ID for a file has been removed,
        # but a new file has been created under that name.

        old = self.basis_tree()
        new = self.working_tree()

        for fs, fid, oldname, newname, kind in diff_trees(old, new):
            if fs == 'R':
                show_status(fs, kind,
                            oldname + ' => ' + newname)
            elif fs == 'A' or fs == 'M':
                show_status(fs, kind, newname)
            elif fs == 'D':
                show_status(fs, kind, oldname)
            elif fs == '?':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == 'I':
                if show_all:
                    show_status(fs, kind, newname)
            elif fs == 'X':
                if show_all:
                    show_status(fs, kind, newname)
            else:
                bailout("weird file state %r" % ((fs, fid),))
        cfn = self.controlfilename('pending-merges')
        if not os.path.exists(cfn):
            return []
        p = []
        for l in self.controlfile('pending-merges', 'r').readlines():
            p.append(l.rstrip('\n'))
        return p

    def add_pending_merge(self, revision_id):
        from bzrlib.revision import validate_revision_id

        validate_revision_id(revision_id)

        p = self.pending_merges()
        if revision_id in p:
            return
        p.append(revision_id)
        self.set_pending_merges(p)
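
    # The pending-merges control file is read and written one revision id per
    # line (see the readlines/rstrip loop above), so after a merge it might
    # contain, illustratively:
    #
    #   some-merged-revision-id-1
    #   some-merged-revision-id-2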

    def set_pending_merges(self, rev_list):
        from bzrlib.atomicfile import AtomicFile

        f = AtomicFile(self.controlfilename('pending-merges'))
        try:
            for l in rev_list:
                f.write(l + '\n')
            f.commit()
        finally:
            f.close()


class ScratchBranch(Branch):