# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA


import sys, os, os.path, random, time, sha, sets, types, re, shutil, tempfile
import traceback, socket, fnmatch, difflib
from binascii import hexlify

from bzrlib.trace import mutter, note
from bzrlib.osutils import isdir, isfile, quotefn, compact_date, rand_bytes, \
     splitpath, appendpath, sha_file, sha_string, file_kind, \
     username, local_time_offset
from bzrlib.errors import BzrError, InvalidRevisionNumber, InvalidRevisionId, \
     DivergedBranches, NoSuchRevision
from bzrlib.textui import show_status
from bzrlib.revision import Revision
from bzrlib.xml import unpack_xml
from bzrlib.delta import compare_trees
from bzrlib.tree import EmptyTree, RevisionTree
from bzrlib.inventory import Inventory
from bzrlib.store import ImmutableStore

BZR_BRANCH_FORMAT = "Bazaar-NG branch, format 0.0.4\n"
## TODO: Maybe include checks for common corruption of newlines, etc?


# TODO: Some operations like log might retrieve the same revisions
# repeatedly to calculate deltas.  We could perhaps have a weakref
# cache in memory to make this faster.

def find_branch(f, **args):
    if f and (f.startswith('http://') or f.startswith('https://')):
        from bzrlib.remotebranch import RemoteBranch
        return RemoteBranch(f, **args)
    else:
        return Branch(f, **args)


class Branch:
    """Branch holding a history of revisions."""

    def _check_format(self):
        """Check this branch format is supported; raise BzrError if not."""
        fmt = self.controlfile('branch-format', 'r').read()
        fmt = fmt.replace('\r\n', '\n')
        if fmt != BZR_BRANCH_FORMAT:
            raise BzrError('sorry, branch format %r not supported' % fmt,
                           ['use a different bzr version',
                            'or remove the .bzr directory and "bzr init" again'])


    def get_root_id(self):
        """Return the id of this branch's root"""
        inv = self.read_working_inventory()
        return inv.root.file_id

    def set_root_id(self, file_id):
        inv = self.read_working_inventory()
        orig_root_id = inv.root.file_id
        del inv._byid[inv.root.file_id]
        inv.root.file_id = file_id
        inv._byid[inv.root.file_id] = inv.root
        for fid in inv:
            entry = inv[fid]
            if entry.parent_id in (None, orig_root_id):
                entry.parent_id = inv.root.file_id
        self._write_inventory(inv)
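    # set_root_id() swaps a new file id in for the tree root and re-parents
    # every top-level entry (parent_id of None or the old root id) onto the
    # new root before writing the inventory back out.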

    def read_working_inventory(self):
        """Read the working inventory."""
        from bzrlib.inventory import Inventory
        from bzrlib.xml import unpack_xml
        from time import time

        before = time()
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        inv = unpack_xml(Inventory,
                         self.controlfile('inventory', 'rb'))
        mutter("loaded inventory of %d items in %f"
               % (len(inv), time() - before))
        return inv

    def _write_inventory(self, inv):
        """Update the working inventory."""

    def add(self, files, ids=None, verbose=False):
        """Make files versioned.

        This puts the files in the Added state, so that they will be
        recorded by the next commit.

        files
            List of paths to add, relative to the base of the tree.

        ids
            If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
              not (yet) exist.

        TODO: Perhaps return the ids of the files?  But then again it
              is easy to retrieve them if they're needed.

        TODO: Adding a directory should optionally recurse down and
              add all non-ignored children.  Perhaps do that in a
              higher-level method.

        >>> b = ScratchBranch(files=['foo'])
        >>> 'foo' in b.unknowns()
        True
        >>> b.add('foo')
        >>> 'foo' in b.unknowns()
        False
        >>> bool(b.inventory.path2id('foo'))
        True

        >>> b.add('foo')
        Traceback (most recent call last):
        ...
        BzrError: ('foo is already versioned', [])

        >>> b.add(['nothere'])
        Traceback (most recent call last):
        BzrError: ('cannot add: not a regular file or directory: nothere', [])
        """
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        if isinstance(files, basestring):
            assert(ids is None or isinstance(ids, basestring))
            files = [files]
            if ids is not None:
                ids = [ids]

        if ids is None:
            ids = [None] * len(files)
        else:
            assert(len(ids) == len(files))

        inv = self.read_working_inventory()
        for f, file_id in zip(files, ids):
            if is_control_file(f):
                raise BzrError("cannot add control file %s" % quotefn(f))

            fp = splitpath(f)

            if len(fp) == 0:
                raise BzrError("cannot add top-level %r" % f)

            fullpath = os.path.normpath(self.abspath(f))

            try:
                kind = file_kind(fullpath)
            except OSError:
                # maybe something better?
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if kind != 'file' and kind != 'directory':
                raise BzrError('cannot add: not a regular file or directory: %s' % quotefn(f))

            if file_id is None:
                file_id = gen_file_id(f)
            inv.add_path(f, kind=kind, file_id=file_id)

            if verbose:
                print 'added', quotefn(f)

            mutter("add file %s file_id:{%s} kind=%r" % (f, file_id, kind))

        self._write_inventory(inv)

    def print_file(self, file, revno):
        """Print `file` to stdout."""
        tree = self.revision_tree(self.lookup_revision(revno))
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            raise BzrError("%r is not present in revision %s" % (file, revno))
        tree.print_file(file_id)

    def remove(self, files, verbose=False):
        """Mark nominated files for removal from the inventory."""

    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned
        or control files or ignored.
        """
        return self.working_tree().unknowns()


    def append_revision(self, *revision_ids):
        from bzrlib.atomicfile import AtomicFile

        for revision_id in revision_ids:
            mutter("add {%s} to revision-history" % revision_id)

        rev_history = self.revision_history()
        rev_history.extend(revision_ids)

        f = AtomicFile(self.controlfilename('revision-history'))
        try:
            for rev_id in rev_history:
                f.write(rev_id + '\n')
            f.commit()
        finally:
            f.close()
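    # The 'revision-history' control file is plain text: one revision id per
    # line, oldest first.  append_revision() above rewrites it atomically, and
    # revision_history() further down reads it back with line endings stripped.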
    def commit(self, message, timestamp=None, timezone=None,
               committer=None, verbose=True):
        """Commit working copy as a new revision.

        The basic approach is to add all the file texts into the
        store, then the inventory, then make a new revision pointing
        to that inventory and store that.

        This is not quite safe if the working copy changes during the
        commit; for the moment that is simply not allowed.  A better
        approach is to make a temporary copy of the files before
        computing their hashes, and then add those hashes in turn to
        the inventory.  This should mean at least that there are no
        broken hash pointers.  There is no way we can get a snapshot
        of the whole directory at an instant.  This would also have to
        be robust against files disappearing, moving, etc.  So the
        whole thing is a bit hard.

        timestamp -- if not None, seconds-since-epoch for a
             postdated/predated commit.
        """
        self._need_writelock()

        ## TODO: Show branch names

        # TODO: Don't commit if there are no changes, unless forced?

        # First walk over the working inventory; and both update that
        # and also build a new revision inventory.  The revision
        # inventory needs to hold the text-id, sha1 and size of the
        # actual file versions committed in the revision.  (These are
        # not present in the working inventory.)  We also need to
        # detect missing/deleted files, and remove them from the
        # working inventory.

        work_inv = self.read_working_inventory()
        inv = Inventory()
        basis = self.basis_tree()
        basis_inv = basis.inventory
        missing_ids = []
        for path, entry in work_inv.iter_entries():
            ## TODO: Cope with files that have gone missing.

            ## TODO: Check that the file kind has not changed from the previous
            ## revision of this file (if any).

            entry = entry.copy()

            p = self.abspath(path)
            file_id = entry.file_id
            mutter('commit prep file %s, id %r ' % (p, file_id))

            if not os.path.exists(p):
                mutter("    file is missing, removing from inventory")
                if verbose:
                    show_status('D', entry.kind, quotefn(path))
                missing_ids.append(file_id)
                continue

            # TODO: Handle files that have been deleted

            # TODO: Maybe a special case for empty files?  Seems a
            # waste to store them many times.

            inv.add(entry)

            if basis_inv.has_id(file_id):
                old_kind = basis_inv[file_id].kind
                if old_kind != entry.kind:
                    raise BzrError("entry %r changed kind from %r to %r"
                                   % (file_id, old_kind, entry.kind))

            if entry.kind == 'directory':
                if not isdir(p):
                    raise BzrError("%s is entered as directory but not a directory"
                                   % quotefn(p))
            elif entry.kind == 'file':
                if not isfile(p):
                    raise BzrError("%s is entered as file but is not a file" % quotefn(p))

                content = file(p, 'rb').read()

                entry.text_sha1 = sha_string(content)
                entry.text_size = len(content)

                old_ie = basis_inv.has_id(file_id) and basis_inv[file_id]
                if (old_ie
                    and (old_ie.text_size == entry.text_size)
                    and (old_ie.text_sha1 == entry.text_sha1)):
                    ## assert content == basis.get_file(file_id).read()
                    entry.text_id = basis_inv[file_id].text_id
                    mutter('    unchanged from previous text_id {%s}' %
                           entry.text_id)
                else:
                    entry.text_id = gen_file_id(entry.name)
                    self.text_store.add(content, entry.text_id)
                    mutter('    stored with text_id {%s}' % entry.text_id)
                    if verbose:
                        if not old_ie:
                            state = 'A'
                        elif (old_ie.name == entry.name
                              and old_ie.parent_id == entry.parent_id):
                            state = 'M'
                        else:
                            state = 'R'
                        show_status(state, entry.kind, quotefn(path))

        for file_id in missing_ids:
            # have to do this later so we don't mess up the iterator.
            # since parents may be removed before their children we
            # have to test.

            # FIXME: There's probably a better way to do this; perhaps
            # the workingtree should know how to filter itself.
            if work_inv.has_id(file_id):
                del work_inv[file_id]

        inv_id = rev_id = _gen_revision_id(time.time())

        inv_tmp = tempfile.TemporaryFile()
        inv.write_xml(inv_tmp)
        inv_tmp.seek(0)
        self.inventory_store.add(inv_tmp, inv_id)
        mutter('new inventory_id is {%s}' % inv_id)

        self._write_inventory(work_inv)

        if timestamp == None:
            timestamp = time.time()

        if committer == None:
            committer = username()

        if timezone == None:
            timezone = local_time_offset()

        mutter("building commit log message")
        rev = Revision(timestamp=timestamp,
                       timezone=timezone,
                       committer=committer,
                       precursor = self.last_patch(),
                       message=message,
                       inventory_id=inv_id,
                       revision_id=rev_id)

        rev_tmp = tempfile.TemporaryFile()
        rev.write_xml(rev_tmp)
        rev_tmp.seek(0)
        self.revision_store.add(rev_tmp, rev_id)
        mutter("new revision_id is {%s}" % rev_id)

        ## XXX: Everything up to here can simply be orphaned if we abort
        ## the commit; it will leave junk files behind but that doesn't
        ## matter.

        ## TODO: Read back the just-generated changeset, and make sure it
        ## applies and recreates the right state.

        ## TODO: Also calculate and store the inventory SHA1
        mutter("committing patch r%d" % (self.revno() + 1))

        self.append_revision(rev_id)

        if verbose:
            note("committed r%d" % self.revno())

    def get_revision(self, revision_id):
        """Return the Revision object for a named revision"""
        if not revision_id or not isinstance(revision_id, basestring):
            raise InvalidRevisionId(revision_id)
        r = unpack_xml(Revision, self.revision_store[revision_id])
        assert r.revision_id == revision_id
        return r

    def get_revision_delta(self, revno):
        """Return the delta for one revision.

        The delta is relative to its mainline predecessor, or the
        empty tree for revision 1.
        """
        assert isinstance(revno, int)
        rh = self.revision_history()
        if not (1 <= revno <= len(rh)):
            raise InvalidRevisionNumber(revno)

        # revno is 1-based; list is 0-based

        new_tree = self.revision_tree(rh[revno-1])
        if revno == 1:
            old_tree = EmptyTree()
        else:
            old_tree = self.revision_tree(rh[revno-2])

        return compare_trees(old_tree, new_tree)
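    # For revision 1 the delta is taken against EmptyTree, so every versioned
    # file in that revision shows up as an addition in the returned delta.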

    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed. At that
        # point, it is probably best *not* to include the signature
        # in the revision hash. Because that lets you re-sign
        # the revision, (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return sha_file(self.revision_store[revision_id])

    def get_inventory(self, inventory_id):
        """Get Inventory object by hash.

        TODO: Perhaps for this and similar methods, take a revision
              parameter which can be either an integer revno or a
              string hash.
        """
        from bzrlib.inventory import Inventory
        from bzrlib.xml import unpack_xml

        return unpack_xml(Inventory, self.inventory_store[inventory_id])

    def get_inventory_sha1(self, inventory_id):
        """Return the sha1 hash of the inventory entry."""
        return sha_file(self.inventory_store[inventory_id])
691
def get_revision_inventory(self, revision_id):
661
692
"""Return inventory of a past revision."""
662
# bzr 0.0.6 imposes the constraint that the inventory_id
663
# must be the same as its revision, so this is trivial.
693
self._need_readlock()
664
694
if revision_id == None:
665
from bzrlib.inventory import Inventory
666
return Inventory(self.get_root_id())
668
return self.get_inventory(revision_id)
697
return self.get_inventory(self.get_revision(revision_id).inventory_id)
671
700
    def revision_history(self):
        """Return sequence of revision hashes on this branch.

        >>> ScratchBranch().revision_history()
        []
        """
        return [l.rstrip('\r\n') for l in
                self.controlfile('revision-history', 'r').readlines()]


    def common_ancestor(self, other, self_revno=None, other_revno=None):
        """
        >>> from bzrlib import commit
        >>> sb = ScratchBranch(files=['foo', 'foo~'])
        >>> sb.common_ancestor(sb) == (None, None)
        True
        >>> commit.commit(sb, "Committing first revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        1
        >>> clone = sb.clone()
        >>> commit.commit(sb, "Committing second revision", verbose=False)
        >>> sb.common_ancestor(sb)[0]
        2
        >>> sb.common_ancestor(clone)[0]
        1
        >>> commit.commit(clone, "Committing divergent second revision",
        ...               verbose=False)
        >>> sb.common_ancestor(clone)[0]
        1
        >>> sb.common_ancestor(clone) == clone.common_ancestor(sb)
        True
        >>> sb.common_ancestor(sb) != clone.common_ancestor(clone)
        True
        >>> clone2 = sb.clone()
        >>> sb.common_ancestor(clone2)[0]
        2
        >>> sb.common_ancestor(clone2, self_revno=1)[0]
        1
        >>> sb.common_ancestor(clone2, other_revno=1)[0]
        1
        """
        my_history = self.revision_history()
        other_history = other.revision_history()
        if self_revno is None:
            self_revno = len(my_history)
        if other_revno is None:
            other_revno = len(other_history)
        indices = range(min((self_revno, other_revno)))
        indices.reverse()
        for r in indices:
            if my_history[r] == other_history[r]:
                return r+1, my_history[r]
        return None, None

    def enum_history(self, direction):
        """Return (revno, revision_id) for history of branch.

        direction
            'forward' is from earliest to latest
            'reverse' is from latest to earliest
        """
        rh = self.revision_history()
        if direction == 'forward':
            i = 1
            for rid in rh:
                yield i, rid
                i += 1
        elif direction == 'reverse':
            i = len(rh)
            while i > 0:
                yield i, rh[i-1]
                i -= 1
        else:
            raise BzrError('invalid history direction %r' % direction)


    def revno(self):
        """Return current revision number for this branch.

        That is equivalent to the number of revisions committed to
        this branch.

        >>> b = ScratchBranch()
        >>> b.revno()
        0
        >>> b.commit('no foo')
        >>> b.revno()
        1
        """
        return len(self.revision_history())

    def last_patch(self):
        """Return last patch hash, or None if no history.

        >>> ScratchBranch().last_patch() == None
        True
        """
        ph = self.revision_history()
        if ph:
            return ph[-1]
        else:
            return None

    def missing_revisions(self, other, stop_revision=None):
        """
        If self and other have not diverged, return a list of the revisions
        present in other, but missing from self.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch()
        >>> br2 = ScratchBranch()
        >>> br1.missing_revisions(br2)
        []
        >>> commit(br2, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        ['REVISION-ID-1']
        >>> br2.missing_revisions(br1)
        []
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1")
        >>> br1.missing_revisions(br2)
        []
        >>> commit(br2, "lala!", rev_id="REVISION-ID-2A")
        >>> br1.missing_revisions(br2)
        ['REVISION-ID-2A']
        >>> commit(br1, "lala!", rev_id="REVISION-ID-2B")
        >>> br1.missing_revisions(br2)
        Traceback (most recent call last):
        DivergedBranches: These branches have diverged.
        """
        self_history = self.revision_history()
        self_len = len(self_history)
        other_history = other.revision_history()
        other_len = len(other_history)
        common_index = min(self_len, other_len) - 1
        if common_index >= 0 and \
           self_history[common_index] != other_history[common_index]:
            raise DivergedBranches(self, other)

        if stop_revision is None:
            stop_revision = other_len
        elif stop_revision > other_len:
            raise NoSuchRevision(self, stop_revision)

        return other_history[self_len:stop_revision]

    def update_revisions(self, other, stop_revision=None):
        """Pull in all new revisions from other branch.

        >>> from bzrlib.commit import commit
        >>> bzrlib.trace.silent = True
        >>> br1 = ScratchBranch(files=['foo', 'bar'])
        >>> br1.add('foo')
        >>> br1.add('bar')
        >>> commit(br1, "lala!", rev_id="REVISION-ID-1", verbose=False)
        >>> br2 = ScratchBranch()
        >>> br2.update_revisions(br1)
        Added 2 texts.
        Added 1 inventories.
        Added 1 revisions.
        >>> br2.revision_history()
        ['REVISION-ID-1']
        >>> br2.update_revisions(br1)
        Added 0 texts.
        Added 0 inventories.
        Added 0 revisions.
        >>> br1.text_store.total_size() == br2.text_store.total_size()
        True
        """
        from bzrlib.progress import ProgressBar

        pb = ProgressBar()

        pb.update('comparing histories')
        revision_ids = self.missing_revisions(other, stop_revision)

        if hasattr(other.revision_store, "prefetch"):
            other.revision_store.prefetch(revision_ids)
        if hasattr(other.inventory_store, "prefetch"):
            inventory_ids = [other.get_revision(r).inventory_id
                             for r in revision_ids]
            other.inventory_store.prefetch(inventory_ids)

        revisions = []
        needed_texts = sets.Set()
        i = 0
        for rev_id in revision_ids:
            i += 1
            pb.update('fetching revision', i, len(revision_ids))
            rev = other.get_revision(rev_id)
            revisions.append(rev)
            inv = other.get_inventory(str(rev.inventory_id))
            for key, entry in inv.iter_entries():
                if entry.text_id is None:
                    continue
                if entry.text_id not in self.text_store:
                    needed_texts.add(entry.text_id)

        pb.clear()

        count = self.text_store.copy_multi(other.text_store, needed_texts)
        print "Added %d texts." % count
        inventory_ids = [f.inventory_id for f in revisions]
        count = self.inventory_store.copy_multi(other.inventory_store,
                                                inventory_ids)
        print "Added %d inventories." % count
        revision_ids = [f.revision_id for f in revisions]
        count = self.revision_store.copy_multi(other.revision_store,
                                               revision_ids)
        for revision_id in revision_ids:
            self.append_revision(revision_id)
        print "Added %d revisions." % count
    def lookup_revision(self, revision):
        """Return the revision identifier for a given revision information."""
        revno, info = self.get_revision_info(revision)
        return info

    def get_revision_info(self, revision):
        """Return (revno, revision id) for revision identifier.

        revision can be an integer, in which case it is assumed to be revno
        (though this will translate negative values into positive ones).
        revision can also be a string, in which case it is parsed for something
        like 'date:' or 'revid:' etc.
        """
        if revision is None:
            return 0, None
        revno = None
        try:  # Convert to int if possible
            revision = int(revision)
        except ValueError:
            pass
        revs = self.revision_history()
        if isinstance(revision, int):
            if revision == 0:
                return 0, None
            # Maybe we should do this first, but we don't need it if revision == 0
            if revision < 0:
                revno = len(revs) + revision + 1
            else:
                revno = revision
        elif isinstance(revision, basestring):
            for prefix, func in Branch.REVISION_NAMESPACES.iteritems():
                if revision.startswith(prefix):
                    revno = func(self, revs, revision)
                    break
            else:
                raise BzrError('No namespace registered for string: %r' % revision)

        if revno is None or revno <= 0 or revno > len(revs):
            raise BzrError("no such revision %s" % revision)
        return revno, revs[revno-1]
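    # Illustrative usage, assuming a branch with three revisions revs[0..2]:
    #   b.get_revision_info(1)          -> (1, revs[0])
    #   b.get_revision_info(-1)         -> (3, revs[2])   negatives count from the end
    #   b.get_revision_info('revno:2')  -> (2, revs[1])   via the namespace functions below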

    REVISION_NAMESPACES = {}

    def _namespace_revno(self, revs, revision):
        """Lookup a revision by revision number"""
        assert revision.startswith('revno:')
        try:
            return int(revision[6:])
        except ValueError:
            return None
    REVISION_NAMESPACES['revno:'] = _namespace_revno

    def _namespace_revid(self, revs, revision):
        assert revision.startswith('revid:')
        try:
            return revs.index(revision[6:]) + 1
        except ValueError:
            return None
    REVISION_NAMESPACES['revid:'] = _namespace_revid

    def _namespace_last(self, revs, revision):
        assert revision.startswith('last:')
        try:
            offset = int(revision[5:])
        except ValueError:
            return None
        if offset <= 0:
            raise BzrError('You must supply a positive value for --revision last:XXX')
        return len(revs) - offset + 1
    REVISION_NAMESPACES['last:'] = _namespace_last

    def _namespace_tag(self, revs, revision):
        assert revision.startswith('tag:')
        raise BzrError('tag: namespace registered, but not implemented.')
    REVISION_NAMESPACES['tag:'] = _namespace_tag

    def _namespace_date(self, revs, revision):
        assert revision.startswith('date:')
        import datetime
        # Spec for date revisions:
        #   date:value
        #   value can be 'yesterday', 'today', 'tomorrow' or a YYYY-MM-DD string.
        #   it can also start with a '+/-/='. '+' says match the first
        #   entry after the given date. '-' is match the first entry before the date
        #   '=' is match the first entry after, but still on the given date.
        #
        # +2005-05-12 says find the first matching entry after May 12th, 2005 at 0:00
        # -2005-05-12 says find the first matching entry before May 12th, 2005 at 0:00
        # =2005-05-12 says find the first match after May 12th, 2005 at 0:00 but before
        #       May 13th, 2005 at 0:00
        #
        # So the proper way of saying 'give me all entries for today' is:
        #     -r {date:+today}:{date:-tomorrow}
        # The default is '=' when not supplied
        val = revision[5:]
        match_style = '='
        if val[:1] in ('+', '-', '='):
            match_style = val[:1]
            val = val[1:]

        today = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
        if val.lower() == 'yesterday':
            dt = today - datetime.timedelta(days=1)
        elif val.lower() == 'today':
            dt = today
        elif val.lower() == 'tomorrow':
            dt = today + datetime.timedelta(days=1)
        else:
            # This should be done outside the function to avoid recompiling it.
            _date_re = re.compile(
                    r'(?P<date>(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d))?'
                    r'(,|T)?\s*'
                    r'(?P<time>(?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d))?)?'
                )
            m = _date_re.match(val)
            if not m or (not m.group('date') and not m.group('time')):
                raise BzrError('Invalid revision date %r' % revision)

            if m.group('date'):
                year, month, day = int(m.group('year')), int(m.group('month')), int(m.group('day'))
            else:
                year, month, day = today.year, today.month, today.day
            if m.group('time'):
                hour = int(m.group('hour'))
                minute = int(m.group('minute'))
                if m.group('second'):
                    second = int(m.group('second'))
                else:
                    second = 0
            else:
                hour, minute, second = 0, 0, 0

            dt = datetime.datetime(year=year, month=month, day=day,
                                   hour=hour, minute=minute, second=second)
        first = dt
        last = None
        reversed = False
        if match_style == '-':
            reversed = True
        elif match_style == '=':
            last = dt + datetime.timedelta(days=1)

        if reversed:
            for i in range(len(revs)-1, -1, -1):
                r = self.get_revision(revs[i])
                # TODO: Handle timezone.
                dt = datetime.datetime.fromtimestamp(r.timestamp)
                if first >= dt and (last is None or dt >= last):
                    return i+1
        else:
            for i in range(len(revs)):
                r = self.get_revision(revs[i])
                # TODO: Handle timezone.
                dt = datetime.datetime.fromtimestamp(r.timestamp)
                if first <= dt and (last is None or dt <= last):
                    return i+1
    REVISION_NAMESPACES['date:'] = _namespace_date
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be None for the null revision, in which case
        an `EmptyTree` is returned."""
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == None:
            return EmptyTree(self.get_root_id())
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self.text_store, inv)
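    # revision_tree(None) is the empty "null revision" tree; for a real
    # revision the returned RevisionTree is backed by this branch's text_store
    # and the inventory recorded for that revision.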

    def rename_one(self, from_rel, to_rel):
        """Rename one file.

        This can change the directory or the filename or both.
        """
        tree = self.working_tree()
        inv = tree.inventory
        if not tree.has_filename(from_rel):
            raise BzrError("can't rename: old working file %r does not exist" % from_rel)
        if tree.has_filename(to_rel):
            raise BzrError("can't rename: new working file %r already exists" % to_rel)

        file_id = inv.path2id(from_rel)
        if file_id == None:
            raise BzrError("can't rename: old name %r is not versioned" % from_rel)

        if inv.path2id(to_rel):
            raise BzrError("can't rename: new name %r is already versioned" % to_rel)

        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id == None and to_dir != '':
            raise BzrError("can't determine destination directory id for %r" % to_dir)

        mutter("rename_one:")
        mutter("  file_id    {%s}" % file_id)
        mutter("  from_rel   %r" % from_rel)
        mutter("  to_rel     %r" % to_rel)
        mutter("  to_dir     %r" % to_dir)
        mutter("  to_dir_id  {%s}" % to_dir_id)

        inv.rename(file_id, to_dir_id, to_tail)

        print "%s => %s" % (from_rel, to_rel)

        from_abs = self.abspath(from_rel)
        to_abs = self.abspath(to_rel)
        try:
            os.rename(from_abs, to_abs)
        except OSError, e:
            raise BzrError("failed to rename %r to %r: %s"
                           % (from_abs, to_abs, e[1]),
                           ["rename rolled back"])

        self._write_inventory(inv)

    def move(self, from_paths, to_name):
        """Rename files.

        to_name must exist as a versioned directory.

        Note that to_name is only the last component of the new name;
        this doesn't change the directory.
        """
        ## TODO: Option to move IDs only
        assert not isinstance(from_paths, basestring)
        tree = self.working_tree()
        inv = tree.inventory
        to_abs = self.abspath(to_name)
        if not isdir(to_abs):
            raise BzrError("destination %r is not a directory" % to_abs)
        if not tree.has_filename(to_name):
            raise BzrError("destination %r not in working directory" % to_abs)
        to_dir_id = inv.path2id(to_name)
        if to_dir_id == None and to_name != '':
            raise BzrError("destination %r is not a versioned directory" % to_name)
        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind not in ('directory', 'root_directory'):
            raise BzrError("destination %r is not a directory" % to_abs)

        to_idpath = inv.get_idpath(to_dir_id)

        for f in from_paths:
            if not tree.has_filename(f):
                raise BzrError("%r does not exist in working tree" % f)
            f_id = inv.path2id(f)
            if f_id == None:
                raise BzrError("%r is not versioned" % f)
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            if tree.has_filename(dest_path):
                raise BzrError("destination %r already exists" % dest_path)
            if f_id in to_idpath:
                raise BzrError("can't move %r to a subdirectory of itself" % f)

        # OK, so there's a race here, it's possible that someone will
        # create a file in this interval and then the rename might be
        # left half-done.  But we should have caught most problems.

        for f in from_paths:
            name_tail = splitpath(f)[-1]
            dest_path = appendpath(to_name, name_tail)
            print "%s => %s" % (f, dest_path)
            inv.rename(inv.path2id(f), to_dir_id, name_tail)
            try:
                os.rename(self.abspath(f), self.abspath(dest_path))
            except OSError, e:
                raise BzrError("failed to rename %r to %r: %s" % (f, dest_path, e[1]),
                               ["rename rolled back"])

        self._write_inventory(inv)
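    # Illustrative call with hypothetical paths: b.move(['README', 'docs/guide.txt'],
    # 'attic') leaves the files at attic/README and attic/guide.txt, since each
    # moved file keeps only its last path component under the destination, and
    # 'attic' must already be a versioned directory in the working tree.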

    def revert(self, filenames, old_tree=None, backups=True):
        """Restore selected files to the versions from a previous tree.

        backups
            If true (default) backups are made of files before
            they're renamed.
        """
        from bzrlib.errors import NotVersionedError, BzrError
        from bzrlib.atomicfile import AtomicFile
        from bzrlib.osutils import backup_file

        inv = self.read_working_inventory()
        if old_tree is None:
            old_tree = self.basis_tree()
        old_inv = old_tree.inventory

        nids = []
        for fn in filenames:
            file_id = inv.path2id(fn)
            if not file_id:
                raise NotVersionedError("not a versioned file", fn)
            if not old_inv.has_id(file_id):
                raise BzrError("file not present in old tree", fn, file_id)
            nids.append((fn, file_id))

        # TODO: Rename back if it was previously at a different location

        # TODO: If given a directory, restore the entire contents from
        # the previous version.

        # TODO: Make a backup to a temporary file.

        # TODO: If the file previously didn't exist, delete it?
        for fn, file_id in nids:
            backup_file(fn)

            f = AtomicFile(fn, 'wb')
            try:
                f.write(old_tree.get_file(file_id).read())
                f.commit()
            finally:
                f.close()


    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.
        """
        cfn = self.controlfilename('pending-merges')
        if not os.path.exists(cfn):
            return []
        p = []
        for l in self.controlfile('pending-merges', 'r').readlines():
            p.append(l.rstrip('\n'))
        return p


    def add_pending_merge(self, revision_id):
        from bzrlib.revision import validate_revision_id

        validate_revision_id(revision_id)

        p = self.pending_merges()
        if revision_id in p:
            return
        p.append(revision_id)
        self.set_pending_merges(p)


    def set_pending_merges(self, rev_list):
        from bzrlib.atomicfile import AtomicFile

        f = AtomicFile(self.controlfilename('pending-merges'))
        try:
            for l in rev_list:
                f.write(l + '\n')
            f.commit()
        finally:
            f.close()
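    # The 'pending-merges' control file mirrors the 'revision-history' layout:
    # one revision id per line, written atomically by set_pending_merges() and
    # read back by pending_merges() above.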