        if message_callback is None:
            if message is not None:
                if isinstance(message, str):
                    message = message.decode(get_user_encoding())
                message_callback = lambda x: message
            else:
                raise BzrError("The message or message_callback keyword"
                    " parameter is required for commit().")
        self.bound_branch = None
        self.any_entries_changed = False
        self.any_entries_deleted = False
        if exclude is not None:
            self.exclude = sorted(
                minimum_path_selection(exclude))
        else:
            self.exclude = []
        self.local = local
        self.master_branch = None
        self.master_locked = False
        self.recursive = recursive
        self.rev_id = None
        # self.specific_files is None to indicate no filter, or any iterable to
        # indicate a filter - [] means no files at all, as per iter_changes.
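        # As with exclude above, minimum_path_selection reduces the requested
        # paths to the smallest set of parent paths that still covers them
        # (children of an already-selected directory are dropped).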
        if specific_files is not None:
            self.specific_files = sorted(
                minimum_path_selection(specific_files))
        else:
            self.specific_files = None
        self.specific_file_ids = None
        self.allow_pointless = allow_pointless
        self.revprops = revprops
        self.message_callback = message_callback
        self.timestamp = timestamp
        self.timezone = timezone
        self.committer = committer
        self.strict = strict
        self.verbose = verbose
        # accumulates an inventory delta to the basis entry, so we can make
        # just the necessary updates to the workingtree's cached basis.
        self._basis_delta = []
        self.work_tree.lock_write()
        operation.add_cleanup(self.work_tree.unlock)
        self.parents = self.work_tree.get_parent_ids()
        # We can use record_iter_changes IFF iter_changes is compatible with
        # the command line parameters, and the repository has fast delta
        # generation. See bug 347649.
        self.use_record_iter_changes = (
            not self.branch.repository._format.supports_tree_reference and
            (self.branch.repository._format.fast_deltas or
             len(self.parents) < 2))
        self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
        operation.add_cleanup(self.pb.finished)
        self.basis_revid = self.work_tree.last_revision()
        self.basis_tree = self.work_tree.basis_tree()
        self.basis_tree.lock_read()
        operation.add_cleanup(self.basis_tree.unlock)
        # Cannot commit with conflicts present.
        if len(self.work_tree.conflicts()) > 0:
            raise ConflictsInTree

        # Setup the bound branch variables as needed.
        self._check_bound_branch(operation, possible_master_transports)
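        # (For a bound branch this locks the master branch and records it in
        # self.master_branch, so the revision can be pushed there before the
        # local commit further below.)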

        # Check that the working tree is up to date
        old_revno, new_revno = self._check_out_of_date_tree()

        # Complete configuration setup
        if reporter is not None:
            self.reporter = reporter
        elif self.reporter is None:
            self.reporter = self._select_reporter()
        if self.config is None:
            self.config = self.branch.get_config()

        self._set_specific_file_ids()
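        # (This resolves the selected paths to file ids across the basis and
        # working trees; unversioned paths cause PathNotVersionedError.)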

        # Setup the progress bar. As the number of files that need to be
        # committed is unknown, progress is reported as stages.
        # We keep track of entries separately though and include that
        # information in the progress bar during the relevant stages.
        self.pb_stage_name = ""
        self.pb_stage_count = 0
        self.pb_stage_total = 5
        if self.bound_branch:
            self.pb_stage_total += 1
        self.pb.show_pct = False
        self.pb.show_spinner = False
        self.pb.show_eta = False
        self.pb.show_count = True
        self.pb.show_bar = True

        self._gather_parents()
        # After a merge, a selected file commit is not supported.
        # See 'bzr help merge' for an explanation as to why.
        if len(self.parents) > 1 and self.specific_files is not None:
            raise errors.CannotCommitSelectedFileMerge(self.specific_files)
        # Excludes are a form of selected file commit.
        if len(self.parents) > 1 and self.exclude:
            raise errors.CannotCommitSelectedFileMerge(self.exclude)

        # Collect the changes
        self._set_progress_stage("Collecting changes", counter=True)
        self.builder = self.branch.get_commit_builder(self.parents,
            self.config, timestamp, timezone, committer, self.revprops, rev_id)
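        # The builder accumulates the new revision inside a repository write
        # group, which must be either committed or aborted - see the except
        # clause below.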

        try:
            self.builder.will_record_deletes()
            # find the location being committed to
            if self.bound_branch:
                master_location = self.master_branch.base
            else:
                master_location = self.branch.base

            # report the start of the commit
            self.reporter.started(new_revno, self.rev_id, master_location)

            self._update_builder_with_changes()
            self._check_pointless()

            # TODO: Now the new inventory is known, check for conflicts.
            # ADHB 2006-08-08: If this is done, populate_new_inv should not add
            # weave lines, because nothing should be recorded until it is known
            # that commit will succeed.
            self._set_progress_stage("Saving data locally")
            self.builder.finish_inventory()

            # Prompt the user for a commit message if none provided
            message = message_callback(self)
            self.message = message

            # Add revision data to the local branch
            self.rev_id = self.builder.commit(self.message)
        except Exception, e:
            mutter("aborting commit write group because of exception:")
            trace.log_exception_quietly()
            note("aborting commit write group: %r" % (e,))
            self.builder.abort()
            raise

        self._process_pre_hooks(old_revno, new_revno)

        # Upload revision data to the master.
        # this will propagate merged revisions too if needed.
        if self.bound_branch:
            self._set_progress_stage("Uploading data to master branch")
            # 'commit' to the master first so a timeout here causes the
            # local branch to be out of date
            self.master_branch.import_last_revision_info(
                self.branch.repository, new_revno, self.rev_id)

        # and now do the commit locally.
        self.branch.set_last_revision_info(new_revno, self.rev_id)

        # Make the working tree be up to date with the branch. This
        # includes automatic changes scheduled to be made to the tree, such
        # as updating its basis and unversioning paths that were missing.
        self.work_tree.unversion(self.deleted_ids)
        self._set_progress_stage("Updating the working tree")
        self.work_tree.update_basis_by_delta(self.rev_id,
            self.builder.get_basis_delta())
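        # The delta recorded by the builder lets the working tree refresh its
        # cached basis inventory without re-reading the whole new inventory
        # from the repository.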
        self.reporter.completed(new_revno, self.rev_id)
        self._process_post_hooks(old_revno, new_revno)
        return self.rev_id

    def _select_reporter(self):
                 old_revno, old_revid, new_revno, self.rev_id,
                 tree_delta, future_tree)

    def _cleanup(self):
        """Cleanup any open locks, progress bars etc."""
        cleanups = [self._cleanup_bound_branch,
                    self.basis_tree.unlock,
                    self.work_tree.unlock,
                    self.pb.finished]
        found_exception = None
        for cleanup in cleanups:
            try:
                cleanup()
            # we want every cleanup to run no matter what.
            # so we have a catchall here, but we will raise the
            # last encountered exception up the stack: and
            # typically this will be useful enough.
            except Exception, e:
                found_exception = e
        if found_exception is not None:
            # don't do a plain raise, because the last exception may have been
            # trashed, e is our sure-to-work exception even though it loses the
            # full traceback. XXX: RBC 20060421 perhaps we could check the
            # exc_info and if its the same one do a plain raise otherwise
            # 'raise e' as we do now.
            raise found_exception

    def _cleanup_bound_branch(self):
        """Executed at the end of a try/finally to cleanup a bound branch.

        If the branch wasn't bound, this is a no-op.
        If it was, it resets self.branch to the local branch, instead
        of the master branch.
        """
        if not self.bound_branch:
            return
        if self.master_locked:
            self.master_branch.unlock()

    def _escape_commit_message(self):
        """Replace xml-incompatible control characters."""
        # FIXME: RBC 20060419 this should be done by the revision
        # serialiser not by commit. Then we can also add an unescaper
        # in the deserializer and start roundtripping revision messages
        # precisely. See repository_implementations/test_repository.py
        # Python strings can include characters that can't be
        # represented in well-formed XML; escape characters that
        # aren't listed in the XML specification
        # (http://www.w3.org/TR/REC-xml/#NT-Char).
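        # For example, a stray form feed (u'\x0c') in the message is stored as
        # the escaped text '\x0c' rather than as a raw control character.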
        self.message, escape_count = re.subn(
            u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
            lambda match: match.group(0).encode('unicode_escape'),
            self.message)
        if escape_count:
            self.reporter.escaped(escape_count, self.message)

    def _gather_parents(self):
        """Record the parents of a merge for merge detection."""
        # TODO: Make sure that this list doesn't contain duplicate
        # entries and the order is preserved when doing this.
        if self.use_record_iter_changes:
            return
        self.basis_inv = self.basis_tree.inventory
        self.parent_invs = [self.basis_inv]
        for revision in self.parents[1:]:
            if self.branch.repository.has_revision(revision):
                mutter('commit parent revision {%s}', revision)
                inventory = self.branch.repository.get_inventory(revision)
                self.parent_invs.append(inventory)
            else:
                mutter('commit parent ghost revision {%s}', revision)

    def _update_builder_with_changes(self):
        """Update the commit builder with the data about what has changed."""
        exclude = self.exclude
        # Build the revision inventory.
        #
        # This starts by creating a new empty inventory. Depending on
        # which files are selected for commit, and what is present in the
        # current tree, the new inventory is populated. inventory entries
        # which are candidates for modification have their revision set to
        # None; inventory entries that are carried over untouched have their
        # revision set to their prior value.
        #
        # ESEPARATIONOFCONCERNS: this function is diffing and using the diff
        # results to create a new inventory at the same time, which results
        # in bugs like #46635. Any reason not to use/enhance Tree.changes_from?
        specific_files = self.specific_files
        mutter("Selecting files for commit with filter %s", specific_files)

        if self.use_record_iter_changes:
            iter_changes = self.work_tree.iter_changes(self.basis_tree,
                specific_files=specific_files)
            iter_changes = self._filter_iter_changes(iter_changes)
            for file_id, path, fs_hash in self.builder.record_iter_changes(
                self.work_tree, self.basis_revid, iter_changes):
                self.work_tree._observed_sha1(file_id, path, fs_hash)
        else:
            # Build the new inventory
            self._populate_from_inventory()
            self._record_unselected()
            self._report_and_accumulate_deletes()

    def _filter_iter_changes(self, iter_changes):
        """Process iter_changes.

        This method reports on the changes in iter_changes to the user, and
        converts 'missing' entries in the iter_changes iterator to 'deleted'
        entries. 'missing' entries have their new path and new versioned flag
        cleared, so they are recorded as deletions.

        :param iter_changes: An iter_changes to process.
        :return: A generator of changes.
        """
        reporter = self.reporter
        report_changes = reporter.is_verbose()
        deleted_ids = []
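        # File ids found missing on disk are collected here; _commit later
        # unversions them from the working tree once the revision is recorded.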
        for change in iter_changes:
            if report_changes:
                old_path = change[1][0]
                new_path = change[1][1]
                versioned = change[3][1]
            kind = change[6][1]
            versioned = change[3][1]
            if kind is None and versioned:
                # 'missing' path
                if report_changes:
                    reporter.missing(new_path)
                deleted_ids.append(change[0])
                # Reset the new path (None) and new versioned flag (False)
                change = (change[0], (change[1][0], None), change[2],
                    (change[3][0], False)) + change[4:]
            elif kind == 'tree-reference':
                if self.recursive == 'down':
                    self._commit_nested_tree(change[0], change[1][1])
            if change[3][0] or change[3][1]:
                yield change
                if report_changes:
                    if new_path is None:
                        reporter.deleted(old_path)
                    elif old_path is None:
                        reporter.snapshot_change('added', new_path)
                    elif old_path != new_path:
                        reporter.renamed('renamed', old_path, new_path)
                    else:
                        if (new_path or
                            self.work_tree.branch.repository._format.rich_root_data):
                            # Don't report on changes to '' in non rich root
                            # repositories.
                            reporter.snapshot_change('modified', new_path)
            self._next_progress_entry()
        # Unversion IDs that were found to be deleted
        self.deleted_ids = deleted_ids

    def _record_unselected(self):
        # If specific files are selected, then all un-selected files must be
        # recorded in their previous state. For more details, see
        # https://lists.ubuntu.com/archives/bazaar/2007q3/028476.html.
        if self.specific_files or self.exclude:
            specific_files = self.specific_files or []
            for path, old_ie in self.basis_inv.iter_entries():
                if old_ie.file_id in self.builder.new_inventory:
                    # already added - skip.
                    continue
                if (is_inside_any(specific_files, path)
                    and not is_inside_any(self.exclude, path)):
                    # was inside the selected path, and not excluded - if not
                    # present it has been deleted so skip.
                    continue
                # From here down it was either not selected, or was excluded:
                # We preserve the entry unaltered.
                ie = old_ie.copy()
                # Note: specific file commits after a merge are currently
                # prohibited. This test is for sanity/safety in case it's
                # required after that changes.
                if len(self.parents) > 1:
                    ie.revision = None
                self.builder.record_entry_contents(ie, self.parent_invs, path,
                    self.basis_tree, None)

    def _report_and_accumulate_deletes(self):
        # XXX: Could the list of deleted paths and ids be instead taken from
        # _populate_from_inventory?
        if (isinstance(self.basis_inv, Inventory)
            and isinstance(self.builder.new_inventory, Inventory)):
            # the older Inventory classes provide a _byid dict, and building a
            # set from the keys of this dict is substantially faster than even
            # getting a set of ids from the inventory
            #
            # <lifeless> set(dict) is roughly the same speed as
            # set(iter(dict)) and both are significantly slower than
            # set(dict.keys())
            deleted_ids = set(self.basis_inv._byid.keys()) - \
                set(self.builder.new_inventory._byid.keys())
        else:
            deleted_ids = set(self.basis_inv) - set(self.builder.new_inventory)
        if deleted_ids:
            self.any_entries_deleted = True
            deleted = [(self.basis_tree.id2path(file_id), file_id)