@@ -246 +279 @@
         if message_callback is None:
             if message is not None:
                 if isinstance(message, str):
-                    message = message.decode(bzrlib.user_encoding)
+                    message = message.decode(get_user_encoding())
                 message_callback = lambda x: message
                 raise BzrError("The message or message_callback keyword"
                     " parameter is required for commit().")
         self.bound_branch = None
-        self.any_entries_changed = False
         self.any_entries_deleted = False
+        if exclude is not None:
+            self.exclude = sorted(
+                minimum_path_selection(exclude))
         self.local = local
         self.master_branch = None
-        self.master_locked = False
         self.recursive = recursive
         self.rev_id = None
+        # self.specific_files is None to indicate no filter, or any iterable to
+        # indicate a filter - [] means no files at all, as per iter_changes.
         if specific_files is not None:
             self.specific_files = sorted(
                 minimum_path_selection(specific_files))
             self.specific_files = None
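
The None-versus-empty distinction described in the comment above is easy to trip over, so here is a minimal standalone sketch of the convention (plain Python; select_paths and its arguments are invented for illustration, not bzrlib API):

    def select_paths(all_paths, specific_files):
        if specific_files is None:
            # None means "no filter": every path is considered.
            return list(all_paths)
        # Any other iterable is a filter, so [] selects no files at all.
        wanted = set(specific_files)
        return [p for p in all_paths if p in wanted]

    print(select_paths(['a.txt', 'b.txt'], None))       # ['a.txt', 'b.txt']
    print(select_paths(['a.txt', 'b.txt'], []))         # []
    print(select_paths(['a.txt', 'b.txt'], ['b.txt']))  # ['b.txt']
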
-        self.specific_file_ids = None
         self.allow_pointless = allow_pointless
-        self.revprops = revprops
         self.message_callback = message_callback
         self.timestamp = timestamp
         self.timezone = timezone
         self.committer = committer
         self.strict = strict
         self.verbose = verbose
-        # accumulates an inventory delta to the basis entry, so we can make
-        # just the necessary updates to the workingtree's cached basis.
-        self._basis_delta = []
         self.work_tree.lock_write()
+        operation.add_cleanup(self.work_tree.unlock)
+        self.parents = self.work_tree.get_parent_ids()
+        # We can use record_iter_changes IFF iter_changes is compatible with
+        # the command line parameters, and the repository has fast delta
+        # generation. See bug 347649.
+        self.use_record_iter_changes = (
+            not self.branch.repository._format.supports_tree_reference and
+            (self.branch.repository._format.fast_deltas or
+             len(self.parents) < 2))
         self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
+        operation.add_cleanup(self.pb.finished)
         self.basis_revid = self.work_tree.last_revision()
         self.basis_tree = self.work_tree.basis_tree()
         self.basis_tree.lock_read()
+        operation.add_cleanup(self.basis_tree.unlock)
+        # Cannot commit with conflicts present.
+        if len(self.work_tree.conflicts()) > 0:
+            raise ConflictsInTree
+        # Setup the bound branch variables as needed.
+        self._check_bound_branch(operation, possible_master_transports)
+        # Check that the working tree is up to date
+        old_revno, new_revno = self._check_out_of_date_tree()
+        # Complete configuration setup
+        if reporter is not None:
+            self.reporter = reporter
+        elif self.reporter is None:
+            self.reporter = self._select_reporter()
+        if self.config is None:
+            self.config = self.branch.get_config()
+        self._set_specific_file_ids()
+        # Setup the progress bar. As the number of files that need to be
+        # committed is unknown, progress is reported as stages.
+        # We keep track of entries separately though and include that
+        # information in the progress bar during the relevant stages.
+        self.pb_stage_name = ""
+        self.pb_stage_count = 0
+        self.pb_stage_total = 5
+        if self.bound_branch:
+            self.pb_stage_total += 1
+        self.pb.show_pct = False
+        self.pb.show_spinner = False
+        self.pb.show_eta = False
+        self.pb.show_count = True
+        self.pb.show_bar = True
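
Because the number of files to be committed is not known up front, progress is reported as a fixed set of named stages, with an optional per-entry counter inside a stage. A standalone sketch of that reporting pattern (the ProgressStages class is invented for illustration and is not bzrlib's ui_factory/progress API):

    class ProgressStages(object):
        # Illustration only: report work as numbered, named stages and
        # optionally count entries inside the current stage.
        def __init__(self, total_stages):
            self.total = total_stages
            self.current = 0
            self.counted = False
            self.entry = 0

        def start_stage(self, name, counter=False):
            self.current += 1
            self.counted = counter
            self.entry = 0
            print("[%d/%d] %s" % (self.current, self.total, name))

        def next_entry(self):
            if self.counted:
                self.entry += 1
                print("  entry %d" % self.entry)

    stages = ProgressStages(3)
    stages.start_stage("Collecting changes", counter=True)
    stages.next_entry()
    stages.next_entry()
    stages.start_stage("Saving data locally")
    stages.start_stage("Updating the working tree")
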
+        self._gather_parents()
+        # After a merge, a selected file commit is not supported.
+        # See 'bzr help merge' for an explanation as to why.
+        if len(self.parents) > 1 and self.specific_files is not None:
+            raise errors.CannotCommitSelectedFileMerge(self.specific_files)
+        # Excludes are a form of selected file commit.
+        if len(self.parents) > 1 and self.exclude:
+            raise errors.CannotCommitSelectedFileMerge(self.exclude)
+        # Collect the changes
+        self._set_progress_stage("Collecting changes", counter=True)
+        self.builder = self.branch.get_commit_builder(self.parents,
+            self.config, timestamp, timezone, committer, self.revprops, rev_id)
-            # Cannot commit with conflicts present.
-            if len(self.work_tree.conflicts()) > 0:
-                raise ConflictsInTree
-            # Setup the bound branch variables as needed.
-            self._check_bound_branch()
-            # Check that the working tree is up to date
-            old_revno, new_revno = self._check_out_of_date_tree()
-            # Complete configuration setup
-            if reporter is not None:
-                self.reporter = reporter
-            elif self.reporter is None:
-                self.reporter = self._select_reporter()
-            if self.config is None:
-                self.config = self.branch.get_config()
-            # If provided, ensure the specified files are versioned
-            if self.specific_files is not None:
-                # Note: This routine is being called because it raises
-                # PathNotVersionedError as a side effect of finding the IDs. We
-                # later use the ids we found as input to the working tree
-                # inventory iterator, so we only consider those ids rather than
-                # examining the whole tree again.
-                # XXX: Don't we have filter_unversioned to do this more
-                self.specific_file_ids = tree.find_ids_across_trees(
-                    specific_files, [self.basis_tree, self.work_tree])
-            # Setup the progress bar. As the number of files that need to be
-            # committed is unknown, progress is reported as stages.
-            # We keep track of entries separately though and include that
-            # information in the progress bar during the relevant stages.
-            self.pb_stage_name = ""
-            self.pb_stage_count = 0
-            self.pb_stage_total = 5
-            if self.bound_branch:
-                self.pb_stage_total += 1
-            self.pb.show_pct = False
-            self.pb.show_spinner = False
-            self.pb.show_eta = False
-            self.pb.show_count = True
-            self.pb.show_bar = True
-            # After a merge, a selected file commit is not supported.
-            # See 'bzr help merge' for an explanation as to why.
-            self.basis_inv = self.basis_tree.inventory
-            self._gather_parents()
-            if len(self.parents) > 1 and self.specific_files:
-                raise errors.CannotCommitSelectedFileMerge(self.specific_files)
-            # Collect the changes
-            self._set_progress_stage("Collecting changes",
-                entries_title="Directory")
-            self.builder = self.branch.get_commit_builder(self.parents,
-                self.config, timestamp, timezone, committer, revprops, rev_id)
-                # find the location being committed to
-                if self.bound_branch:
-                    master_location = self.master_branch.base
-                    master_location = self.branch.base
-                # report the start of the commit
-                self.reporter.started(new_revno, self.rev_id, master_location)
-                self._update_builder_with_changes()
-                self._report_and_accumulate_deletes()
-                self._check_pointless()
-                # TODO: Now the new inventory is known, check for conflicts.
-                # ADHB 2006-08-08: If this is done, populate_new_inv should not add
-                # weave lines, because nothing should be recorded until it is known
-                # that commit will succeed.
-                self._set_progress_stage("Saving data locally")
-                self.builder.finish_inventory()
-                # Prompt the user for a commit message if none provided
-                message = message_callback(self)
-                self.message = message
-                self._escape_commit_message()
-                # Add revision data to the local branch
-                self.rev_id = self.builder.commit(self.message)
-            self._process_pre_hooks(old_revno, new_revno)
-            # Upload revision data to the master.
-            # this will propagate merged revisions too if needed.
-            if self.bound_branch:
-                if not self.master_branch.repository.has_same_location(
-                    self.branch.repository):
-                    self._set_progress_stage("Uploading data to master branch")
-                    self.master_branch.repository.fetch(self.branch.repository,
-                        revision_id=self.rev_id)
-                # now the master has the revision data
-                # 'commit' to the master first so a timeout here causes the
-                # local branch to be out of date
-                self.master_branch.set_last_revision_info(new_revno,
-            # and now do the commit locally.
-            self.branch.set_last_revision_info(new_revno, self.rev_id)
-            # Make the working tree up to date with the branch
-            self._set_progress_stage("Updating the working tree")
-            self.work_tree.update_basis_by_delta(self.rev_id,
-            self.reporter.completed(new_revno, self.rev_id)
-            self._process_post_hooks(old_revno, new_revno)
+            self.builder.will_record_deletes()
+            # find the location being committed to
+            if self.bound_branch:
+                master_location = self.master_branch.base
+                master_location = self.branch.base
+            # report the start of the commit
+            self.reporter.started(new_revno, self.rev_id, master_location)
+            self._update_builder_with_changes()
+            self._check_pointless()
+            # TODO: Now the new inventory is known, check for conflicts.
+            # ADHB 2006-08-08: If this is done, populate_new_inv should not add
+            # weave lines, because nothing should be recorded until it is known
+            # that commit will succeed.
+            self._set_progress_stage("Saving data locally")
+            self.builder.finish_inventory()
+            # Prompt the user for a commit message if none provided
+            message = message_callback(self)
+            self.message = message
+            # Add revision data to the local branch
+            self.rev_id = self.builder.commit(self.message)
+            mutter("aborting commit write group because of exception:")
+            trace.log_exception_quietly()
+            note("aborting commit write group: %r" % (e,))
+        self._process_pre_hooks(old_revno, new_revno)
+        # Upload revision data to the master.
+        # this will propagate merged revisions too if needed.
+        if self.bound_branch:
+            self._set_progress_stage("Uploading data to master branch")
+            # 'commit' to the master first so a timeout here causes the
+            # local branch to be out of date
+            self.master_branch.import_last_revision_info(
+                self.branch.repository, new_revno, self.rev_id)
+        # and now do the commit locally.
+        self.branch.set_last_revision_info(new_revno, self.rev_id)
+        # Make the working tree be up to date with the branch.  This
+        # includes automatic changes scheduled to be made to the tree, such
+        # as updating its basis and unversioning paths that were missing.
+        self.work_tree.unversion(self.deleted_ids)
+        self._set_progress_stage("Updating the working tree")
+        self.work_tree.update_basis_by_delta(self.rev_id,
+            self.builder.get_basis_delta())
+        self.reporter.completed(new_revno, self.rev_id)
+        self._process_post_hooks(old_revno, new_revno)
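
The 'commit to the master first' comment encodes an ordering argument: the master branch tip is advanced before the local one, so a failure in between leaves the local branch merely out of date (recoverable with an update) rather than ahead of its master. A toy sketch of that ordering, using invented stand-in classes rather than real bzrlib branches:

    class FakeBranch(object):
        # Stand-in for a branch that only tracks its last revision.
        def __init__(self, name, tip):
            self.name = name
            self.tip = tip

    def bound_commit(local, master, new_rev, fail_after_master=False):
        # Advance the master first: if we stop here, the local branch is
        # merely behind its master, which an update can repair.
        master.tip = new_rev
        if fail_after_master:
            raise RuntimeError("timeout talking to master")
        # Only once the master is updated do we advance the local branch.
        local.tip = new_rev

    local = FakeBranch("local", "rev-1")
    master = FakeBranch("master", "rev-1")
    try:
        bound_commit(local, master, "rev-2", fail_after_master=True)
    except RuntimeError:
        pass
    print(master.tip)  # rev-2
    print(local.tip)   # rev-1: out of date, but never ahead of the master
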
         return self.rev_id

     def _select_reporter(self):
@@ -563 +595 @@
                     old_revno, old_revid, new_revno, self.rev_id,
                     tree_delta, future_tree)
-        """Cleanup any open locks, progress bars etc."""
-        cleanups = [self._cleanup_bound_branch,
-                    self.basis_tree.unlock,
-                    self.work_tree.unlock,
-        found_exception = None
-        for cleanup in cleanups:
-            # we want every cleanup to run no matter what.
-            # so we have a catchall here, but we will raise the
-            # last encountered exception up the stack: and
-            # typically this will be useful enough.
-        if found_exception is not None:
-            # don't do a plain raise, because the last exception may have been
-            # trashed, e is our sure-to-work exception even though it loses the
-            # full traceback. XXX: RBC 20060421 perhaps we could check the
-            # exc_info and if it's the same one do a plain raise otherwise
-            # 'raise e' as we do now.
-
-    def _cleanup_bound_branch(self):
-        """Executed at the end of a try/finally to cleanup a bound branch.
-        If the branch wasn't bound, this is a no-op.
-        If it was, it resets self.branch to the local branch, instead
-        if not self.bound_branch:
-        if self.master_locked:
-            self.master_branch.unlock()
-
-    def _escape_commit_message(self):
-        """Replace xml-incompatible control characters."""
-        # FIXME: RBC 20060419 this should be done by the revision
-        # serialiser not by commit. Then we can also add an unescaper
-        # in the deserializer and start roundtripping revision messages
-        # precisely. See repository_implementations/test_repository.py
-        # Python strings can include characters that can't be
-        # represented in well-formed XML; escape characters that
-        # aren't listed in the XML specification
-        # (http://www.w3.org/TR/REC-xml/#NT-Char).
-        self.message, escape_count = re.subn(
-            u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
-            lambda match: match.group(0).encode('unicode_escape'),
-            self.reporter.escaped(escape_count, self.message)
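
The removed helper above rewrites any run of characters outside the XML 1.0 Char production (tab, LF, CR, 0x20-0xD7FF, 0xE000-0xFFFD) into backslash escapes so the commit message can be serialised. A runnable standalone version of the same substitution (the function name and the trailing decode are mine; bzrlib's Python 2 code uses the byte string from unicode_escape directly):

    import re

    def escape_commit_message(message):
        # Replace runs of characters outside the XML Char production
        # (http://www.w3.org/TR/REC-xml/#NT-Char) with backslash escapes.
        escaped, count = re.subn(
            u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
            lambda match: match.group(0).encode('unicode_escape').decode('ascii'),
            message)
        return escaped, count

    escaped, count = escape_commit_message(u'fix\x00null byte and \x07bell')
    print(count)    # 2
    print(escaped)  # fix\x00null byte and \x07bell  (the escapes are literal text now)
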
     def _gather_parents(self):
         """Record the parents of a merge for merge detection."""
         # TODO: Make sure that this list doesn't contain duplicate
         # entries and the order is preserved when doing this.
-        self.parents = self.work_tree.get_parent_ids()
+        if self.use_record_iter_changes:
+        self.basis_inv = self.basis_tree.inventory
         self.parent_invs = [self.basis_inv]
         for revision in self.parents[1:]:
             if self.branch.repository.has_revision(revision):
     def _update_builder_with_changes(self):
         """Update the commit builder with the data about what has changed.
-        # Build the revision inventory.
-        # This starts by creating a new empty inventory. Depending on
-        # which files are selected for commit, and what is present in the
-        # current tree, the new inventory is populated. inventory entries
-        # which are candidates for modification have their revision set to
-        # None; inventory entries that are carried over untouched have their
-        # revision set to their prior value.
-        # ESEPARATIONOFCONCERNS: this function is diffing and using the diff
-        # results to create a new inventory at the same time, which results
-        # in bugs like #46635.  Any reason not to use/enhance Tree.changes_from?
+        exclude = self.exclude
         specific_files = self.specific_files
         mutter("Selecting files for commit with filter %s", specific_files)
-        # Build the new inventory
-        self._populate_from_inventory(specific_files)
+        if self.use_record_iter_changes:
+            iter_changes = self.work_tree.iter_changes(self.basis_tree,
+                specific_files=specific_files)
+            iter_changes = self._filter_iter_changes(iter_changes)
+            for file_id, path, fs_hash in self.builder.record_iter_changes(
+                self.work_tree, self.basis_revid, iter_changes):
+                self.work_tree._observed_sha1(file_id, path, fs_hash)
+            # Build the new inventory
+            self._populate_from_inventory()
+            self._record_unselected()
+            self._report_and_accumulate_deletes()
+    def _filter_iter_changes(self, iter_changes):
+        """Process iter_changes.
+        This method reports on the changes in iter_changes to the user, and
+        converts 'missing' entries in the iter_changes iterator to 'deleted'
+        entries. 'missing' entries have their new path and new versioned flag cleared.
+        :param iter_changes: An iter_changes to process.
+        :return: A generator of changes.
+        reporter = self.reporter
+        report_changes = reporter.is_verbose()
+        for change in iter_changes:
+                old_path = change[1][0]
+                new_path = change[1][1]
+                versioned = change[3][1]
+            versioned = change[3][1]
+            if kind is None and versioned:
+                    reporter.missing(new_path)
+                deleted_ids.append(change[0])
+                # Reset the new path (None) and new versioned flag (False)
+                change = (change[0], (change[1][0], None), change[2],
+                    (change[3][0], False)) + change[4:]
+            elif kind == 'tree-reference':
+                if self.recursive == 'down':
+                    self._commit_nested_tree(change[0], change[1][1])
+            if change[3][0] or change[3][1]:
+                        reporter.deleted(old_path)
+                    elif old_path is None:
+                        reporter.snapshot_change('added', new_path)
+                    elif old_path != new_path:
+                        reporter.renamed('renamed', old_path, new_path)
+                            self.work_tree.branch.repository._format.rich_root_data):
+                            # Don't report on changes to '' in non rich root
+                            reporter.snapshot_change('modified', new_path)
+                    self._next_progress_entry()
+        # Unversion IDs that were found to be deleted
+        self.deleted_ids = deleted_ids
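
Each iter_changes result is a tuple that starts (file_id, (old_path, new_path), changed_content, (old_versioned, new_versioned), ...), and the 'missing'-to-'deleted' conversion above rebuilds that tuple with the new path and the new versioned flag blanked. A small standalone illustration (the sample tuple is made up):

    def missing_to_deleted(change):
        # Keep the file id, old path and old versioned flag; record the new
        # path as None and the new versioned flag as False - the same tuple
        # surgery performed above for 'missing' entries.
        return (change[0], (change[1][0], None), change[2],
                (change[3][0], False)) + change[4:]

    # Made-up change tuple: (file_id, (old_path, new_path), changed_content,
    # versioned pair, parent pair, name pair, kind pair, executable pair).
    sample = ('file-id-1', ('a.txt', 'a.txt'), True, (True, True),
              ('root-id', 'root-id'), ('a.txt', 'a.txt'), ('file', None),
              (False, False))

    converted = missing_to_deleted(sample)
    print(converted[1])  # ('a.txt', None)
    print(converted[3])  # (True, False)
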
+    def _record_unselected(self):
         # If specific files are selected, then all un-selected files must be
         # recorded in their previous state. For more details, see
         # https://lists.ubuntu.com/archives/bazaar/2007q3/028476.html.
+        if self.specific_files or self.exclude:
+            specific_files = self.specific_files or []
             for path, old_ie in self.basis_inv.iter_entries():
                 if old_ie.file_id in self.builder.new_inventory:
                     # already added - skip.
-                if is_inside_any(specific_files, path):
-                    # was inside the selected path, if not present it has been
+                if (is_inside_any(specific_files, path)
+                    and not is_inside_any(self.exclude, path)):
+                    # was inside the selected path, and not excluded - if not
+                    # present it has been deleted so skip.
-                if old_ie.kind == 'directory':
-                    self._next_progress_entry()
-                # not in final inv yet, was not in the selected files, so is an
-                # entry to be preserved unaltered.
+                # From here down it was either not selected, or was excluded:
+                # We preserve the entry unaltered.
                 ie = old_ie.copy()
                 # Note: specific file commits after a merge are currently
                 # prohibited. This test is for sanity/safety in case it's
                 # required after that changes.
                 if len(self.parents) > 1:
                     ie.revision = None
-                delta, version_recorded = self.builder.record_entry_contents(
-                    ie, self.parent_invs, path, self.basis_tree, None)
-                    self.any_entries_changed = True
-                    if delta: self._basis_delta.append(delta)
+                self.builder.record_entry_contents(ie, self.parent_invs, path,
+                    self.basis_tree, None)
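
The include/exclude test above asks whether a path falls under at least one selected directory and under none of the excluded ones. A simplified standalone sketch of that test (is_inside and path_is_selected here are stand-ins, not bzrlib's osutils helpers):

    def is_inside(directory, path):
        # Simplified: a path is inside a directory if it equals it or is a
        # descendant of it; '' acts as the tree root.
        if directory == '' or directory == path:
            return True
        return path.startswith(directory + '/')

    def path_is_selected(path, specific_files, exclude):
        selected = any(is_inside(d, path) for d in specific_files)
        excluded = any(is_inside(d, path) for d in exclude)
        return selected and not excluded

    print(path_is_selected('src/a.py', ['src'], []))       # True
    print(path_is_selected('src/a.py', ['src'], ['src']))  # False
    print(path_is_selected('doc/x.txt', ['src'], []))      # False
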
     def _report_and_accumulate_deletes(self):
-        # XXX: Could the list of deleted paths and ids be instead taken from
-        # _populate_from_inventory?
-        deleted_ids = set(self.basis_inv._byid.keys()) - \
-            set(self.builder.new_inventory._byid.keys())
+        if (isinstance(self.basis_inv, Inventory)
+            and isinstance(self.builder.new_inventory, Inventory)):
+            # the older Inventory classes provide a _byid dict, and building a
+            # set from the keys of this dict is substantially faster than even
+            # getting a set of ids from the inventory
+            # <lifeless> set(dict) is roughly the same speed as
+            # set(iter(dict)) and both are significantly slower than
+            deleted_ids = set(self.basis_inv._byid.keys()) - \
+                set(self.builder.new_inventory._byid.keys())
+            deleted_ids = set(self.basis_inv) - set(self.builder.new_inventory)
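
Both branches of the isinstance check compute the same thing: the set of file ids present in the basis inventory but absent from the new one. A standalone illustration with plain dicts standing in for the two inventories:

    # Dicts keyed by file id stand in for the basis and new inventories.
    basis_inv = {'id-a': 'a.txt', 'id-b': 'b.txt', 'id-c': 'c.txt'}
    new_inv = {'id-a': 'a.txt', 'id-c': 'c.txt'}

    # Iterating a dict (like iterating an inventory) yields its keys, so set()
    # works directly; ids in the basis but not in the new tree were deleted.
    deleted_ids = set(basis_inv) - set(new_inv)
    print(sorted(deleted_ids))  # ['id-b']
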
             self.any_entries_deleted = True
             deleted = [(self.basis_tree.id2path(file_id), file_id)