~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/_annotator_pyx.pyx

  • Committer: John Arbash Meinel
  • Date: 2009-06-24 20:44:46 UTC
  • mto: This revision was merged to the branch mainline in revision 4522.
  • Revision ID: john@arbash-meinel.com-20090624204446-5f8iy7nbv6mdfs7v
Remove some debugging counters.

Current 'annotate' breakdown:
NEWS  builtins.py
6.564 2.265     overall time
  0.164 0.038     resolve heads() etc on annotated lines
  2.103 0.841     extract texts, convert delta to left-matching-block
  4.289 1.382     compute annotations
    3.533 1.101     get_matching_blocks() (left and right parents)
    0.704 0.180     update left-hand parent annotations (includes above)
    3.251 1.068     update right-hand parents
      0.118 0.030     resolve annotation lines


So by percentage, 54% of the time is in PatienceSequenceMatcher, 32% of the
time is extracting texts, and only 14% of the time is in the annotation work.
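
As a quick cross-check, those percentages follow from the NEWS column of the breakdown above (the builtins.py column gives roughly the same split). A minimal sketch of the arithmetic:

# Recomputing the quoted percentages from the NEWS column above.
overall = 6.564
matching_blocks = 3.533   # get_matching_blocks() (left and right parents)
extract_texts = 2.103     # extract texts, convert delta to left-matching-block

print(round(matching_blocks / overall * 100))   # ~54% in PatienceSequenceMatcher
print(round(extract_texts / overall * 100))     # ~32% extracting texts
print(round((overall - matching_blocks - extract_texts) / overall * 100))  # ~14% annotation work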

=== modified file 'bzrlib/_annotator_pyx.pyx'
@@ -54,6 +54,7 @@
     int PyObject_RichCompareBool_ptr "PyObject_RichCompareBool" (
         PyObject *, PyObject *, int opid)
 
+
 from bzrlib import errors, graph as _mod_graph, osutils, patiencediff, ui
 
 import time
@@ -211,6 +212,10 @@
     _check_annotations_are_lists(annotations, parent_annotations)
     par_list = <PyListObject *>parent_annotations
     ann_list = <PyListObject *>annotations
+    # For NEWS and bzrlib/builtins.py, over 99% of the lines are simply copied
+    # across from the parent entry. So this routine is heavily optimized for
+    # that. Would be interesting if we could use memcpy() but we have to incref
+    # and decref
     for parent_idx, lines_idx, match_len in matching_blocks:
         _check_match_ranges(parent_annotations, annotations,
                             parent_idx, lines_idx, match_len)
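
The new comment carries the reasoning for this routine: almost every line is copied verbatim from the parent, so the loop is optimised for bulk copying, and a raw memcpy() of the PyObject* array is ruled out because reference counts must be adjusted. A rough illustration of that constraint (hypothetical helper, not the code in this file):

# Copy match_len annotation entries from the parent list into the child list.
# Plain list assignment makes Cython INCREF each copied object and DECREF the
# entry it replaces, which is exactly the bookkeeping a bare memcpy() would skip.
cdef copy_annotation_range(list parent_annotations, list annotations,
                           Py_ssize_t parent_idx, Py_ssize_t lines_idx,
                           Py_ssize_t match_len):
    cdef Py_ssize_t i
    for i in range(match_len):
        annotations[lines_idx + i] = parent_annotations[parent_idx + i]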
@@ -289,11 +294,11 @@
         parent_lines = self._text_cache[parent_key]
         parent_annotations = self._annotations_cache[parent_key]
         # PatienceSequenceMatcher should probably be part of Policy
-        # t = c()
+        t = c()
         matcher = patiencediff.PatienceSequenceMatcher(None,
             parent_lines, text)
         matching_blocks = matcher.get_matching_blocks()
-        # _update_counter('get_matching_blocks()', c() - t)
+        _update_counter('get_matching_blocks()', c() - t)
         return parent_annotations, matching_blocks
 
     def _update_from_one_parent(self, key, annotations, lines, parent_key):
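
The c()/_update_counter() helpers that this hunk re-enables are not shown in the diff; a minimal sketch of the pattern the call sites imply, assuming c is a cheap clock (e.g. time.clock in the Python 2 era) and _update_counter accumulates totals in a module-level dict:

import time

c = time.clock      # assumption: the real binding is not visible in this diff
_counters = {}      # assumption: maps counter name -> accumulated total

def _update_counter(name, value):
    # Timing call sites pass elapsed seconds (c() - t); event call sites pass 1.
    _counters[name] = _counters.get(name, 0) + value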
@@ -385,12 +390,10 @@
         if parent_keys:
             t1 = c()
             self._update_from_one_parent(key, annotations, text, parent_keys[0])
-            _update_counter('left parents', 1)
             t2 = c()
             for parent in parent_keys[1:]:
                 self._update_from_other_parents(key, annotations, text,
                                                 this_annotation, parent)
-                _update_counter('right parents', 1)
             t3 = c()
             _update_counter('update left', t2 - t1)
             _update_counter('update rest', t3 - t2)
@@ -423,35 +426,30 @@
 
         This is meant as a compatibility thunk to how annotate() used to work.
         """
+        cdef Py_ssize_t pos, num_lines
         t_first = c()
         annotations, lines = self.annotate(key)
         _update_counter('annotate time', c() - t_first)
         assert len(annotations) == len(lines)
+        num_lines = len(lines)
         out = []
         heads = self._get_heads_provider().heads
-        append = out.append
         t_second = c()
-        for annotation, line in zip(annotations, lines):
+        for pos from 0 <= pos < num_lines:
+            annotation = annotations[pos]
+            line = lines[pos]
             if len(annotation) == 1:
-                _update_counter('one source', 1)
-                append((annotation[0], line))
+                head = annotation[0]
             else:
-                _update_counter('multi source', 1)
-                t = c()
                 the_heads = heads(annotation)
-                _update_counter('heads time', c() - t)
                 if len(the_heads) == 1:
-                    _update_counter('one head', 1)
                     for head in the_heads:
                         break
                 else:
-                    _update_counter('multi heads', 1)
                     # We need to resolve the ambiguity, for now just pick the
                     # sorted smallest
                     head = sorted(the_heads)[0]
-                if head == annotation[0]:
-                    _update_counter('first ann', 1)
-                append((head, line))
+            PyList_Append(out, (head, line))
         _update_counter('resolve annotations', c() - t_second)
         _update_counter('overall', c() - t_first)
         return out
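
For readers unfamiliar with the Cython-only `for pos from 0 <= pos < num_lines` loop form, the head-resolution logic in the new version is equivalent to this plain-Python sketch (hypothetical function name, PyList_Append replaced with list.append, timing counters omitted); each entry in annotations is assumed to be the tuple of candidate source keys for the matching line:

def annotate_flat_sketch(annotations, lines, heads):
    out = []
    for annotation, line in zip(annotations, lines):
        if len(annotation) == 1:
            # Only one source: it is the head by definition.
            head = annotation[0]
        else:
            the_heads = heads(annotation)
            if len(the_heads) == 1:
                for head in the_heads:
                    break
            else:
                # Ambiguous: fall back to the sorted smallest key.
                head = sorted(the_heads)[0]
        out.append((head, line))
    return out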