~bzr-pqm/bzr/bzr.dev


Viewing changes to bzrlib/tests/__init__.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-08-27 13:20:24 UTC
  • mfrom: (4650.1.9 test-speed)
  • Revision ID: pqm@pqm.ubuntu.com-20090827132024-h13eo7blndo2dfpl
(robertc) Detangle core logic in selftest to make it more understandable and compatible with current python. (Robert Collins)

@@ -175,17 +175,47 @@
         self._overall_start_time = time.time()
         self._strict = strict
 
-    def done(self):
-        # nb: called stopTestRun in the version of this that Python merged
-        # upstream, according to lifeless 20090803
+    def stopTestRun(self):
+        run = self.testsRun
+        actionTaken = "Ran"
+        stopTime = time.time()
+        timeTaken = stopTime - self.startTime
+        self.printErrors()
+        self.stream.writeln(self.separator2)
+        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
+                            run, run != 1 and "s" or "", timeTaken))
+        self.stream.writeln()
+        if not self.wasSuccessful():
+            self.stream.write("FAILED (")
+            failed, errored = map(len, (self.failures, self.errors))
+            if failed:
+                self.stream.write("failures=%d" % failed)
+            if errored:
+                if failed: self.stream.write(", ")
+                self.stream.write("errors=%d" % errored)
+            if self.known_failure_count:
+                if failed or errored: self.stream.write(", ")
+                self.stream.write("known_failure_count=%d" %
+                    self.known_failure_count)
+            self.stream.writeln(")")
+        else:
+            if self.known_failure_count:
+                self.stream.writeln("OK (known_failures=%d)" %
+                    self.known_failure_count)
+            else:
+                self.stream.writeln("OK")
+        if self.skip_count > 0:
+            skipped = self.skip_count
+            self.stream.writeln('%d test%s skipped' %
+                                (skipped, skipped != 1 and "s" or ""))
+        if self.unsupported:
+            for feature, count in sorted(self.unsupported.items()):
+                self.stream.writeln("Missing feature '%s' skipped %d tests." %
+                    (feature, count))
         if self._strict:
             ok = self.wasStrictlySuccessful()
         else:
             ok = self.wasSuccessful()
-        if ok:
-            self.stream.write('tests passed\n')
-        else:
-            self.stream.write('tests failed\n')
         if TestCase._first_thread_leaker_id:
             self.stream.write(
                 '%s is leaking threads among %d leaking tests.\n' % (
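For context, this hunk moves the end-of-run summary out of the bzr-specific done() method and into stopTestRun(), the hook name that Python's unittest standardised (it became part of TestResult in Python 2.7). A minimal sketch of that protocol outside bzrlib; the class and attribute names below are illustrative, not part of this change:

    import time
    import unittest

    class SummaryResult(unittest.TestResult):
        # Hypothetical example class, not part of bzrlib.

        def startTestRun(self):
            # Called once before the first test runs; record the start time.
            super(SummaryResult, self).startTestRun()
            self._start_time = time.time()

        def stopTestRun(self):
            # Called once after the last test; print a summary like the one above.
            super(SummaryResult, self).stopTestRun()
            elapsed = time.time() - self._start_time
            print("Ran %d test%s in %.3fs" % (
                self.testsRun, self.testsRun != 1 and "s" or "", elapsed))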
@@ -383,12 +413,12 @@
         else:
             raise errors.BzrError("Unknown whence %r" % whence)
 
-    def finished(self):
-        pass
-
     def report_cleaning_up(self):
         pass
 
+    def startTestRun(self):
+        self.startTime = time.time()
+
     def report_success(self, test):
         pass
 
@@ -421,15 +451,14 @@
         self.pb.update_latency = 0
         self.pb.show_transport_activity = False
 
-    def done(self):
+    def stopTestRun(self):
         # called when the tests that are going to run have run
         self.pb.clear()
-        super(TextTestResult, self).done()
-
-    def finished(self):
         self.pb.finished()
+        super(TextTestResult, self).stopTestRun()
 
-    def report_starting(self):
+    def startTestRun(self):
+        super(TextTestResult, self).startTestRun()
         self.pb.update('[test 0/%d] Starting' % (self.num_tests))
 
     def printErrors(self):
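This hunk shows the subclass pattern the change relies on: each override does its own UI work (here the progress bar) and chains to the parent class so the base-class bookkeeping from the first hunk still runs, with the UI torn down before the parent prints its summary. A generic sketch of the same chaining, with a made-up reporter object standing in for bzrlib's progress bar:

    import unittest

    class ProgressResult(unittest.TestResult):
        # Hypothetical example; "reporter" is a stand-in for the progress bar.

        def __init__(self, reporter):
            super(ProgressResult, self).__init__()
            self.reporter = reporter

        def startTestRun(self):
            super(ProgressResult, self).startTestRun()   # keep base bookkeeping
            self.reporter.start()

        def stopTestRun(self):
            self.reporter.finish()                        # tear down the UI first
            super(ProgressResult, self).stopTestRun()     # then the base summary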
@@ -514,7 +543,8 @@
             result = a_string
         return result.ljust(final_width)
 
-    def report_starting(self):
+    def startTestRun(self):
+        super(VerboseTestResult, self).startTestRun()
         self.stream.write('running %d tests...\n' % self.num_tests)
 
     def report_test_start(self, test):
@@ -578,7 +608,6 @@
                  descriptions=0,
                  verbosity=1,
                  bench_history=None,
-                 list_only=False,
                  strict=False,
                  result_decorators=None,
                  ):
@@ -593,85 +622,43 @@
         self.descriptions = descriptions
         self.verbosity = verbosity
         self._bench_history = bench_history
-        self.list_only = list_only
         self._strict = strict
         self._result_decorators = result_decorators or []
 
     def run(self, test):
         "Run the given test case or test suite."
-        startTime = time.time()
         if self.verbosity == 1:
             result_class = TextTestResult
         elif self.verbosity >= 2:
             result_class = VerboseTestResult
-        result = result_class(self.stream,
+        original_result = result_class(self.stream,
                               self.descriptions,
                               self.verbosity,
                               bench_history=self._bench_history,
                               strict=self._strict,
                               )
-        run_result = result
+        # Signal to result objects that look at stop early policy to stop,
+        original_result.stop_early = self.stop_on_failure
+        result = original_result
         for decorator in self._result_decorators:
-            run_result = decorator(run_result)
-        result.stop_early = self.stop_on_failure
-        result.report_starting()
-        if self.list_only:
-            if self.verbosity >= 2:
-                self.stream.writeln("Listing tests only ...\n")
-            run = 0
-            for t in iter_suite_tests(test):
-                self.stream.writeln("%s" % (t.id()))
-                run += 1
-            return None
-        else:
-            try:
-                import testtools
-            except ImportError:
-                test.run(run_result)
-            else:
-                if isinstance(test, testtools.ConcurrentTestSuite):
-                    # We need to catch bzr specific behaviors
-                    test.run(BZRTransformingResult(run_result))
-                else:
-                    test.run(run_result)
-            run = result.testsRun
-            actionTaken = "Ran"
-        stopTime = time.time()
-        timeTaken = stopTime - startTime
-        result.printErrors()
-        self.stream.writeln(result.separator2)
-        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
-                            run, run != 1 and "s" or "", timeTaken))
-        self.stream.writeln()
-        if not result.wasSuccessful():
-            self.stream.write("FAILED (")
-            failed, errored = map(len, (result.failures, result.errors))
-            if failed:
-                self.stream.write("failures=%d" % failed)
-            if errored:
-                if failed: self.stream.write(", ")
-                self.stream.write("errors=%d" % errored)
-            if result.known_failure_count:
-                if failed or errored: self.stream.write(", ")
-                self.stream.write("known_failure_count=%d" %
-                    result.known_failure_count)
-            self.stream.writeln(")")
-        else:
-            if result.known_failure_count:
-                self.stream.writeln("OK (known_failures=%d)" %
-                    result.known_failure_count)
-            else:
-                self.stream.writeln("OK")
-        if result.skip_count > 0:
-            skipped = result.skip_count
-            self.stream.writeln('%d test%s skipped' %
-                                (skipped, skipped != 1 and "s" or ""))
-        if result.unsupported:
-            for feature, count in sorted(result.unsupported.items()):
-                self.stream.writeln("Missing feature '%s' skipped %d tests." %
-                    (feature, count))
-        result.finished()
-        return result
+            result = decorator(result)
+            result.stop_early = self.stop_on_failure
+        try:
+            import testtools
+        except ImportError:
+            pass
+        else:
+            if isinstance(test, testtools.ConcurrentTestSuite):
+                # We need to catch bzr specific behaviors
+                result = BZRTransformingResult(result)
+        result.startTestRun()
+        try:
+            test.run(result)
+        finally:
+            result.stopTestRun()
+        # higher level code uses our extended protocol to determine
+        # what exit code to give.
+        return original_result
 
 
 def iter_suite_tests(suite):
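The rewritten run() above now does only four things: build the result, apply decorators, optionally wrap it for a testtools.ConcurrentTestSuite, then drive the run through startTestRun()/stopTestRun() with a try/finally so the end-of-run hook fires even if the suite raises. A stripped-down sketch of that control flow using only the standard library; the helper name and decorator handling here are illustrative, not bzrlib's API:

    import unittest

    def drive(test, result, decorators=()):
        # Hypothetical helper mirroring the control flow above.
        reporting_result = result
        for decorate in decorators:
            reporting_result = decorate(reporting_result)
        reporting_result.startTestRun()
        try:
            test.run(reporting_result)
        finally:
            # Guarantees the end-of-run summary even if the suite blows up.
            reporting_result.stopTestRun()
        # Callers inspect the undecorated result to choose an exit code.
        return result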
@@ -2807,7 +2794,6 @@
                             descriptions=0,
                             verbosity=verbosity,
                             bench_history=bench_history,
-                            list_only=list_only,
                             strict=strict,
                             result_decorators=result_decorators,
                             )
@@ -2830,10 +2816,15 @@
         decorators.append(CountingDecorator)
     for decorator in decorators:
         suite = decorator(suite)
-    result = runner.run(suite)
     if list_only:
+        # Done after test suite decoration to allow randomisation etc
+        # to take effect, though that is of marginal benefit.
+        if verbosity >= 2:
+            stream.write("Listing tests only ...\n")
+        for t in iter_suite_tests(suite):
+            stream.write("%s\n" % (t.id()))
         return True
-    result.done()
+    result = runner.run(suite)
     if strict:
         return result.wasStrictlySuccessful()
     else:
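With listing handled here, run_suite enumerates test ids after the suite decorators (ordering, filtering, counting) have been applied, without ever building a runner or result. A rough, generic equivalent of the flattening that iter_suite_tests performs for that listing step; the helper name below is an illustration, not bzrlib's implementation:

    import unittest

    def flatten_suite(suite):
        # Yield individual TestCase objects from an arbitrarily nested suite.
        for item in suite:
            if isinstance(item, unittest.TestSuite):
                for test in flatten_suite(item):
                    yield test
            else:
                yield item

    # Listing then reduces to:
    #     for t in flatten_suite(suite):
    #         stream.write("%s\n" % t.id())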
@@ -3168,5 +3159,11 @@
     def stopTest(self, test):
         self.result.stopTest(test)
 
+    def startTestRun(self):
+        self.result.startTestRun()
+
+    def stopTestRun(self):
+        self.result.stopTestRun()
+
     def addSkip(self, test, reason):
         self.result.addSkip(test, reason)
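The final hunk teaches the result wrapper used for testtools concurrent suites to pass the new lifecycle hooks through to the underlying result, so the start/stop events still reach the real reporter. The pattern is a plain delegating wrapper, sketched here with generic names rather than bzrlib's:

    class ForwardingResult(object):
        # Hypothetical sketch of a result wrapper that forwards the run
        # lifecycle to a wrapped result object.

        def __init__(self, target):
            self.result = target

        def startTestRun(self):
            self.result.startTestRun()

        def stopTestRun(self):
            self.result.stopTestRun()

        def __getattr__(self, name):
            # Everything else (startTest, addError, ...) is delegated too.
            return getattr(self.result, name)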