~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/tests/__init__.py

  • Committer: Robert Collins
  • Date: 2007-03-27 21:45:59 UTC
  • mfrom: (2379 +trunk)
  • mto: (2018.5.109 hpss)
  • mto: This revision was merged to the branch mainline in revision 2435.
  • Revision ID: robertc@robertcollins.net-20070327214559-3zto1rfp3g7dnzl9
Merge bzr.dev.

@@ -180,7 +180,9 @@
         self.num_tests = num_tests
         self.error_count = 0
         self.failure_count = 0
+        self.known_failure_count = 0
         self.skip_count = 0
+        self.unsupported = {}
         self.count = 0
         self._overall_start_time = time.time()
 
@@ -221,32 +223,46 @@
         """Record that a test has started."""
         self._start_time = time.time()
 
+    def _cleanupLogFile(self, test):
+        # We can only do this if we have one of our TestCases, not if
+        # we have a doctest.
+        setKeepLogfile = getattr(test, 'setKeepLogfile', None)
+        if setKeepLogfile is not None:
+            setKeepLogfile()
+
     def addError(self, test, err):
+        self.extractBenchmarkTime(test)
+        self._cleanupLogFile(test)
         if isinstance(err[1], TestSkipped):
-            return self.addSkipped(test, err)    
+            return self.addSkipped(test, err)
+        elif isinstance(err[1], UnavailableFeature):
+            return self.addNotSupported(test, err[1].args[0])
         unittest.TestResult.addError(self, test, err)
-        # We can only do this if we have one of our TestCases, not if
-        # we have a doctest.
-        setKeepLogfile = getattr(test, 'setKeepLogfile', None)
-        if setKeepLogfile is not None:
-            setKeepLogfile()
-        self.extractBenchmarkTime(test)
+        self.error_count += 1
         self.report_error(test, err)
         if self.stop_early:
             self.stop()
 
     def addFailure(self, test, err):
+        self._cleanupLogFile(test)
+        self.extractBenchmarkTime(test)
+        if isinstance(err[1], KnownFailure):
+            return self.addKnownFailure(test, err)
         unittest.TestResult.addFailure(self, test, err)
-        # We can only do this if we have one of our TestCases, not if
-        # we have a doctest.
-        setKeepLogfile = getattr(test, 'setKeepLogfile', None)
-        if setKeepLogfile is not None:
-            setKeepLogfile()
-        self.extractBenchmarkTime(test)
+        self.failure_count += 1
         self.report_failure(test, err)
         if self.stop_early:
             self.stop()
 
+    def addKnownFailure(self, test, err):
+        self.known_failure_count += 1
+        self.report_known_failure(test, err)
+
+    def addNotSupported(self, test, feature):
+        self.unsupported.setdefault(str(feature), 0)
+        self.unsupported[str(feature)] += 1
+        self.report_unsupported(test, feature)
+
     def addSuccess(self, test):
         self.extractBenchmarkTime(test)
         if self._bench_history is not None:
@@ -258,7 +274,6 @@
         unittest.TestResult.addSuccess(self, test)
 
     def addSkipped(self, test, skip_excinfo):
-        self.extractBenchmarkTime(test)
         self.report_skip(test, skip_excinfo)
         # seems best to treat this as success from point-of-view of unittest
         # -- it actually does nothing so it barely matters :)
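
In the hunks above, addError and addFailure now classify the exception before delegating to unittest: TestSkipped becomes a skip, UnavailableFeature is rerouted to addNotSupported, and KnownFailure to addKnownFailure, so none of these inflate error_count or failure_count. A minimal self-contained sketch of the dispatch pattern, using stand-in exception classes rather than the bzrlib ones:

    import unittest

    class TestSkipped(Exception): pass
    class UnavailableFeature(Exception): pass

    class DispatchingResult(unittest.TestResult):
        # Same ordering as the diff: intercept the special exceptions
        # before the base class records a real error.
        def addError(self, test, err):
            if isinstance(err[1], TestSkipped):
                return self.addSkipped(test, err)
            elif isinstance(err[1], UnavailableFeature):
                return self.addNotSupported(test, err[1].args[0])
            unittest.TestResult.addError(self, test, err)

        def addSkipped(self, test, err):
            pass  # stand-in; the real class reports and counts the skip

        def addNotSupported(self, test, feature):
            pass  # stand-in; the real class tallies per-feature counts
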
@@ -301,12 +316,22 @@
 class TextTestResult(ExtendedTestResult):
     """Displays progress and results of tests in text form"""
 
-    def __init__(self, *args, **kw):
-        ExtendedTestResult.__init__(self, *args, **kw)
-        self.pb = self.ui.nested_progress_bar()
+    def __init__(self, stream, descriptions, verbosity,
+                 bench_history=None,
+                 num_tests=None,
+                 pb=None,
+                 ):
+        ExtendedTestResult.__init__(self, stream, descriptions, verbosity,
+            bench_history, num_tests)
+        if pb is None:
+            self.pb = self.ui.nested_progress_bar()
+            self._supplied_pb = False
+        else:
+            self.pb = pb
+            self._supplied_pb = True
         self.pb.show_pct = False
         self.pb.show_spinner = False
-        self.pb.show_eta = False, 
+        self.pb.show_eta = False,
         self.pb.show_count = False
         self.pb.show_bar = False
 
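
The new pb parameter lets callers hand TextTestResult an existing progress bar, with _supplied_pb recording ownership; finished() (changed further down) only tears down a bar the result created itself. A hedged usage sketch — the stream, counts, and ui_factory wiring are placeholders, not taken from this diff:

    import sys

    # Caller-owned progress bar: the result must not tear it down.
    pb = ui_factory.nested_progress_bar()    # assumed: bzrlib's ui factory
    try:
        result = TextTestResult(sys.stdout, descriptions=0, verbosity=1,
                                num_tests=42, pb=pb)
        # ... run the suite against result ...
    finally:
        pb.finished()    # the owner cleans up; result.finished() skips it
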
@@ -322,8 +347,12 @@
             a += ', %d errors' % self.error_count
         if self.failure_count:
             a += ', %d failed' % self.failure_count
+        if self.known_failure_count:
+            a += ', %d known failures' % self.known_failure_count
         if self.skip_count:
             a += ', %d skipped' % self.skip_count
+        if self.unsupported:
+            a += ', %d missing features' % len(self.unsupported)
         a += ']'
         return a
 
@@ -342,19 +371,21 @@
             return self._shortened_test_description(test)
 
     def report_error(self, test, err):
-        self.error_count += 1
         self.pb.note('ERROR: %s\n    %s\n', 
             self._test_description(test),
             err[1],
             )
 
     def report_failure(self, test, err):
-        self.failure_count += 1
         self.pb.note('FAIL: %s\n    %s\n', 
             self._test_description(test),
             err[1],
             )
 
+    def report_known_failure(self, test, err):
+        self.pb.note('XFAIL: %s\n%s\n',
+            self._test_description(test), err[1])
+
     def report_skip(self, test, skip_excinfo):
         self.skip_count += 1
         if False:
@@ -371,11 +402,15 @@
                 # progress bar...
                 self.pb.note('SKIP: %s', skip_excinfo[1])
 
+    def report_unsupported(self, test, feature):
+        """test cannot be run because feature is missing."""
+
     def report_cleaning_up(self):
         self.pb.update('cleaning up...')
 
     def finished(self):
-        self.pb.finished()
+        if not self._supplied_pb:
+            self.pb.finished()
 
 
 class VerboseTestResult(ExtendedTestResult):
@@ -414,22 +449,27 @@
         return '%s%s' % (indent, err[1])
 
     def report_error(self, test, err):
-        self.error_count += 1
         self.stream.writeln('ERROR %s\n%s'
                 % (self._testTimeString(),
                    self._error_summary(err)))
 
     def report_failure(self, test, err):
-        self.failure_count += 1
         self.stream.writeln(' FAIL %s\n%s'
                 % (self._testTimeString(),
                    self._error_summary(err)))
 
+    def report_known_failure(self, test, err):
+        self.stream.writeln('XFAIL %s\n%s'
+                % (self._testTimeString(),
+                   self._error_summary(err)))
+
     def report_success(self, test):
         self.stream.writeln('   OK %s' % self._testTimeString())
         for bench_called, stats in getattr(test, '_benchcalls', []):
             self.stream.writeln('LSProf output for %s(%s, %s)' % bench_called)
             stats.pprint(file=self.stream)
+        # flush the stream so that we get smooth output. This verbose mode is
+        # used to show the output in PQM.
         self.stream.flush()
 
     def report_skip(self, test, skip_excinfo):
@@ -438,6 +478,12 @@
                 % (self._testTimeString(),
                    self._error_summary(skip_excinfo)))
 
+    def report_unsupported(self, test, feature):
+        """test cannot be run because feature is missing."""
+        self.stream.writeln("NODEP %s\n    The feature '%s' is not available."
+                %(self._testTimeString(), feature))
+
+
 
 class TextTestRunner(object):
     stop_on_failure = False
@@ -486,13 +532,25 @@
             if errored:
                 if failed: self.stream.write(", ")
                 self.stream.write("errors=%d" % errored)
+            if result.known_failure_count:
+                if failed or errored: self.stream.write(", ")
+                self.stream.write("known_failure_count=%d" %
+                    result.known_failure_count)
             self.stream.writeln(")")
         else:
-            self.stream.writeln("OK")
+            if result.known_failure_count:
+                self.stream.writeln("OK (known_failures=%d)" %
+                    result.known_failure_count)
+            else:
+                self.stream.writeln("OK")
         if result.skip_count > 0:
             skipped = result.skip_count
             self.stream.writeln('%d test%s skipped' %
                                 (skipped, skipped != 1 and "s" or ""))
+        if result.unsupported:
+            for feature, count in sorted(result.unsupported.items()):
+                self.stream.writeln("Missing feature '%s' skipped %d tests." %
+                    (feature, count))
         result.report_cleaning_up()
         # This is still a little bogus, 
         # but only a little. Folk not using our testrunner will
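
Taken together, the runner summary now distinguishes the new outcome classes: a failing run appends known_failure_count=N inside the FAILED(...) tally, a fully passing run prints "OK (known_failures=N)" instead of a bare "OK" when expected failures were recorded, and each missing feature is listed on its own "Missing feature '...' skipped N tests." line before cleanup.
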
@@ -545,6 +603,23 @@
     """Indicates that a test was intentionally skipped, rather than failing."""
 
 
+class KnownFailure(AssertionError):
+    """Indicates that a test failed in a precisely expected manner.
+
+    Such failures dont block the whole test suite from passing because they are
+    indicators of partially completed code or of future work. We have an
+    explicit error for them so that we can ensure that they are always visible:
+    KnownFailures are always shown in the output of bzr selftest.
+    """
+
+
+class UnavailableFeature(Exception):
+    """A feature required for this test was not available.
+
+    The feature should be used to construct the exception.
+    """
+
+
 class CommandFailed(Exception):
     pass
 
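
A hedged sketch of how a test might use the new KnownFailure class — the failing call and wording are illustrative, not from this diff:

    class TestPendingBug(TestCase):
        def test_quirk_still_present(self):
            try:
                self.assertEqual('expected', quirky_operation())  # hypothetical call
            except AssertionError:
                # Expected for now: shows as XFAIL but never blocks the suite.
                raise KnownFailure('partially completed code; tracked as future work')
            else:
                self.fail('quirk fixed; remove this KnownFailure guard')
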
@@ -970,6 +1045,23 @@
     def _restoreHooks(self):
         bzrlib.branch.Branch.hooks = self._preserved_hooks
 
+    def knownFailure(self, reason):
+        """This test has failed for some known reason."""
+        raise KnownFailure(reason)
+
+    def run(self, result=None):
+        if result is None: result = self.defaultTestResult()
+        for feature in getattr(self, '_test_needs_features', []):
+            if not feature.available():
+                result.startTest(self)
+                if getattr(result, 'addNotSupported', None):
+                    result.addNotSupported(self, feature)
+                else:
+                    result.addSuccess(self)
+                result.stopTest(self)
+                return
+        return unittest.TestCase.run(self, result)
+
     def tearDown(self):
         self._runCleanups()
         unittest.TestCase.tearDown(self)
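
The overridden run() consults a class-level _test_needs_features list before executing anything: if a feature is missing, the test is routed to addNotSupported when the result understands it (older result classes just see a success) and the test body never runs. A minimal sketch, where the feature instance is assumed, not from this diff:

    class TestStraceIntegration(TestCase):
        # Assumed feature object; anything with an available() method fits.
        _test_needs_features = [StraceFeature]

        def test_trace_one_call(self):
            # Only reached when StraceFeature.available() returned True.
            pass
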
@@ -1045,6 +1137,14 @@
         """Shortcut that splits cmd into words, runs, and returns stdout"""
         return self.run_bzr_captured(cmd.split(), retcode=retcode)[0]
 
+    def requireFeature(self, feature):
+        """This test requires a specific feature is available.
+
+        :raises UnavailableFeature: When feature is not available.
+        """
+        if not feature.available():
+            raise UnavailableFeature(feature)
+
     def run_bzr_captured(self, argv, retcode=0, encoding=None, stdin=None,
                          working_dir=None):
         """Invoke bzr and return (stdout, stderr).
@@ -2045,6 +2145,7 @@
                    'bzrlib.tests.test_ssh_transport',
                    'bzrlib.tests.test_status',
                    'bzrlib.tests.test_store',
+                   'bzrlib.tests.test_strace',
                    'bzrlib.tests.test_subsume',
                    'bzrlib.tests.test_symbol_versioning',
                    'bzrlib.tests.test_tag',
@@ -2136,3 +2237,31 @@
             if not quiet:
                 print 'delete directory:', i
             shutil.rmtree(i)
+
+
+class Feature(object):
+    """An operating system Feature."""
+
+    def __init__(self):
+        self._available = None
+
+    def available(self):
+        """Is the feature available?
+
+        :return: True if the feature is available.
+        """
+        if self._available is None:
+            self._available = self._probe()
+        return self._available
+
+    def _probe(self):
+        """Implement this method in concrete features.
+
+        :return: True if the feature is available.
+        """
+        raise NotImplementedError
+
+    def __str__(self):
+        if getattr(self, 'feature_name', None):
+            return self.feature_name()
+        return self.__class__.__name__
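
Concrete features subclass Feature and implement _probe(); available() caches the result in _available, so the probe runs at most once per instance. A hedged sketch of a concrete feature written against this API — the class itself is illustrative, not part of this diff:

    import os

    class SymlinkFeature(Feature):
        """Illustrative feature: does the OS expose symlinks?"""

        def _probe(self):
            # Called once; Feature.available() memoizes the answer.
            return getattr(os, 'symlink', None) is not None

        def feature_name(self):
            return 'symlinks'

    # str(SymlinkFeature()) == 'symlinks'; repeated available() calls reuse _probe().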