~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to bzrlib/tests/__init__.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2007-08-21 04:47:13 UTC
  • mfrom: (2695.1.5 test-cleanup)
  • Revision ID: pqm@pqm.ubuntu.com-20070821044713-ttnupbvhlsbwh1he
(mbp) fix problem with extractBenchmarkTime; better assertSubset

Show diffs side-by-side

added added

removed removed

Lines of Context:
158
158
class ExtendedTestResult(unittest._TextTestResult):
159
159
    """Accepts, reports and accumulates the results of running tests.
160
160
 
161
 
    Compared to this unittest version this class adds support for profiling,
162
 
    benchmarking, stopping as soon as a test fails,  and skipping tests.
163
 
    There are further-specialized subclasses for different types of display.
 
161
    Compared to this unittest version this class adds support for
 
162
    profiling, benchmarking, stopping as soon as a test fails,  and
 
163
    skipping tests.  There are further-specialized subclasses for
 
164
    different types of display.
 
165
 
 
166
    When a test finishes, in whatever way, it calls one of the addSuccess,
 
167
    addFailure or addError classes.  These in turn may redirect to a more
 
168
    specific case for the special test results supported by our extended
 
169
    tests.
 
170
 
 
171
    Note that just one of these objects is fed the results from many tests.
164
172
    """
165
173
 
166
174
    stop_early = False
200
208
        self.count = 0
201
209
        self._overall_start_time = time.time()
202
210
    
203
 
    def extractBenchmarkTime(self, testCase):
 
211
    def _extractBenchmarkTime(self, testCase):
204
212
        """Add a benchmark time for the current test case."""
205
 
        self._benchmarkTime = getattr(testCase, "_benchtime", None)
 
213
        return getattr(testCase, "_benchtime", None)
206
214
    
207
215
    def _elapsedTestTimeString(self):
208
216
        """Return a time string for the overall time the current test has taken."""
209
217
        return self._formatTime(time.time() - self._start_time)
210
218
 
211
 
    def _testTimeString(self):
212
 
        if self._benchmarkTime is not None:
 
219
    def _testTimeString(self, testCase):
 
220
        benchmark_time = self._extractBenchmarkTime(testCase)
 
221
        if benchmark_time is not None:
213
222
            return "%s/%s" % (
214
 
                self._formatTime(self._benchmarkTime),
 
223
                self._formatTime(benchmark_time),
215
224
                self._elapsedTestTimeString())
216
225
        else:
217
226
            return "           %s" % self._elapsedTestTimeString()
245
254
            setKeepLogfile()
246
255
 
247
256
    def addError(self, test, err):
248
 
        self.extractBenchmarkTime(test)
249
 
        self._cleanupLogFile(test)
 
257
        """Tell result that test finished with an error.
 
258
 
 
259
        Called from the TestCase run() method when the test
 
260
        fails with an unexpected error.
 
261
        """
 
262
        self._testConcluded(test)
250
263
        if isinstance(err[1], TestSkipped):
251
 
            return self.addSkipped(test, err)
 
264
            return self._addSkipped(test, err)
252
265
        elif isinstance(err[1], UnavailableFeature):
253
266
            return self.addNotSupported(test, err[1].args[0])
254
 
        unittest.TestResult.addError(self, test, err)
255
 
        self.error_count += 1
256
 
        self.report_error(test, err)
257
 
        if self.stop_early:
258
 
            self.stop()
 
267
        else:
 
268
            unittest.TestResult.addError(self, test, err)
 
269
            self.error_count += 1
 
270
            self.report_error(test, err)
 
271
            if self.stop_early:
 
272
                self.stop()
259
273
 
260
274
    def addFailure(self, test, err):
261
 
        self._cleanupLogFile(test)
262
 
        self.extractBenchmarkTime(test)
 
275
        """Tell result that test failed.
 
276
 
 
277
        Called from the TestCase run() method when the test
 
278
        fails because e.g. an assert() method failed.
 
279
        """
 
280
        self._testConcluded(test)
263
281
        if isinstance(err[1], KnownFailure):
264
 
            return self.addKnownFailure(test, err)
265
 
        unittest.TestResult.addFailure(self, test, err)
266
 
        self.failure_count += 1
267
 
        self.report_failure(test, err)
268
 
        if self.stop_early:
269
 
            self.stop()
270
 
 
271
 
    def addKnownFailure(self, test, err):
 
282
            return self._addKnownFailure(test, err)
 
283
        else:
 
284
            unittest.TestResult.addFailure(self, test, err)
 
285
            self.failure_count += 1
 
286
            self.report_failure(test, err)
 
287
            if self.stop_early:
 
288
                self.stop()
 
289
 
 
290
    def addSuccess(self, test):
 
291
        """Tell result that test completed successfully.
 
292
 
 
293
        Called from the TestCase run()
 
294
        """
 
295
        self._testConcluded(test)
 
296
        if self._bench_history is not None:
 
297
            benchmark_time = self._extractBenchmarkTime(test)
 
298
            if benchmark_time is not None:
 
299
                self._bench_history.write("%s %s\n" % (
 
300
                    self._formatTime(benchmark_time),
 
301
                    test.id()))
 
302
        self.report_success(test)
 
303
        unittest.TestResult.addSuccess(self, test)
 
304
 
 
305
    def _testConcluded(self, test):
 
306
        """Common code when a test has finished.
 
307
 
 
308
        Called regardless of whether it succeeded, failed, etc.
 
309
        """
 
310
        self._cleanupLogFile(test)
 
311
 
 
312
    def _addKnownFailure(self, test, err):
272
313
        self.known_failure_count += 1
273
314
        self.report_known_failure(test, err)
274
315
 
275
316
    def addNotSupported(self, test, feature):
 
317
        """The test will not be run because of a missing feature.
 
318
        """
 
319
        # this can be called in two different ways: it may be that the
 
320
        # test started running, and then raised (through addError) 
 
321
        # UnavailableFeature.  Alternatively this method can be called
 
322
        # while probing for features before running the tests; in that
 
323
        # case we will see startTest and stopTest, but the test will never
 
324
        # actually run.
276
325
        self.unsupported.setdefault(str(feature), 0)
277
326
        self.unsupported[str(feature)] += 1
278
327
        self.report_unsupported(test, feature)
279
328
 
280
 
    def addSuccess(self, test):
281
 
        self.extractBenchmarkTime(test)
282
 
        if self._bench_history is not None:
283
 
            if self._benchmarkTime is not None:
284
 
                self._bench_history.write("%s %s\n" % (
285
 
                    self._formatTime(self._benchmarkTime),
286
 
                    test.id()))
287
 
        self.report_success(test)
288
 
        unittest.TestResult.addSuccess(self, test)
289
 
 
290
 
    def addSkipped(self, test, skip_excinfo):
 
329
    def _addSkipped(self, test, skip_excinfo):
291
330
        self.report_skip(test, skip_excinfo)
292
 
        # seems best to treat this as success from point-of-view of unittest
293
 
        # -- it actually does nothing so it barely matters :)
 
331
        # seems best to treat this as success from point-of-view of
 
332
        # unittest -- it actually does nothing so it barely matters :)
294
333
        try:
295
334
            test.tearDown()
296
335
        except KeyboardInterrupt:
458
497
 
459
498
    def report_error(self, test, err):
460
499
        self.stream.writeln('ERROR %s\n%s'
461
 
                % (self._testTimeString(),
 
500
                % (self._testTimeString(test),
462
501
                   self._error_summary(err)))
463
502
 
464
503
    def report_failure(self, test, err):
465
504
        self.stream.writeln(' FAIL %s\n%s'
466
 
                % (self._testTimeString(),
 
505
                % (self._testTimeString(test),
467
506
                   self._error_summary(err)))
468
507
 
469
508
    def report_known_failure(self, test, err):
470
509
        self.stream.writeln('XFAIL %s\n%s'
471
 
                % (self._testTimeString(),
 
510
                % (self._testTimeString(test),
472
511
                   self._error_summary(err)))
473
512
 
474
513
    def report_success(self, test):
475
 
        self.stream.writeln('   OK %s' % self._testTimeString())
 
514
        self.stream.writeln('   OK %s' % self._testTimeString(test))
476
515
        for bench_called, stats in getattr(test, '_benchcalls', []):
477
516
            self.stream.writeln('LSProf output for %s(%s, %s)' % bench_called)
478
517
            stats.pprint(file=self.stream)
483
522
    def report_skip(self, test, skip_excinfo):
484
523
        self.skip_count += 1
485
524
        self.stream.writeln(' SKIP %s\n%s'
486
 
                % (self._testTimeString(),
 
525
                % (self._testTimeString(test),
487
526
                   self._error_summary(skip_excinfo)))
488
527
 
489
528
    def report_unsupported(self, test, feature):
490
529
        """test cannot be run because feature is missing."""
491
530
        self.stream.writeln("NODEP %s\n    The feature '%s' is not available."
492
 
                %(self._testTimeString(), feature))
493
 
                  
 
531
                %(self._testTimeString(test), feature))
494
532
 
495
533
 
496
534
class TextTestRunner(object):
865
903
        """Assert that every entry in sublist is present in superlist."""
866
904
        missing = set(sublist) - set(superlist)
867
905
        if len(missing) > 0:
868
 
            raise AssertionError("value(s) %r not present in container %r" % 
 
906
            raise AssertionError("value(s) %r not present in container %r" %
869
907
                                 (missing, superlist))
870
908
 
871
909
    def assertListRaises(self, excClass, func, *args, **kwargs):