~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to bzrlib/tests/__init__.py

(jameinel) Handle [:digit:] globbing tests better for python2.7 (Martin [gz])

Show diffs side-by-side

added

removed

Lines of Context:
141
141
SUBUNIT_SEEK_CUR = 1
142
142
 
143
143
 
144
 
class ExtendedTestResult(unittest._TextTestResult):
 
144
class ExtendedTestResult(testtools.TextTestResult):
145
145
    """Accepts, reports and accumulates the results of running tests.
146
146
 
147
147
    Compared to the unittest version this class adds support for
168
168
        :param bench_history: Optionally, a writable file object to accumulate
169
169
            benchmark results.
170
170
        """
171
 
        unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
 
171
        testtools.TextTestResult.__init__(self, stream)
172
172
        if bench_history is not None:
173
173
            from bzrlib.version import _get_bzr_source_tree
174
174
            src_tree = _get_bzr_source_tree()
201
201
        actionTaken = "Ran"
202
202
        stopTime = time.time()
203
203
        timeTaken = stopTime - self.startTime
204
 
        self.printErrors()
205
 
        self.stream.writeln(self.separator2)
206
 
        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
 
204
        # GZ 2010-07-19: Seems testtools has no printErrors method, and though
 
205
        #                the parent class method is similar have to duplicate
 
206
        self._show_list('ERROR', self.errors)
 
207
        self._show_list('FAIL', self.failures)
 
208
        self.stream.write(self.sep2)
 
209
        self.stream.write("%s %d test%s in %.3fs\n\n" % (actionTaken,
207
210
                            run, run != 1 and "s" or "", timeTaken))
208
 
        self.stream.writeln()
209
211
        if not self.wasSuccessful():
210
212
            self.stream.write("FAILED (")
211
213
            failed, errored = map(len, (self.failures, self.errors))
218
220
                if failed or errored: self.stream.write(", ")
219
221
                self.stream.write("known_failure_count=%d" %
220
222
                    self.known_failure_count)
221
 
            self.stream.writeln(")")
 
223
            self.stream.write(")\n")
222
224
        else:
223
225
            if self.known_failure_count:
224
 
                self.stream.writeln("OK (known_failures=%d)" %
 
226
                self.stream.write("OK (known_failures=%d)\n" %
225
227
                    self.known_failure_count)
226
228
            else:
227
 
                self.stream.writeln("OK")
 
229
                self.stream.write("OK\n")
228
230
        if self.skip_count > 0:
229
231
            skipped = self.skip_count
230
 
            self.stream.writeln('%d test%s skipped' %
 
232
            self.stream.write('%d test%s skipped\n' %
231
233
                                (skipped, skipped != 1 and "s" or ""))
232
234
        if self.unsupported:
233
235
            for feature, count in sorted(self.unsupported.items()):
234
 
                self.stream.writeln("Missing feature '%s' skipped %d tests." %
 
236
                self.stream.write("Missing feature '%s' skipped %d tests.\n" %
235
237
                    (feature, count))
236
238
        if self._strict:
237
239
            ok = self.wasStrictlySuccessful()
279
281
        return what
280
282
 
281
283
    def startTest(self, test):
282
 
        unittest.TestResult.startTest(self, test)
 
284
        super(ExtendedTestResult, self).startTest(test)
283
285
        if self.count == 0:
284
286
            self.startTests()
285
287
        self.report_test_start(test)
323
325
        fails with an unexpected error.
324
326
        """
325
327
        self._post_mortem()
326
 
        unittest.TestResult.addError(self, test, err)
 
328
        super(ExtendedTestResult, self).addError(test, err)
327
329
        self.error_count += 1
328
330
        self.report_error(test, err)
329
331
        if self.stop_early:
337
339
        fails because e.g. an assert() method failed.
338
340
        """
339
341
        self._post_mortem()
340
 
        unittest.TestResult.addFailure(self, test, err)
 
342
        super(ExtendedTestResult, self).addFailure(test, err)
341
343
        self.failure_count += 1
342
344
        self.report_failure(test, err)
343
345
        if self.stop_early:
357
359
                    test.id()))
358
360
        self.report_success(test)
359
361
        self._cleanupLogFile(test)
360
 
        unittest.TestResult.addSuccess(self, test)
 
362
        super(ExtendedTestResult, self).addSuccess(test)
361
363
        test._log_contents = ''
362
364
 
363
365
    def addExpectedFailure(self, test, err):
551
553
        return '%s%s' % (indent, err[1])
552
554
 
553
555
    def report_error(self, test, err):
554
 
        self.stream.writeln('ERROR %s\n%s'
 
556
        self.stream.write('ERROR %s\n%s\n'
555
557
                % (self._testTimeString(test),
556
558
                   self._error_summary(err)))
557
559
 
558
560
    def report_failure(self, test, err):
559
 
        self.stream.writeln(' FAIL %s\n%s'
 
561
        self.stream.write(' FAIL %s\n%s\n'
560
562
                % (self._testTimeString(test),
561
563
                   self._error_summary(err)))
562
564
 
563
565
    def report_known_failure(self, test, err):
564
 
        self.stream.writeln('XFAIL %s\n%s'
 
566
        self.stream.write('XFAIL %s\n%s\n'
565
567
                % (self._testTimeString(test),
566
568
                   self._error_summary(err)))
567
569
 
568
570
    def report_success(self, test):
569
 
        self.stream.writeln('   OK %s' % self._testTimeString(test))
 
571
        self.stream.write('   OK %s\n' % self._testTimeString(test))
570
572
        for bench_called, stats in getattr(test, '_benchcalls', []):
571
 
            self.stream.writeln('LSProf output for %s(%s, %s)' % bench_called)
 
573
            self.stream.write('LSProf output for %s(%s, %s)\n' % bench_called)
572
574
            stats.pprint(file=self.stream)
573
575
        # flush the stream so that we get smooth output. This verbose mode is
574
576
        # used to show the output in PQM.
575
577
        self.stream.flush()
576
578
 
577
579
    def report_skip(self, test, reason):
578
 
        self.stream.writeln(' SKIP %s\n%s'
 
580
        self.stream.write(' SKIP %s\n%s\n'
579
581
                % (self._testTimeString(test), reason))
580
582
 
581
583
    def report_not_applicable(self, test, reason):
582
 
        self.stream.writeln('  N/A %s\n    %s'
 
584
        self.stream.write('  N/A %s\n    %s\n'
583
585
                % (self._testTimeString(test), reason))
584
586
 
585
587
    def report_unsupported(self, test, feature):
586
588
        """test cannot be run because feature is missing."""
587
 
        self.stream.writeln("NODEP %s\n    The feature '%s' is not available."
 
589
        self.stream.write("NODEP %s\n    The feature '%s' is not available.\n"
588
590
                %(self._testTimeString(test), feature))
589
591
 
590
592
 
619
621
            encode = codec.encode
620
622
        stream = osutils.UnicodeOrBytesToBytesWriter(encode, stream)
621
623
        stream.encoding = new_encoding
622
 
        self.stream = unittest._WritelnDecorator(stream)
 
624
        self.stream = stream
623
625
        self.descriptions = descriptions
624
626
        self.verbosity = verbosity
625
627
        self._bench_history = bench_history