~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to bzrlib/tests/__init__.py

  • Committer: John Arbash Meinel
  • Date: 2010-08-24 19:21:32 UTC
  • mto: This revision was merged to the branch mainline in revision 5390.
  • Revision ID: john@arbash-meinel.com-20100824192132-2ktt5adkbk5bk1ct
Handle test_source and extensions. Also define an 'extern' protocol, to allow
the test suite to recognize that a function returning an object of that type returns a Python object.

Show diffs side-by-side

added added

removed removed

Lines of Context:
34
34
import difflib
35
35
import doctest
36
36
import errno
 
37
import itertools
37
38
import logging
38
39
import math
39
40
import os
140
141
SUBUNIT_SEEK_CUR = 1
141
142
 
142
143
 
143
 
class ExtendedTestResult(unittest._TextTestResult):
 
144
class ExtendedTestResult(testtools.TextTestResult):
144
145
    """Accepts, reports and accumulates the results of running tests.
145
146
 
146
147
    Compared to the unittest version this class adds support for
167
168
        :param bench_history: Optionally, a writable file object to accumulate
168
169
            benchmark results.
169
170
        """
170
 
        unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
 
171
        testtools.TextTestResult.__init__(self, stream)
171
172
        if bench_history is not None:
172
173
            from bzrlib.version import _get_bzr_source_tree
173
174
            src_tree = _get_bzr_source_tree()
200
201
        actionTaken = "Ran"
201
202
        stopTime = time.time()
202
203
        timeTaken = stopTime - self.startTime
203
 
        self.printErrors()
204
 
        self.stream.writeln(self.separator2)
205
 
        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
 
204
        # GZ 2010-07-19: Seems testtools has no printErrors method, and though
 
205
        #                the parent class method is similar have to duplicate
 
206
        self._show_list('ERROR', self.errors)
 
207
        self._show_list('FAIL', self.failures)
 
208
        self.stream.write(self.sep2)
 
209
        self.stream.write("%s %d test%s in %.3fs\n\n" % (actionTaken,
206
210
                            run, run != 1 and "s" or "", timeTaken))
207
 
        self.stream.writeln()
208
211
        if not self.wasSuccessful():
209
212
            self.stream.write("FAILED (")
210
213
            failed, errored = map(len, (self.failures, self.errors))
217
220
                if failed or errored: self.stream.write(", ")
218
221
                self.stream.write("known_failure_count=%d" %
219
222
                    self.known_failure_count)
220
 
            self.stream.writeln(")")
 
223
            self.stream.write(")\n")
221
224
        else:
222
225
            if self.known_failure_count:
223
 
                self.stream.writeln("OK (known_failures=%d)" %
 
226
                self.stream.write("OK (known_failures=%d)\n" %
224
227
                    self.known_failure_count)
225
228
            else:
226
 
                self.stream.writeln("OK")
 
229
                self.stream.write("OK\n")
227
230
        if self.skip_count > 0:
228
231
            skipped = self.skip_count
229
 
            self.stream.writeln('%d test%s skipped' %
 
232
            self.stream.write('%d test%s skipped\n' %
230
233
                                (skipped, skipped != 1 and "s" or ""))
231
234
        if self.unsupported:
232
235
            for feature, count in sorted(self.unsupported.items()):
233
 
                self.stream.writeln("Missing feature '%s' skipped %d tests." %
 
236
                self.stream.write("Missing feature '%s' skipped %d tests.\n" %
234
237
                    (feature, count))
235
238
        if self._strict:
236
239
            ok = self.wasStrictlySuccessful()
274
277
 
275
278
    def _shortened_test_description(self, test):
276
279
        what = test.id()
277
 
        what = re.sub(r'^bzrlib\.(tests|benchmarks)\.', '', what)
 
280
        what = re.sub(r'^bzrlib\.tests\.', '', what)
278
281
        return what
279
282
 
280
283
    def startTest(self, test):
281
 
        unittest.TestResult.startTest(self, test)
 
284
        super(ExtendedTestResult, self).startTest(test)
282
285
        if self.count == 0:
283
286
            self.startTests()
284
287
        self.report_test_start(test)
322
325
        fails with an unexpected error.
323
326
        """
324
327
        self._post_mortem()
325
 
        unittest.TestResult.addError(self, test, err)
 
328
        super(ExtendedTestResult, self).addError(test, err)
326
329
        self.error_count += 1
327
330
        self.report_error(test, err)
328
331
        if self.stop_early:
336
339
        fails because e.g. an assert() method failed.
337
340
        """
338
341
        self._post_mortem()
339
 
        unittest.TestResult.addFailure(self, test, err)
 
342
        super(ExtendedTestResult, self).addFailure(test, err)
340
343
        self.failure_count += 1
341
344
        self.report_failure(test, err)
342
345
        if self.stop_early:
356
359
                    test.id()))
357
360
        self.report_success(test)
358
361
        self._cleanupLogFile(test)
359
 
        unittest.TestResult.addSuccess(self, test)
 
362
        super(ExtendedTestResult, self).addSuccess(test)
360
363
        test._log_contents = ''
361
364
 
362
365
    def addExpectedFailure(self, test, err):
550
553
        return '%s%s' % (indent, err[1])
551
554
 
552
555
    def report_error(self, test, err):
553
 
        self.stream.writeln('ERROR %s\n%s'
 
556
        self.stream.write('ERROR %s\n%s\n'
554
557
                % (self._testTimeString(test),
555
558
                   self._error_summary(err)))
556
559
 
557
560
    def report_failure(self, test, err):
558
 
        self.stream.writeln(' FAIL %s\n%s'
 
561
        self.stream.write(' FAIL %s\n%s\n'
559
562
                % (self._testTimeString(test),
560
563
                   self._error_summary(err)))
561
564
 
562
565
    def report_known_failure(self, test, err):
563
 
        self.stream.writeln('XFAIL %s\n%s'
 
566
        self.stream.write('XFAIL %s\n%s\n'
564
567
                % (self._testTimeString(test),
565
568
                   self._error_summary(err)))
566
569
 
567
570
    def report_success(self, test):
568
 
        self.stream.writeln('   OK %s' % self._testTimeString(test))
 
571
        self.stream.write('   OK %s\n' % self._testTimeString(test))
569
572
        for bench_called, stats in getattr(test, '_benchcalls', []):
570
 
            self.stream.writeln('LSProf output for %s(%s, %s)' % bench_called)
 
573
            self.stream.write('LSProf output for %s(%s, %s)\n' % bench_called)
571
574
            stats.pprint(file=self.stream)
572
575
        # flush the stream so that we get smooth output. This verbose mode is
573
576
        # used to show the output in PQM.
574
577
        self.stream.flush()
575
578
 
576
579
    def report_skip(self, test, reason):
577
 
        self.stream.writeln(' SKIP %s\n%s'
 
580
        self.stream.write(' SKIP %s\n%s\n'
578
581
                % (self._testTimeString(test), reason))
579
582
 
580
583
    def report_not_applicable(self, test, reason):
581
 
        self.stream.writeln('  N/A %s\n    %s'
 
584
        self.stream.write('  N/A %s\n    %s\n'
582
585
                % (self._testTimeString(test), reason))
583
586
 
584
587
    def report_unsupported(self, test, feature):
585
588
        """test cannot be run because feature is missing."""
586
 
        self.stream.writeln("NODEP %s\n    The feature '%s' is not available."
 
589
        self.stream.write("NODEP %s\n    The feature '%s' is not available.\n"
587
590
                %(self._testTimeString(test), feature))
588
591
 
589
592
 
618
621
            encode = codec.encode
619
622
        stream = osutils.UnicodeOrBytesToBytesWriter(encode, stream)
620
623
        stream.encoding = new_encoding
621
 
        self.stream = unittest._WritelnDecorator(stream)
 
624
        self.stream = stream
622
625
        self.descriptions = descriptions
623
626
        self.verbosity = verbosity
624
627
        self._bench_history = bench_history
2009
2012
 
2010
2013
    def get_bzr_path(self):
2011
2014
        """Return the path of the 'bzr' executable for this test suite."""
2012
 
        bzr_path = self.get_source_path()+'/bzr'
 
2015
        bzr_path = os.path.join(self.get_source_path(), "bzr")
2013
2016
        if not os.path.isfile(bzr_path):
2014
2017
            # We are probably installed. Assume sys.argv is the right file
2015
2018
            bzr_path = sys.argv[0]
3196
3199
 
3197
3200
def partition_tests(suite, count):
3198
3201
    """Partition suite into count lists of tests."""
3199
 
    result = []
3200
 
    tests = list(iter_suite_tests(suite))
3201
 
    tests_per_process = int(math.ceil(float(len(tests)) / count))
3202
 
    for block in range(count):
3203
 
        low_test = block * tests_per_process
3204
 
        high_test = low_test + tests_per_process
3205
 
        process_tests = tests[low_test:high_test]
3206
 
        result.append(process_tests)
3207
 
    return result
 
3202
    # This just assigns tests in a round-robin fashion.  On one hand this
 
3203
    # splits up blocks of related tests that might run faster if they shared
 
3204
    # resources, but on the other it avoids assigning blocks of slow tests to
 
3205
    # just one partition.  So the slowest partition shouldn't be much slower
 
3206
    # than the fastest.
 
3207
    partitions = [list() for i in range(count)]
 
3208
    tests = iter_suite_tests(suite)
 
3209
    for partition, test in itertools.izip(itertools.cycle(partitions), tests):
 
3210
        partition.append(test)
 
3211
    return partitions
3208
3212
 
3209
3213
 
3210
3214
def workaround_zealous_crypto_random():
3643
3647
        'bzrlib.doc',
3644
3648
        'bzrlib.tests.blackbox',
3645
3649
        'bzrlib.tests.commands',
 
3650
        'bzrlib.tests.doc_generate',
3646
3651
        'bzrlib.tests.per_branch',
3647
3652
        'bzrlib.tests.per_bzrdir',
3648
3653
        'bzrlib.tests.per_bzrdir_colo',
3664
3669
        'bzrlib.tests.per_workingtree',
3665
3670
        'bzrlib.tests.test__annotator',
3666
3671
        'bzrlib.tests.test__bencode',
 
3672
        'bzrlib.tests.test__btree_serializer',
3667
3673
        'bzrlib.tests.test__chk_map',
3668
3674
        'bzrlib.tests.test__dirstate_helpers',
3669
3675
        'bzrlib.tests.test__groupcompress',
3813
3819
        'bzrlib.tests.test_transport_log',
3814
3820
        'bzrlib.tests.test_tree',
3815
3821
        'bzrlib.tests.test_treebuilder',
 
3822
        'bzrlib.tests.test_treeshape',
3816
3823
        'bzrlib.tests.test_tsort',
3817
3824
        'bzrlib.tests.test_tuned_gzip',
3818
3825
        'bzrlib.tests.test_ui',
3822
3829
        'bzrlib.tests.test_urlutils',
3823
3830
        'bzrlib.tests.test_version',
3824
3831
        'bzrlib.tests.test_version_info',
 
3832
        'bzrlib.tests.test_versionedfile',
3825
3833
        'bzrlib.tests.test_weave',
3826
3834
        'bzrlib.tests.test_whitebox',
3827
3835
        'bzrlib.tests.test_win32utils',