1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
|
# Copyright (C) 2005 by Canonical Ltd
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Enhanced layer on unittest.
This does several things:
* nicer reporting as tests run
* test code can log messages into a buffer that is recorded to disk
and displayed if the test fails
* tests can be run in a separate directory, which is useful for code that
wants to create files
* utilities to run external commands and check their return code
and/or output
Test cases should normally subclass TestBase. The test runner should
call run_suite().
This is meant to become independent of bzr, though that's not quite
true yet.
"""
from unittest import TestResult, TestCase
# XXX: Don't need this anymore now we depend on python2.4
def _need_subprocess():
sys.stderr.write("sorry, this test suite requires the subprocess module\n"
"this is shipped with python2.4 and available separately for 2.3\n")
class CommandFailed(Exception):
    """Raised when an external command exits with an unexpected return code."""
class TestSkipped(Exception):
    """Raised to mark a test as deliberately skipped, rather than failed."""
    # XXX: Not used yet
class TestBase(TestCase):
    """Base class for bzr test cases.

    Just defines some useful helper functions; doesn't actually test
    anything.
    """

    # TODO: Special methods to invoke bzr, so that we can run it
    # through a specified Python interpreter

    OVERRIDE_PYTHON = None      # to run with alternative python 'python'
    BZRPATH = 'bzr'

    # Accumulated log text for the running test; shown if the test fails.
    _log_buf = ""

    def setUp(self):
        super(TestBase, self).setUp()
        self.log("%s setup" % self.id())

    def tearDown(self):
        super(TestBase, self).tearDown()
        self.log("%s teardown" % self.id())
        self.log('')

    def formcmd(self, cmd):
        """Normalize a command into an argv list and log it.

        A plain string is split on whitespace.  A leading 'bzr' is
        replaced by BZRPATH, with OVERRIDE_PYTHON (if set) prepended
        as the interpreter.
        """
        if isinstance(cmd, basestring):
            cmd = cmd.split()
        if cmd[0] == 'bzr':
            cmd[0] = self.BZRPATH
            if self.OVERRIDE_PYTHON:
                cmd.insert(0, self.OVERRIDE_PYTHON)
        self.log('$ %r' % cmd)
        return cmd

    def runcmd(self, cmd, retcode=0):
        """Run one command and check the return code.

        If a single string is passed, it is split into words.
        For commands that are not simple space-separated words, please
        pass a list instead.

        Raises CommandFailed if the actual return code differs from
        retcode.
        """
        try:
            from subprocess import call
        except ImportError:
            _need_subprocess()
            raise
        # formcmd already logs the command, so it is not logged again here
        # (the original logged it twice).
        cmd = self.formcmd(cmd)
        actual_retcode = call(cmd, stdout=self.TEST_LOG, stderr=self.TEST_LOG)
        if retcode != actual_retcode:
            raise CommandFailed("test failed: %r returned %d, expected %d"
                                % (cmd, actual_retcode, retcode))

    def backtick(self, cmd, retcode=0):
        """Run a command and return its stdout as a string.

        Raises CommandFailed if the actual return code differs from
        retcode.
        """
        try:
            from subprocess import Popen, PIPE
        except ImportError:
            _need_subprocess()
            raise
        cmd = self.formcmd(cmd)
        child = Popen(cmd, stdout=PIPE, stderr=self.TEST_LOG)
        outd, errd = child.communicate()
        self.log(outd)
        actual_retcode = child.wait()
        # normalize line endings so expected output matches on windows
        outd = outd.replace('\r', '')
        if retcode != actual_retcode:
            raise CommandFailed("test failed: %r returned %d, expected %d"
                                % (cmd, actual_retcode, retcode))
        return outd

    def build_tree(self, shape):
        """Build a test tree according to a pattern.

        shape is a sequence of file specifications.  If the final
        character is '/', a directory is created.

        This doesn't add anything to a branch.
        """
        # XXX: It's OK to just create them using forward slashes on windows?
        import os
        for name in shape:
            assert isinstance(name, basestring)
            if name[-1] == '/':
                os.mkdir(name[:-1])
            else:
                f = file(name, 'wt')
                print >>f, "contents of", name
                f.close()

    def log(self, msg):
        """Log a message to the in-memory buffer and the progress file."""
        # XXX: The problem with this is that code that writes straight
        # to the log file won't be shown when we display the log
        # buffer; would be better to not have the in-memory buffer and
        # instead just a log file per test, which is read in and
        # displayed if the test fails.  That seems to imply one log
        # per test case, not globally.  OK?
        self._log_buf = self._log_buf + str(msg) + '\n'
        print >>self.TEST_LOG, msg

    def check_inventory_shape(self, inv, shape):
        """Compare an inventory to a list of expected names.

        Fail if they are not precisely equal.
        """
        extras = []
        shape = list(shape)             # copy
        for path, ie in inv.entries():
            name = path.replace('\\', '/')
            if ie.kind == 'dir':
                name = name + '/'
            if name in shape:
                shape.remove(name)
            else:
                extras.append(name)
        if shape:
            self.fail("expected paths not found in inventory: %r" % shape)
        if extras:
            self.fail("unexpected paths found in inventory: %r" % extras)

    def check_file_contents(self, filename, expect):
        """Fail unless the named file contains exactly expect."""
        self.log("check contents of file %s" % filename)
        f = file(filename, 'r')
        try:
            contents = f.read()
        finally:
            f.close()
        if contents != expect:
            self.log("expected: %r" % expect)
            self.log("actually: %r" % contents)
            # bug fix: the format string previously had no % argument,
            # so the failure message contained a literal '%s'.
            self.fail("contents of %s not as expected" % filename)
class InTempDir(TestBase):
    """Base class for tests run in a temporary branch."""

    def setUp(self):
        import os
        # NOTE(review): TestBase.setUp is not invoked here, so the usual
        # "<id> setup" log line is skipped for these tests — confirm
        # that is intentional.
        where = os.path.join(self.TEST_ROOT, self.__class__.__name__)
        os.mkdir(where)
        os.chdir(where)
        self.test_dir = where

    def tearDown(self):
        import os
        os.chdir(self.TEST_ROOT)
class _MyResult(TestResult):
    """TestResult that reports progress to a console stream.

    No special behaviour for now beyond the chosen display style.
    """

    def __init__(self, out, style):
        TestResult.__init__(self)
        assert style in ('none', 'progress', 'verbose')
        self.out = out
        self.style = style

    def startTest(self, test):
        # TODO: Maybe show test.shortDescription somewhere?
        name = test.id()
        # python2.3 has the bad habit of just "runit" for doctests
        if name == 'runit':
            name = test.shortDescription()
        if self.style == 'verbose':
            print >>self.out, '%-60.60s' % name,
            self.out.flush()
        elif self.style == 'progress':
            self.out.write('~')
            self.out.flush()
        TestResult.startTest(self, test)

    def stopTest(self, test):
        TestResult.stopTest(self, test)

    def addError(self, test, err):
        if self.style == 'verbose':
            print >>self.out, 'ERROR'
        TestResult.addError(self, test, err)
        _show_test_failure('error', test, err, self.out)

    def addFailure(self, test, err):
        if self.style == 'verbose':
            print >>self.out, 'FAILURE'
        TestResult.addFailure(self, test, err)
        _show_test_failure('failure', test, err, self.out)

    def addSuccess(self, test):
        if self.style == 'verbose':
            print >>self.out, 'OK'
        TestResult.addSuccess(self, test)
def run_suite(suite, name='test', verbose=False):
    """Run a test suite with logging and console reporting.

    Sets up the per-run log file and scratch directory, redirects
    stdout/stderr into the log while the tests run, then prints a
    summary of the results.

    Returns True if all tests passed.
    """
    # only sys is actually used here; the original also imported
    # os, shutil and time without using them
    import sys

    _setup_test_log(name)
    _setup_test_dir(name)
    print

    if verbose:
        style = 'verbose'
    else:
        style = 'progress'

    # save stdout & stderr so there's no leakage from code-under-test
    real_stdout = sys.stdout
    real_stderr = sys.stderr
    # construct the result before redirecting the streams, so 'result'
    # is always bound when the summary below runs (previously a failure
    # constructing it would lead to a NameError after the finally block)
    result = _MyResult(real_stdout, style)
    sys.stdout = sys.stderr = TestBase.TEST_LOG
    try:
        suite.run(result)
    finally:
        sys.stdout = real_stdout
        sys.stderr = real_stderr

    _show_results(result)
    return result.wasSuccessful()
def _setup_test_log(name):
import time
import os
log_filename = os.path.abspath(name + '.log')
TestBase.TEST_LOG = open(log_filename, 'wt', buffering=1) # line buffered
print >>TestBase.TEST_LOG, "tests run at " + time.ctime()
print '%-30s %s' % ('test log', log_filename)
def _setup_test_dir(name):
import os
import shutil
TestBase.ORIG_DIR = os.getcwdu()
TestBase.TEST_ROOT = os.path.abspath(name + '.tmp')
print '%-30s %s' % ('running tests in', TestBase.TEST_ROOT)
if os.path.exists(TestBase.TEST_ROOT):
shutil.rmtree(TestBase.TEST_ROOT)
os.mkdir(TestBase.TEST_ROOT)
os.chdir(TestBase.TEST_ROOT)
# make a fake bzr directory there to prevent any tests propagating
# up onto the source directory's real branch
os.mkdir(os.path.join(TestBase.TEST_ROOT, '.bzr'))
def _show_results(result):
print
print '%4d tests run' % result.testsRun
print '%4d errors' % len(result.errors)
print '%4d failures' % len(result.failures)
def _show_test_failure(kind, case, exc_info, out):
    """Write the traceback and captured log of a failed test to out.

    kind is accepted for symmetry with the callers ('error' or
    'failure') but is not currently displayed.
    """
    from traceback import print_exception

    print >>out
    print >>out, '-' * 60
    print >>out, case

    desc = case.shortDescription()
    if desc:
        print >>out, ' (%s)' % desc

    exc_type, exc_value, exc_tb = exc_info
    print_exception(exc_type, exc_value, exc_tb, None, out)

    if isinstance(case, TestBase):
        print >>out
        print >>out, 'log from this test:'
        print >>out, case._log_buf

    print >>out, '-' * 60
|