Merge lp:~lifeless/bzr/test-speed into lp:~bzr/bzr/trunk-old
- test-speed
- Merge into trunk-old
Status: | Merged |
---|---|
Merged at revision: | not available |
Proposed branch: | lp:~lifeless/bzr/test-speed |
Merge into: | lp:~bzr/bzr/trunk-old |
Diff against target: | 374 lines |
To merge this branch: | bzr merge lp:~lifeless/bzr/test-speed |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Vincent Ladeuil | Approve | ||
Review via email: mp+10775@code.launchpad.net |
Commit message
Description of the change
Robert Collins (lifeless) wrote : | # |
Robert Collins (lifeless) wrote : | # |
=== modified file 'NEWS'
--- NEWS 2009-08-26 07:02:45 +0000
+++ NEWS 2009-08-27 03:51:02 +0000
@@ -79,6 +79,9 @@
classes changed to manage lock lifetime of the trees they open in a way
consistent with reader-exclusive locks. (Robert Collins, #305006)
+* ``bzrlib.tests`` now uses ``stopTestRun`` for its ``TestResult``
+ subclasses - the same as python's unittest module. (Robert Collins)
+
Internals
*********
=== modified file 'bzrlib/tests/__init__.py'
--- bzrlib/tests/__init__.py 2009-08-26 07:02:45 +0000
+++ bzrlib/tests/__init__.py 2009-08-27 03:51:02 +0000
@@ -175,17 +175,47 @@
- def done(self):
- # nb: called stopTestRun in the version of this that Python merged
- # upstream, according to lifeless 20090803
+ def stopTestRun(self):
+ run = self.testsRun
+ actionTaken = "Ran"
+ stopTime = time.time()
+ timeTaken = stopTime - self.startTime
+ self.printErrors()
+        self.stream.writeln(self.separator2)
+        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
+            run, run != 1 and "s" or "", timeTaken))
+        self.stream.writeln()
+        if not self.wasSuccessful():
+            self.stream.write("FAILED (")
+            failed, errored = map(len, (self.failures, self.errors))
+            if failed:
+                self.stream.write("failures=%d" % failed)
+            if errored:
+                if failed: self.stream.write(", ")
+                self.stream.write("errors=%d" % errored)
+            if self.known_failure_count:
+                if failed or errored: self.stream.write(", ")
+                self.stream.write("known_failure_count=%d" %
+                    self.known_failure_count)
+            self.stream.writeln(")")
+        else:
+            if self.known_failure_count:
+                self.stream.writeln("OK (known_failures=%d)" %
+                    self.known_failure_count)
+            else:
+                self.stream.writeln("OK")
+        if self.skip_count > 0:
+            skipped = self.skip_count
+            self.stream.writeln('%d test%s skipped' %
+                                (skipped, skipped != 1 and "s" or ""))
+        if self.unsupported:
+            for feature, count in sorted(self.unsupported.items()):
+                self.stream.writeln("Missing feature '%s' skipped %d tests." %
+                    (feature, count))
         if self._strict:
             ok = self.wasStrictlySuccessful()
         else:
             ok = self.wasSuccessful()
-        if ok:
-            self.stream.write('tests passed\n')
-        else:
-            self.stream.write('tests failed\n')
         if TestCase._first_thread_leaker_id:
@@ -383,12 +413,12 @@
else:
             raise errors.BzrError("Unknown whence %r" % whence)
- def finished(self):
- pass
-
     def report_cleaning_up(self):
         pass
+ def startTestRun(self):
+ self.startTime = time.time()
+
     def report_success(self, test):
         pass
Vincent Ladeuil (vila) wrote : | # |
This is far better than what we had.
You give us a far cleaner TestRunner, so whatever is left appears bigger now:
- the loop on result_decorators to propagate a boolean looks highly suspicious,
- the injection of BZRTransformingResult (for ConcurrentTestSuite) — can't
that be a result decorator now?
- I still don't like that self.verbosity handling here, again, can't that
be turned into a decorator ?
Given that this patch is a clear improvement, please land, the above can come later
(if you agree on them).
Preview Diff
1 | === modified file 'NEWS' |
2 | --- NEWS 2009-08-27 00:53:27 +0000 |
3 | +++ NEWS 2009-08-27 07:35:12 +0000 |
4 | @@ -90,6 +90,9 @@ |
5 | classes changed to manage lock lifetime of the trees they open in a way |
6 | consistent with reader-exclusive locks. (Robert Collins, #305006) |
7 | |
8 | +* ``bzrlib.tests`` now uses ``stopTestRun`` for its ``TestResult`` |
9 | + subclasses - the same as python's unittest module. (Robert Collins) |
10 | + |
11 | Internals |
12 | ********* |
13 | |
14 | |
15 | === modified file 'bzrlib/tests/__init__.py' |
16 | --- bzrlib/tests/__init__.py 2009-08-26 07:02:45 +0000 |
17 | +++ bzrlib/tests/__init__.py 2009-08-27 07:35:12 +0000 |
18 | @@ -175,17 +175,47 @@ |
19 | self._overall_start_time = time.time() |
20 | self._strict = strict |
21 | |
22 | - def done(self): |
23 | - # nb: called stopTestRun in the version of this that Python merged |
24 | - # upstream, according to lifeless 20090803 |
25 | + def stopTestRun(self): |
26 | + run = self.testsRun |
27 | + actionTaken = "Ran" |
28 | + stopTime = time.time() |
29 | + timeTaken = stopTime - self.startTime |
30 | + self.printErrors() |
31 | + self.stream.writeln(self.separator2) |
32 | + self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken, |
33 | + run, run != 1 and "s" or "", timeTaken)) |
34 | + self.stream.writeln() |
35 | + if not self.wasSuccessful(): |
36 | + self.stream.write("FAILED (") |
37 | + failed, errored = map(len, (self.failures, self.errors)) |
38 | + if failed: |
39 | + self.stream.write("failures=%d" % failed) |
40 | + if errored: |
41 | + if failed: self.stream.write(", ") |
42 | + self.stream.write("errors=%d" % errored) |
43 | + if self.known_failure_count: |
44 | + if failed or errored: self.stream.write(", ") |
45 | + self.stream.write("known_failure_count=%d" % |
46 | + self.known_failure_count) |
47 | + self.stream.writeln(")") |
48 | + else: |
49 | + if self.known_failure_count: |
50 | + self.stream.writeln("OK (known_failures=%d)" % |
51 | + self.known_failure_count) |
52 | + else: |
53 | + self.stream.writeln("OK") |
54 | + if self.skip_count > 0: |
55 | + skipped = self.skip_count |
56 | + self.stream.writeln('%d test%s skipped' % |
57 | + (skipped, skipped != 1 and "s" or "")) |
58 | + if self.unsupported: |
59 | + for feature, count in sorted(self.unsupported.items()): |
60 | + self.stream.writeln("Missing feature '%s' skipped %d tests." % |
61 | + (feature, count)) |
62 | if self._strict: |
63 | ok = self.wasStrictlySuccessful() |
64 | else: |
65 | ok = self.wasSuccessful() |
66 | - if ok: |
67 | - self.stream.write('tests passed\n') |
68 | - else: |
69 | - self.stream.write('tests failed\n') |
70 | if TestCase._first_thread_leaker_id: |
71 | self.stream.write( |
72 | '%s is leaking threads among %d leaking tests.\n' % ( |
73 | @@ -383,12 +413,12 @@ |
74 | else: |
75 | raise errors.BzrError("Unknown whence %r" % whence) |
76 | |
77 | - def finished(self): |
78 | - pass |
79 | - |
80 | def report_cleaning_up(self): |
81 | pass |
82 | |
83 | + def startTestRun(self): |
84 | + self.startTime = time.time() |
85 | + |
86 | def report_success(self, test): |
87 | pass |
88 | |
89 | @@ -421,15 +451,14 @@ |
90 | self.pb.update_latency = 0 |
91 | self.pb.show_transport_activity = False |
92 | |
93 | - def done(self): |
94 | + def stopTestRun(self): |
95 | # called when the tests that are going to run have run |
96 | self.pb.clear() |
97 | - super(TextTestResult, self).done() |
98 | - |
99 | - def finished(self): |
100 | self.pb.finished() |
101 | + super(TextTestResult, self).stopTestRun() |
102 | |
103 | - def report_starting(self): |
104 | + def startTestRun(self): |
105 | + super(TextTestResult, self).startTestRun() |
106 | self.pb.update('[test 0/%d] Starting' % (self.num_tests)) |
107 | |
108 | def printErrors(self): |
109 | @@ -514,7 +543,8 @@ |
110 | result = a_string |
111 | return result.ljust(final_width) |
112 | |
113 | - def report_starting(self): |
114 | + def startTestRun(self): |
115 | + super(VerboseTestResult, self).startTestRun() |
116 | self.stream.write('running %d tests...\n' % self.num_tests) |
117 | |
118 | def report_test_start(self, test): |
119 | @@ -578,7 +608,6 @@ |
120 | descriptions=0, |
121 | verbosity=1, |
122 | bench_history=None, |
123 | - list_only=False, |
124 | strict=False, |
125 | result_decorators=None, |
126 | ): |
127 | @@ -593,85 +622,43 @@ |
128 | self.descriptions = descriptions |
129 | self.verbosity = verbosity |
130 | self._bench_history = bench_history |
131 | - self.list_only = list_only |
132 | self._strict = strict |
133 | self._result_decorators = result_decorators or [] |
134 | |
135 | def run(self, test): |
136 | "Run the given test case or test suite." |
137 | - startTime = time.time() |
138 | if self.verbosity == 1: |
139 | result_class = TextTestResult |
140 | elif self.verbosity >= 2: |
141 | result_class = VerboseTestResult |
142 | - result = result_class(self.stream, |
143 | + original_result = result_class(self.stream, |
144 | self.descriptions, |
145 | self.verbosity, |
146 | bench_history=self._bench_history, |
147 | strict=self._strict, |
148 | ) |
149 | - run_result = result |
150 | + # Signal to result objects that look at stop early policy to stop, |
151 | + original_result.stop_early = self.stop_on_failure |
152 | + result = original_result |
153 | for decorator in self._result_decorators: |
154 | - run_result = decorator(run_result) |
155 | - result.stop_early = self.stop_on_failure |
156 | - result.report_starting() |
157 | - if self.list_only: |
158 | - if self.verbosity >= 2: |
159 | - self.stream.writeln("Listing tests only ...\n") |
160 | - run = 0 |
161 | - for t in iter_suite_tests(test): |
162 | - self.stream.writeln("%s" % (t.id())) |
163 | - run += 1 |
164 | - return None |
165 | - else: |
166 | - try: |
167 | - import testtools |
168 | - except ImportError: |
169 | - test.run(run_result) |
170 | - else: |
171 | - if isinstance(test, testtools.ConcurrentTestSuite): |
172 | - # We need to catch bzr specific behaviors |
173 | - test.run(BZRTransformingResult(run_result)) |
174 | - else: |
175 | - test.run(run_result) |
176 | - run = result.testsRun |
177 | - actionTaken = "Ran" |
178 | - stopTime = time.time() |
179 | - timeTaken = stopTime - startTime |
180 | - result.printErrors() |
181 | - self.stream.writeln(result.separator2) |
182 | - self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken, |
183 | - run, run != 1 and "s" or "", timeTaken)) |
184 | - self.stream.writeln() |
185 | - if not result.wasSuccessful(): |
186 | - self.stream.write("FAILED (") |
187 | - failed, errored = map(len, (result.failures, result.errors)) |
188 | - if failed: |
189 | - self.stream.write("failures=%d" % failed) |
190 | - if errored: |
191 | - if failed: self.stream.write(", ") |
192 | - self.stream.write("errors=%d" % errored) |
193 | - if result.known_failure_count: |
194 | - if failed or errored: self.stream.write(", ") |
195 | - self.stream.write("known_failure_count=%d" % |
196 | - result.known_failure_count) |
197 | - self.stream.writeln(")") |
198 | - else: |
199 | - if result.known_failure_count: |
200 | - self.stream.writeln("OK (known_failures=%d)" % |
201 | - result.known_failure_count) |
202 | - else: |
203 | - self.stream.writeln("OK") |
204 | - if result.skip_count > 0: |
205 | - skipped = result.skip_count |
206 | - self.stream.writeln('%d test%s skipped' % |
207 | - (skipped, skipped != 1 and "s" or "")) |
208 | - if result.unsupported: |
209 | - for feature, count in sorted(result.unsupported.items()): |
210 | - self.stream.writeln("Missing feature '%s' skipped %d tests." % |
211 | - (feature, count)) |
212 | - result.finished() |
213 | - return result |
214 | + result = decorator(result) |
215 | + result.stop_early = self.stop_on_failure |
216 | + try: |
217 | + import testtools |
218 | + except ImportError: |
219 | + pass |
220 | + else: |
221 | + if isinstance(test, testtools.ConcurrentTestSuite): |
222 | + # We need to catch bzr specific behaviors |
223 | + result = BZRTransformingResult(result) |
224 | + result.startTestRun() |
225 | + try: |
226 | + test.run(result) |
227 | + finally: |
228 | + result.stopTestRun() |
229 | + # higher level code uses our extended protocol to determine |
230 | + # what exit code to give. |
231 | + return original_result |
232 | |
233 | |
234 | def iter_suite_tests(suite): |
235 | @@ -2807,7 +2794,6 @@ |
236 | descriptions=0, |
237 | verbosity=verbosity, |
238 | bench_history=bench_history, |
239 | - list_only=list_only, |
240 | strict=strict, |
241 | result_decorators=result_decorators, |
242 | ) |
243 | @@ -2830,10 +2816,15 @@ |
244 | decorators.append(CountingDecorator) |
245 | for decorator in decorators: |
246 | suite = decorator(suite) |
247 | - result = runner.run(suite) |
248 | if list_only: |
249 | + # Done after test suite decoration to allow randomisation etc |
250 | + # to take effect, though that is of marginal benefit. |
251 | + if verbosity >= 2: |
252 | + stream.write("Listing tests only ...\n") |
253 | + for t in iter_suite_tests(suite): |
254 | + stream.write("%s\n" % (t.id())) |
255 | return True |
256 | - result.done() |
257 | + result = runner.run(suite) |
258 | if strict: |
259 | return result.wasStrictlySuccessful() |
260 | else: |
261 | @@ -3168,6 +3159,12 @@ |
262 | def stopTest(self, test): |
263 | self.result.stopTest(test) |
264 | |
265 | + def startTestRun(self): |
266 | + self.result.startTestRun() |
267 | + |
268 | + def stopTestRun(self): |
269 | + self.result.stopTestRun() |
270 | + |
271 | def addSkip(self, test, reason): |
272 | self.result.addSkip(test, reason) |
273 | |
274 | |
275 | === modified file 'bzrlib/tests/test_selftest.py' |
276 | --- bzrlib/tests/test_selftest.py 2009-08-26 06:33:13 +0000 |
277 | +++ bzrlib/tests/test_selftest.py 2009-08-27 07:35:12 +0000 |
278 | @@ -820,7 +820,7 @@ |
279 | def test_known_failure(self): |
280 | """A KnownFailure being raised should trigger several result actions.""" |
281 | class InstrumentedTestResult(tests.ExtendedTestResult): |
282 | - def done(self): pass |
283 | + def stopTestRun(self): pass |
284 | def startTests(self): pass |
285 | def report_test_start(self, test): pass |
286 | def report_known_failure(self, test, err): |
287 | @@ -874,7 +874,7 @@ |
288 | def test_add_not_supported(self): |
289 | """Test the behaviour of invoking addNotSupported.""" |
290 | class InstrumentedTestResult(tests.ExtendedTestResult): |
291 | - def done(self): pass |
292 | + def stopTestRun(self): pass |
293 | def startTests(self): pass |
294 | def report_test_start(self, test): pass |
295 | def report_unsupported(self, test, feature): |
296 | @@ -918,7 +918,7 @@ |
297 | def test_unavailable_exception(self): |
298 | """An UnavailableFeature being raised should invoke addNotSupported.""" |
299 | class InstrumentedTestResult(tests.ExtendedTestResult): |
300 | - def done(self): pass |
301 | + def stopTestRun(self): pass |
302 | def startTests(self): pass |
303 | def report_test_start(self, test): pass |
304 | def addNotSupported(self, test, feature): |
305 | @@ -1001,11 +1001,14 @@ |
306 | because of our use of global state. |
307 | """ |
308 | old_root = tests.TestCaseInTempDir.TEST_ROOT |
309 | + old_leak = tests.TestCase._first_thread_leaker_id |
310 | try: |
311 | tests.TestCaseInTempDir.TEST_ROOT = None |
312 | + tests.TestCase._first_thread_leaker_id = None |
313 | return testrunner.run(test) |
314 | finally: |
315 | tests.TestCaseInTempDir.TEST_ROOT = old_root |
316 | + tests.TestCase._first_thread_leaker_id = old_leak |
317 | |
318 | def test_known_failure_failed_run(self): |
319 | # run a test that generates a known failure which should be printed in |
320 | @@ -1291,6 +1294,34 @@ |
321 | self.assertContainsRe(log, 'this will be kept') |
322 | self.assertEqual(log, test._log_contents) |
323 | |
324 | + def test_startTestRun(self): |
325 | + """run should call result.startTestRun()""" |
326 | + calls = [] |
327 | + class LoggingDecorator(tests.ForwardingResult): |
328 | + def startTestRun(self): |
329 | + tests.ForwardingResult.startTestRun(self) |
330 | + calls.append('startTestRun') |
331 | + test = unittest.FunctionTestCase(lambda:None) |
332 | + stream = StringIO() |
333 | + runner = tests.TextTestRunner(stream=stream, |
334 | + result_decorators=[LoggingDecorator]) |
335 | + result = self.run_test_runner(runner, test) |
336 | + self.assertLength(1, calls) |
337 | + |
338 | + def test_stopTestRun(self): |
339 | + """run should call result.stopTestRun()""" |
340 | + calls = [] |
341 | + class LoggingDecorator(tests.ForwardingResult): |
342 | + def stopTestRun(self): |
343 | + tests.ForwardingResult.stopTestRun(self) |
344 | + calls.append('stopTestRun') |
345 | + test = unittest.FunctionTestCase(lambda:None) |
346 | + stream = StringIO() |
347 | + runner = tests.TextTestRunner(stream=stream, |
348 | + result_decorators=[LoggingDecorator]) |
349 | + result = self.run_test_runner(runner, test) |
350 | + self.assertLength(1, calls) |
351 | + |
352 | |
353 | class SampleTestCase(tests.TestCase): |
354 | |
355 | @@ -2934,19 +2965,3 @@ |
356 | self.verbosity) |
357 | tests.run_suite(suite, runner_class=MyRunner, stream=StringIO()) |
358 | self.assertLength(1, calls) |
359 | - |
360 | - def test_done(self): |
361 | - """run_suite should call result.done()""" |
362 | - self.calls = 0 |
363 | - def one_more_call(): self.calls += 1 |
364 | - def test_function(): |
365 | - pass |
366 | - test = unittest.FunctionTestCase(test_function) |
367 | - class InstrumentedTestResult(tests.ExtendedTestResult): |
368 | - def done(self): one_more_call() |
369 | - class MyRunner(tests.TextTestRunner): |
370 | - def run(self, test): |
371 | - return InstrumentedTestResult(self.stream, self.descriptions, |
372 | - self.verbosity) |
373 | - tests.run_suite(test, runner_class=MyRunner, stream=StringIO()) |
374 | - self.assertEquals(1, self.calls) |
Selftest was a bit tangled up. This makes it clearer without any
functional changes:
* Use startTestRun and stopTestRun, as upstream python does
* lift test listing code out of 'TestRunner' into the DWIM layer
* push all test result outputting down into ExtendedTestResult rather
than being in the runner.
This means that the runner knows how to run, the DWIM layer knows when
to run, and Results know how to report on results.
Code-reviews will show the wrong diff though. Sorry - latency on trunk etc.
-Rob