Merge lp:~lifeless/bzr/test-speed into lp:~bzr/bzr/trunk-old
- test-speed
- Merge into trunk-old
Status: | Merged |
---|---|
Merged at revision: | not available |
Proposed branch: | lp:~lifeless/bzr/test-speed |
Merge into: | lp:~bzr/bzr/trunk-old |
Diff against target: | 374 lines |
To merge this branch: | bzr merge lp:~lifeless/bzr/test-speed |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Vincent Ladeuil | Approve | ||
Review via email: mp+10775@code.launchpad.net |
Commit message
Description of the change
Robert Collins (lifeless) wrote : | # |
Robert Collins (lifeless) wrote : | # |
=== modified file 'NEWS'
--- NEWS 2009-08-26 07:02:45 +0000
+++ NEWS 2009-08-27 03:51:02 +0000
@@ -79,6 +79,9 @@
classes changed to manage lock lifetime of the trees they open in a way
consistent with reader-exclusive locks. (Robert Collins, #305006)
+* ``bzrlib.tests`` now uses ``stopTestRun`` for its ``TestResult``
+ subclasses - the same as python's unittest module. (Robert Collins)
+
Internals
*********
=== modified file 'bzrlib/tests/__init__.py'
--- bzrlib/tests/__init__.py 2009-08-26 07:02:45 +0000
+++ bzrlib/tests/__init__.py 2009-08-27 03:51:02 +0000
@@ -175,17 +175,47 @@
-    def done(self):
-        # nb: called stopTestRun in the version of this that Python merged
-        # upstream, according to lifeless 20090803
+    def stopTestRun(self):
+        run = self.testsRun
+        actionTaken = "Ran"
+        stopTime = time.time()
+        timeTaken = stopTime - self.startTime
+        self.printErrors()
+        self.stream.writeln(self.separator2)
+        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
+            run, run != 1 and "s" or "", timeTaken))
+        self.stream.writeln()
+        if not self.wasSuccessful():
+            self.stream.write("FAILED (")
+            failed, errored = map(len, (self.failures, self.errors))
+            if failed:
+                self.stream.write("failures=%d" % failed)
+            if errored:
+                if failed: self.stream.write(", ")
+                self.stream.write("errors=%d" % errored)
+            if self.known_failure_count:
+                if failed or errored: self.stream.write(", ")
+                self.stream.write("known_failure_count=%d" %
+                    self.known_failure_count)
+            self.stream.writeln(")")
+        else:
+            if self.known_failure_count:
+                self.stream.writeln("OK (known_failures=%d)" %
+                    self.known_failure_count)
+            else:
+                self.stream.writeln("OK")
+        if self.skip_count > 0:
+            skipped = self.skip_count
+            self.stream.writeln('%d test%s skipped' %
+                (skipped, skipped != 1 and "s" or ""))
+        if self.unsupported:
+            for feature, count in sorted(self.unsupported.items()):
+                self.stream.writeln("Missing feature '%s' skipped %d tests." %
+                    (feature, count))
         if self._strict:
             ok = self.wasStrictlySuccessful()
         else:
             ok = self.wasSuccessful()
-        if ok:
-            self.stream.write('tests passed\n')
-        else:
-            self.stream.write('tests failed\n')
         if TestCase._first_thread_leaker_id:
@@ -383,12 +413,12 @@
         else:
             raise errors.BzrError("Unknown whence %r" % whence)
-    def finished(self):
-        pass
-
     def report_cleaning_up(self):
         pass
+    def startTestRun(self):
+        self.startTime = time.time()
+
     def report_success(self, test):
Vincent Ladeuil (vila) wrote : | # |
This is far better than what we had.
You give us a far cleaner TestRunner, so whatever is left appears bigger now:
- the loop on result_decorators to propagate a boolean looks highly suspicious,
- the injection of BZRTransformingResult, can't
that be a result decorator now ?
- I still don't like that self.verbosity handling here, again, can't that
be turned into a decorator ?
Given that this patch is a clear improvement, please land, the above can come later
(if you agree on them).
Preview Diff
1 | === modified file 'NEWS' | |||
2 | --- NEWS 2009-08-27 00:53:27 +0000 | |||
3 | +++ NEWS 2009-08-27 07:35:12 +0000 | |||
4 | @@ -90,6 +90,9 @@ | |||
5 | 90 | classes changed to manage lock lifetime of the trees they open in a way | 90 | classes changed to manage lock lifetime of the trees they open in a way |
6 | 91 | consistent with reader-exclusive locks. (Robert Collins, #305006) | 91 | consistent with reader-exclusive locks. (Robert Collins, #305006) |
7 | 92 | 92 | ||
8 | 93 | * ``bzrlib.tests`` now uses ``stopTestRun`` for its ``TestResult`` | ||
9 | 94 | subclasses - the same as python's unittest module. (Robert Collins) | ||
10 | 95 | |||
11 | 93 | Internals | 96 | Internals |
12 | 94 | ********* | 97 | ********* |
13 | 95 | 98 | ||
14 | 96 | 99 | ||
15 | === modified file 'bzrlib/tests/__init__.py' | |||
16 | --- bzrlib/tests/__init__.py 2009-08-26 07:02:45 +0000 | |||
17 | +++ bzrlib/tests/__init__.py 2009-08-27 07:35:12 +0000 | |||
18 | @@ -175,17 +175,47 @@ | |||
19 | 175 | self._overall_start_time = time.time() | 175 | self._overall_start_time = time.time() |
20 | 176 | self._strict = strict | 176 | self._strict = strict |
21 | 177 | 177 | ||
25 | 178 | def done(self): | 178 | def stopTestRun(self): |
26 | 179 | # nb: called stopTestRun in the version of this that Python merged | 179 | run = self.testsRun |
27 | 180 | # upstream, according to lifeless 20090803 | 180 | actionTaken = "Ran" |
28 | 181 | stopTime = time.time() | ||
29 | 182 | timeTaken = stopTime - self.startTime | ||
30 | 183 | self.printErrors() | ||
31 | 184 | self.stream.writeln(self.separator2) | ||
32 | 185 | self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken, | ||
33 | 186 | run, run != 1 and "s" or "", timeTaken)) | ||
34 | 187 | self.stream.writeln() | ||
35 | 188 | if not self.wasSuccessful(): | ||
36 | 189 | self.stream.write("FAILED (") | ||
37 | 190 | failed, errored = map(len, (self.failures, self.errors)) | ||
38 | 191 | if failed: | ||
39 | 192 | self.stream.write("failures=%d" % failed) | ||
40 | 193 | if errored: | ||
41 | 194 | if failed: self.stream.write(", ") | ||
42 | 195 | self.stream.write("errors=%d" % errored) | ||
43 | 196 | if self.known_failure_count: | ||
44 | 197 | if failed or errored: self.stream.write(", ") | ||
45 | 198 | self.stream.write("known_failure_count=%d" % | ||
46 | 199 | self.known_failure_count) | ||
47 | 200 | self.stream.writeln(")") | ||
48 | 201 | else: | ||
49 | 202 | if self.known_failure_count: | ||
50 | 203 | self.stream.writeln("OK (known_failures=%d)" % | ||
51 | 204 | self.known_failure_count) | ||
52 | 205 | else: | ||
53 | 206 | self.stream.writeln("OK") | ||
54 | 207 | if self.skip_count > 0: | ||
55 | 208 | skipped = self.skip_count | ||
56 | 209 | self.stream.writeln('%d test%s skipped' % | ||
57 | 210 | (skipped, skipped != 1 and "s" or "")) | ||
58 | 211 | if self.unsupported: | ||
59 | 212 | for feature, count in sorted(self.unsupported.items()): | ||
60 | 213 | self.stream.writeln("Missing feature '%s' skipped %d tests." % | ||
61 | 214 | (feature, count)) | ||
62 | 181 | if self._strict: | 215 | if self._strict: |
63 | 182 | ok = self.wasStrictlySuccessful() | 216 | ok = self.wasStrictlySuccessful() |
64 | 183 | else: | 217 | else: |
65 | 184 | ok = self.wasSuccessful() | 218 | ok = self.wasSuccessful() |
66 | 185 | if ok: | ||
67 | 186 | self.stream.write('tests passed\n') | ||
68 | 187 | else: | ||
69 | 188 | self.stream.write('tests failed\n') | ||
70 | 189 | if TestCase._first_thread_leaker_id: | 219 | if TestCase._first_thread_leaker_id: |
71 | 190 | self.stream.write( | 220 | self.stream.write( |
72 | 191 | '%s is leaking threads among %d leaking tests.\n' % ( | 221 | '%s is leaking threads among %d leaking tests.\n' % ( |
73 | @@ -383,12 +413,12 @@ | |||
74 | 383 | else: | 413 | else: |
75 | 384 | raise errors.BzrError("Unknown whence %r" % whence) | 414 | raise errors.BzrError("Unknown whence %r" % whence) |
76 | 385 | 415 | ||
77 | 386 | def finished(self): | ||
78 | 387 | pass | ||
79 | 388 | |||
80 | 389 | def report_cleaning_up(self): | 416 | def report_cleaning_up(self): |
81 | 390 | pass | 417 | pass |
82 | 391 | 418 | ||
83 | 419 | def startTestRun(self): | ||
84 | 420 | self.startTime = time.time() | ||
85 | 421 | |||
86 | 392 | def report_success(self, test): | 422 | def report_success(self, test): |
87 | 393 | pass | 423 | pass |
88 | 394 | 424 | ||
89 | @@ -421,15 +451,14 @@ | |||
90 | 421 | self.pb.update_latency = 0 | 451 | self.pb.update_latency = 0 |
91 | 422 | self.pb.show_transport_activity = False | 452 | self.pb.show_transport_activity = False |
92 | 423 | 453 | ||
94 | 424 | def done(self): | 454 | def stopTestRun(self): |
95 | 425 | # called when the tests that are going to run have run | 455 | # called when the tests that are going to run have run |
96 | 426 | self.pb.clear() | 456 | self.pb.clear() |
97 | 427 | super(TextTestResult, self).done() | ||
98 | 428 | |||
99 | 429 | def finished(self): | ||
100 | 430 | self.pb.finished() | 457 | self.pb.finished() |
101 | 458 | super(TextTestResult, self).stopTestRun() | ||
102 | 431 | 459 | ||
104 | 432 | def report_starting(self): | 460 | def startTestRun(self): |
105 | 461 | super(TextTestResult, self).startTestRun() | ||
106 | 433 | self.pb.update('[test 0/%d] Starting' % (self.num_tests)) | 462 | self.pb.update('[test 0/%d] Starting' % (self.num_tests)) |
107 | 434 | 463 | ||
108 | 435 | def printErrors(self): | 464 | def printErrors(self): |
109 | @@ -514,7 +543,8 @@ | |||
110 | 514 | result = a_string | 543 | result = a_string |
111 | 515 | return result.ljust(final_width) | 544 | return result.ljust(final_width) |
112 | 516 | 545 | ||
114 | 517 | def report_starting(self): | 546 | def startTestRun(self): |
115 | 547 | super(VerboseTestResult, self).startTestRun() | ||
116 | 518 | self.stream.write('running %d tests...\n' % self.num_tests) | 548 | self.stream.write('running %d tests...\n' % self.num_tests) |
117 | 519 | 549 | ||
118 | 520 | def report_test_start(self, test): | 550 | def report_test_start(self, test): |
119 | @@ -578,7 +608,6 @@ | |||
120 | 578 | descriptions=0, | 608 | descriptions=0, |
121 | 579 | verbosity=1, | 609 | verbosity=1, |
122 | 580 | bench_history=None, | 610 | bench_history=None, |
123 | 581 | list_only=False, | ||
124 | 582 | strict=False, | 611 | strict=False, |
125 | 583 | result_decorators=None, | 612 | result_decorators=None, |
126 | 584 | ): | 613 | ): |
127 | @@ -593,85 +622,43 @@ | |||
128 | 593 | self.descriptions = descriptions | 622 | self.descriptions = descriptions |
129 | 594 | self.verbosity = verbosity | 623 | self.verbosity = verbosity |
130 | 595 | self._bench_history = bench_history | 624 | self._bench_history = bench_history |
131 | 596 | self.list_only = list_only | ||
132 | 597 | self._strict = strict | 625 | self._strict = strict |
133 | 598 | self._result_decorators = result_decorators or [] | 626 | self._result_decorators = result_decorators or [] |
134 | 599 | 627 | ||
135 | 600 | def run(self, test): | 628 | def run(self, test): |
136 | 601 | "Run the given test case or test suite." | 629 | "Run the given test case or test suite." |
137 | 602 | startTime = time.time() | ||
138 | 603 | if self.verbosity == 1: | 630 | if self.verbosity == 1: |
139 | 604 | result_class = TextTestResult | 631 | result_class = TextTestResult |
140 | 605 | elif self.verbosity >= 2: | 632 | elif self.verbosity >= 2: |
141 | 606 | result_class = VerboseTestResult | 633 | result_class = VerboseTestResult |
143 | 607 | result = result_class(self.stream, | 634 | original_result = result_class(self.stream, |
144 | 608 | self.descriptions, | 635 | self.descriptions, |
145 | 609 | self.verbosity, | 636 | self.verbosity, |
146 | 610 | bench_history=self._bench_history, | 637 | bench_history=self._bench_history, |
147 | 611 | strict=self._strict, | 638 | strict=self._strict, |
148 | 612 | ) | 639 | ) |
150 | 613 | run_result = result | 640 | # Signal to result objects that look at stop early policy to stop, |
151 | 641 | original_result.stop_early = self.stop_on_failure | ||
152 | 642 | result = original_result | ||
153 | 614 | for decorator in self._result_decorators: | 643 | for decorator in self._result_decorators: |
214 | 615 | run_result = decorator(run_result) | 644 | result = decorator(result) |
215 | 616 | result.stop_early = self.stop_on_failure | 645 | result.stop_early = self.stop_on_failure |
216 | 617 | result.report_starting() | 646 | try: |
217 | 618 | if self.list_only: | 647 | import testtools |
218 | 619 | if self.verbosity >= 2: | 648 | except ImportError: |
219 | 620 | self.stream.writeln("Listing tests only ...\n") | 649 | pass |
220 | 621 | run = 0 | 650 | else: |
221 | 622 | for t in iter_suite_tests(test): | 651 | if isinstance(test, testtools.ConcurrentTestSuite): |
222 | 623 | self.stream.writeln("%s" % (t.id())) | 652 | # We need to catch bzr specific behaviors |
223 | 624 | run += 1 | 653 | result = BZRTransformingResult(result) |
224 | 625 | return None | 654 | result.startTestRun() |
225 | 626 | else: | 655 | try: |
226 | 627 | try: | 656 | test.run(result) |
227 | 628 | import testtools | 657 | finally: |
228 | 629 | except ImportError: | 658 | result.stopTestRun() |
229 | 630 | test.run(run_result) | 659 | # higher level code uses our extended protocol to determine |
230 | 631 | else: | 660 | # what exit code to give. |
231 | 632 | if isinstance(test, testtools.ConcurrentTestSuite): | 661 | return original_result |
172 | 633 | # We need to catch bzr specific behaviors | ||
173 | 634 | test.run(BZRTransformingResult(run_result)) | ||
174 | 635 | else: | ||
175 | 636 | test.run(run_result) | ||
176 | 637 | run = result.testsRun | ||
177 | 638 | actionTaken = "Ran" | ||
178 | 639 | stopTime = time.time() | ||
179 | 640 | timeTaken = stopTime - startTime | ||
180 | 641 | result.printErrors() | ||
181 | 642 | self.stream.writeln(result.separator2) | ||
182 | 643 | self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken, | ||
183 | 644 | run, run != 1 and "s" or "", timeTaken)) | ||
184 | 645 | self.stream.writeln() | ||
185 | 646 | if not result.wasSuccessful(): | ||
186 | 647 | self.stream.write("FAILED (") | ||
187 | 648 | failed, errored = map(len, (result.failures, result.errors)) | ||
188 | 649 | if failed: | ||
189 | 650 | self.stream.write("failures=%d" % failed) | ||
190 | 651 | if errored: | ||
191 | 652 | if failed: self.stream.write(", ") | ||
192 | 653 | self.stream.write("errors=%d" % errored) | ||
193 | 654 | if result.known_failure_count: | ||
194 | 655 | if failed or errored: self.stream.write(", ") | ||
195 | 656 | self.stream.write("known_failure_count=%d" % | ||
196 | 657 | result.known_failure_count) | ||
197 | 658 | self.stream.writeln(")") | ||
198 | 659 | else: | ||
199 | 660 | if result.known_failure_count: | ||
200 | 661 | self.stream.writeln("OK (known_failures=%d)" % | ||
201 | 662 | result.known_failure_count) | ||
202 | 663 | else: | ||
203 | 664 | self.stream.writeln("OK") | ||
204 | 665 | if result.skip_count > 0: | ||
205 | 666 | skipped = result.skip_count | ||
206 | 667 | self.stream.writeln('%d test%s skipped' % | ||
207 | 668 | (skipped, skipped != 1 and "s" or "")) | ||
208 | 669 | if result.unsupported: | ||
209 | 670 | for feature, count in sorted(result.unsupported.items()): | ||
210 | 671 | self.stream.writeln("Missing feature '%s' skipped %d tests." % | ||
211 | 672 | (feature, count)) | ||
212 | 673 | result.finished() | ||
213 | 674 | return result | ||
232 | 675 | 662 | ||
233 | 676 | 663 | ||
234 | 677 | def iter_suite_tests(suite): | 664 | def iter_suite_tests(suite): |
235 | @@ -2807,7 +2794,6 @@ | |||
236 | 2807 | descriptions=0, | 2794 | descriptions=0, |
237 | 2808 | verbosity=verbosity, | 2795 | verbosity=verbosity, |
238 | 2809 | bench_history=bench_history, | 2796 | bench_history=bench_history, |
239 | 2810 | list_only=list_only, | ||
240 | 2811 | strict=strict, | 2797 | strict=strict, |
241 | 2812 | result_decorators=result_decorators, | 2798 | result_decorators=result_decorators, |
242 | 2813 | ) | 2799 | ) |
243 | @@ -2830,10 +2816,15 @@ | |||
244 | 2830 | decorators.append(CountingDecorator) | 2816 | decorators.append(CountingDecorator) |
245 | 2831 | for decorator in decorators: | 2817 | for decorator in decorators: |
246 | 2832 | suite = decorator(suite) | 2818 | suite = decorator(suite) |
247 | 2833 | result = runner.run(suite) | ||
248 | 2834 | if list_only: | 2819 | if list_only: |
249 | 2820 | # Done after test suite decoration to allow randomisation etc | ||
250 | 2821 | # to take effect, though that is of marginal benefit. | ||
251 | 2822 | if verbosity >= 2: | ||
252 | 2823 | stream.write("Listing tests only ...\n") | ||
253 | 2824 | for t in iter_suite_tests(suite): | ||
254 | 2825 | stream.write("%s\n" % (t.id())) | ||
255 | 2835 | return True | 2826 | return True |
257 | 2836 | result.done() | 2827 | result = runner.run(suite) |
258 | 2837 | if strict: | 2828 | if strict: |
259 | 2838 | return result.wasStrictlySuccessful() | 2829 | return result.wasStrictlySuccessful() |
260 | 2839 | else: | 2830 | else: |
261 | @@ -3168,6 +3159,12 @@ | |||
262 | 3168 | def stopTest(self, test): | 3159 | def stopTest(self, test): |
263 | 3169 | self.result.stopTest(test) | 3160 | self.result.stopTest(test) |
264 | 3170 | 3161 | ||
265 | 3162 | def startTestRun(self): | ||
266 | 3163 | self.result.startTestRun() | ||
267 | 3164 | |||
268 | 3165 | def stopTestRun(self): | ||
269 | 3166 | self.result.stopTestRun() | ||
270 | 3167 | |||
271 | 3171 | def addSkip(self, test, reason): | 3168 | def addSkip(self, test, reason): |
272 | 3172 | self.result.addSkip(test, reason) | 3169 | self.result.addSkip(test, reason) |
273 | 3173 | 3170 | ||
274 | 3174 | 3171 | ||
275 | === modified file 'bzrlib/tests/test_selftest.py' | |||
276 | --- bzrlib/tests/test_selftest.py 2009-08-26 06:33:13 +0000 | |||
277 | +++ bzrlib/tests/test_selftest.py 2009-08-27 07:35:12 +0000 | |||
278 | @@ -820,7 +820,7 @@ | |||
279 | 820 | def test_known_failure(self): | 820 | def test_known_failure(self): |
280 | 821 | """A KnownFailure being raised should trigger several result actions.""" | 821 | """A KnownFailure being raised should trigger several result actions.""" |
281 | 822 | class InstrumentedTestResult(tests.ExtendedTestResult): | 822 | class InstrumentedTestResult(tests.ExtendedTestResult): |
283 | 823 | def done(self): pass | 823 | def stopTestRun(self): pass |
284 | 824 | def startTests(self): pass | 824 | def startTests(self): pass |
285 | 825 | def report_test_start(self, test): pass | 825 | def report_test_start(self, test): pass |
286 | 826 | def report_known_failure(self, test, err): | 826 | def report_known_failure(self, test, err): |
287 | @@ -874,7 +874,7 @@ | |||
288 | 874 | def test_add_not_supported(self): | 874 | def test_add_not_supported(self): |
289 | 875 | """Test the behaviour of invoking addNotSupported.""" | 875 | """Test the behaviour of invoking addNotSupported.""" |
290 | 876 | class InstrumentedTestResult(tests.ExtendedTestResult): | 876 | class InstrumentedTestResult(tests.ExtendedTestResult): |
292 | 877 | def done(self): pass | 877 | def stopTestRun(self): pass |
293 | 878 | def startTests(self): pass | 878 | def startTests(self): pass |
294 | 879 | def report_test_start(self, test): pass | 879 | def report_test_start(self, test): pass |
295 | 880 | def report_unsupported(self, test, feature): | 880 | def report_unsupported(self, test, feature): |
296 | @@ -918,7 +918,7 @@ | |||
297 | 918 | def test_unavailable_exception(self): | 918 | def test_unavailable_exception(self): |
298 | 919 | """An UnavailableFeature being raised should invoke addNotSupported.""" | 919 | """An UnavailableFeature being raised should invoke addNotSupported.""" |
299 | 920 | class InstrumentedTestResult(tests.ExtendedTestResult): | 920 | class InstrumentedTestResult(tests.ExtendedTestResult): |
301 | 921 | def done(self): pass | 921 | def stopTestRun(self): pass |
302 | 922 | def startTests(self): pass | 922 | def startTests(self): pass |
303 | 923 | def report_test_start(self, test): pass | 923 | def report_test_start(self, test): pass |
304 | 924 | def addNotSupported(self, test, feature): | 924 | def addNotSupported(self, test, feature): |
305 | @@ -1001,11 +1001,14 @@ | |||
306 | 1001 | because of our use of global state. | 1001 | because of our use of global state. |
307 | 1002 | """ | 1002 | """ |
308 | 1003 | old_root = tests.TestCaseInTempDir.TEST_ROOT | 1003 | old_root = tests.TestCaseInTempDir.TEST_ROOT |
309 | 1004 | old_leak = tests.TestCase._first_thread_leaker_id | ||
310 | 1004 | try: | 1005 | try: |
311 | 1005 | tests.TestCaseInTempDir.TEST_ROOT = None | 1006 | tests.TestCaseInTempDir.TEST_ROOT = None |
312 | 1007 | tests.TestCase._first_thread_leaker_id = None | ||
313 | 1006 | return testrunner.run(test) | 1008 | return testrunner.run(test) |
314 | 1007 | finally: | 1009 | finally: |
315 | 1008 | tests.TestCaseInTempDir.TEST_ROOT = old_root | 1010 | tests.TestCaseInTempDir.TEST_ROOT = old_root |
316 | 1011 | tests.TestCase._first_thread_leaker_id = old_leak | ||
317 | 1009 | 1012 | ||
318 | 1010 | def test_known_failure_failed_run(self): | 1013 | def test_known_failure_failed_run(self): |
319 | 1011 | # run a test that generates a known failure which should be printed in | 1014 | # run a test that generates a known failure which should be printed in |
320 | @@ -1291,6 +1294,34 @@ | |||
321 | 1291 | self.assertContainsRe(log, 'this will be kept') | 1294 | self.assertContainsRe(log, 'this will be kept') |
322 | 1292 | self.assertEqual(log, test._log_contents) | 1295 | self.assertEqual(log, test._log_contents) |
323 | 1293 | 1296 | ||
324 | 1297 | def test_startTestRun(self): | ||
325 | 1298 | """run should call result.startTestRun()""" | ||
326 | 1299 | calls = [] | ||
327 | 1300 | class LoggingDecorator(tests.ForwardingResult): | ||
328 | 1301 | def startTestRun(self): | ||
329 | 1302 | tests.ForwardingResult.startTestRun(self) | ||
330 | 1303 | calls.append('startTestRun') | ||
331 | 1304 | test = unittest.FunctionTestCase(lambda:None) | ||
332 | 1305 | stream = StringIO() | ||
333 | 1306 | runner = tests.TextTestRunner(stream=stream, | ||
334 | 1307 | result_decorators=[LoggingDecorator]) | ||
335 | 1308 | result = self.run_test_runner(runner, test) | ||
336 | 1309 | self.assertLength(1, calls) | ||
337 | 1310 | |||
338 | 1311 | def test_stopTestRun(self): | ||
339 | 1312 | """run should call result.stopTestRun()""" | ||
340 | 1313 | calls = [] | ||
341 | 1314 | class LoggingDecorator(tests.ForwardingResult): | ||
342 | 1315 | def stopTestRun(self): | ||
343 | 1316 | tests.ForwardingResult.stopTestRun(self) | ||
344 | 1317 | calls.append('stopTestRun') | ||
345 | 1318 | test = unittest.FunctionTestCase(lambda:None) | ||
346 | 1319 | stream = StringIO() | ||
347 | 1320 | runner = tests.TextTestRunner(stream=stream, | ||
348 | 1321 | result_decorators=[LoggingDecorator]) | ||
349 | 1322 | result = self.run_test_runner(runner, test) | ||
350 | 1323 | self.assertLength(1, calls) | ||
351 | 1324 | |||
352 | 1294 | 1325 | ||
353 | 1295 | class SampleTestCase(tests.TestCase): | 1326 | class SampleTestCase(tests.TestCase): |
354 | 1296 | 1327 | ||
355 | @@ -2934,19 +2965,3 @@ | |||
356 | 2934 | self.verbosity) | 2965 | self.verbosity) |
357 | 2935 | tests.run_suite(suite, runner_class=MyRunner, stream=StringIO()) | 2966 | tests.run_suite(suite, runner_class=MyRunner, stream=StringIO()) |
358 | 2936 | self.assertLength(1, calls) | 2967 | self.assertLength(1, calls) |
359 | 2937 | |||
360 | 2938 | def test_done(self): | ||
361 | 2939 | """run_suite should call result.done()""" | ||
362 | 2940 | self.calls = 0 | ||
363 | 2941 | def one_more_call(): self.calls += 1 | ||
364 | 2942 | def test_function(): | ||
365 | 2943 | pass | ||
366 | 2944 | test = unittest.FunctionTestCase(test_function) | ||
367 | 2945 | class InstrumentedTestResult(tests.ExtendedTestResult): | ||
368 | 2946 | def done(self): one_more_call() | ||
369 | 2947 | class MyRunner(tests.TextTestRunner): | ||
370 | 2948 | def run(self, test): | ||
371 | 2949 | return InstrumentedTestResult(self.stream, self.descriptions, | ||
372 | 2950 | self.verbosity) | ||
373 | 2951 | tests.run_suite(test, runner_class=MyRunner, stream=StringIO()) | ||
374 | 2952 | self.assertEquals(1, self.calls) |
Selftest was a bit tangled up. This makes it clearer without any
functional changes:
* Use startTestRun and stopTestRun, as upstream python does
* lift test listing code out of 'TestRunner' into the DWIM layer
* push all test result outputting down into ExtendedTestResult rather
than being in the runner.
This means that the runner knows how to run, the DWIM layer knows when
to run, and Results know how to report on results.
Code-reviews will show the wrong diff though. Sorry - latency on trunk etc.
-Rob