#!/usr/local/bin/python -O

""" A Python Benchmark Suite

"""
# Note: Please keep this module compatible with Python 2.6.
#
# Tests may include features in later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#

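# A Setup.py entry for a test module that uses syntax newer than the
# minimum supported version can, for example, be guarded like this
# (illustrative sketch; the module name is hypothetical):
#
#     try:
#         from NewSyntaxTests import *
#     except (ImportError, SyntaxError):
#         pass
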
from __future__ import print_function

# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)
Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)

All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.

THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""

import sys
import time
import platform
import re
from CommandLine import *

try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

# Version number; version history: see README file !
__version__ = '2.1'

### Constants

# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6

# Percent unit
PERCENT = 100

# Horizontal line length
LINE = 79

# Minimum test run-time
MIN_TEST_RUNTIME = 1e-3

# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20

# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20

# Allow skipping calibration ?
ALLOW_SKIPPING_CALIBRATION = 1

# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_PROCESS_TIME = 'time.process_time'
TIMER_TIME_PERF_COUNTER = 'time.perf_counter'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'

# Choose platform default timer
if hasattr(time, 'perf_counter'):
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_PERF_COUNTER
elif sys.platform[:3] == 'win':
    # On WinXP this has 2.5ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
    # On Linux this has 1ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME

# Print debug information ?
_debug = 0

### Helpers

def get_timer(timertype):

    if timertype == TIMER_TIME_TIME:
        return time.time
    elif timertype == TIMER_TIME_PROCESS_TIME:
        return time.process_time
    elif timertype == TIMER_TIME_PERF_COUNTER:
        return time.perf_counter
    elif timertype == TIMER_TIME_CLOCK:
        return time.clock
    elif timertype == TIMER_SYSTIMES_PROCESSTIME:
        import systimes
        return systimes.processtime
    else:
        raise TypeError('unknown timer type: %s' % timertype)

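# Usage sketch (illustrative; pybench itself obtains timers through
# the Test/Benchmark classes below):
#
#     timer = get_timer(TIMER_PLATFORM_DEFAULT)
#     t0 = timer()
#     ...                      # code to be measured
#     elapsed = timer() - t0   # seconds, as a float
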
def get_machine_details():

    if _debug:
        print('Getting machine details...')
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    # XXX this is now always UCS4, maybe replace it with 'PEP393' in 3.3+?
    if sys.maxunicode == 65535:
        # UCS2 build (standard)
        unitype = 'UCS2'
    else:
        # UCS4 build (most recent Linux distros)
        unitype = 'UCS4'
    bits, linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'implementation': getattr(platform, 'python_implementation',
                                  lambda:'n/a')(),
        'python': python,
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unitype,
        'bits': bits,
        }

def print_machine_details(d, indent=''):

    l = ['Machine Details:',
         '   Platform ID:    %s' % d.get('platform', 'n/a'),
         '   Processor:      %s' % d.get('processor', 'n/a'),
         '',
         'Python:',
         '   Implementation: %s' % d.get('implementation', 'n/a'),
         '   Executable:     %s' % d.get('executable', 'n/a'),
         '   Version:        %s' % d.get('python', 'n/a'),
         '   Compiler:       %s' % d.get('compiler', 'n/a'),
         '   Bits:           %s' % d.get('bits', 'n/a'),
         '   Build:          %s (#%s)' % (d.get('builddate', 'n/a'),
                                          d.get('buildno', 'n/a')),
         '   Unicode:        %s' % d.get('unicode', 'n/a'),
         ]
    joiner = '\n' + indent
    print(indent + joiner.join(l) + '\n')

### Test baseclass

class Test:

    """ All tests must have this class as their base class. It
        provides the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 20-50 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundredths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.

    """

    ### Instance variables that each test should override

    # Version number of the test as float (x.yy); this is important
    # for comparisons of benchmark runs - tests with unequal version
    # number will not get compared.
    version = 2.1

    # The number of abstract operations done in each round of the
    # test. An operation is the basic unit of what you want to
    # measure. The benchmark will output the amount of run-time per
    # operation. Note that in order to raise the measured timings
    # significantly above noise level, it is often required to repeat
    # sets of operations more than once per test round. The measured
    # overhead per test round should be less than 1 second.
    operations = 1

    # Number of rounds to execute per test run. This should be
    # adjusted to a figure that results in a test run-time of between
    # 1-2 seconds.
    rounds = 100000

    ### Internal variables

    # Mark this class as implementing a test
    is_a_test = 1

    # Last timing: (real, run, overhead)
    last_timing = (0.0, 0.0, 0.0)

    # Warp factor to use for this test
    warp = 1

    # Number of calibration runs to use
    calibration_runs = CALIBRATION_RUNS

    # List of calibration timings
    overhead_times = None

    # List of test run timings
    times = []

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, warp=None, calibration_runs=None, timer=None):

        # Set parameters
        if warp is not None:
            self.rounds = int(self.rounds / warp)
            if self.rounds == 0:
                raise ValueError('warp factor set too high')
            self.warp = warp
        if calibration_runs is not None:
            if (not ALLOW_SKIPPING_CALIBRATION and
                calibration_runs < 1):
                raise ValueError('at least one calibration run is required')
            self.calibration_runs = calibration_runs
        if timer is not None:
            self.timer = timer

        # Init variables
        self.times = []
        self.overhead_times = []

        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds

    def get_timer(self):

        """ Return the timer function to use for the test.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the test is compatible
            with the other Test instance or not.

        """
        if self.version != other.version:
            return 0
        if self.rounds != other.rounds:
            return 0
        return 1

    def calibrate_test(self):

        if self.calibration_runs == 0:
            self.overhead_times = [0.0]
            return

        calibrate = self.calibrate
        timer = self.get_timer()
        calibration_loops = range(CALIBRATION_LOOPS)

        # Time the calibration loop overhead
        prep_times = []
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                pass
            t = timer() - t
            prep_times.append(t / CALIBRATION_LOOPS)
        min_prep_time = min(prep_times)
        if _debug:
            print()
            print('Calib. prep time     = %.6fms' % (
                min_prep_time * MILLI_SECONDS))

        # Time the calibration runs (doing CALIBRATION_LOOPS loops of
        # .calibrate() method calls each)
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                calibrate()
            t = timer() - t
            self.overhead_times.append(t / CALIBRATION_LOOPS
                                        - min_prep_time)

        # Check the measured times
        min_overhead = min(self.overhead_times)
        max_overhead = max(self.overhead_times)
        if _debug:
            print('Calib. overhead time = %.6fms' % (
                min_overhead * MILLI_SECONDS))
        if min_overhead < 0.0:
            raise ValueError('calibration setup did not work')
        if max_overhead - min_overhead > 0.1:
            raise ValueError(
                'overhead calibration timing range too inaccurate: '
                '%r - %r' % (min_overhead, max_overhead))

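    # Overhead accounting, by example (numbers are illustrative): with
    # CALIBRATION_LOOPS = 20, a calibration run taking 0.040s costs
    # 0.002s per .calibrate() call; subtracting a measured minimum
    # empty-loop (prep) cost of 0.0005s records 0.0015s of overhead.
    # run() below subtracts the minimum recorded overhead from each
    # test timing to obtain the effective time.
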
    def run(self):

        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low with respect to the test timing.

        """
        test = self.test
        timer = self.get_timer()

        # Get calibration
        min_overhead = min(self.overhead_times)

        # Test run
        t = timer()
        test()
        t = timer() - t
        if t < MIN_TEST_RUNTIME:
            raise ValueError('warp factor too high: '
                             'test times are < 1ms')
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            set up and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.

        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds executing
            self.operations number of operations each.

        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)

        """
        runs = len(self.times)
        if runs == 0:
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = sum(self.times)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead

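# Example test (an illustrative sketch, not part of the shipped
# suite: Benchmark.load_tests() only scans the Setup module, and the
# leading underscore keeps the class out of star-imports). Note how
# .operations matches the number of repetitions per round, while
# .calibrate() mirrors the loop without the measured payload:

class _ExampleTupleSlicing(Test):

    version = 2.1
    operations = 3
    rounds = 100000

    def test(self):

        t = tuple(range(100))
        for i in range(self.rounds):
            s = t[10:50]
            s = t[10:50]
            s = t[10:50]

    def calibrate(self):

        t = tuple(range(100))
        for i in range(self.rounds):
            pass
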
### Load Setup

# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.

import Setup

### Benchmark base class

class Benchmark:

    # Name of the benchmark
    name = ''

    # Number of benchmark rounds to run
    rounds = 1

    # Warp factor used to run the tests
    warp = 1

    # Average benchmark round time
    roundtime = 0

    # Benchmark version number as float x.yy
    version = 2.1

    # Produce verbose output ?
    verbose = 0

    # Dictionary with the machine details
    machine_details = None

    # Number of calibration runs to use per test
    calibration_runs = CALIBRATION_RUNS

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, name, verbose=None, timer=None, warp=None,
                 calibration_runs=None):

        if name:
            self.name = name
        else:
            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                        (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose
        if timer is not None:
            self.timer = timer
        if warp is not None:
            self.warp = warp
        if calibration_runs is not None:
            self.calibration_runs = calibration_runs

        # Init vars
        self.tests = {}
        if _debug:
            print('Getting machine details...')
        self.machine_details = get_machine_details()

        # Make .version an instance attribute to have it saved in the
        # Benchmark pickle
        self.version = self.version

    def get_timer(self):

        """ Return the timer function to use for the test.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the benchmark is
            compatible with the other Benchmark instance or not.

        """
        if self.version != other.version:
            return 0
        if (self.machine_details == other.machine_details and
            self.timer != other.timer):
            return 0
        if (self.calibration_runs == 0 and
            other.calibration_runs != 0):
            return 0
        if (self.calibration_runs != 0 and
            other.calibration_runs == 0):
            return 0
        return 1

    def load_tests(self, setupmod, limitnames=None):

        # Add tests
        if self.verbose:
            print('Searching for tests ...')
            print('--------------------------------------')
        for testclass in setupmod.__dict__.values():
            if not hasattr(testclass, 'is_a_test'):
                continue
            name = testclass.__name__
            if name == 'Test':
                continue
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            self.tests[name] = testclass(
                warp=self.warp,
                calibration_runs=self.calibration_runs,
                timer=self.timer)
        l = sorted(self.tests)
        if self.verbose:
            for name in l:
                print('  %s' % name)
            print('--------------------------------------')
            print('  %i tests found' % len(l))
            print()

    def calibrate(self):

        print('Calibrating tests. Please wait...', end=' ')
        sys.stdout.flush()
        if self.verbose:
            print()
            print()
            print('Test                              min      max')
            print('-' * LINE)
        tests = sorted(self.tests.items())
        for i in range(len(tests)):
            name, test = tests[i]
            test.calibrate_test()
            if self.verbose:
                print('%30s: %6.3fms %6.3fms' % \
                      (name,
                       min(test.overhead_times) * MILLI_SECONDS,
                       max(test.overhead_times) * MILLI_SECONDS))
        if self.verbose:
            print()
            print('Done with the calibration.')
        else:
            print('done.')
        print()

    def run(self):

        tests = sorted(self.tests.items())
        timer = self.get_timer()
        print('Running %i round(s) of the suite at warp factor %i:' % \
              (self.rounds, self.warp))
        print()
        self.roundtimes = []
        for i in range(self.rounds):
            if self.verbose:
                print(' Round %-25i  effective   absolute  overhead' % (i+1))
            total_eff_time = 0.0
            for j in range(len(tests)):
                name, test = tests[j]
                if self.verbose:
                    print('%30s:' % name, end=' ')
                test.run()
                (eff_time, abs_time, min_overhead) = test.last_timing
                total_eff_time = total_eff_time + eff_time
                if self.verbose:
                    print('  %5.0fms   %5.0fms  %7.3fms' % \
                          (eff_time * MILLI_SECONDS,
                           abs_time * MILLI_SECONDS,
                           min_overhead * MILLI_SECONDS))
            self.roundtimes.append(total_eff_time)
            if self.verbose:
                print('                                   '
                      '     ------------------------------')
                print('                                   '
                      '     Totals:    %6.0fms' %
                      (total_eff_time * MILLI_SECONDS))
                print()
            else:
                print('* Round %i done in %.3f seconds.' % (i+1,
                                                            total_eff_time))
        print()

    def stat(self):

        """ Return benchmark run statistics as tuple:

            (minimum round time,
             average round time,
             maximum round time)

            XXX Currently not used, since the benchmark does test
            statistics across all rounds.

        """
        runs = len(self.roundtimes)
        if runs == 0:
            return 0.0, 0.0, 0.0
        min_time = min(self.roundtimes)
        total_time = sum(self.roundtimes)
        avg_time = total_time / float(runs)
        max_time = max(self.roundtimes)
        return (min_time, avg_time, max_time)

    def print_header(self, title='Benchmark'):

        print('-' * LINE)
        print('%s: %s' % (title, self.name))
        print('-' * LINE)
        print()
        print('    Rounds: %s' % self.rounds)
        print('    Warp:   %s' % self.warp)
        print('    Timer:  %s' % self.timer)
        print()
        if self.machine_details:
            print_machine_details(self.machine_details, indent='    ')
            print()

    def print_benchmark(self, hidenoise=0, limitnames=None):

        print('Test                          '
              '   minimum  average  operation  overhead')
        print('-' * LINE)
        tests = sorted(self.tests.items())
        total_min_time = 0.0
        total_avg_time = 0.0
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            print('%30s: %5.0fms %5.0fms %6.2fus %7.3fms' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   avg_time * MILLI_SECONDS,
                   op_avg * MICRO_SECONDS,
                   min_overhead * MILLI_SECONDS))
        print('-' * LINE)
        print('Totals:                        '
              ' %6.0fms %6.0fms' %
              (total_min_time * MILLI_SECONDS,
               total_avg_time * MILLI_SECONDS,
               ))
        print()

    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        # Check benchmark versions
        if compare_to.version != self.version:
            print('* Benchmark versions differ: '
                  'cannot compare this benchmark to "%s" !' %
                  compare_to.name)
            print()
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print('Test                          '
              '   minimum run-time        average  run-time')
        print('                          '
              '   this    other   diff    this    other   diff')
        print('-' * LINE)

        # Print test comparisons
        tests = sorted(self.tests.items())
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparable
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    # Hide differences below 10% (= 0.1) as noise
                    if hidenoise and abs(min_diff) < 0.1:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 0.1:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparable
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
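            # Warp normalization, by example: a run at warp 2 executes
            # half the rounds, so its raw times are scaled by .warp
            # before comparing. E.g. 100ms at warp=2 against 220ms at
            # warp=1 gives (0.100 * 2) / (0.220 * 1) - 1.0 ~= -0.09,
            # i.e. about 9% faster. (Numbers are illustrative only.)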
            print('%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff))
        print('-' * LINE)

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print('Totals:                       '
              '  %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
              (total_min_time * MILLI_SECONDS,
               (other_total_min_time * compare_to.warp/self.warp
                * MILLI_SECONDS),
               min_diff,
               total_avg_time * MILLI_SECONDS,
               (other_total_avg_time * compare_to.warp/self.warp
                * MILLI_SECONDS),
               avg_diff
               ))
        print()
        print('(this=%s, other=%s)' % (self.name,
                                       compare_to.name))
        print()

class PyBenchCmdline(Application):

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   time.process_time
   time.perf_counter
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""
    copyright = __copyright__

    def main(self):

        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print('* limiting test names to those matching "%s"' % \
                      limitnames)
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print('-' * LINE)
        print('PYBENCH %s' % __version__)
        print('-' * LINE)
        print('* using %s %s' % (
            getattr(platform, 'python_implementation', lambda:'Python')(),
            ' '.join(sys.version.split())))

        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print('* Python version doesn\'t support garbage collection')
            else:
                try:
                    gc.disable()
                except NotImplementedError:
                    print('* Python version doesn\'t support gc.disable')
                else:
                    print('* disabled garbage collection')

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            try:
                sys.setcheckinterval(value)
            except (AttributeError, NotImplementedError):
                print('* Python version doesn\'t support sys.setcheckinterval')
            else:
                print('* system check interval set to maximum: %s' % value)

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print('* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION)
        else:
            # Check that the timer function exists
            try:
                get_timer(timer)
            except TypeError:
                print("* Error: Unknown timer: %s" % timer)
                return

            print('* using timer: %s' % timer)
            if hasattr(time, 'get_clock_info'):
                info = time.get_clock_info(timer[5:])
                print('* timer: resolution=%s, implementation=%s'
                      % (info.resolution, info.implementation))

        print()

        if compare_to:
            try:
                f = open(compare_to, 'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason))
                compare_to = None

        if show_bench:
            try:
                f = open(show_bench, 'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason))
            print()
            return

        if reportfile:
            print('Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp))
            print()

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print()
            print('*** KeyboardInterrupt -- Aborting')
            print()
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                f = open(reportfile, 'wb')
                bench.name = reportfile
                pickle.dump(bench, f)
                f.close()
            except IOError as reason:
                print('* Error opening/writing reportfile %s: %s' % (
                    reportfile,
                    reason))
                print()

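# Typical invocations (see also the 'about' text above):
#
#   python pybench.py                   # run the suite, print results
#   python pybench.py -f run1.pybench   # run and save the results
#   python pybench.py -c run1.pybench   # run and compare to a saved run
#   python pybench.py -s run1.pybench   # show a saved run, then exit
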
if __name__ == '__main__':
    PyBenchCmdline()