
Python code coverage for Tools/pybench/pybench.py

#!/usr/local/bin/python -O

""" A Python Benchmark Suite

"""
# Note: Please keep this module compatible with Python 2.6.
#
# Tests may include features in later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#

from __future__ import print_function

# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)
Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)

                   All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.

THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""

import sys
import time
import platform
from CommandLine import *

try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

# Version number; version history: see README file !
__version__ = '2.1'

### Constants

# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6

# Percent unit
PERCENT = 100

# Horizontal line length
LINE = 79

# Minimum test run-time
MIN_TEST_RUNTIME = 1e-3

# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20

# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20

# Allow skipping calibration ?
ALLOW_SKIPPING_CALIBRATION = 1

# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_PROCESS_TIME = 'time.process_time'
TIMER_TIME_PERF_COUNTER = 'time.perf_counter'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'

# Choose platform default timer
if hasattr(time, 'perf_counter'):
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_PERF_COUNTER
elif sys.platform[:3] == 'win':
    # On WinXP this has 2.5ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
    # On Linux this has 1ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME

# Print debug information ?
_debug = 0

### Helpers

def get_timer(timertype):

    if timertype == TIMER_TIME_TIME:
        return time.time
    elif timertype == TIMER_TIME_PROCESS_TIME:
        return time.process_time
    elif timertype == TIMER_TIME_PERF_COUNTER:
        return time.perf_counter
    elif timertype == TIMER_TIME_CLOCK:
        return time.clock
    elif timertype == TIMER_SYSTIMES_PROCESSTIME:
        import systimes
        return systimes.processtime
    else:
        raise TypeError('unknown timer type: %s' % timertype)

def get_machine_details():

    if _debug:
        print('Getting machine details...')
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    # XXX this is now always UCS4, maybe replace it with 'PEP393' in 3.3+?
    if sys.maxunicode == 65535:
        # UCS2 build (standard)
        unitype = 'UCS2'
    else:
        # UCS4 build (most recent Linux distros)
        unitype = 'UCS4'
    bits, linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'implementation': getattr(platform, 'python_implementation',
                                  lambda:'n/a')(),
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unitype,
        'bits': bits,
        }

def print_machine_details(d, indent=''):

    l = ['Machine Details:',
         '   Platform ID:    %s' % d.get('platform', 'n/a'),
         '   Processor:      %s' % d.get('processor', 'n/a'),
         '',
         'Python:',
         '   Implementation: %s' % d.get('implementation', 'n/a'),
         '   Executable:     %s' % d.get('executable', 'n/a'),
         '   Version:        %s' % d.get('python', 'n/a'),
         '   Compiler:       %s' % d.get('compiler', 'n/a'),
         '   Bits:           %s' % d.get('bits', 'n/a'),
         '   Build:          %s (#%s)' % (d.get('builddate', 'n/a'),
                                          d.get('buildno', 'n/a')),
         '   Unicode:        %s' % d.get('unicode', 'n/a'),
         ]
    joiner = '\n' + indent
    print(indent + joiner.join(l) + '\n')

### Test baseclass

class Test:

165n/a """ All test must have this class as baseclass. It provides
166n/a the necessary interface to the benchmark machinery.
167n/a
168n/a The tests must set .rounds to a value high enough to let the
169n/a test run between 20-50 seconds. This is needed because
170n/a clock()-timing only gives rather inaccurate values (on Linux,
171n/a for example, it is accurate to a few hundreths of a
172n/a second). If you don't want to wait that long, use a warp
173n/a factor larger than 1.
174n/a
175n/a It is also important to set the .operations variable to a
176n/a value representing the number of "virtual operations" done per
177n/a call of .run().
178n/a
179n/a If you change a test in some way, don't forget to increase
180n/a its version number.
181n/a
182n/a """
183n/a
184n/a ### Instance variables that each test should override
185n/a
186n/a # Version number of the test as float (x.yy); this is important
187n/a # for comparisons of benchmark runs - tests with unequal version
188n/a # number will not get compared.
189n/a version = 2.1
190n/a
191n/a # The number of abstract operations done in each round of the
192n/a # test. An operation is the basic unit of what you want to
193n/a # measure. The benchmark will output the amount of run-time per
194n/a # operation. Note that in order to raise the measured timings
195n/a # significantly above noise level, it is often required to repeat
196n/a # sets of operations more than once per test round. The measured
197n/a # overhead per test round should be less than 1 second.
198n/a operations = 1
199n/a
200n/a # Number of rounds to execute per test run. This should be
201n/a # adjusted to a figure that results in a test run-time of between
202n/a # 1-2 seconds.
203n/a rounds = 100000
204n/a
205n/a ### Internal variables
206n/a
207n/a # Mark this class as implementing a test
208n/a is_a_test = 1
209n/a
210n/a # Last timing: (real, run, overhead)
211n/a last_timing = (0.0, 0.0, 0.0)
212n/a
213n/a # Warp factor to use for this test
214n/a warp = 1
215n/a
216n/a # Number of calibration runs to use
217n/a calibration_runs = CALIBRATION_RUNS
218n/a
219n/a # List of calibration timings
220n/a overhead_times = None
221n/a
222n/a # List of test run timings
223n/a times = []
224n/a
225n/a # Timer used for the benchmark
226n/a timer = TIMER_PLATFORM_DEFAULT
227n/a
228n/a def __init__(self, warp=None, calibration_runs=None, timer=None):
229n/a
230n/a # Set parameters
231n/a if warp is not None:
232n/a self.rounds = int(self.rounds / warp)
233n/a if self.rounds == 0:
234n/a raise ValueError('warp factor set too high')
235n/a self.warp = warp
236n/a if calibration_runs is not None:
237n/a if (not ALLOW_SKIPPING_CALIBRATION and
238n/a calibration_runs < 1):
239n/a raise ValueError('at least one calibration run is required')
240n/a self.calibration_runs = calibration_runs
241n/a if timer is not None:
242n/a self.timer = timer
243n/a
244n/a # Init variables
245n/a self.times = []
246n/a self.overhead_times = []
247n/a
248n/a # We want these to be in the instance dict, so that pickle
249n/a # saves them
250n/a self.version = self.version
251n/a self.operations = self.operations
252n/a self.rounds = self.rounds
253n/a
254n/a def get_timer(self):
255n/a
256n/a """ Return the timer function to use for the test.
257n/a
258n/a """
259n/a return get_timer(self.timer)
260n/a
261n/a def compatible(self, other):
262n/a
263n/a """ Return 1/0 depending on whether the test is compatible
264n/a with the other Test instance or not.
265n/a
266n/a """
267n/a if self.version != other.version:
268n/a return 0
269n/a if self.rounds != other.rounds:
270n/a return 0
271n/a return 1
272n/a
273n/a def calibrate_test(self):
274n/a
275n/a if self.calibration_runs == 0:
276n/a self.overhead_times = [0.0]
277n/a return
278n/a
279n/a calibrate = self.calibrate
280n/a timer = self.get_timer()
281n/a calibration_loops = range(CALIBRATION_LOOPS)
282n/a
283n/a # Time the calibration loop overhead
284n/a prep_times = []
285n/a for i in range(self.calibration_runs):
286n/a t = timer()
287n/a for i in calibration_loops:
288n/a pass
289n/a t = timer() - t
290n/a prep_times.append(t / CALIBRATION_LOOPS)
291n/a min_prep_time = min(prep_times)
292n/a if _debug:
293n/a print()
294n/a print('Calib. prep time = %.6fms' % (
295n/a min_prep_time * MILLI_SECONDS))
296n/a
297n/a # Time the calibration runs (doing CALIBRATION_LOOPS loops of
298n/a # .calibrate() method calls each)
299n/a for i in range(self.calibration_runs):
300n/a t = timer()
301n/a for i in calibration_loops:
302n/a calibrate()
303n/a t = timer() - t
304n/a self.overhead_times.append(t / CALIBRATION_LOOPS
305n/a - min_prep_time)
306n/a
307n/a # Check the measured times
308n/a min_overhead = min(self.overhead_times)
309n/a max_overhead = max(self.overhead_times)
310n/a if _debug:
311n/a print('Calib. overhead time = %.6fms' % (
312n/a min_overhead * MILLI_SECONDS))
313n/a if min_overhead < 0.0:
314n/a raise ValueError('calibration setup did not work')
315n/a if max_overhead - min_overhead > 0.1:
316n/a raise ValueError(
317n/a 'overhead calibration timing range too inaccurate: '
318n/a '%r - %r' % (min_overhead, max_overhead))
319n/a
320n/a def run(self):
321n/a
322n/a """ Run the test in two phases: first calibrate, then
323n/a do the actual test. Be careful to keep the calibration
324n/a timing low w/r to the test timing.
325n/a
326n/a """
327n/a test = self.test
328n/a timer = self.get_timer()
329n/a
330n/a # Get calibration
331n/a min_overhead = min(self.overhead_times)
332n/a
333n/a # Test run
334n/a t = timer()
335n/a test()
336n/a t = timer() - t
        if t < MIN_TEST_RUNTIME:
            raise ValueError('warp factor too high: '
                             'test times are < 1ms')
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            setup and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.

        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds executing
            self.operations number of operations each.

        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)

        """
        runs = len(self.times)
        if runs == 0:
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = sum(self.times)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead

### Load Setup

# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.

import Setup

### Benchmark base class

class Benchmark:

    # Name of the benchmark
    name = ''

    # Number of benchmark rounds to run
    rounds = 1

    # Warp factor used to run the tests
    warp = 1                    # Warp factor

    # Average benchmark round time
    roundtime = 0

    # Benchmark version number as float x.yy
    version = 2.1

    # Produce verbose output ?
    verbose = 0

    # Dictionary with the machine details
    machine_details = None

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    # Number of calibration runs to use (class-level default, since
    # .compatible() and .load_tests() read this attribute)
    calibration_runs = CALIBRATION_RUNS

    def __init__(self, name, verbose=None, timer=None, warp=None,
                 calibration_runs=None):

        if name:
            self.name = name
        else:
            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                        (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose
        if timer is not None:
            self.timer = timer
        if warp is not None:
            self.warp = warp
        if calibration_runs is not None:
            self.calibration_runs = calibration_runs

        # Init vars
        self.tests = {}
        if _debug:
            print('Getting machine details...')
        self.machine_details = get_machine_details()

        # Make .version an instance attribute to have it saved in the
        # Benchmark pickle
        self.version = self.version

    def get_timer(self):

        """ Return the timer function to use for the test.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the benchmark is
            compatible with the other Benchmark instance or not.

        """
        if self.version != other.version:
            return 0
        if (self.machine_details == other.machine_details and
            self.timer != other.timer):
            return 0
        if (self.calibration_runs == 0 and
            other.calibration_runs != 0):
            return 0
        if (self.calibration_runs != 0 and
            other.calibration_runs == 0):
            return 0
        return 1

    def load_tests(self, setupmod, limitnames=None):

        # Add tests
        if self.verbose:
            print('Searching for tests ...')
            print('--------------------------------------')
        for testclass in setupmod.__dict__.values():
            if not hasattr(testclass, 'is_a_test'):
                continue
            name = testclass.__name__
            if name == 'Test':
                continue
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            self.tests[name] = testclass(
                warp=self.warp,
                calibration_runs=self.calibration_runs,
                timer=self.timer)
        l = sorted(self.tests)
        if self.verbose:
            for name in l:
                print('  %s' % name)
            print('--------------------------------------')
            print('  %i tests found' % len(l))
            print()

    def calibrate(self):

        print('Calibrating tests. Please wait...', end=' ')
        sys.stdout.flush()
        if self.verbose:
            print()
            print()
            print('Test                              min      max')
            print('-' * LINE)
        tests = sorted(self.tests.items())
        for i in range(len(tests)):
            name, test = tests[i]
            test.calibrate_test()
            if self.verbose:
                print('%30s:  %6.3fms  %6.3fms' % \
                      (name,
                       min(test.overhead_times) * MILLI_SECONDS,
                       max(test.overhead_times) * MILLI_SECONDS))
        if self.verbose:
            print()
            print('Done with the calibration.')
        else:
            print('done.')
        print()

    def run(self):

        tests = sorted(self.tests.items())
        timer = self.get_timer()
        print('Running %i round(s) of the suite at warp factor %i:' % \
              (self.rounds, self.warp))
        print()
        self.roundtimes = []
        for i in range(self.rounds):
            if self.verbose:
                print(' Round %-25i  effective   absolute  overhead' % (i+1))
            total_eff_time = 0.0
            for j in range(len(tests)):
                name, test = tests[j]
                if self.verbose:
                    print('%30s:' % name, end=' ')
                test.run()
                (eff_time, abs_time, min_overhead) = test.last_timing
                total_eff_time = total_eff_time + eff_time
                if self.verbose:
                    print('    %5.0fms    %5.0fms %7.3fms' % \
                          (eff_time * MILLI_SECONDS,
                           abs_time * MILLI_SECONDS,
                           min_overhead * MILLI_SECONDS))
            self.roundtimes.append(total_eff_time)
            if self.verbose:
                print('                   '
                      '               ------------------------------')
                print('                   '
                      '     Totals:                    %6.0fms' %
                      (total_eff_time * MILLI_SECONDS))
                print()
            else:
                print('* Round %i done in %.3f seconds.' % (i+1,
                                                            total_eff_time))
        print()

    def stat(self):

        """ Return benchmark run statistics as tuple:

            (minimum round time,
             average round time,
             maximum round time)

            XXX Currently not used, since the benchmark does test
                statistics across all rounds.

        """
        runs = len(self.roundtimes)
        if runs == 0:
            return 0.0, 0.0, 0.0
        min_time = min(self.roundtimes)
        total_time = sum(self.roundtimes)
        avg_time = total_time / float(runs)
        max_time = max(self.roundtimes)
        return (min_time, avg_time, max_time)

    def print_header(self, title='Benchmark'):

        print('-' * LINE)
        print('%s: %s' % (title, self.name))
        print('-' * LINE)
        print()
        print('    Rounds: %s' % self.rounds)
        print('    Warp:   %s' % self.warp)
        print('    Timer:  %s' % self.timer)
        print()
        if self.machine_details:
            print_machine_details(self.machine_details, indent='    ')
            print()

    def print_benchmark(self, hidenoise=0, limitnames=None):

        print('Test                          '
              '   minimum  average  operation  overhead')
        print('-' * LINE)
        tests = sorted(self.tests.items())
        total_min_time = 0.0
        total_avg_time = 0.0
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            print('%30s:  %5.0fms  %5.0fms  %6.2fus  %7.3fms' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   avg_time * MILLI_SECONDS,
                   op_avg * MICRO_SECONDS,
                   min_overhead * MILLI_SECONDS))
        print('-' * LINE)
        print('Totals:                        '
              '   %6.0fms %6.0fms' %
              (total_min_time * MILLI_SECONDS,
               total_avg_time * MILLI_SECONDS,
               ))
        print()

    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        # Check benchmark versions
        if compare_to.version != self.version:
            print('* Benchmark versions differ: '
                  'cannot compare this benchmark to "%s" !' %
                  compare_to.name)
            print()
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print('Test                          '
              '   minimum run-time        average  run-time')
        print('                              '
              '   this    other   diff    this    other   diff')
        print('-' * LINE)

        # Print test comparisons
        tests = sorted(self.tests.items())
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparable
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    if hidenoise and abs(min_diff) < 10.0:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 10.0:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparable
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
            print('%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff))
        print('-' * LINE)

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print('Totals:                       '
              ' %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
              (total_min_time * MILLI_SECONDS,
               (other_total_min_time * compare_to.warp/self.warp
                * MILLI_SECONDS),
               min_diff,
               total_avg_time * MILLI_SECONDS,
               (other_total_avg_time * compare_to.warp/self.warp
                * MILLI_SECONDS),
               avg_diff
               ))
        print()
        print('(this=%s, other=%s)' % (self.name,
                                       compare_to.name))
        print()

class PyBenchCmdline(Application):

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""
    copyright = __copyright__

    def main(self):

        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print('* limiting test names to ones with substring "%s"' % \
                      limitnames)
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print('-' * LINE)
        print('PYBENCH %s' % __version__)
        print('-' * LINE)
        print('* using %s %s' % (
            getattr(platform, 'python_implementation', lambda:'Python')(),
            ' '.join(sys.version.split())))

        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print('* Python version doesn\'t support garbage collection')
            else:
                try:
                    gc.disable()
                except NotImplementedError:
                    print('* Python version doesn\'t support gc.disable')
                else:
                    print('* disabled garbage collection')

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            try:
                sys.setcheckinterval(value)
            except (AttributeError, NotImplementedError):
                print('* Python version doesn\'t support sys.setcheckinterval')
            else:
                print('* system check interval set to maximum: %s' % value)

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print('* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION)
        else:
            # Check that the clock function does exist
            try:
                get_timer(timer)
            except TypeError:
                print("* Error: Unknown timer: %s" % timer)
                return

            print('* using timer: %s' % timer)
            if hasattr(time, 'get_clock_info'):
                info = time.get_clock_info(timer[5:])
                print('* timer: resolution=%s, implementation=%s'
                      % (info.resolution, info.implementation))

        print()

        if compare_to:
            try:
                f = open(compare_to,'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason))
                compare_to = None

        if show_bench:
            try:
                f = open(show_bench,'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason))
            print()
            return

        if reportfile:
            print('Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp))
            print()

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print()
            print('*** KeyboardInterrupt -- Aborting')
            print()
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                f = open(reportfile,'wb')
                bench.name = reportfile
                pickle.dump(bench,f)
                f.close()
            except IOError as reason:
                print('* Error opening/writing reportfile %s: %s' % (
                    reportfile,
                    reason))
                print()

if __name__ == '__main__':
    PyBenchCmdline()
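
The Test class above defines the interface that the test packages imported through Setup.py implement. As a rough, hypothetical sketch (the class name SimpleListAppend and its loop bodies are illustrative and not part of the shipped Setup.py suite), a minimal pybench-style test might look like this: .test() performs self.operations units of work in each of self.rounds iterations, while .calibrate() repeats only the surrounding loop overhead so the benchmark can subtract it.

# Hypothetical example only: a minimal pybench-style test written against
# the Test interface above, the way the classes pulled in via Setup.py are.
from pybench import Test

class SimpleListAppend(Test):

    version = 2.1        # bump this whenever the test body changes
    operations = 5       # appends per round; must match the loop body below
    rounds = 80000       # tuned so one run takes on the order of a second

    def test(self):
        # The measured work: self.rounds iterations of self.operations appends.
        for i in range(self.rounds):
            l = []
            l.append(1)
            l.append(2)
            l.append(3)
            l.append(4)
            l.append(5)

    def calibrate(self):
        # The same loop without the measured operations; pybench subtracts
        # this overhead from the timings recorded for .test().
        for i in range(self.rounds):
            l = []

If such a class lives in a module that Setup.py imports, Benchmark.load_tests() would pick it up automatically through its inherited is_a_test marker.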