"""Benchmark some basic import use-cases.

The assumption is made that this benchmark is run in a fresh interpreter and
thus has no external changes made to import-related attributes in sys.

"""
---|
import decimal
import imp
import importlib
import importlib.machinery
import importlib.util
import json
import os
import py_compile
import sys
import tabnanny
import timeit
import types

from test.test_importlib import util
---|
18 | n/a | |
---|
19 | n/a | |
---|
def bench(name, cleanup=lambda: None, *, seconds=1, repeat=3):
    """Yield imports/second for importing *name*, once per repeat.

    Each round keeps importing until at least *seconds* seconds of timed
    work have accumulated, running *cleanup* after every single import.
    """
    timer = timeit.Timer("__import__({!r})".format(name))
    for _ in range(repeat):
        elapsed = 0
        executions = 0
        while elapsed < seconds:
            try:
                elapsed += timer.timeit(1)
            finally:
                # Always undo the import's side effects, even if timing failed.
                cleanup()
                executions += 1
        # The final iteration overshot the time budget; don't count it.
        if elapsed > seconds:
            executions -= 1
        yield executions // seconds
---|
39 | n/a | |
---|
40 | n/a | def from_cache(seconds, repeat): |
---|
41 | n/a | """sys.modules""" |
---|
42 | n/a | name = '<benchmark import>' |
---|
43 | n/a | module = imp.new_module(name) |
---|
44 | n/a | module.__file__ = '<test>' |
---|
45 | n/a | module.__package__ = '' |
---|
46 | n/a | with util.uncache(name): |
---|
47 | n/a | sys.modules[name] = module |
---|
48 | n/a | yield from bench(name, repeat=repeat, seconds=seconds) |
---|
49 | n/a | |
---|
50 | n/a | |
---|
def builtin_mod(seconds, repeat):
    """Built-in module"""
    # Time importing a built-in (C-level) module; 'errno' is always present.
    name = 'errno'
    # Start from a cold cache so every timed import does real work.
    sys.modules.pop(name, None)
    # Relying on built-in importer being implicit.
    yield from bench(name, lambda: sys.modules.pop(name),
                     seconds=seconds, repeat=repeat)
---|
59 | n/a | |
---|
60 | n/a | |
---|
def source_wo_bytecode(seconds, repeat):
    """Source w/o bytecode: small"""
    # Benchmark importing a tiny source module while bytecode writing is
    # disabled, so every timed import recompiles from source.
    sys.dont_write_bytecode = True
    try:
        name = '__importlib_test_benchmark__'
        # Clears out sys.modules and puts an entry at the front of sys.path.
        with util.create_modules(name) as mapping:
            # importlib.util.cache_from_source replaces the deprecated
            # imp.cache_from_source (imp was removed in Python 3.12).
            assert not os.path.exists(
                importlib.util.cache_from_source(mapping[name]))
            sys.meta_path.append(importlib.machinery.PathFinder)
            loader = (importlib.machinery.SourceFileLoader,
                      importlib.machinery.SOURCE_SUFFIXES)
            sys.path_hooks.append(importlib.machinery.FileFinder.path_hook(loader))
            yield from bench(name, lambda: sys.modules.pop(name), repeat=repeat,
                             seconds=seconds)
    finally:
        # Always restore the default so later benchmarks can write bytecode.
        sys.dont_write_bytecode = False
---|
77 | n/a | |
---|
78 | n/a | |
---|
79 | n/a | def _wo_bytecode(module): |
---|
80 | n/a | name = module.__name__ |
---|
81 | n/a | def benchmark_wo_bytecode(seconds, repeat): |
---|
82 | n/a | """Source w/o bytecode: {}""" |
---|
83 | n/a | bytecode_path = imp.cache_from_source(module.__file__) |
---|
84 | n/a | if os.path.exists(bytecode_path): |
---|
85 | n/a | os.unlink(bytecode_path) |
---|
86 | n/a | sys.dont_write_bytecode = True |
---|
87 | n/a | try: |
---|
88 | n/a | yield from bench(name, lambda: sys.modules.pop(name), |
---|
89 | n/a | repeat=repeat, seconds=seconds) |
---|
90 | n/a | finally: |
---|
91 | n/a | sys.dont_write_bytecode = False |
---|
92 | n/a | |
---|
93 | n/a | benchmark_wo_bytecode.__doc__ = benchmark_wo_bytecode.__doc__.format(name) |
---|
94 | n/a | return benchmark_wo_bytecode |
---|
95 | n/a | |
---|
# Concrete "source w/o bytecode" benchmarks for two stdlib modules.
tabnanny_wo_bytecode = _wo_bytecode(tabnanny)
decimal_wo_bytecode = _wo_bytecode(decimal)
---|
98 | n/a | |
---|
99 | n/a | |
---|
def source_writing_bytecode(seconds, repeat):
    """Source writing bytecode: small"""
    # Each timed import compiles the source AND writes the .pyc; cleanup()
    # deletes the bytecode again so the next import repeats the full work.
    assert not sys.dont_write_bytecode
    name = '__importlib_test_benchmark__'
    with util.create_modules(name) as mapping:
        sys.meta_path.append(importlib.machinery.PathFinder)
        loader = (importlib.machinery.SourceFileLoader,
                  importlib.machinery.SOURCE_SUFFIXES)
        sys.path_hooks.append(importlib.machinery.FileFinder.path_hook(loader))
        def cleanup():
            sys.modules.pop(name)
            # importlib.util.cache_from_source replaces the deprecated
            # imp.cache_from_source (imp was removed in Python 3.12).
            os.unlink(importlib.util.cache_from_source(mapping[name]))
        for result in bench(name, cleanup, repeat=repeat, seconds=seconds):
            assert not os.path.exists(
                importlib.util.cache_from_source(mapping[name]))
            yield result
---|
115 | n/a | |
---|
116 | n/a | |
---|
117 | n/a | def _writing_bytecode(module): |
---|
118 | n/a | name = module.__name__ |
---|
119 | n/a | def writing_bytecode_benchmark(seconds, repeat): |
---|
120 | n/a | """Source writing bytecode: {}""" |
---|
121 | n/a | assert not sys.dont_write_bytecode |
---|
122 | n/a | def cleanup(): |
---|
123 | n/a | sys.modules.pop(name) |
---|
124 | n/a | os.unlink(imp.cache_from_source(module.__file__)) |
---|
125 | n/a | yield from bench(name, cleanup, repeat=repeat, seconds=seconds) |
---|
126 | n/a | |
---|
127 | n/a | writing_bytecode_benchmark.__doc__ = ( |
---|
128 | n/a | writing_bytecode_benchmark.__doc__.format(name)) |
---|
129 | n/a | return writing_bytecode_benchmark |
---|
130 | n/a | |
---|
# Concrete "source writing bytecode" benchmarks for two stdlib modules.
tabnanny_writing_bytecode = _writing_bytecode(tabnanny)
decimal_writing_bytecode = _writing_bytecode(decimal)
---|
133 | n/a | |
---|
134 | n/a | |
---|
def source_using_bytecode(seconds, repeat):
    """Source w/ bytecode: small"""
    # Pre-compile the module once so every timed import loads the .pyc.
    name = '__importlib_test_benchmark__'
    with util.create_modules(name) as mapping:
        sys.meta_path.append(importlib.machinery.PathFinder)
        loader = (importlib.machinery.SourceFileLoader,
                  importlib.machinery.SOURCE_SUFFIXES)
        sys.path_hooks.append(importlib.machinery.FileFinder.path_hook(loader))
        py_compile.compile(mapping[name])
        # importlib.util.cache_from_source replaces the deprecated
        # imp.cache_from_source (imp was removed in Python 3.12).
        assert os.path.exists(importlib.util.cache_from_source(mapping[name]))
        yield from bench(name, lambda: sys.modules.pop(name), repeat=repeat,
                         seconds=seconds)
---|
147 | n/a | |
---|
148 | n/a | |
---|
149 | n/a | def _using_bytecode(module): |
---|
150 | n/a | name = module.__name__ |
---|
151 | n/a | def using_bytecode_benchmark(seconds, repeat): |
---|
152 | n/a | """Source w/ bytecode: {}""" |
---|
153 | n/a | py_compile.compile(module.__file__) |
---|
154 | n/a | yield from bench(name, lambda: sys.modules.pop(name), repeat=repeat, |
---|
155 | n/a | seconds=seconds) |
---|
156 | n/a | |
---|
157 | n/a | using_bytecode_benchmark.__doc__ = ( |
---|
158 | n/a | using_bytecode_benchmark.__doc__.format(name)) |
---|
159 | n/a | return using_bytecode_benchmark |
---|
160 | n/a | |
---|
# Concrete "source w/ bytecode" benchmarks for two stdlib modules.
tabnanny_using_bytecode = _using_bytecode(tabnanny)
decimal_using_bytecode = _using_bytecode(decimal)
---|
163 | n/a | |
---|
164 | n/a | |
---|
def main(import_, options):
    """Run every (or the selected) benchmark with *import_* as __import__.

    Prints results, optionally compares them against JSON previously saved
    via --write (read from options.source_file), and optionally dumps the
    new results as JSON to options.dest_file.
    """
    if options.source_file:
        with options.source_file:
            prev_results = json.load(options.source_file)
    else:
        prev_results = {}
    # NOTE(review): assumes __builtins__ is the builtins module (true when
    # this file runs as a script); in an imported module it can be a dict.
    __builtins__.__import__ = import_
    benchmarks = (from_cache, builtin_mod,
                  source_writing_bytecode,
                  source_wo_bytecode, source_using_bytecode,
                  tabnanny_writing_bytecode,
                  tabnanny_wo_bytecode, tabnanny_using_bytecode,
                  decimal_writing_bytecode,
                  decimal_wo_bytecode, decimal_using_bytecode,
                 )
    if options.benchmark:
        # Benchmarks are selected by their docstring (the display name).
        for b in benchmarks:
            if b.__doc__ == options.benchmark:
                benchmarks = [b]
                break
        else:
            # BUG FIX: file=sys.stderr was previously passed to str.format()
            # (where it was silently ignored) instead of print(), so the
            # error message went to stdout.
            print('Unknown benchmark: {!r}'.format(options.benchmark),
                  file=sys.stderr)
            sys.exit(1)
    seconds = 1
    seconds_plural = 's' if seconds > 1 else ''
    repeat = 3
    header = ('Measuring imports/second over {} second{}, best out of {}\n'
              'Entire benchmark run should take about {} seconds\n'
              'Using {!r} as __import__\n')
    print(header.format(seconds, seconds_plural, repeat,
                        len(benchmarks) * seconds * repeat, __import__))
    new_results = {}
    for benchmark in benchmarks:
        print(benchmark.__doc__, "[", end=' ')
        sys.stdout.flush()
        results = []
        for result in benchmark(seconds=seconds, repeat=repeat):
            results.append(result)
            print(result, end=' ')
            sys.stdout.flush()
        # No benchmark may leave bytecode writing disabled for the next one.
        assert not sys.dont_write_bytecode
        print("]", "best is", format(max(results), ',d'))
        new_results[benchmark.__doc__] = results
    if prev_results:
        print('\n\nComparing new vs. old\n')
        for benchmark in benchmarks:
            benchmark_name = benchmark.__doc__
            old_result = max(prev_results[benchmark_name])
            new_result = max(new_results[benchmark_name])
            result = '{:,d} vs. {:,d} ({:%})'.format(new_result,
                                                     old_result,
                                                     new_result/old_result)
            print(benchmark_name, ':', result)
    if options.dest_file:
        with options.dest_file:
            json.dump(new_results, options.dest_file, indent=2)
---|
222 | n/a | |
---|
223 | n/a | |
---|
if __name__ == '__main__':
    import argparse

    # Command-line entry point: choose the __import__ implementation to
    # benchmark, then hand off to main().
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--builtin', dest='builtin',
                        action='store_true', default=False,
                        help="use the built-in __import__")
    parser.add_argument('-r', '--read', dest='source_file',
                        type=argparse.FileType('r'),
                        help='file to read benchmark data from to compare '
                             'against')
    parser.add_argument('-w', '--write', dest='dest_file',
                        type=argparse.FileType('w'),
                        help='file to write benchmark data to')
    parser.add_argument('--benchmark', dest='benchmark',
                        help='specific benchmark to run')
    options = parser.parse_args()
    # Default to importlib's pure-Python __import__ unless -b was given.
    import_ = __import__ if options.builtin else importlib.__import__

    main(import_, options)
---|