@@ -157,10 +157,10 @@ def using_bytecode_benchmark(seconds, repeat):
157157decimal_using_bytecode = _using_bytecode (decimal )
158158
159159
160- def main (import_ , filename = None , benchmark = None ):
161- if filename and os . path . exists ( filename ) :
162- with open ( filename , 'r' ) as file :
163- prev_results = json .load (file )
160+ def main (import_ , options ):
161+ if options . source_file :
162+ with options . source_file :
163+ prev_results = json .load (options . source_file )
164164 else :
165165 prev_results = {}
166166 __builtins__ .__import__ = import_
@@ -172,13 +172,14 @@ def main(import_, filename=None, benchmark=None):
172172 decimal_writing_bytecode ,
173173 decimal_wo_bytecode , decimal_using_bytecode ,
174174 )
175- if benchmark :
175+ if options . benchmark :
176176 for b in benchmarks :
177- if b .__doc__ == benchmark :
177+ if b .__doc__ == options . benchmark :
178178 benchmarks = [b ]
179179 break
180180 else :
181- print ('Unknown benchmark: {!r}' .format (benchmark , file = sys .stderr ))
181+ print ('Unknown benchmark: {!r}' .format (options .benchmark ,
182+ file = sys .stderr ))
182183 sys .exit (1 )
183184 seconds = 1
184185 seconds_plural = 's' if seconds > 1 else ''
@@ -200,22 +201,19 @@ def main(import_, filename=None, benchmark=None):
200201 assert not sys .dont_write_bytecode
201202 print ("]" , "best is" , format (max (results ), ',d' ))
202203 new_results [benchmark .__doc__ ] = results
203- prev_results [import_ .__module__ ] = new_results
204- if 'importlib._bootstrap' in prev_results and 'builtins' in prev_results :
205- print ('\n \n Comparing importlib vs. __import__\n ' )
206- importlib_results = prev_results ['importlib._bootstrap' ]
207- builtins_results = prev_results ['builtins' ]
204+ if prev_results :
205+ print ('\n \n Comparing new vs. old\n ' )
208206 for benchmark in benchmarks :
209207 benchmark_name = benchmark .__doc__
210- importlib_result = max (importlib_results [benchmark_name ])
211- builtins_result = max (builtins_results [benchmark_name ])
212- result = '{:,d} vs. {:,d} ({:%})' .format (importlib_result ,
213- builtins_result ,
214- importlib_result / builtins_result )
208+ old_result = max (prev_results [benchmark_name ])
209+ new_result = max (new_results [benchmark_name ])
210+ result = '{:,d} vs. {:,d} ({:%})' .format (new_result ,
211+ old_result ,
212+ new_result / old_result )
215213 print (benchmark_name , ':' , result )
216- if filename :
217- with open ( filename , 'w' ) as file :
218- json .dump (prev_results , file , indent = 2 )
214+ if options . dest_file :
215+ with options . dest_file :
216+ json .dump (new_results , options . dest_file , indent = 2 )
219217
220218
221219if __name__ == '__main__' :
@@ -224,18 +222,18 @@ def main(import_, filename=None, benchmark=None):
224222 parser = argparse .ArgumentParser ()
225223 parser .add_argument ('-b' , '--builtin' , dest = 'builtin' , action = 'store_true' ,
226224 default = False , help = "use the built-in __import__" )
227- parser .add_argument ('-f' , '--file' , dest = 'filename' , default = None ,
228- help = 'file to read/write results from/to'
229- '(incompatible w/ --benchmark)' )
225+ parser .add_argument ('-r' , '--read' , dest = 'source_file' ,
226+ type = argparse .FileType ('r' ),
227+ help = 'file to read benchmark data from to compare '
228+ 'against' )
229+ parser .add_argument ('-w' , '--write' , dest = 'dest_file' ,
230+ type = argparse .FileType ('w' ),
231+ help = 'file to write benchmark data to' )
230232 parser .add_argument ('--benchmark' , dest = 'benchmark' ,
231- help = 'specific benchmark to run '
232- '(incompatible w/ --file)' )
233+ help = 'specific benchmark to run' )
233234 options = parser .parse_args ()
234- if options .filename and options .benchmark :
235- print ('Cannot specify a benchmark *and* read/write results' )
236- sys .exit (1 )
237235 import_ = __import__
238236 if not options .builtin :
239237 import_ = importlib .__import__
240238
241- main (import_ , options . filename , options . benchmark )
239+ main (import_ , options )