@@ -5,7 +5,7 @@ use criterion::{
5
5
use rustpython_compiler:: Mode ;
6
6
use rustpython_vm:: { AsObject , Interpreter , PyResult , Settings } ;
7
7
use std:: {
8
- ffi , fs, io,
8
+ fs, io,
9
9
path:: { Path , PathBuf } ,
10
10
} ;
11
11
@@ -36,95 +36,68 @@ pub struct MicroBenchmark {
36
36
}
37
37
38
38
fn bench_cpython_code ( group : & mut BenchmarkGroup < WallTime > , bench : & MicroBenchmark ) {
39
- let gil = cpython:: Python :: acquire_gil ( ) ;
40
- let py = gil. python ( ) ;
41
-
42
- let setup_code = ffi:: CString :: new ( & * bench. setup ) . unwrap ( ) ;
43
- let setup_name = ffi:: CString :: new ( format ! ( "{}_setup" , bench. name) ) . unwrap ( ) ;
44
- let setup_code = cpy_compile_code ( py, & setup_code, & setup_name) . unwrap ( ) ;
45
-
46
- let code = ffi:: CString :: new ( & * bench. code ) . unwrap ( ) ;
47
- let name = ffi:: CString :: new ( & * bench. name ) . unwrap ( ) ;
48
- let code = cpy_compile_code ( py, & code, & name) . unwrap ( ) ;
49
-
50
- let bench_func = |( globals, locals) : & mut ( cpython:: PyDict , cpython:: PyDict ) | {
51
- let res = cpy_run_code ( py, & code, globals, locals) ;
52
- if let Err ( e) = res {
53
- e. print ( py) ;
54
- panic ! ( "Error running microbenchmark" )
55
- }
56
- } ;
57
-
58
- let bench_setup = |iterations| {
59
- let globals = cpython:: PyDict :: new ( py) ;
60
- // setup the __builtins__ attribute - no other way to do this (other than manually) as far
61
- // as I can tell
62
- let _ = py. run ( "" , Some ( & globals) , None ) ;
63
- let locals = cpython:: PyDict :: new ( py) ;
64
- if let Some ( idx) = iterations {
65
- globals. set_item ( py, "ITERATIONS" , idx) . unwrap ( ) ;
66
- }
39
+ pyo3:: Python :: with_gil ( |py| {
40
+ let setup_name = format ! ( "{}_setup" , bench. name) ;
41
+ let setup_code = cpy_compile_code ( py, & bench. setup , & setup_name) . unwrap ( ) ;
42
+
43
+ let code = cpy_compile_code ( py, & bench. code , & bench. name ) . unwrap ( ) ;
44
+
45
+ // Grab the exec function in advance so we don't have lookups in the hot code
46
+ let builtins =
47
+ pyo3:: types:: PyModule :: import ( py, "builtins" ) . expect ( "Failed to import builtins" ) ;
48
+ let exec = builtins. getattr ( "exec" ) . expect ( "no exec in builtins" ) ;
49
+
50
+ let bench_func = |( globals, locals) : & mut ( & pyo3:: types:: PyDict , & pyo3:: types:: PyDict ) | {
51
+ let res = exec. call ( ( code, & * globals, & * locals) , None ) ;
52
+ if let Err ( e) = res {
53
+ e. print ( py) ;
54
+ panic ! ( "Error running microbenchmark" )
55
+ }
56
+ } ;
67
57
68
- let res = cpy_run_code ( py, & setup_code, & globals, & locals) ;
69
- if let Err ( e) = res {
70
- e. print ( py) ;
71
- panic ! ( "Error running microbenchmark setup code" )
72
- }
73
- ( globals, locals)
74
- } ;
75
-
76
- if bench. iterate {
77
- for idx in ( 100 ..=1_000 ) . step_by ( 200 ) {
78
- group. throughput ( Throughput :: Elements ( idx as u64 ) ) ;
79
- group. bench_with_input ( BenchmarkId :: new ( "cpython" , & bench. name ) , & idx, |b, idx| {
80
- b. iter_batched_ref (
81
- || bench_setup ( Some ( * idx) ) ,
82
- bench_func,
83
- BatchSize :: LargeInput ,
84
- ) ;
85
- } ) ;
86
- }
87
- } else {
88
- group. bench_function ( BenchmarkId :: new ( "cpython" , & bench. name ) , move |b| {
89
- b. iter_batched_ref ( || bench_setup ( None ) , bench_func, BatchSize :: LargeInput ) ;
90
- } ) ;
91
- }
92
- }
58
+ let bench_setup = |iterations| {
59
+ let globals = pyo3:: types:: PyDict :: new ( py) ;
60
+ let locals = pyo3:: types:: PyDict :: new ( py) ;
61
+ if let Some ( idx) = iterations {
62
+ globals. set_item ( "ITERATIONS" , idx) . unwrap ( ) ;
63
+ }
93
64
94
- unsafe fn cpy_res (
95
- py : cpython:: Python < ' _ > ,
96
- x : * mut python3_sys:: PyObject ,
97
- ) -> cpython:: PyResult < cpython:: PyObject > {
98
- cpython:: PyObject :: from_owned_ptr_opt ( py, x) . ok_or_else ( || cpython:: PyErr :: fetch ( py) )
99
- }
65
+ let res = exec. call ( ( setup_code, & globals, & locals) , None ) ;
66
+ if let Err ( e) = res {
67
+ e. print ( py) ;
68
+ panic ! ( "Error running microbenchmark setup code" )
69
+ }
70
+ ( globals, locals)
71
+ } ;
100
72
101
- fn cpy_compile_code (
102
- py : cpython:: Python < ' _ > ,
103
- s : & ffi:: CStr ,
104
- fname : & ffi:: CStr ,
105
- ) -> cpython:: PyResult < cpython:: PyObject > {
106
- unsafe {
107
- let res =
108
- python3_sys:: Py_CompileString ( s. as_ptr ( ) , fname. as_ptr ( ) , python3_sys:: Py_file_input ) ;
109
- cpy_res ( py, res)
110
- }
73
+ if bench. iterate {
74
+ for idx in ( 100 ..=1_000 ) . step_by ( 200 ) {
75
+ group. throughput ( Throughput :: Elements ( idx as u64 ) ) ;
76
+ group. bench_with_input ( BenchmarkId :: new ( "cpython" , & bench. name ) , & idx, |b, idx| {
77
+ b. iter_batched_ref (
78
+ || bench_setup ( Some ( * idx) ) ,
79
+ bench_func,
80
+ BatchSize :: LargeInput ,
81
+ ) ;
82
+ } ) ;
83
+ }
84
+ } else {
85
+ group. bench_function ( BenchmarkId :: new ( "cpython" , & bench. name ) , move |b| {
86
+ b. iter_batched_ref ( || bench_setup ( None ) , bench_func, BatchSize :: LargeInput ) ;
87
+ } ) ;
88
+ }
89
+ } )
111
90
}
112
91
113
- fn cpy_run_code (
114
- py : cpython:: Python < ' _ > ,
115
- code : & cpython:: PyObject ,
116
- locals : & cpython:: PyDict ,
117
- globals : & cpython:: PyDict ,
118
- ) -> cpython:: PyResult < cpython:: PyObject > {
119
- use cpython:: PythonObject ;
120
- unsafe {
121
- let res = python3_sys:: PyEval_EvalCode (
122
- code. as_ptr ( ) ,
123
- locals. as_object ( ) . as_ptr ( ) ,
124
- globals. as_object ( ) . as_ptr ( ) ,
125
- ) ;
126
- cpy_res ( py, res)
127
- }
92
+ fn cpy_compile_code < ' a > (
93
+ py : pyo3:: Python < ' a > ,
94
+ code : & str ,
95
+ name : & str ,
96
+ ) -> pyo3:: PyResult < & ' a pyo3:: types:: PyCode > {
97
+ let builtins =
98
+ pyo3:: types:: PyModule :: import ( py, "builtins" ) . expect ( "Failed to import builtins" ) ;
99
+ let compile = builtins. getattr ( "compile" ) . expect ( "no compile in builtins" ) ;
100
+ compile. call1 ( ( code, name, "exec" ) ) ?. extract ( )
128
101
}
129
102
130
103
fn bench_rustpy_code ( group : & mut BenchmarkGroup < WallTime > , bench : & MicroBenchmark ) {