Add a benchmark to measure gc traversal (#244) · python/pyperformance@bd386ad
1 parent 948cfce commit bd386ad

2 files changed: +46 -0 lines changed
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+[project]
+name = "pyperformance_bm_gc_traversal"
+requires-python = ">=3.8"
+dependencies = ["pyperf"]
+urls = {repository = "https://github.com/python/pyperformance"}
+dynamic = ["version"]
+
+[tool.pyperformance]
+name = "gc_traversal"
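For context: the [project] table is standard Python packaging metadata, while the [tool.pyperformance] table is what registers the benchmark under the short name "gc_traversal". Below is a minimal sketch of reading both tables; it is an illustration, not part of the commit, and assumes Python 3.11+ for the stdlib tomllib module and that the file above is saved as pyproject.toml.

    import tomllib  # stdlib TOML parser, available since Python 3.11

    with open("pyproject.toml", "rb") as f:  # tomllib requires binary mode
        meta = tomllib.load(f)

    print(meta["project"]["name"])                # pyperformance_bm_gc_traversal
    print(meta["tool"]["pyperformance"]["name"])  # gc_traversal

Once the benchmark is part of pyperformance's benchmark set, that short name is how it is selected, e.g. pyperformance run --benchmarks gc_traversal.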
Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
+import pyperf
+import gc
+
+N_LEVELS = 1000
+
+
+def create_recursive_containers(n_levels):
+    # Build a chain of lists: the list at level n holds n references
+    # to the list from the previous level, so the total number of
+    # references grows quadratically with n_levels.
+    current_list = []
+    for n in range(n_levels):
+        new_list = [None] * n
+        for index in range(n):
+            new_list[index] = current_list
+        current_list = new_list
+
+    return current_list
+
+
+def benchmark_collection(loops, n_levels):
+    total_time = 0
+    # all_cycles keeps every list reachable, so collections free nothing
+    # and the timed call below measures pure traversal cost.
+    all_cycles = create_recursive_containers(n_levels)
+    for _ in range(loops):
+        gc.collect()
+        # Main loop to measure
+        t0 = pyperf.perf_counter()
+        collected = gc.collect()
+        total_time += pyperf.perf_counter() - t0
+
+        assert collected == 0
+
+    return total_time
+
+
+if __name__ == "__main__":
+    runner = pyperf.Runner()
+    runner.metadata["description"] = "GC traversal benchmark"
+    runner.bench_time_func("gc_traversal", benchmark_collection, N_LEVELS)
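What the timed gc.collect() call actually walks: create_recursive_containers builds a chain of lists in which the list at level n holds n references to the list from level n - 1, so the collector must traverse on the order of N_LEVELS * (N_LEVELS - 1) / 2 references per pass, while all_cycles keeps every list reachable and the assert confirms nothing is freed. A small self-contained sketch of the reference counts follows; it is an illustration, not part of the commit, and uses [current_list] * n as an equivalent shortcut for the benchmark's index loop.

    import gc

    def create_recursive_containers(n_levels):
        # Same shape as the benchmark: level n holds n references
        # to the list built at level n - 1.
        current_list = []
        for n in range(n_levels):
            current_list = [current_list] * n
        return current_list

    top = create_recursive_containers(1000)
    print(len(gc.get_referents(top)))  # 999: direct references out of the top list
    print(sum(range(1000)))            # 499500: references inside the whole structure

Since pyperf.Runner parses the standard pyperf command-line options, the script can also be run on its own; for example, passing -o results.json stores the timings as JSON for later analysis with pyperf's tooling.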
