From 607aa20e0c0a2c7f473bf7531c907bca616c60f8 Mon Sep 17 00:00:00 2001
From: Pablo Galindo
Date: Mon, 7 Nov 2022 14:25:35 +0000
Subject: [PATCH] Add benchmark to measure gc collection of a big chain of
 cycles

Signed-off-by: Pablo Galindo
---
 .../benchmarks/bm_gc_collect/pyproject.toml   |  9 +++
 .../benchmarks/bm_gc_collect/run_benchmark.py | 65 ++++++++++++++++++++
 2 files changed, 74 insertions(+)
 create mode 100644 pyperformance/data-files/benchmarks/bm_gc_collect/pyproject.toml
 create mode 100644 pyperformance/data-files/benchmarks/bm_gc_collect/run_benchmark.py

diff --git a/pyperformance/data-files/benchmarks/bm_gc_collect/pyproject.toml b/pyperformance/data-files/benchmarks/bm_gc_collect/pyproject.toml
new file mode 100644
index 00000000..cb0c4038
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_gc_collect/pyproject.toml
@@ -0,0 +1,9 @@
+[project]
+name = "pyperformance_bm_gc_collect"
+requires-python = ">=3.8"
+dependencies = ["pyperf"]
+urls = {repository = "https://github.com/python/pyperformance"}
+dynamic = ["version"]
+
+[tool.pyperformance]
+name = "gc_collect"
diff --git a/pyperformance/data-files/benchmarks/bm_gc_collect/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_gc_collect/run_benchmark.py
new file mode 100644
index 00000000..db0e4153
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_gc_collect/run_benchmark.py
@@ -0,0 +1,65 @@
+import pyperf
+import gc
+
+CYCLES = 100
+LINKS = 20
+
+
+class Node:
+    def __init__(self):
+        self.next = None
+        self.prev = None
+
+    def link_next(self, succ):
+        # Doubly link: self -> succ, and back-pointer succ.prev -> self.
+        self.next = succ
+        self.next.prev = self
+
+
+def create_cycle(node, n_links):
+    """Create a cycle of n_links nodes, starting with node."""
+
+    if n_links == 0:
+        return
+
+    current = node
+    for _ in range(n_links):
+        next_node = Node()
+        current.link_next(next_node)
+        current = next_node
+
+    current.link_next(node)
+
+
+def create_gc_cycles(n_cycles, n_links):
+    """Create n_cycles cycles n_links+1 nodes each."""
+
+    cycles = []
+    for _ in range(n_cycles):
+        node = Node()
+        cycles.append(node)
+        create_cycle(node, n_links)
+    return cycles
+
+
+def benchmark_collection(loops, cycles, links):
+    """Time gc.collect() on freshly unreachable reference cycles."""
+    total_time = 0
+    for _ in range(loops):
+        gc.collect()
+        all_cycles = create_gc_cycles(cycles, links)
+
+        # Main loop to measure
+        del all_cycles
+        t0 = pyperf.perf_counter()
+        collected = gc.collect()
+        total_time += pyperf.perf_counter() - t0
+
+        assert collected >= cycles * (links + 1)
+
+    return total_time
+
+
+if __name__ == "__main__":
+    runner = pyperf.Runner()
+    runner.metadata["description"] = "GC link benchmark"
+    runner.bench_time_func("create_gc_cycles", benchmark_collection, CYCLES, LINKS)