Add stacked pool benchmark by lplewa · Pull Request #1347 · oneapi-src/unified-memory-framework · GitHub
[go: up one dir, main page]

Skip to content

Add stacked pool benchmark #1347

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jun 4, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 33 additions & 0 deletions benchmark/benchmark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,39 @@ UMF_BENCHMARK_REGISTER_F(peak_alloc_benchmark, scalable_pool_uniform)

#endif

// stacked pool benchmarks
// These instantiate the existing malloc/free and peak-allocation benchmark
// fixtures over pool_stacked_allocator<os_provider>, which spreads
// allocations across the hierarchy of pools built by disjoint_pool_stack.

// multiple_malloc_free_benchmark with a fixed allocation size.
UMF_BENCHMARK_TEMPLATE_DEFINE(multiple_malloc_free_benchmark,
disjoint_pool_stack_fix, fixed_alloc_size,
pool_stacked_allocator<os_provider>);

UMF_BENCHMARK_REGISTER_F(multiple_malloc_free_benchmark,
disjoint_pool_stack_fix)
->Apply(&default_multiple_alloc_fix_size)
->Apply(&multithreaded);

// multiple_malloc_free_benchmark with uniformly distributed allocation sizes.
UMF_BENCHMARK_TEMPLATE_DEFINE(multiple_malloc_free_benchmark,
disjoint_pool_stack_uniform, uniform_alloc_size,
pool_stacked_allocator<os_provider>);
UMF_BENCHMARK_REGISTER_F(multiple_malloc_free_benchmark,
disjoint_pool_stack_uniform)
->Apply(&default_multiple_alloc_uniform_size)
->Apply(&multithreaded);

// peak_alloc_benchmark with a fixed allocation size.
UMF_BENCHMARK_TEMPLATE_DEFINE(peak_alloc_benchmark, disjoint_pool_stack_fix,
fixed_alloc_size,
pool_stacked_allocator<os_provider>);
UMF_BENCHMARK_REGISTER_F(peak_alloc_benchmark, disjoint_pool_stack_fix)
->Apply(&default_multiple_alloc_fix_size)
->Apply(&multithreaded);

// peak_alloc_benchmark with uniformly distributed allocation sizes.
UMF_BENCHMARK_TEMPLATE_DEFINE(peak_alloc_benchmark, disjoint_pool_stack_uniform,
uniform_alloc_size,
pool_stacked_allocator<os_provider>);
UMF_BENCHMARK_REGISTER_F(peak_alloc_benchmark, disjoint_pool_stack_uniform)
->Apply(&default_multiple_alloc_uniform_size)
->Apply(&multithreaded);

//BENCHMARK_MAIN();
int main(int argc, char **argv) {
if (initAffinityMask()) {
Expand Down
54 changes: 47 additions & 7 deletions benchmark/benchmark.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@
#include <list>
#include <malloc.h>
#include <random>
#include <stdexcept>

#include <benchmark/benchmark.h>
#include <umf/memory_pool.h>
Expand Down Expand Up @@ -209,32 +210,71 @@ class provider_allocator : public allocator_interface {
// TODO: assert Pool to be a pool_interface<provider_interface>.
template <typename Pool> class pool_allocator : public allocator_interface {
public:
unsigned SetUp(::benchmark::State &state, unsigned argPos) override {
virtual unsigned SetUp(::benchmark::State &state,
unsigned argPos) override {
pool.SetUp(state);
return argPos;
}

void preBench(::benchmark::State &state) override { pool.preBench(state); }
void postBench(::benchmark::State &state) override {
virtual void preBench(::benchmark::State &state) override {
pool.preBench(state);
}
virtual void postBench(::benchmark::State &state) override {
pool.postBench(state);
}

void TearDown(::benchmark::State &state) override { pool.TearDown(state); }
virtual void TearDown(::benchmark::State &state) override {
pool.TearDown(state);
}

void *benchAlloc(size_t size) override {
virtual void *benchAlloc(size_t size) override {
return umfPoolMalloc(pool.pool, size);
}

void benchFree(void *ptr, [[maybe_unused]] size_t size) override {
virtual void benchFree(void *ptr, [[maybe_unused]] size_t size) override {
umfPoolFree(pool.pool, ptr);
}

static std::string name() { return Pool::name(); }

private:
protected:
Pool pool;
};

// Allocator that distributes allocations round-robin over the hierarchy of
// pools built by disjoint_pool_stack. Fragmentation statistics are
// deliberately not collected for stacked pools.
template <typename Provider>
class pool_stacked_allocator
    : public pool_allocator<disjoint_pool_stack<Provider>> {
    using base = pool_allocator<disjoint_pool_stack<Provider>>;

  public:
    void preBench([[maybe_unused]] ::benchmark::State &state) override {
        // we do not measure fragmentation for stack pools
    }
    void postBench([[maybe_unused]] ::benchmark::State &state) override {
        // we do not measure fragmentation for stack pools
    }

    void *benchAlloc(size_t size) override {
        static thread_local int counter = 0;
        // Re-read the pool count on every call. A function-local static
        // (as previously used) would latch the size observed by the first
        // fixture instance forever, going stale for later benchmark
        // instances — and modulo by 0 would be UB if SetUp had failed.
        const size_t pool_number = base::pool.pools.size();
        if (pool_number == 0) {
            return nullptr;
        }
        // Stacked pools have limited space, so we might need a few
        // tries to find one with free space. Keep the counter bounded to
        // avoid eventual signed-integer overflow (UB).
        auto retry = pool_number;
        while (retry--) {
            counter = (counter + 1) % static_cast<int>(pool_number);
            void *ptr = umfPoolMalloc(base::pool.pools[counter], size);
            if (ptr != nullptr) {
                return ptr;
            }
        }
        return nullptr; // all pools exhausted for this size
    }

    void benchFree(void *ptr, [[maybe_unused]] size_t size) override {
        // The pointer may originate from any of the stacked pools, so use
        // the global umfFree, which resolves the owning pool from `ptr`.
        umfFree(ptr);
    }
};

template <typename Size, typename Allocator>
struct benchmark_interface : public benchmark::Fixture {
int parseArgs(::benchmark::State &state, int argPos) {
Expand Down
136 changes: 136 additions & 0 deletions benchmark/benchmark_umf.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -317,6 +317,142 @@ struct disjoint_pool : public pool_interface<Provider> {
}
};

// Benchmarks the tracking provider by creating a big number of stacked
// pools (2^levels - 1, i.e. 127 for levels == 7): a 2GB root pool, then
// levels - 1 generations in which every pool donates two slices of its own
// memory to back two child pools through fixed-memory providers.
template <typename Provider>
struct disjoint_pool_stack : public disjoint_pool<Provider> {
    using base = disjoint_pool<Provider>;

    std::vector<umf_memory_provider_handle_t> providers;
    std::vector<umf_memory_pool_handle_t> pools;
    // Allocations carved from parent pools that back child providers;
    // released in TearDown.
    std::vector<void *> pool_ptrs;

    static constexpr size_t firstPoolSize = 2ull * 1024 * 1024 * 1024; // 2GB
    static constexpr size_t levels = 7;

    void SetUp(::benchmark::State &state) {
        base::provider.SetUp(state);
        if (state.thread_index() != 0) {
            return; // only the main thread builds the hierarchy
        }

        // Take ownership of the provider created by the base class.
        providers.push_back(base::provider.provider);
        base::provider.provider = nullptr;

        auto params = base::getParams(state);
        umf_memory_pool_handle_t rootPool = nullptr;
        auto umf_result = umfPoolCreate(base::getOps(state), providers[0],
                                        params.get(), 0, &rootPool);
        if (umf_result != UMF_RESULT_SUCCESS) {
            state.SkipWithError("umfPoolCreate() failed");
            return;
        }

        pools.push_back(rootPool); // root pool

        // One reusable params object; the real memory region is set per
        // child via umfFixedMemoryProviderParamsSetMemory below.
        umf_fixed_memory_provider_params_handle_t params_fixed = nullptr;
        umf_result = umfFixedMemoryProviderParamsCreate(
            &params_fixed, (void *)0x1, 0x1); // dummy address/size
        if (umf_result != UMF_RESULT_SUCCESS) {
            state.SkipWithError("umfFixedMemoryProviderParamsCreate() failed");
            return;
        }

        size_t poolSize = firstPoolSize;
        size_t level_start = 0;
        size_t level_pools = 1;

        for (size_t level = 1; level < levels; ++level) {
            // Split each pool into 3 parts: two slices for the children and
            // the third kept for other allocations from this pool.
            poolSize /= 3;
            size_t new_level_pools = level_pools * 2;

            for (size_t parent_idx = 0; parent_idx < level_pools;
                 ++parent_idx) {
                umf_memory_pool_handle_t parent_pool =
                    pools[level_start + parent_idx];

                for (int child = 0; child < 2; ++child) {
                    void *ptr = umfPoolMalloc(parent_pool, poolSize);
                    if (!ptr) {
                        state.SkipWithError("umfPoolMalloc() failed");
                        umfFixedMemoryProviderParamsDestroy(params_fixed);
                        return;
                    }
                    pool_ptrs.push_back(ptr);

                    umf_result = umfFixedMemoryProviderParamsSetMemory(
                        params_fixed, ptr, poolSize);
                    if (umf_result != UMF_RESULT_SUCCESS) {
                        state.SkipWithError(
                            "umfFixedMemoryProviderParamsSetMemory() failed");
                        umfFixedMemoryProviderParamsDestroy(params_fixed);
                        return;
                    }
                    umf_memory_provider_handle_t prov;
                    umf_result = umfMemoryProviderCreate(
                        umfFixedMemoryProviderOps(), params_fixed, &prov);
                    if (umf_result != UMF_RESULT_SUCCESS) {
                        state.SkipWithError("umfMemoryProviderCreate() failed");
                        umfFixedMemoryProviderParamsDestroy(params_fixed);
                        return;
                    }
                    providers.push_back(prov);

                    umf_memory_pool_handle_t newPool;
                    umf_result = umfPoolCreate(base::getOps(state), prov,
                                               params.get(), 0, &newPool);
                    if (umf_result != UMF_RESULT_SUCCESS) {
                        state.SkipWithError("umfPoolCreate() failed");
                        umfFixedMemoryProviderParamsDestroy(params_fixed);
                        return;
                    }

                    pools.push_back(newPool);
                }
            }

            level_start += level_pools;
            level_pools = new_level_pools;
        }

        umfFixedMemoryProviderParamsDestroy(params_fixed);
    }

    void TearDown(::benchmark::State &state) {
        if (state.thread_index() != 0) {
            return;
        }

        // NOTE(review): teardown assumes SetUp built the full hierarchy of
        // 2^levels - 1 pools; a partially-failed SetUp (the benchmark was
        // already skipped) is not unwound here.
        size_t pool_index = pools.size();
        size_t provider_index = providers.size();
        size_t ptr_index = pool_ptrs.size();

        // Go from last level to first (excluding level 0, root)
        for (int level = levels - 1; level > 0; --level) {
            size_t level_pools = 1ull << level; // 2^level pools

            // Destroy this level's pools first...
            for (size_t i = 0; i < level_pools; ++i) {
                --pool_index;
                umfPoolDestroy(pools[pool_index]);
            }

            // ...then their providers, and return the backing slices
            // to the parent pools.
            for (size_t i = 0; i < level_pools; ++i) {
                --provider_index;
                umfMemoryProviderDestroy(providers[provider_index]);

                --ptr_index;
                void *ptr = pool_ptrs[ptr_index];
                if (ptr) {
                    umfFree(ptr);
                }
            }
        }

        // Root pool and provider
        umfPoolDestroy(pools[0]);
        umfMemoryProviderDestroy(providers[0]);

        pools.clear();
        providers.clear();
        pool_ptrs.clear();

        base::TearDown(state);
    }

    static std::string name() {
        return "disjoint_pool_stacked<" + Provider::name() + ">";
    }
};

#ifdef UMF_POOL_JEMALLOC_ENABLED
template <typename Provider>
struct jemalloc_pool : public pool_interface<Provider> {
Expand Down
1 change: 1 addition & 0 deletions src/libumf.def
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,7 @@ EXPORTS
umfFixedMemoryProviderOps
umfFixedMemoryProviderParamsCreate
umfFixedMemoryProviderParamsDestroy
umfFixedMemoryProviderParamsSetMemory
umfLevelZeroMemoryProviderParamsSetFreePolicy
umfLevelZeroMemoryProviderParamsSetDeviceOrdinal
; Added in UMF_0.12
Expand Down
1 change: 1 addition & 0 deletions src/libumf.map
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,7 @@ UMF_0.11 {
umfFixedMemoryProviderOps;
umfFixedMemoryProviderParamsCreate;
umfFixedMemoryProviderParamsDestroy;
umfFixedMemoryProviderParamsSetMemory;
umfLevelZeroMemoryProviderParamsSetFreePolicy;
umfLevelZeroMemoryProviderParamsSetDeviceOrdinal;
} UMF_0.10;
Expand Down
Loading
Loading
0