8000 [POC] add umfDisjointPoolTrimMemory by bratpiorka · Pull Request #1318 · oneapi-src/unified-memory-framework · GitHub
[POC] add umfDisjointPoolTrimMemory #1318


Draft · wants to merge 1 commit into main
6 changes: 6 additions & 0 deletions include/umf/pools/pool_disjoint.h
@@ -106,6 +106,12 @@ umf_result_t
umfDisjointPoolParamsSetName(umf_disjoint_pool_params_handle_t hParams,
const char *name);

/// @brief Tries to release unused pooled memory back to the provider.
/// @param pool handle to the memory pool.
/// @param minSlabsToKeep minimum number of slabs to keep.
/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
umf_result_t umfDisjointPoolTrimMemory(void *pool, size_t minSlabsToKeep);
Contributor:
Can we do it thru ctl?
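For context, a rough sketch of what the ctl route could look like instead of a dedicated entry point. Only the umfCtlGet/umfCtlSet/umfCtlExec exports are visible in this PR's libumf.def; the ctl node path and the umfCtlExec argument layout used below are assumptions, not something this PR defines:

// Hypothetical ctl-based trim; the node path and the argument layout are
// assumptions, not part of this PR.
#include <umf/experimental/ctl.h> // assumed header location of the ctl API
#include <umf/memory_pool.h>

static umf_result_t trim_via_ctl(umf_memory_pool_handle_t pool) {
    size_t min_slabs_to_keep = 1;
    // Assumed addressing: select the pool by handle and execute a
    // "trim_memory" node with the slab count as its argument.
    return umfCtlExec("umf.pool.by_handle.disjoint.trim_memory", (void *)pool,
                      &min_slabs_to_keep);
}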

Contributor @vinser52 (May 15, 2025):
How is this API going to be used? And why is it disjoint-pool specific?

Contributor @vinser52 (May 19, 2025):
Since we are trying to stabilize the API before the 1.0 release, we should review such changes at the UMF tech meeting first.
I mean, a draft PR is OK, but let's discuss before merging.

Contributor, PR author (bratpiorka):
@vinser52 I definitely wanted to discuss this first. Here, I just wanted to unblock @ldorau in his work and provide the required functionality.
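To illustrate the intended usage asked about above, a minimal sketch mirroring the test added in this PR: in this POC the object passed to umfDisjointPoolTrimMemory is the pool returned by the disjoint pool's ops->initialize(), not an umf_memory_pool_handle_t, and the provider/params setup (elided here) is the same as in the test below.

// Sketch only, mirroring the test added below; provider and params are
// assumed to be created elsewhere.
#include <umf/memory_pool_ops.h>
#include <umf/memory_provider.h>
#include <umf/pools/pool_disjoint.h>

static void trim_example(umf_memory_provider_handle_t provider,
                         umf_disjoint_pool_params_handle_t params) {
    const umf_memory_pool_ops_t *ops = umfDisjointPoolOps();

    void *pool = NULL; // the disjoint pool object, as in the test below
    if (ops->initialize(provider, params, &pool) != UMF_RESULT_SUCCESS) {
        return;
    }

    void *ptr = ops->malloc(pool, 64);
    ops->free(pool, ptr);

    // Keep at most one empty slab per bucket; release the rest back to the
    // memory provider.
    umfDisjointPoolTrimMemory(pool, 1);

    ops->finalize(pool);
}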


const umf_memory_pool_ops_t *umfDisjointPoolOps(void);

#ifdef __cplusplus
1 change: 1 addition & 0 deletions src/libumf.def
@@ -139,6 +139,7 @@ EXPORTS
umfCtlExec
umfCtlGet
umfCtlSet
umfDisjointPoolTrimMemory
umfJemallocPoolParamsCreate
umfJemallocPoolParamsDestroy
umfJemallocPoolParamsSetNumArenas
1 change: 1 addition & 0 deletions src/libumf.map
@@ -139,6 +139,7 @@ UMF_0.12 {
umfCtlExec;
umfCtlGet;
umfCtlSet;
umfDisjointPoolTrimMemory;
umfJemallocPoolParamsCreate;
umfJemallocPoolParamsDestroy;
umfJemallocPoolParamsSetNumArenas;
39 changes: 39 additions & 0 deletions src/pool/pool_disjoint.c
@@ -941,6 +941,45 @@ static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = {
.get_last_allocation_error = disjoint_pool_get_last_allocation_error,
};

umf_result_t umfDisjointPoolTrimMemory(void *pool, size_t minSlabsToKeep) {
if (pool == NULL) {
return UMF_RESULT_ERROR_INVALID_ARGUMENT;
}
disjoint_pool_t *hPool = (disjoint_pool_t *)pool;

for (size_t i = 0; i < hPool->buckets_num; i++) {
bucket_t *bucket = hPool->buckets[i];
utils_mutex_lock(&bucket->bucket_lock);

int skip = (int)minSlabsToKeep;
// remove empty slabs from the pool
slab_list_item_t *it = NULL, *tmp = NULL;
LL_FOREACH_SAFE(bucket->available_slabs, it, tmp) {
slab_t *slab = it->val;
if (slab->num_chunks_allocated == 0) {
// skip first minSlabsToKeep slabs from each bucket
if (--skip >= 0) {
continue;
}

// remove slab
pool_unregister_slab(hPool, slab);
DL_DELETE(bucket->available_slabs, it);
assert(bucket->available_slabs_num > 0);
bucket->available_slabs_num--;
destroy_slab(slab);

// update stats
bucket_update_stats(bucket, 0, -1);
}
}

utils_mutex_unlock(&bucket->bucket_lock);
}

return UMF_RESULT_SUCCESS;
}

const umf_memory_pool_ops_t *umfDisjointPoolOps(void) {
return &UMF_DISJOINT_POOL_OPS;
}
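As implemented, minSlabsToKeep is applied per bucket rather than pool-wide: the skip counter is reinitialized for every bucket, so each bucket keeps up to minSlabsToKeep of its empty slabs, and the remaining empty slabs are unregistered, removed from available_slabs, and destroyed. With minSlabsToKeep = 1 and a single bucket holding four empty slabs (the scenario in the test below), three slabs are released and one stays pooled.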
79 changes: 79 additions & 0 deletions test/pools/disjoint_pool.cpp
@@ -269,6 +269,85 @@ TEST_F(test, sharedLimits) {
EXPECT_EQ(MaxSize / SlabMinSize * 2, numFrees);
}

TEST_F(test, disjointPoolTrim) {
struct memory_provider : public umf_test::provider_base_t {
umf_result_t alloc(size_t size, size_t alignment, void **ptr) noexcept {
*ptr = umf_ba_global_aligned_alloc(size, alignment);
return UMF_RESULT_SUCCESS;
}

umf_result_t free(void *ptr, [[maybe_unused]] size_t size) noexcept {
umf_ba_global_free(ptr);
return UMF_RESULT_SUCCESS;
}
};

umf_memory_provider_ops_t provider_ops =
umf_test::providerMakeCOps<memory_provider, void>();

auto providerUnique =
wrapProviderUnique(createProviderChecked(&provider_ops, nullptr));

umf_memory_provider_handle_t provider_handle;
provider_handle = providerUnique.get();

umf_disjoint_pool_params_handle_t params =
(umf_disjoint_pool_params_handle_t)defaultDisjointPoolConfig();
params->pool_trace = 3;
// Set the slab min size to 64 so allocating 64 bytes will use the whole
// slab.
params->slab_min_size = 64;
params->capacity = 4;

// in "internals" test we use ops interface to directly manipulate the pool
// structure
const umf_memory_pool_ops_t *ops = umfDisjointPoolOps();
EXPECT_NE(ops, nullptr);

disjoint_pool_t *pool;
umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool);
EXPECT_EQ(res, UMF_RESULT_SUCCESS);
EXPECT_NE(pool, nullptr);

// do 4 allocs, then free all of them
size_t size = 64;
void *ptrs[4] = {0};
ptrs[0] = ops->malloc(pool, size);
EXPECT_NE(ptrs[0], nullptr);
ptrs[1] = ops->malloc(pool, size);
EXPECT_NE(ptrs[1], nullptr);
ptrs[2] = ops->malloc(pool, size);
EXPECT_NE(ptrs[2], nullptr);
ptrs[3] = ops->malloc(pool, size);
EXPECT_NE(ptrs[3], nullptr);

ops->free(pool, ptrs[0]);
ops->free(pool, ptrs[1]);
ops->free(pool, ptrs[2]);
ops->free(pool, ptrs[3]);

// Because we set the slab min size to 64, each allocation should go to the
// separate slab. Additionally, because we set the capacity to 4, all slabs
// should still be in the pool available for new allocations.
EXPECT_EQ(pool->buckets[0]->available_slabs_num, 4);
EXPECT_EQ(pool->buckets[0]->curr_slabs_in_use, 0);
EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, 4);

// Trim memory - leave only one slab
umfDisjointPoolTrimMemory(pool, 1);
EXPECT_EQ(pool->buckets[0]->available_slabs_num, 1);
EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, 1);

// Trim the rest of memory
umfDisjointPoolTrimMemory(pool, 0);
EXPECT_EQ(pool->buckets[0]->available_slabs_num, 0);
EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, 0);

ops->finalize(pool);
res = umfDisjointPoolParamsDestroy(params);
EXPECT_EQ(res, UMF_RESULT_SUCCESS);
}

TEST_F(test, disjointPoolNullParams) {
umf_result_t res = umfDisjointPoolParamsCreate(nullptr);
EXPECT_EQ(res, UMF_RESULT_ERROR_INVALID_ARGUMENT);