diff --git a/include/umf/pools/pool_disjoint.h b/include/umf/pools/pool_disjoint.h
index 640184c97..bf64748ae 100644
--- a/include/umf/pools/pool_disjoint.h
+++ b/include/umf/pools/pool_disjoint.h
@@ -106,6 +106,12 @@ umf_result_t
 umfDisjointPoolParamsSetName(umf_disjoint_pool_params_handle_t hParams,
                              const char *name);
 
+/// @brief Tries to release unused pooled memory back to the provider.
+/// @param pool pointer to the disjoint pool (the ops-level pool object).
+/// @param minSlabsToKeep minimum number of empty slabs to keep per bucket.
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
+umf_result_t umfDisjointPoolTrimMemory(void *pool, size_t minSlabsToKeep);
+
 const umf_memory_pool_ops_t *umfDisjointPoolOps(void);
 
 #ifdef __cplusplus
diff --git a/src/libumf.def b/src/libumf.def
index 925beab3b..403d62cf6 100644
--- a/src/libumf.def
+++ b/src/libumf.def
@@ -139,6 +139,7 @@ EXPORTS
     umfCtlExec
     umfCtlGet
     umfCtlSet
+    umfDisjointPoolTrimMemory
     umfJemallocPoolParamsCreate
     umfJemallocPoolParamsDestroy
     umfJemallocPoolParamsSetNumArenas
diff --git a/src/libumf.map b/src/libumf.map
index a9a94b3a3..10231067c 100644
--- a/src/libumf.map
+++ b/src/libumf.map
@@ -139,6 +139,7 @@ UMF_0.12 {
     umfCtlExec;
     umfCtlGet;
     umfCtlSet;
+    umfDisjointPoolTrimMemory;
     umfJemallocPoolParamsCreate;
     umfJemallocPoolParamsDestroy;
     umfJemallocPoolParamsSetNumArenas;
diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c
index 8ce9f70dd..87574cb4b 100644
--- a/src/pool/pool_disjoint.c
+++ b/src/pool/pool_disjoint.c
@@ -941,6 +941,47 @@ static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = {
     .get_last_allocation_error = disjoint_pool_get_last_allocation_error,
 };
 
+umf_result_t umfDisjointPoolTrimMemory(void *pool, size_t minSlabsToKeep) {
+    if (pool == NULL) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+    disjoint_pool_t *hPool = (disjoint_pool_t *)pool;
+
+    for (size_t i = 0; i < hPool->buckets_num; i++) {
+        bucket_t *bucket = hPool->buckets[i];
+        utils_mutex_lock(&bucket->bucket_lock);
+
+        // empty slabs to retain in this bucket; size_t avoids a narrowing cast
+        size_t keep = minSlabsToKeep;
+        // remove empty slabs from the pool
+        slab_list_item_t *it = NULL, *tmp = NULL;
+        DL_FOREACH_SAFE(bucket->available_slabs, it, tmp) {
+            slab_t *slab = it->val;
+            if (slab->num_chunks_allocated == 0) {
+                // skip first minSlabsToKeep empty slabs from each bucket
+                if (keep > 0) {
+                    keep--;
+                    continue;
+                }
+
+                // remove slab
+                pool_unregister_slab(hPool, slab);
+                DL_DELETE(bucket->available_slabs, it);
+                assert(bucket->available_slabs_num > 0);
+                bucket->available_slabs_num--;
+                destroy_slab(slab);
+
+                // update stats
+                bucket_update_stats(bucket, 0, -1);
+            }
+        }
+
+        utils_mutex_unlock(&bucket->bucket_lock);
+    }
+
+    return UMF_RESULT_SUCCESS;
+}
+
 const umf_memory_pool_ops_t *umfDisjointPoolOps(void) {
     return &UMF_DISJOINT_POOL_OPS;
 }
diff --git a/test/pools/disjoint_pool.cpp b/test/pools/disjoint_pool.cpp
index f64e61931..d4bb5e1aa 100644
--- a/test/pools/disjoint_pool.cpp
+++ b/test/pools/disjoint_pool.cpp
@@ -269,6 +269,85 @@ TEST_F(test, sharedLimits) {
     EXPECT_EQ(MaxSize / SlabMinSize * 2, numFrees);
 }
 
+TEST_F(test, disjointPoolTrim) {
+    struct memory_provider : public umf_test::provider_base_t {
+        umf_result_t alloc(size_t size, size_t alignment, void **ptr) noexcept {
+            *ptr = umf_ba_global_aligned_alloc(size, alignment);
+            return UMF_RESULT_SUCCESS;
+        }
+
+        umf_result_t free(void *ptr, [[maybe_unused]] size_t size) noexcept {
+            umf_ba_global_free(ptr);
+            return UMF_RESULT_SUCCESS;
+        }
+    };
+
+    umf_memory_provider_ops_t provider_ops =
+        umf_test::providerMakeCOps<memory_provider, void>();
+
+    auto providerUnique =
+        wrapProviderUnique(createProviderChecked(&provider_ops, nullptr));
+
+    umf_memory_provider_handle_t provider_handle;
+    provider_handle = providerUnique.get();
+
+    umf_disjoint_pool_params_handle_t params =
+        (umf_disjoint_pool_params_handle_t)defaultDisjointPoolConfig();
+    params->pool_trace = 3;
+    // Set the slab min size to 64 so allocating 64 bytes will use the whole
+    // slab.
+    params->slab_min_size = 64;
+    params->capacity = 4;
+
+    // in "internals" test we use ops interface to directly manipulate the pool
+    // structure
+    const umf_memory_pool_ops_t *ops = umfDisjointPoolOps();
+    EXPECT_NE(ops, nullptr);
+
+    disjoint_pool_t *pool;
+    umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool);
+    EXPECT_EQ(res, UMF_RESULT_SUCCESS);
+    EXPECT_NE(pool, nullptr);
+
+    // do 4 allocs, then free all of them
+    size_t size = 64;
+    void *ptrs[4] = {0};
+    ptrs[0] = ops->malloc(pool, size);
+    EXPECT_NE(ptrs[0], nullptr);
+    ptrs[1] = ops->malloc(pool, size);
+    EXPECT_NE(ptrs[1], nullptr);
+    ptrs[2] = ops->malloc(pool, size);
+    EXPECT_NE(ptrs[2], nullptr);
+    ptrs[3] = ops->malloc(pool, size);
+    EXPECT_NE(ptrs[3], nullptr);
+
+    ops->free(pool, ptrs[0]);
+    ops->free(pool, ptrs[1]);
+    ops->free(pool, ptrs[2]);
+    ops->free(pool, ptrs[3]);
+
+    // Because we set the slab min size to 64, each allocation should go to the
+    // separate slab. Additionally, because we set the capacity to 4, all slabs
+    // should still be in the pool available for new allocations.
+    EXPECT_EQ(pool->buckets[0]->available_slabs_num, 4);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_use, 0);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, 4);
+
+    // Trim memory - leave only one slab
+    EXPECT_EQ(umfDisjointPoolTrimMemory(pool, 1), UMF_RESULT_SUCCESS);
+    EXPECT_EQ(pool->buckets[0]->available_slabs_num, 1);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, 1);
+
+    // Trim the rest of memory
+    EXPECT_EQ(umfDisjointPoolTrimMemory(pool, 0), UMF_RESULT_SUCCESS);
+    EXPECT_EQ(pool->buckets[0]->available_slabs_num, 0);
+    EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, 0);
+
+    ops->finalize(pool);
+    res = umfDisjointPoolParamsDestroy(params);
+    EXPECT_EQ(res, UMF_RESULT_SUCCESS);
+}
+
 TEST_F(test, disjointPoolNullParams) {
     umf_result_t res = umfDisjointPoolParamsCreate(nullptr);
     EXPECT_EQ(res, UMF_RESULT_ERROR_INVALID_ARGUMENT);