Optimize small buffers allocator

- On pool exhaustion, do not wait for GPU completion if allocations are
still in use; allocate a new pool instead
- Reuse an existing pool if its allocations are not in use

Related-To: NEO-7769

Signed-off-by: Igor Venevtsev <igor.venevtsev@intel.com>
Author: Igor Venevtsev
Date: 2023-03-14 14:25:47 +00:00
Committed-by: Compute-Runtime-Automation
Parent: e645f58b65
Commit: f57ff2913c
5 changed files with 286 additions and 170 deletions
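
The strategy in the commit message condenses to the control flow below. This is a toy model for orientation, not NEO code: the names mirror the patch, but the types are stand-ins, and real pools carve sub-buffers out of a storage Buffer instead of counting chunks.

    #include <cstddef>
    #include <vector>

    struct Pool {
        static constexpr std::size_t capacity = 8;
        std::size_t used = 0;
        bool gpuBusy = false;             // stands in for MemoryManager::allocInUse()

        bool allocate() {
            if (used == capacity) {
                return false;             // exhausted
            }
            ++used;
            return true;
        }

        bool drain() {                    // mirrors BufferPool::drain()
            if (gpuBusy) {
                return false;             // allocations in use: cannot reuse this pool
            }
            used = 0;                     // rebuild the chunk heap in one step
            return true;
        }
    };

    struct Allocator {
        std::vector<Pool> pools;

        bool allocateFromPools() {
            for (auto &pool : pools) {
                if (pool.allocate()) {
                    return true;
                }
            }
            return false;
        }

        void drainOrAddNewPool() {        // mirrors drainOrAddNewBufferPool()
            for (auto &pool : pools) {
                if (pool.drain()) {
                    return;               // reuse an idle pool...
                }
            }
            pools.emplace_back();         // ...otherwise grow; never wait for the GPU
        }

        bool allocate() {                 // mirrors allocateBufferFromPool()
            if (allocateFromPools()) {
                return true;
            }
            drainOrAddNewPool();
            return allocateFromPools();
        }
    };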


@@ -51,9 +51,11 @@ Context::Context(
Context::~Context() {
gtpinNotifyContextDestroy((cl_context)this);
if (multiRootDeviceTimestampPacketAllocator.get() != nullptr) {
multiRootDeviceTimestampPacketAllocator.reset();
}
if (smallBufferPoolAllocator.isAggregatedSmallBuffersEnabled(this)) {
smallBufferPoolAllocator.releaseSmallBufferPool();
}
@@ -502,26 +504,89 @@ bool Context::BufferPoolAllocator::isAggregatedSmallBuffersEnabled(Context *cont
(isSupportedForSingleDeviceContexts && context->isSingleDeviceContext());
}
void Context::BufferPoolAllocator::initAggregatedSmallBuffers(Context *context) {
Context::BufferPoolAllocator::BufferPool::BufferPool(Context *context) : memoryManager(context->memoryManager) {
static constexpr cl_mem_flags flags{};
[[maybe_unused]] cl_int errcodeRet{};
Buffer::AdditionalBufferCreateArgs bufferCreateArgs{};
bufferCreateArgs.doNotProvidePerformanceHints = true;
bufferCreateArgs.makeAllocationLockable = true;
this->mainStorage = Buffer::create(context,
flags,
BufferPoolAllocator::aggregatedSmallBuffersPoolSize,
nullptr,
bufferCreateArgs,
errcodeRet);
if (this->mainStorage) {
this->chunkAllocator.reset(new HeapAllocator(BufferPoolAllocator::startingOffset,
BufferPoolAllocator::aggregatedSmallBuffersPoolSize,
BufferPoolAllocator::chunkAlignment));
mainStorage.reset(Buffer::create(context,
flags,
BufferPoolAllocator::aggregatedSmallBuffersPoolSize,
nullptr,
bufferCreateArgs,
errcodeRet));
if (mainStorage) {
chunkAllocator.reset(new HeapAllocator(BufferPoolAllocator::startingOffset,
BufferPoolAllocator::aggregatedSmallBuffersPoolSize,
BufferPoolAllocator::chunkAlignment));
context->decRefInternal();
}
}
Context::BufferPoolAllocator::BufferPool::BufferPool(BufferPool &&bufferPool) : memoryManager(bufferPool.memoryManager),
mainStorage(std::move(bufferPool.mainStorage)),
chunkAllocator(std::move(bufferPool.chunkAllocator)) {}
Buffer *Context::BufferPoolAllocator::BufferPool::allocate(const MemoryProperties &memoryProperties,
cl_mem_flags flags,
cl_mem_flags_intel flagsIntel,
size_t requestedSize,
void *hostPtr,
cl_int &errcodeRet) {
cl_buffer_region bufferRegion{};
size_t actualSize = requestedSize;
bufferRegion.origin = static_cast<size_t>(chunkAllocator->allocate(actualSize));
if (bufferRegion.origin == 0) {
return nullptr;
}
bufferRegion.origin -= BufferPoolAllocator::startingOffset;
bufferRegion.size = requestedSize;
auto bufferFromPool = mainStorage->createSubBuffer(flags, flagsIntel, &bufferRegion, errcodeRet);
bufferFromPool->createFunction = mainStorage->createFunction;
bufferFromPool->setSizeInPoolAllocator(actualSize);
return bufferFromPool;
}
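
A note on the origin arithmetic in allocate() above: HeapAllocator signals failure by returning 0, which is presumably why the chunk heap is based at a non-zero startingOffset and the base is subtracted again before createSubBuffer(), so a chunk at the very start of the pool is not mistaken for an allocation failure. A toy illustration of that scheme (base and capacity values are assumptions, not NEO's constants):

    #include <cstddef>
    #include <cstdint>

    struct ToyHeap {                           // stands in for HeapAllocator
        static constexpr std::uint64_t base = 64 * 1024;         // assumed value
        static constexpr std::uint64_t capacity = 2 * 1024 * 1024;
        std::uint64_t next = base;

        std::uint64_t allocate(std::size_t size) {
            if (next + size > base + capacity) {
                return 0;                      // 0 is reserved as the failure sentinel
            }
            const std::uint64_t address = next;
            next += size;
            return address;
        }
    };

    // Returns false when the pool is exhausted; offsetInPool may legitimately be 0.
    bool allocateChunk(ToyHeap &heap, std::size_t size, std::size_t &offsetInPool) {
        const std::uint64_t address = heap.allocate(size);
        if (address == 0) {
            return false;
        }
        offsetInPool = static_cast<std::size_t>(address - ToyHeap::base);
        return true;
    }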
bool Context::BufferPoolAllocator::BufferPool::isPoolBuffer(const MemObj *buffer) const {
return mainStorage.get() == buffer;
}
bool Context::BufferPoolAllocator::BufferPool::drain() {
for (auto allocation : mainStorage->getMultiGraphicsAllocation().getGraphicsAllocations()) {
if (memoryManager->allocInUse(*allocation)) {
return false;
}
}
chunkAllocator.reset(new HeapAllocator(BufferPoolAllocator::startingOffset,
BufferPoolAllocator::aggregatedSmallBuffersPoolSize,
BufferPoolAllocator::chunkAlignment));
return true;
}
void Context::BufferPoolAllocator::addNewBufferPool() {
Context::BufferPoolAllocator::BufferPool bufferPool(context);
if (bufferPool.mainStorage) {
bufferPools.push_back(std::move(bufferPool));
}
}
void Context::BufferPoolAllocator::initAggregatedSmallBuffers(Context *context) {
this->context = context;
addNewBufferPool();
}
bool Context::BufferPoolAllocator::isPoolBuffer(const MemObj *buffer) const {
for (auto &bufferPool : bufferPools) {
if (bufferPool.isPoolBuffer(buffer)) {
return true;
}
}
return false;
}
Buffer *Context::BufferPoolAllocator::allocateBufferFromPool(const MemoryProperties &memoryProperties,
cl_mem_flags flags,
cl_mem_flags_intel flagsIntel,
@@ -529,45 +594,52 @@ Buffer *Context::BufferPoolAllocator::allocateBufferFromPool(const MemoryPropert
void *hostPtr,
cl_int &errcodeRet) {
errcodeRet = CL_MEM_OBJECT_ALLOCATION_FAILURE;
if (this->mainStorage &&
this->isSizeWithinThreshold(requestedSize) &&
this->flagsAllowBufferFromPool(flags, flagsIntel)) {
auto lock = std::unique_lock<std::mutex>(this->mutex);
cl_buffer_region bufferRegion{};
size_t actualSize = requestedSize;
bufferRegion.origin = static_cast<size_t>(this->chunkAllocator->allocate(actualSize));
if (bufferRegion.origin == 0) {
return nullptr;
}
bufferRegion.origin -= BufferPoolAllocator::startingOffset;
bufferRegion.size = requestedSize;
auto bufferFromPool = this->mainStorage->createSubBuffer(flags, flagsIntel, &bufferRegion, errcodeRet);
bufferFromPool->createFunction = this->mainStorage->createFunction;
bufferFromPool->setSizeInPoolAllocator(actualSize);
if (bufferPools.empty() ||
!isSizeWithinThreshold(requestedSize) ||
!flagsAllowBufferFromPool(flags, flagsIntel)) {
return nullptr;
}
auto lock = std::unique_lock<std::mutex>(mutex);
auto bufferFromPool = allocateFromPools(memoryProperties, flags, flagsIntel, requestedSize, hostPtr, errcodeRet);
if (bufferFromPool != nullptr) {
return bufferFromPool;
}
return nullptr;
}
bool Context::BufferPoolAllocator::isPoolBuffer(const MemObj *buffer) const {
return buffer != nullptr && this->mainStorage == buffer;
}
void Context::BufferPoolAllocator::tryFreeFromPoolBuffer(MemObj *possiblePoolBuffer, size_t offset, size_t size) {
if (this->isPoolBuffer(possiblePoolBuffer)) {
auto lock = std::unique_lock<std::mutex>(this->mutex);
DEBUG_BREAK_IF(!this->mainStorage);
DEBUG_BREAK_IF(size == 0);
auto internalBufferAddress = offset + BufferPoolAllocator::startingOffset;
this->chunkAllocator->free(internalBufferAddress, size);
}
drainOrAddNewBufferPool();
return allocateFromPools(memoryProperties, flags, flagsIntel, requestedSize, hostPtr, errcodeRet);
}
void Context::BufferPoolAllocator::releaseSmallBufferPool() {
DEBUG_BREAK_IF(!this->mainStorage);
delete this->mainStorage;
this->mainStorage = nullptr;
bufferPools.clear();
}
void Context::BufferPoolAllocator::drainOrAddNewBufferPool() {
for (auto &bufferPool : bufferPools) {
if (bufferPool.drain()) {
return;
}
}
addNewBufferPool();
}
Buffer *Context::BufferPoolAllocator::allocateFromPools(const MemoryProperties &memoryProperties,
cl_mem_flags flags,
cl_mem_flags_intel flagsIntel,
size_t requestedSize,
void *hostPtr,
cl_int &errcodeRet) {
for (auto &bufferPool : bufferPools) {
auto bufferFromPool = bufferPool.allocate(memoryProperties, flags, flagsIntel, requestedSize, hostPtr, errcodeRet);
if (bufferFromPool != nullptr) {
return bufferFromPool;
}
}
return nullptr;
}
TagAllocatorBase *Context::getMultiRootDeviceTimestampPacketAllocator() {
return multiRootDeviceTimestampPacketAllocator.get();
}
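
That closes the allocator changes in this file; the header and test changes below mostly retarget code from the old single mainStorage to the bufferPools vector. From the API side the pooling stays transparent, as in this hedged sketch (illustrative flags and size, error handling omitted):

    #include <CL/cl.h>

    void smallBufferRoundTrip(cl_context clContext) {
        cl_int retVal = CL_SUCCESS;
        cl_mem small = clCreateBuffer(clContext, CL_MEM_READ_WRITE,
                                      4096 /* below smallBufferThreshold */,
                                      nullptr, &retVal);
        // With pooling enabled this comes back as a sub-buffer of some pool's
        // mainStorage. Releasing it no longer waits for engine completion and
        // no longer frees the chunk eagerly; drain() reclaims chunks in bulk.
        clReleaseMemObject(small);
    }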


@@ -60,24 +60,46 @@ class Context : public BaseObject<_cl_context> {
size_t size,
void *hostPtr,
cl_int &errcodeRet);
void tryFreeFromPoolBuffer(MemObj *possiblePoolBuffer, size_t offset, size_t size);
void releaseSmallBufferPool();
bool isAggregatedSmallBuffersEnabled(Context *context) const;
void initAggregatedSmallBuffers(Context *context);
bool isPoolBuffer(const MemObj *buffer) const;
bool flagsAllowBufferFromPool(const cl_mem_flags &flags, const cl_mem_flags_intel &flagsIntel) const;
protected:
Buffer *allocateFromPools(const MemoryProperties &memoryProperties,
cl_mem_flags flags,
cl_mem_flags_intel flagsIntel,
size_t size,
void *hostPtr,
cl_int &errcodeRet);
inline bool isSizeWithinThreshold(size_t size) const {
return BufferPoolAllocator::smallBufferThreshold >= size;
}
Buffer *mainStorage{nullptr};
std::unique_ptr<HeapAllocator> chunkAllocator;
void drainOrAddNewBufferPool();
void addNewBufferPool();
struct BufferPool {
BufferPool(Context *context);
BufferPool(BufferPool &&bufferPool);
bool isPoolBuffer(const MemObj *buffer) const;
Buffer *allocate(const MemoryProperties &memoryProperties,
cl_mem_flags flags,
cl_mem_flags_intel flagsIntel,
size_t size,
void *hostPtr,
cl_int &errcodeRet);
bool drain();
MemoryManager *memoryManager{nullptr};
std::unique_ptr<Buffer> mainStorage;
std::unique_ptr<HeapAllocator> chunkAllocator;
};
Context *context{nullptr};
std::mutex mutex;
std::vector<BufferPool> bufferPools;
};
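
Since BufferPool owns mainStorage and chunkAllocator through std::unique_ptr, the struct is move-only; the explicit move constructor above is what lets std::vector<BufferPool> relocate pools when addNewBufferPool() appends. A reduced illustration:

    #include <memory>
    #include <vector>

    struct ToyPool {                  // reduced illustration, not NEO code
        std::unique_ptr<int> storage = std::make_unique<int>(0);
        ToyPool() = default;
        ToyPool(ToyPool &&other) noexcept : storage(std::move(other.storage)) {}
    };

    void example() {
        std::vector<ToyPool> pools;
        pools.push_back(ToyPool{});   // moved in; vector growth also moves elements
    }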
static const cl_ulong objectMagic = 0xA4234321DC002130LL;
@@ -221,7 +243,7 @@ class Context : public BaseObject<_cl_context> {
static Platform *getPlatformFromProperties(const cl_context_properties *properties, cl_int &errcode);
BufferPoolAllocator &getBufferPoolAllocator() {
return this->smallBufferPoolAllocator;
return smallBufferPoolAllocator;
}
TagAllocatorBase *getMultiRootDeviceTimestampPacketAllocator();
std::unique_lock<std::mutex> obtainOwnershipForMultiRootDeviceAllocator();


@@ -90,8 +90,6 @@ MemObj::~MemObj() {
}
destroyGraphicsAllocation(graphicsAllocation, doAsyncDestructions);
graphicsAllocation = nullptr;
} else if (graphicsAllocation && context->getBufferPoolAllocator().isPoolBuffer(associatedMemObject)) {
memoryManager->waitForEnginesCompletion(*graphicsAllocation);
}
if (!associatedMemObject) {
releaseMapAllocation(rootDeviceIndex, doAsyncDestructions);
@@ -102,7 +100,6 @@ MemObj::~MemObj() {
}
if (associatedMemObject) {
associatedMemObject->decRefInternal();
context->getBufferPoolAllocator().tryFreeFromPoolBuffer(associatedMemObject, this->offset, this->sizeInPoolAllocator);
}
if (!associatedMemObject) {
releaseAllocatedMapPtr();
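
The two removals above are the behavioral core of the patch: destroying a pooled sub-buffer no longer stalls on waitForEnginesCompletion() and no longer returns its chunk to the heap. The chunk stays accounted as used until a later exhaustion drains an idle pool, which is what the renamed tests below assert. A toy accounting model (sizes illustrative):

    #include <cassert>
    #include <cstddef>

    struct ToyChunkAccounting {        // not NEO code; models the new bookkeeping
        std::size_t usedSize = 0;
        void allocateChunk(std::size_t size) { usedSize += size; }
        void releaseSubBuffer() { /* intentionally empty: no eager free anymore */ }
        void drain() { usedSize = 0; } // bulk reclamation once the pool is idle
    };

    void example() {
        ToyChunkAccounting pool;
        pool.allocateChunk(64 * 1024);
        pool.releaseSubBuffer();
        assert(pool.usedSize == 64 * 1024); // matches the renamed test below
        pool.drain();
        assert(pool.usedSize == 0);
    }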


@@ -30,12 +30,6 @@ class AggregatedSmallBuffersTestTemplate : public ::testing::Test {
}
}
void TearDown() override {
if (this->context->getBufferPoolAllocator().isAggregatedSmallBuffersEnabled(context.get())) {
this->context->getBufferPoolAllocator().releaseSmallBufferPool();
}
}
void setAllocationToFail(bool shouldFail) {
this->mockMemoryManager->failInDevicePoolWithError = shouldFail;
}
@@ -62,7 +56,7 @@ class AggregatedSmallBuffersTestTemplate : public ::testing::Test {
this->setAllocationToFail(failMainStorageAllocation);
cl_device_id devices[] = {device};
this->context.reset(Context::create<MockContext>(nullptr, ClDeviceVector(devices, 1), nullptr, nullptr, retVal));
ASSERT_EQ(retVal, CL_SUCCESS);
EXPECT_EQ(retVal, CL_SUCCESS);
this->setAllocationToFail(false);
this->poolAllocator = static_cast<MockBufferPoolAllocator *>(&context->smallBufferPoolAllocator);
}
@@ -83,8 +77,8 @@ class AggregatedSmallBuffersKernelTest : public AggregatedSmallBuffersTestTempla
retVal = CL_INVALID_VALUE;
pMultiDeviceKernel.reset(MultiDeviceKernel::create<MockKernel>(pProgram.get(), MockKernel::toKernelInfoContainer(*pKernelInfo, device->getRootDeviceIndex()), &retVal));
pKernel = static_cast<MockKernel *>(pMultiDeviceKernel->getKernel(device->getRootDeviceIndex()));
ASSERT_NE(pKernel, nullptr);
ASSERT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(pKernel, nullptr);
EXPECT_EQ(retVal, CL_SUCCESS);
pKernel->setCrossThreadData(pCrossThreadData, sizeof(pCrossThreadData));
pKernelArg = (void **)(pKernel->getCrossThreadData() + pKernelInfo->argAsPtr(0).stateless);
@@ -143,87 +137,140 @@ HWTEST_F(AggregatedSmallBuffersDefaultTest, givenDifferentFlagValuesAndSingleOrM
using AggregatedSmallBuffersDisabledTest = AggregatedSmallBuffersTestTemplate<0>;
TEST_F(AggregatedSmallBuffersDisabledTest, givenAggregatedSmallBuffersDisabledWhenBufferCreateCalledThenDoNotUsePool) {
ASSERT_FALSE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_EQ(poolAllocator->mainStorage, nullptr);
EXPECT_FALSE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(0u, poolAllocator->bufferPools.size());
std::unique_ptr<Buffer> buffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_NE(buffer, nullptr);
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_EQ(poolAllocator->mainStorage, nullptr);
EXPECT_NE(nullptr, buffer);
EXPECT_EQ(CL_SUCCESS, retVal);
EXPECT_EQ(0u, poolAllocator->bufferPools.size());
}
using AggregatedSmallBuffersEnabledTest = AggregatedSmallBuffersTestTemplate<1>;
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledWhenAllocatingMainStorageThenMakeDeviceBufferLockable) {
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_NE(poolAllocator->mainStorage, nullptr);
ASSERT_NE(mockMemoryManager->lastAllocationProperties, nullptr);
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
EXPECT_NE(nullptr, mockMemoryManager->lastAllocationProperties);
EXPECT_TRUE(mockMemoryManager->lastAllocationProperties->makeDeviceBufferLockable);
}
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndSizeLargerThanThresholdWhenBufferCreateCalledThenDoNotUsePool) {
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_NE(poolAllocator->mainStorage, nullptr);
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
size = PoolAllocator::smallBufferThreshold + 1;
std::unique_ptr<Buffer> buffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_NE(buffer, nullptr);
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(nullptr, buffer);
EXPECT_EQ(CL_SUCCESS, retVal);
}
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndSizeLowerThenChunkAlignmentWhenBufferCreatedAndDestroyedThenSizeIsAsRequestedAndCorrectSizeIsFreed) {
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_NE(poolAllocator->mainStorage, nullptr);
ASSERT_EQ(poolAllocator->chunkAllocator->getUsedSize(), 0u);
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndSizeLowerThenChunkAlignmentWhenBufferCreatedAndDestroyedThenSizeIsAsRequestedAndCorrectSizeIsNotFreed) {
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
EXPECT_EQ(0u, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
size = PoolAllocator::chunkAlignment / 2;
std::unique_ptr<Buffer> buffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_NE(buffer, nullptr);
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_EQ(buffer->getSize(), size);
EXPECT_EQ(poolAllocator->chunkAllocator->getUsedSize(), PoolAllocator::chunkAlignment);
EXPECT_EQ(CL_SUCCESS, retVal);
EXPECT_EQ(size, buffer->getSize());
EXPECT_EQ(PoolAllocator::chunkAlignment, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
auto mockBuffer = static_cast<MockBuffer *>(buffer.get());
EXPECT_EQ(mockBuffer->sizeInPoolAllocator, PoolAllocator::chunkAlignment);
EXPECT_EQ(PoolAllocator::chunkAlignment, mockBuffer->sizeInPoolAllocator);
buffer.reset(nullptr);
EXPECT_EQ(poolAllocator->chunkAllocator->getUsedSize(), 0u);
EXPECT_EQ(PoolAllocator::chunkAlignment, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
}
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndSizeEqualToThresholdWhenBufferCreateCalledThenUsePool) {
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_NE(poolAllocator->mainStorage, nullptr);
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
std::unique_ptr<Buffer> buffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_NE(buffer, nullptr);
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(poolAllocator->mainStorage, nullptr);
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
auto mockBuffer = static_cast<MockBuffer *>(buffer.get());
EXPECT_GE(mockBuffer->getSize(), size);
EXPECT_GE(mockBuffer->getOffset(), 0u);
EXPECT_LE(mockBuffer->getOffset(), PoolAllocator::aggregatedSmallBuffersPoolSize - size);
EXPECT_TRUE(mockBuffer->isSubBuffer());
EXPECT_EQ(poolAllocator->mainStorage, mockBuffer->associatedMemObject);
EXPECT_EQ(mockBuffer->associatedMemObject, poolAllocator->bufferPools[0].mainStorage.get());
retVal = clReleaseMemObject(buffer.release());
EXPECT_EQ(retVal, CL_SUCCESS);
}
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledWhenClReleaseMemObjectCalledThenWaitForEnginesCompletionCalled) {
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_NE(poolAllocator->mainStorage, nullptr);
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledWhenClReleaseMemObjectCalledThenWaitForEnginesCompletionNotCalledAndMemoryRegionIsNotFreed) {
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
EXPECT_EQ(0u, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
std::unique_ptr<Buffer> buffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
ASSERT_NE(buffer, nullptr);
ASSERT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(buffer, nullptr);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(poolAllocator->mainStorage, nullptr);
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
auto mockBuffer = static_cast<MockBuffer *>(buffer.get());
ASSERT_TRUE(mockBuffer->isSubBuffer());
ASSERT_EQ(poolAllocator->mainStorage, mockBuffer->associatedMemObject);
EXPECT_TRUE(mockBuffer->isSubBuffer());
EXPECT_EQ(mockBuffer->associatedMemObject, poolAllocator->bufferPools[0].mainStorage.get());
ASSERT_EQ(mockMemoryManager->waitForEnginesCompletionCalled, 0u);
EXPECT_EQ(mockMemoryManager->waitForEnginesCompletionCalled, 0u);
retVal = clReleaseMemObject(buffer.release());
ASSERT_EQ(retVal, CL_SUCCESS);
EXPECT_EQ(mockMemoryManager->waitForEnginesCompletionCalled, 1u);
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_EQ(mockMemoryManager->waitForEnginesCompletionCalled, 0u);
EXPECT_EQ(size, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
}
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndBufferPoolIsExhaustedAndAllocationsAreNotInUseThenPoolIsReused) {
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
constexpr auto buffersToCreate = PoolAllocator::aggregatedSmallBuffersPoolSize / PoolAllocator::smallBufferThreshold;
std::vector<std::unique_ptr<Buffer>> buffers(buffersToCreate);
for (auto i = 0u; i < buffersToCreate; i++) {
buffers[i].reset(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_EQ(retVal, CL_SUCCESS);
}
EXPECT_EQ(size * buffersToCreate, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
EXPECT_EQ(0u, mockMemoryManager->allocInUseCalled);
mockMemoryManager->deferAllocInUse = false;
std::unique_ptr<Buffer> bufferAfterExhaustMustSucceed(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_EQ(poolAllocator->bufferPools[0].mainStorage->getMultiGraphicsAllocation().getGraphicsAllocations().size(), mockMemoryManager->allocInUseCalled);
EXPECT_EQ(size, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
}
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndBufferPoolIsExhaustedAndAllocationsAreInUseThenNewPoolIsCreated) {
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
constexpr auto buffersToCreate = PoolAllocator::aggregatedSmallBuffersPoolSize / PoolAllocator::smallBufferThreshold;
std::vector<std::unique_ptr<Buffer>> buffers(buffersToCreate);
for (auto i = 0u; i < buffersToCreate; i++) {
buffers[i].reset(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_EQ(retVal, CL_SUCCESS);
}
EXPECT_EQ(size * buffersToCreate, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
EXPECT_EQ(0u, mockMemoryManager->allocInUseCalled);
mockMemoryManager->deferAllocInUse = true;
std::unique_ptr<Buffer> bufferAfterExhaustMustSucceed(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_EQ(2u, poolAllocator->bufferPools.size());
EXPECT_EQ(poolAllocator->bufferPools[0].mainStorage->getMultiGraphicsAllocation().getGraphicsAllocations().size(), mockMemoryManager->allocInUseCalled);
EXPECT_EQ(size * buffersToCreate, poolAllocator->bufferPools[0].chunkAllocator->getUsedSize());
EXPECT_EQ(size, poolAllocator->bufferPools[1].chunkAllocator->getUsedSize());
}
TEST_F(AggregatedSmallBuffersEnabledTest, givenCopyHostPointerWhenCreatingBufferButCopyFailedThenDoNotUsePool) {
@@ -249,14 +296,15 @@ TEST_F(AggregatedSmallBuffersEnabledTest, givenCopyHostPointerWhenCreatingBuffer
unsigned char dataToCopy[PoolAllocator::smallBufferThreshold];
hostPtr = dataToCopy;
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_NE(poolAllocator->mainStorage, nullptr);
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
std::unique_ptr<Buffer> buffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
if (commandQueue->writeBufferCounter == 0) {
GTEST_SKIP();
}
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(buffer, nullptr);
EXPECT_NE(buffer, nullptr);
auto mockBuffer = static_cast<MockBuffer *>(buffer.get());
EXPECT_FALSE(mockBuffer->isSubBuffer());
@@ -265,8 +313,9 @@ TEST_F(AggregatedSmallBuffersEnabledTest, givenCopyHostPointerWhenCreatingBuffer
}
TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndSizeEqualToThresholdWhenBufferCreateCalledMultipleTimesThenUsePool) {
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_NE(poolAllocator->mainStorage, nullptr);
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_EQ(1u, poolAllocator->bufferPools.size());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
constexpr auto buffersToCreate = PoolAllocator::aggregatedSmallBuffersPoolSize / PoolAllocator::smallBufferThreshold;
std::vector<std::unique_ptr<Buffer>> buffers(buffersToCreate);
@@ -274,11 +323,8 @@ TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndS
buffers[i].reset(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_EQ(retVal, CL_SUCCESS);
}
EXPECT_NE(poolAllocator->mainStorage, nullptr);
std::unique_ptr<Buffer> bufferAfterPoolIsFull(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(bufferAfterPoolIsFull, nullptr);
EXPECT_FALSE(bufferAfterPoolIsFull->isSubBuffer());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
using Bounds = struct {
size_t left;
@@ -292,7 +338,8 @@ TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndS
EXPECT_NE(buffers[i], nullptr);
EXPECT_TRUE(buffers[i]->isSubBuffer());
auto mockBuffer = static_cast<MockBuffer *>(buffers[i].get());
EXPECT_EQ(poolAllocator->mainStorage, mockBuffer->associatedMemObject);
EXPECT_EQ(mockBuffer->associatedMemObject, poolAllocator->bufferPools[0].mainStorage.get());
EXPECT_NE(nullptr, poolAllocator->bufferPools[0].mainStorage.get());
EXPECT_GE(mockBuffer->getSize(), size);
EXPECT_GE(mockBuffer->getOffset(), 0u);
EXPECT_LE(mockBuffer->getOffset(), PoolAllocator::aggregatedSmallBuffersPoolSize - size);
@@ -307,42 +354,24 @@ TEST_F(AggregatedSmallBuffersEnabledTest, givenAggregatedSmallBuffersEnabledAndS
subBuffersBounds[j].right <= subBuffersBounds[i].left);
}
}
// freeing subbuffer frees space in pool
ASSERT_LT(poolAllocator->chunkAllocator->getLeftSize(), size);
clReleaseMemObject(buffers[0].release());
EXPECT_GE(poolAllocator->chunkAllocator->getLeftSize(), size);
std::unique_ptr<Buffer> bufferAfterPoolHasSpaceAgain(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(bufferAfterPoolHasSpaceAgain, nullptr);
EXPECT_TRUE(bufferAfterPoolHasSpaceAgain->isSubBuffer());
// subbuffer after free does not overlap
subBuffersBounds[0] = Bounds{bufferAfterPoolHasSpaceAgain->getOffset(), bufferAfterPoolHasSpaceAgain->getOffset() + bufferAfterPoolHasSpaceAgain->getSize()};
for (auto i = 0u; i < buffersToCreate; i++) {
for (auto j = i + 1; j < buffersToCreate; j++) {
EXPECT_TRUE(subBuffersBounds[i].right <= subBuffersBounds[j].left ||
subBuffersBounds[j].right <= subBuffersBounds[i].left);
}
}
}
TEST_F(AggregatedSmallBuffersKernelTest, givenBufferFromPoolWhenOffsetSubbufferIsPassedToSetKernelArgThenCorrectGpuVAIsPatched) {
std::unique_ptr<Buffer> unusedBuffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
std::unique_ptr<Buffer> buffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
ASSERT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(buffer, nullptr);
ASSERT_GT(buffer->getOffset(), 0u);
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(buffer, nullptr);
EXPECT_GT(buffer->getOffset(), 0u);
cl_buffer_region region;
region.origin = 0xc0;
region.size = 32;
cl_int error = 0;
std::unique_ptr<Buffer> subBuffer(buffer->createSubBuffer(buffer->getFlags(), buffer->getFlagsIntel(), &region, error));
ASSERT_NE(subBuffer, nullptr);
EXPECT_NE(subBuffer, nullptr);
EXPECT_EQ(ptrOffset(buffer->getCpuAddress(), region.origin), subBuffer->getCpuAddress());
const auto graphicsAllocation = subBuffer->getGraphicsAllocation(device->getRootDeviceIndex());
ASSERT_NE(graphicsAllocation, nullptr);
EXPECT_NE(graphicsAllocation, nullptr);
const auto gpuAddress = graphicsAllocation->getGpuAddress();
EXPECT_EQ(ptrOffset(gpuAddress, buffer->getOffset() + region.origin), subBuffer->getBufferAddress(device->getRootDeviceIndex()));
@@ -353,13 +382,13 @@ TEST_F(AggregatedSmallBuffersKernelTest, givenBufferFromPoolWhenOffsetSubbufferI
using AggregatedSmallBuffersEnabledTestFailPoolInit = AggregatedSmallBuffersTestTemplate<1, true>;
TEST_F(AggregatedSmallBuffersEnabledTestFailPoolInit, givenAggregatedSmallBuffersEnabledAndSizeEqualToThresholdWhenBufferCreateCalledButPoolCreateFailedThenDoNotUsePool) {
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_EQ(poolAllocator->mainStorage, nullptr);
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_TRUE(poolAllocator->bufferPools.empty());
std::unique_ptr<Buffer> buffer(Buffer::create(context.get(), flags, size, hostPtr, retVal));
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(buffer.get(), nullptr);
EXPECT_EQ(poolAllocator->mainStorage, nullptr);
EXPECT_TRUE(poolAllocator->bufferPools.empty());
}
using AggregatedSmallBuffersEnabledTestDoNotRunSetup = AggregatedSmallBuffersTestTemplate<1, false, false>;
@@ -368,9 +397,9 @@ TEST_F(AggregatedSmallBuffersEnabledTestDoNotRunSetup, givenAggregatedSmallBuffe
testing::internal::CaptureStdout();
DebugManager.flags.PrintDriverDiagnostics.set(1);
setUpImpl();
ASSERT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
ASSERT_NE(poolAllocator->mainStorage, nullptr);
ASSERT_NE(context->driverDiagnostics, nullptr);
EXPECT_TRUE(poolAllocator->isAggregatedSmallBuffersEnabled(context.get()));
EXPECT_FALSE(poolAllocator->bufferPools.empty());
EXPECT_NE(context->driverDiagnostics, nullptr);
std::string output = testing::internal::GetCapturedStdout();
EXPECT_EQ(0u, output.size());
}
@@ -383,7 +412,7 @@ class AggregatedSmallBuffersApiTestTemplate : public ::testing::Test {
auto device = deviceFactory->rootDevices[0];
cl_device_id devices[] = {device};
clContext = clCreateContext(nullptr, 1, devices, nullptr, nullptr, &retVal);
ASSERT_EQ(retVal, CL_SUCCESS);
EXPECT_EQ(retVal, CL_SUCCESS);
context = castToObject<Context>(clContext);
poolAllocator = static_cast<MockBufferPoolAllocator *>(&context->getBufferPoolAllocator());
}
@@ -416,7 +445,7 @@ TEST_F(AggregatedSmallBuffersEnabledApiTest, givenNotSmallBufferWhenCreatingBuff
size = PoolAllocator::smallBufferThreshold + 1;
cl_mem buffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(buffer, nullptr);
EXPECT_NE(buffer, nullptr);
MockBuffer *asBuffer = static_cast<MockBuffer *>(buffer);
EXPECT_FALSE(asBuffer->isSubBuffer());
@@ -431,14 +460,13 @@ TEST_F(AggregatedSmallBuffersEnabledApiTest, givenSmallBufferWhenCreatingBufferT
auto contextRefCountBefore = context->getRefInternalCount();
cl_mem smallBuffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(smallBuffer, nullptr);
EXPECT_NE(smallBuffer, nullptr);
MockBuffer *asBuffer = static_cast<MockBuffer *>(smallBuffer);
EXPECT_TRUE(asBuffer->isSubBuffer());
Buffer *parentBuffer = static_cast<Buffer *>(asBuffer->associatedMemObject);
EXPECT_EQ(2, parentBuffer->getRefInternalCount());
MockBufferPoolAllocator *mockBufferPoolAllocator = static_cast<MockBufferPoolAllocator *>(&context->getBufferPoolAllocator());
EXPECT_EQ(parentBuffer, mockBufferPoolAllocator->mainStorage);
EXPECT_EQ(parentBuffer, poolAllocator->bufferPools[0].mainStorage.get());
retVal = clReleaseMemObject(smallBuffer);
EXPECT_EQ(retVal, CL_SUCCESS);
@@ -452,14 +480,13 @@ TEST_F(AggregatedSmallBuffersEnabledApiTest, givenSmallBufferWhenCreatingBufferW
auto contextRefCountBefore = context->getRefInternalCount();
cl_mem smallBuffer = clCreateBufferWithProperties(clContext, nullptr, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(smallBuffer, nullptr);
EXPECT_NE(smallBuffer, nullptr);
MockBuffer *asBuffer = static_cast<MockBuffer *>(smallBuffer);
EXPECT_TRUE(asBuffer->isSubBuffer());
Buffer *parentBuffer = static_cast<Buffer *>(asBuffer->associatedMemObject);
EXPECT_EQ(2, parentBuffer->getRefInternalCount());
MockBufferPoolAllocator *mockBufferPoolAllocator = static_cast<MockBufferPoolAllocator *>(&context->getBufferPoolAllocator());
EXPECT_EQ(parentBuffer, mockBufferPoolAllocator->mainStorage);
EXPECT_EQ(parentBuffer, poolAllocator->bufferPools[0].mainStorage.get());
retVal = clReleaseMemObject(smallBuffer);
EXPECT_EQ(retVal, CL_SUCCESS);
@@ -474,14 +501,13 @@ TEST_F(AggregatedSmallBuffersEnabledApiTest, givenSmallBufferWhenCreatingBufferW
cl_mem_properties memProperties{};
cl_mem smallBuffer = clCreateBufferWithProperties(clContext, &memProperties, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(smallBuffer, nullptr);
EXPECT_NE(smallBuffer, nullptr);
MockBuffer *asBuffer = static_cast<MockBuffer *>(smallBuffer);
EXPECT_TRUE(asBuffer->isSubBuffer());
Buffer *parentBuffer = static_cast<Buffer *>(asBuffer->associatedMemObject);
EXPECT_EQ(2, parentBuffer->getRefInternalCount());
MockBufferPoolAllocator *mockBufferPoolAllocator = static_cast<MockBufferPoolAllocator *>(&context->getBufferPoolAllocator());
EXPECT_EQ(parentBuffer, mockBufferPoolAllocator->mainStorage);
EXPECT_EQ(parentBuffer, poolAllocator->bufferPools[0].mainStorage.get());
retVal = clReleaseMemObject(smallBuffer);
EXPECT_EQ(retVal, CL_SUCCESS);
@@ -494,7 +520,7 @@ TEST_F(AggregatedSmallBuffersEnabledApiTest, givenSmallBufferWhenCreatingBufferW
TEST_F(AggregatedSmallBuffersEnabledApiTest, givenBufferFromPoolWhenGetMemObjInfoCalledThenReturnValuesLikeForNormalBuffer) {
cl_mem buffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(buffer, nullptr);
EXPECT_NE(buffer, nullptr);
MockBuffer *asBuffer = static_cast<MockBuffer *>(buffer);
EXPECT_TRUE(asBuffer->isSubBuffer());
@@ -521,14 +547,14 @@ TEST_F(AggregatedSmallBuffersEnabledApiTest, givenSubBufferNotFromPoolAndAggrega
size_t size = PoolAllocator::smallBufferThreshold + 1;
cl_mem largeBuffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
ASSERT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(largeBuffer, nullptr);
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(largeBuffer, nullptr);
cl_buffer_region region{};
region.size = 1;
cl_mem subBuffer = clCreateSubBuffer(largeBuffer, flags, CL_BUFFER_CREATE_TYPE_REGION, &region, &retVal);
ASSERT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(subBuffer, nullptr);
EXPECT_EQ(retVal, CL_SUCCESS);
EXPECT_NE(subBuffer, nullptr);
DebugManager.flags.ExperimentalSmallBufferPoolAllocator.set(1);
retVal = clReleaseMemObject(subBuffer);
@@ -549,14 +575,13 @@ TEST_F(AggregatedSmallBuffersEnabledApiTest, givenCopyHostPointerWhenCreatingBuf
cl_mem smallBuffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
EXPECT_EQ(context->getRefInternalCount(), contextRefCountBefore + 1);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(smallBuffer, nullptr);
EXPECT_NE(smallBuffer, nullptr);
MockBuffer *asBuffer = static_cast<MockBuffer *>(smallBuffer);
EXPECT_TRUE(asBuffer->isSubBuffer());
Buffer *parentBuffer = static_cast<Buffer *>(asBuffer->associatedMemObject);
EXPECT_EQ(2, parentBuffer->getRefInternalCount());
MockBufferPoolAllocator *mockBufferPoolAllocator = static_cast<MockBufferPoolAllocator *>(&context->getBufferPoolAllocator());
EXPECT_EQ(parentBuffer, mockBufferPoolAllocator->mainStorage);
EXPECT_EQ(parentBuffer, poolAllocator->bufferPools[0].mainStorage.get());
// check that data has been copied
auto address = asBuffer->getCpuAddress();
@@ -579,17 +604,17 @@ TEST_F(AggregatedSmallBuffersSubBufferApiTest, givenBufferFromPoolWhenCreateSubB
cl_mem buffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(buffer, nullptr);
EXPECT_NE(buffer, nullptr);
MockBuffer *mockBuffer = static_cast<MockBuffer *>(buffer);
EXPECT_GT(mockBuffer->offset, 0u);
EXPECT_EQ(ptrOffset(poolAllocator->mainStorage->getCpuAddress(), mockBuffer->getOffset()), mockBuffer->getCpuAddress());
EXPECT_EQ(ptrOffset(poolAllocator->bufferPools[0].mainStorage->getCpuAddress(), mockBuffer->getOffset()), mockBuffer->getCpuAddress());
cl_buffer_region region{};
region.size = 1;
region.origin = size / 2;
cl_mem subBuffer = clCreateSubBuffer(buffer, flags, CL_BUFFER_CREATE_TYPE_REGION, &region, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(subBuffer, nullptr);
EXPECT_NE(subBuffer, nullptr);
MockBuffer *mockSubBuffer = static_cast<MockBuffer *>(subBuffer);
EXPECT_EQ(mockSubBuffer->associatedMemObject, buffer);
EXPECT_EQ(ptrOffset(mockBuffer->getCpuAddress(), region.origin), mockSubBuffer->getCpuAddress());
@@ -609,16 +634,16 @@ TEST_F(AggregatedSmallBuffersSubBufferApiTest, givenBufferFromPoolWhenCreateSubB
TEST_F(AggregatedSmallBuffersSubBufferApiTest, givenSubBufferFromBufferPoolWhenGetMemObjInfoCalledThenReturnValuesLikeForNormalSubBuffer) {
cl_mem buffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(buffer, nullptr);
EXPECT_NE(buffer, nullptr);
MockBuffer *mockBuffer = static_cast<MockBuffer *>(buffer);
ASSERT_TRUE(context->getBufferPoolAllocator().isPoolBuffer(mockBuffer->associatedMemObject));
EXPECT_TRUE(context->getBufferPoolAllocator().isPoolBuffer(mockBuffer->associatedMemObject));
cl_buffer_region region{};
region.size = 1;
region.origin = size / 2;
cl_mem subBuffer = clCreateSubBuffer(buffer, flags, CL_BUFFER_CREATE_TYPE_REGION, &region, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(subBuffer, nullptr);
EXPECT_NE(subBuffer, nullptr);
cl_mem associatedMemObj = nullptr;
retVal = clGetMemObjectInfo(subBuffer, CL_MEM_ASSOCIATED_MEMOBJECT, sizeof(cl_mem), &associatedMemObj, nullptr);
@@ -642,7 +667,7 @@ TEST_F(AggregatedSmallBuffersSubBufferApiTest, givenSubBufferFromBufferPoolWhenG
TEST_F(AggregatedSmallBuffersSubBufferApiTest, givenBufferFromPoolWhenCreateSubBufferCalledWithRegionOutsideBufferThenItFails) {
cl_mem buffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(buffer, nullptr);
EXPECT_NE(buffer, nullptr);
cl_buffer_region region{};
region.size = size + 1;
@@ -666,14 +691,14 @@ TEST_F(AggregatedSmallBuffersSubBufferApiTest, givenBufferFromPoolWhenCreateSubB
TEST_F(AggregatedSmallBuffersSubBufferApiTest, givenSubBufferFromBufferFromPoolWhenCreateSubBufferCalledThenItFails) {
cl_mem buffer = clCreateBuffer(clContext, flags, size, hostPtr, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(buffer, nullptr);
EXPECT_NE(buffer, nullptr);
cl_buffer_region region{};
region.size = 1;
region.origin = size / 2;
cl_mem subBuffer = clCreateSubBuffer(buffer, flags, CL_BUFFER_CREATE_TYPE_REGION, &region, &retVal);
EXPECT_EQ(retVal, CL_SUCCESS);
ASSERT_NE(subBuffer, nullptr);
EXPECT_NE(subBuffer, nullptr);
region.origin = 0;
cl_mem subSubBuffer = clCreateSubBuffer(subBuffer, flags, CL_BUFFER_CREATE_TYPE_REGION, &region, &retVal);


@@ -55,9 +55,9 @@ class MockContext : public Context {
class MockBufferPoolAllocator : public BufferPoolAllocator {
public:
using BufferPoolAllocator::chunkAllocator;
using BufferPoolAllocator::BufferPool;
using BufferPoolAllocator::bufferPools;
using BufferPoolAllocator::isAggregatedSmallBuffersEnabled;
using BufferPoolAllocator::mainStorage;
};
private: