fix: l0, bindless image from pooled alloc
Make sure pool offset is applied when growing pools are used.

Related-To: NEO-16317, GSD-11804
Signed-off-by: Dominik Dabek <dominik.dabek@intel.com>
committed by Compute-Runtime-Automation
parent feff29fd47
commit 8575573dbd
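For readers outside the driver, the one-line description above can be pictured with a standalone sketch (the Pool struct, its cpuBase/gpuBase fields and the imageBaseAddress function are illustrative names, not NEO types): a pooled USM allocation shares one backing allocation whose GPU address corresponds to the pool base, so a bindless image created on a pooled pointer has to add that pointer's offset within the pool rather than use the backing allocation's address directly.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative stand-in for a USM allocation pool: many user pointers are
// carved out of one backing allocation whose GPU VA maps to the pool base.
struct Pool {
    uintptr_t cpuBase; // start of the pooled CPU range handed out to users
    uint64_t gpuBase;  // GPU VA of the single backing allocation
    size_t size;

    bool isInPool(const void *ptr) const {
        auto p = reinterpret_cast<uintptr_t>(ptr);
        return p >= cpuBase && p < cpuBase + size; // half-open range
    }
    size_t offsetInPool(const void *ptr) const {
        return reinterpret_cast<uintptr_t>(ptr) - cpuBase;
    }
};

// The failure mode the message describes: programming the image surface with
// gpuBase alone points every pooled image at the start of the pool.
// Applying the pool offset restores the intended address.
uint64_t imageBaseAddress(const Pool &pool, const void *pooledPtr) {
    assert(pool.isInPool(pooledPtr));
    return pool.gpuBase + pool.offsetInPool(pooledPtr);
}

The diff below adds the lookup that finds which pool, if any, owns a given pointer, so callers can apply exactly this kind of offset.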
@@ -839,6 +839,16 @@ void Device::allocateSyncBufferHandler() {
     }
 }
 
+UsmMemAllocPool *Device::getUsmPoolOwningPtr(const void *ptr) {
+    if (getUsmMemAllocPool() &&
+        getUsmMemAllocPool()->isInPool(ptr)) {
+        return getUsmMemAllocPool();
+    } else if (getUsmMemAllocPoolsManager()) {
+        return getUsmMemAllocPoolsManager()->getPoolContainingAlloc(ptr);
+    }
+    return nullptr;
+}
+
 uint64_t Device::getGlobalMemorySize(uint32_t deviceBitfield) const {
     auto globalMemorySize = getMemoryManager()->isLocalMemorySupported(this->getRootDeviceIndex())
                                 ? getMemoryManager()->getLocalMemorySize(this->getRootDeviceIndex(), deviceBitfield)
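A hedged usage sketch of the lookup order added above, reduced to standalone types (SimplePool, SimplePoolsManager and SimpleDevice are illustrative, not NEO classes): the device-level pool is checked first, and only then the pools manager, which is what makes pointers handed out by later, grown pools resolvable.

#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

// Standalone model of the lookup order in Device::getUsmPoolOwningPtr.
struct SimplePool {
    const char *base = nullptr;
    size_t size = 0;
    bool isInPool(const void *ptr) const {
        auto p = static_cast<const char *>(ptr);
        return p >= base && p < base + size;
    }
};

struct SimplePoolsManager {
    std::vector<std::unique_ptr<SimplePool>> pools; // grows over time
    SimplePool *getPoolContainingAlloc(const void *ptr) {
        for (auto &pool : pools) {
            if (pool->isInPool(ptr)) {
                return pool.get();
            }
        }
        return nullptr;
    }
};

struct SimpleDevice {
    std::unique_ptr<SimplePool> usmMemAllocPool;                 // optional single pool
    std::unique_ptr<SimplePoolsManager> usmMemAllocPoolsManager; // optional growing pools

    SimplePool *getUsmPoolOwningPtr(const void *ptr) {
        if (usmMemAllocPool && usmMemAllocPool->isInPool(ptr)) {
            return usmMemAllocPool.get();
        } else if (usmMemAllocPoolsManager) {
            return usmMemAllocPoolsManager->getPoolContainingAlloc(ptr);
        }
        return nullptr;
    }
};

int main() {
    static char backing[256];

    SimpleDevice device;
    device.usmMemAllocPoolsManager = std::make_unique<SimplePoolsManager>();
    auto grownPool = std::make_unique<SimplePool>();
    grownPool->base = backing;
    grownPool->size = sizeof(backing);
    SimplePool *expected = grownPool.get();
    device.usmMemAllocPoolsManager->pools.push_back(std::move(grownPool));

    // A pointer inside the grown pool resolves through the manager fallback.
    std::cout << (device.getUsmPoolOwningPtr(backing + 16) == expected) << "\n";              // 1
    // One past the end of the backing range is not owned by any pool.
    std::cout << (device.getUsmPoolOwningPtr(backing + sizeof(backing)) == nullptr) << "\n";  // 1
    return 0;
}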
@@ -213,6 +213,7 @@ class Device : public ReferenceTrackedObject<Device>, NEO::NonCopyableAndNonMova
     UsmMemAllocPool *getUsmMemAllocPool() {
         return usmMemAllocPool.get();
     }
+    UsmMemAllocPool *getUsmPoolOwningPtr(const void *ptr);
     UsmMemAllocPool *getUsmConstantSurfaceAllocPool() {
         return usmConstantSurfaceAllocPool.get();
     }
@@ -2624,22 +2624,53 @@ TEST(Device, givenDeviceWhenCallingUsmAllocationPoolMethodsThenCorrectValueRetur
     EXPECT_EQ(nullptr, device->getUsmMemAllocPool());
     device->cleanupUsmAllocationPool();
 
-    auto *usmAllocPool = new MockUsmMemAllocPool;
-    device->resetUsmAllocationPool(usmAllocPool);
-    EXPECT_EQ(usmAllocPool, device->getUsmMemAllocPool());
-    usmAllocPool->callBaseCleanup = false;
-    EXPECT_EQ(0u, usmAllocPool->cleanupCalled);
-    device->cleanupUsmAllocationPool();
-    EXPECT_EQ(1u, usmAllocPool->cleanupCalled);
+    auto poolInfo = UsmMemAllocPoolsManager::poolInfos[0];
 
-    EXPECT_EQ(nullptr, device->getUsmMemAllocPoolsManager());
-    RootDeviceIndicesContainer rootDeviceIndices = {device->getRootDeviceIndex()};
-    std::map<uint32_t, DeviceBitfield> deviceBitfields{{device->getRootDeviceIndex(), device->getDeviceBitfield()}};
-    MockUsmMemAllocPoolsManager *usmAllocPoolManager = new MockUsmMemAllocPoolsManager(InternalMemoryType::deviceUnifiedMemory, rootDeviceIndices, deviceBitfields, device.get());
-    device->resetUsmAllocationPoolManager(usmAllocPoolManager);
-    EXPECT_EQ(usmAllocPoolManager, device->getUsmMemAllocPoolsManager());
-    usmAllocPoolManager->canAddPoolCallBase = true;
-    EXPECT_TRUE(usmAllocPoolManager->canAddPool(UsmMemAllocPoolsManager::poolInfos[0]));
+    void *beforePoolPtr = addrToPtr(0xBEEF - 1);
+    void *poolStartPtr = addrToPtr(0xBEEF);
+    void *poolEndPtr = addrToPtr(0xBEEF + poolInfo.poolSize - 1);
+    void *pastPoolEndPtr = addrToPtr(0xBEEF + poolInfo.poolSize);
+    {
+        auto *usmAllocPool = new MockUsmMemAllocPool;
+        device->resetUsmAllocationPool(usmAllocPool);
+        EXPECT_EQ(usmAllocPool, device->getUsmMemAllocPool());
+        usmAllocPool->pool = poolStartPtr;
+        usmAllocPool->poolEnd = pastPoolEndPtr;
+        usmAllocPool->poolInfo = poolInfo;
+        usmAllocPool->callBaseCleanup = false;
+        EXPECT_EQ(nullptr, device->getUsmPoolOwningPtr(beforePoolPtr));
+        EXPECT_EQ(usmAllocPool, device->getUsmPoolOwningPtr(poolStartPtr));
+        EXPECT_EQ(usmAllocPool, device->getUsmPoolOwningPtr(poolEndPtr));
+        EXPECT_EQ(nullptr, device->getUsmPoolOwningPtr(pastPoolEndPtr));
+
+        EXPECT_EQ(0u, usmAllocPool->cleanupCalled);
+        device->cleanupUsmAllocationPool();
+        EXPECT_EQ(1u, usmAllocPool->cleanupCalled);
+        device->resetUsmAllocationPool(nullptr);
+    }
+
+    {
+        EXPECT_EQ(nullptr, device->getUsmMemAllocPoolsManager());
+        RootDeviceIndicesContainer rootDeviceIndices = {device->getRootDeviceIndex()};
+        std::map<uint32_t, DeviceBitfield> deviceBitfields{{device->getRootDeviceIndex(), device->getDeviceBitfield()}};
+        MockUsmMemAllocPoolsManager *usmAllocPoolManager = new MockUsmMemAllocPoolsManager(InternalMemoryType::deviceUnifiedMemory, rootDeviceIndices, deviceBitfields, device.get());
+        device->resetUsmAllocationPoolManager(usmAllocPoolManager);
+        EXPECT_EQ(usmAllocPoolManager, device->getUsmMemAllocPoolsManager());
+        usmAllocPoolManager->canAddPoolCallBase = true;
+        EXPECT_TRUE(usmAllocPoolManager->canAddPool(UsmMemAllocPoolsManager::poolInfos[0]));
+
+        auto usmAllocPool = new MockUsmMemAllocPool;
+        usmAllocPool->pool = poolStartPtr;
+        usmAllocPool->poolEnd = pastPoolEndPtr;
+        usmAllocPool->poolInfo = poolInfo;
+        usmAllocPool->callBaseCleanup = false;
+        usmAllocPoolManager->pools[poolInfo].push_back(std::unique_ptr<UsmMemAllocPool>(usmAllocPool));
+
+        EXPECT_EQ(nullptr, device->getUsmPoolOwningPtr(beforePoolPtr));
+        EXPECT_EQ(usmAllocPool, device->getUsmPoolOwningPtr(poolStartPtr));
+        EXPECT_EQ(usmAllocPool, device->getUsmPoolOwningPtr(poolEndPtr));
+        EXPECT_EQ(nullptr, device->getUsmPoolOwningPtr(pastPoolEndPtr));
+    }
 }
 
 TEST(GroupDevicesTest, whenMultipleDevicesAreCreatedThenGroupDevicesCreatesVectorPerEachProductFamilySortedOverGpuTypeAndProductFamily) {
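A side note on the boundary assertions in the test above: they pin a half-open ownership range, where the last byte of the pool is owned and the first byte past it is not. Restated with plain asserts on the same kind of fake address the test builds with addrToPtr (the 2 MB size below is an arbitrary placeholder, not poolInfos[0].poolSize):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
    const uintptr_t poolStart = 0xBEEF;
    const size_t poolSize = 2 * 1024 * 1024; // placeholder size, not poolInfos[0].poolSize
    auto owns = [&](uintptr_t p) { return p >= poolStart && p < poolStart + poolSize; };

    assert(!owns(poolStart - 1));           // beforePoolPtr
    assert(owns(poolStart));                // poolStartPtr
    assert(owns(poolStart + poolSize - 1)); // poolEndPtr: last owned byte
    assert(!owns(poolStart + poolSize));    // pastPoolEndPtr: one past the end
    return 0;
}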