Support Mapping Multiple Physical Memory with one VA and Bug Fix

- Added support for mapping any portion of a virtual allocation to a
physical mapping with a lookup function for reserved virtual addresses.
- Added support for multiple mappings linked to the same virtual
reservation.
- Fixed a bug on Windows where invalid 64-bit addresses were passed back
to the user.

Related-To: LOCI-3904, LOCI-3914, LOCI-3931

Signed-off-by: Spruit, Neil R <neil.r.spruit@intel.com>
This commit is contained in:
Spruit, Neil R 2023-01-21 00:37:25 +00:00 committed by Compute-Runtime-Automation
parent b4f04a800b
commit b5692c04cd
5 changed files with 308 additions and 34 deletions

View File

@ -720,6 +720,23 @@ ze_result_t ContextImp::activateMetricGroups(zet_device_handle_t hDevice,
return L0::Device::fromHandle(hDevice)->activateMetricGroupsDeferred(count, phMetricGroups);
}
NEO::VirtualMemoryReservation *ContextImp::findSupportedVirtualReservation(const void *ptr, size_t size) {
    // Returns the virtual reservation whose reserved range fully contains
    // [ptr, ptr + size), or nullptr when no reservation covers that span.
    auto &reservations = this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap();
    void *searchKey = const_cast<void *>(ptr);
    const uint64_t requestedEnd = reinterpret_cast<uint64_t>(ptr) + size;

    auto candidate = reservations.lower_bound(searchKey);
    // Exact hit: ptr is itself the base address of a reservation.
    if (candidate != reservations.end() && candidate->first == searchKey) {
        const uint64_t reservationEnd = reinterpret_cast<uint64_t>(candidate->first) + candidate->second->virtualAddressRange.size;
        if (reservationEnd >= requestedEnd) {
            return candidate->second;
        }
    }
    // Otherwise ptr may be an offset inside the closest reservation starting
    // below it (the predecessor in the ordered map).
    if (candidate != reservations.begin()) {
        --candidate;
        const uint64_t reservationEnd = reinterpret_cast<uint64_t>(candidate->first) + candidate->second->virtualAddressRange.size;
        if (reservationEnd >= requestedEnd) {
            return candidate->second;
        }
    }
    return nullptr;
}
ze_result_t ContextImp::reserveVirtualMem(const void *pStart,
size_t size,
void **pptr) {
@ -741,7 +758,6 @@ ze_result_t ContextImp::reserveVirtualMem(const void *pStart,
virtualMemoryReservation->flags.readWrite = false;
virtualMemoryReservation->flags.readOnly = false;
virtualMemoryReservation->flags.noAccess = true;
virtualMemoryReservation->mappedAllocation = nullptr;
auto lock = this->driverHandle->getMemoryManager()->lockVirtualMemoryReservationMap();
this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().insert(std::pair<void *, NEO::VirtualMemoryReservation *>(reinterpret_cast<void *>(virtualMemoryReservation->virtualAddressRange.address), virtualMemoryReservation));
*pptr = reinterpret_cast<void *>(virtualMemoryReservation->virtualAddressRange.address);
@ -761,6 +777,7 @@ ze_result_t ContextImp::freeVirtualMem(const void *ptr,
this->driverHandle->getMemoryManager()->freeGpuAddress(virtualMemoryReservation->virtualAddressRange, virtualMemoryReservation->rootDeviceIndex);
delete virtualMemoryReservation;
this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().erase(it);
virtualMemoryReservation = nullptr;
return ZE_RESULT_SUCCESS;
} else {
return ZE_RESULT_ERROR_INVALID_ARGUMENT;
@ -842,12 +859,10 @@ ze_result_t ContextImp::mapVirtualMem(const void *ptr,
return ZE_RESULT_ERROR_INVALID_ARGUMENT;
}
std::map<void *, NEO::VirtualMemoryReservation *>::iterator virtualIt;
NEO::VirtualMemoryReservation *virtualMemoryReservation = nullptr;
auto lockVirtual = this->driverHandle->getMemoryManager()->lockVirtualMemoryReservationMap();
virtualIt = this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().find(const_cast<void *>(ptr));
if (virtualIt != this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().end()) {
virtualMemoryReservation = virtualIt->second;
virtualMemoryReservation = findSupportedVirtualReservation(ptr, size);
if (virtualMemoryReservation) {
switch (access) {
case ZE_MEMORY_ACCESS_ATTRIBUTE_NONE:
virtualMemoryReservation->flags.readOnly = false;
@ -867,9 +882,13 @@ ze_result_t ContextImp::mapVirtualMem(const void *ptr,
default:
return ZE_RESULT_ERROR_INVALID_ENUMERATION;
}
if (virtualMemoryReservation->mappedAllocation != nullptr) {
if (virtualMemoryReservation->mappedAllocations.size() > 0) {
std::map<void *, NEO::MemoryMappedRange *>::iterator physicalMapIt;
physicalMapIt = virtualMemoryReservation->mappedAllocations.find(const_cast<void *>(ptr));
if (physicalMapIt != virtualMemoryReservation->mappedAllocations.end()) {
return ZE_RESULT_ERROR_INVALID_ARGUMENT;
}
}
} else {
return ZE_RESULT_ERROR_INVALID_ARGUMENT;
}
@ -879,10 +898,14 @@ ze_result_t ContextImp::mapVirtualMem(const void *ptr,
allocData.gpuAllocations.addAllocation(allocationNode->allocation);
allocData.cpuAllocation = nullptr;
allocData.device = allocationNode->device;
allocData.size = virtualMemoryReservation->virtualAddressRange.size;
allocData.size = size;
allocData.pageSizeForAlignment = MemoryConstants::pageSize64k;
allocData.setAllocId(this->driverHandle->svmAllocsManager->allocationsCounter++);
virtualMemoryReservation->mappedAllocation = allocationNode;
NEO::MemoryMappedRange *mappedRange = new NEO::MemoryMappedRange;
mappedRange->ptr = ptr;
mappedRange->size = size;
mappedRange->mappedAllocation = allocationNode;
virtualMemoryReservation->mappedAllocations.insert(std::pair<void *, NEO::MemoryMappedRange *>(const_cast<void *>(ptr), mappedRange));
this->driverHandle->getSvmAllocsManager()->insertSVMAlloc(allocData);
NEO::MemoryOperationsHandler *memoryOperationsIface = allocationNode->device->getRootDeviceEnvironment().memoryOperationsInterface.get();
auto success = memoryOperationsIface->makeResident(allocationNode->device, ArrayRef<NEO::GraphicsAllocation *>(&allocationNode->allocation, 1));
@ -895,18 +918,23 @@ ze_result_t ContextImp::mapVirtualMem(const void *ptr,
ze_result_t ContextImp::unMapVirtualMem(const void *ptr,
size_t size) {
std::map<void *, NEO::VirtualMemoryReservation *>::iterator it;
auto lock = this->driverHandle->getMemoryManager()->lockVirtualMemoryReservationMap();
it = this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().find(const_cast<void *>(ptr));
if (it != this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().end()) {
NEO::VirtualMemoryReservation *virtualMemoryReservation = this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().at(const_cast<void *>(ptr));
NEO::SvmAllocationData *allocData = this->driverHandle->getSvmAllocsManager()->getSVMAlloc(reinterpret_cast<void *>(virtualMemoryReservation->mappedAllocation->allocation->getGpuAddress()));
NEO::VirtualMemoryReservation *virtualMemoryReservation = nullptr;
auto lockVirtual = this->driverHandle->getMemoryManager()->lockVirtualMemoryReservationMap();
virtualMemoryReservation = findSupportedVirtualReservation(ptr, size);
if (virtualMemoryReservation) {
std::map<void *, NEO::MemoryMappedRange *>::iterator physicalMapIt;
physicalMapIt = virtualMemoryReservation->mappedAllocations.find(const_cast<void *>(ptr));
if (physicalMapIt != virtualMemoryReservation->mappedAllocations.end()) {
NEO::PhysicalMemoryAllocation *physicalAllocation = physicalMapIt->second->mappedAllocation;
NEO::SvmAllocationData *allocData = this->driverHandle->getSvmAllocsManager()->getSVMAlloc(reinterpret_cast<void *>(physicalAllocation->allocation->getGpuAddress()));
this->driverHandle->getSvmAllocsManager()->removeSVMAlloc(*allocData);
NEO::Device *device = virtualMemoryReservation->mappedAllocation->device;
NEO::Device *device = physicalAllocation->device;
NEO::CommandStreamReceiver *csr = device->getDefaultEngine().commandStreamReceiver;
NEO::OsContext *osContext = &csr->getOsContext();
this->driverHandle->getMemoryManager()->unMapPhysicalToVirtualMemory(virtualMemoryReservation->mappedAllocation->allocation, reinterpret_cast<uint64_t>(ptr), size, osContext, virtualMemoryReservation->rootDeviceIndex);
virtualMemoryReservation->mappedAllocation = nullptr;
this->driverHandle->getMemoryManager()->unMapPhysicalToVirtualMemory(physicalAllocation->allocation, reinterpret_cast<uint64_t>(ptr), size, osContext, virtualMemoryReservation->rootDeviceIndex);
delete physicalMapIt->second;
virtualMemoryReservation->mappedAllocations.erase(physicalMapIt);
}
}
return ZE_RESULT_SUCCESS;
}
@ -914,11 +942,10 @@ ze_result_t ContextImp::unMapVirtualMem(const void *ptr,
ze_result_t ContextImp::setVirtualMemAccessAttribute(const void *ptr,
size_t size,
ze_memory_access_attribute_t access) {
std::map<void *, NEO::VirtualMemoryReservation *>::iterator it;
auto lock = this->driverHandle->getMemoryManager()->lockVirtualMemoryReservationMap();
it = this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().find(const_cast<void *>(ptr));
if (it != this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().end()) {
NEO::VirtualMemoryReservation *virtualMemoryReservation = this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().at(const_cast<void *>(ptr));
NEO::VirtualMemoryReservation *virtualMemoryReservation = nullptr;
auto lockVirtual = this->driverHandle->getMemoryManager()->lockVirtualMemoryReservationMap();
virtualMemoryReservation = findSupportedVirtualReservation(ptr, size);
if (virtualMemoryReservation) {
switch (access) {
case ZE_MEMORY_ACCESS_ATTRIBUTE_NONE:
virtualMemoryReservation->flags.readOnly = false;
@ -948,11 +975,10 @@ ze_result_t ContextImp::getVirtualMemAccessAttribute(const void *ptr,
size_t size,
ze_memory_access_attribute_t *access,
size_t *outSize) {
std::map<void *, NEO::VirtualMemoryReservation *>::iterator it;
auto lock = this->driverHandle->getMemoryManager()->lockVirtualMemoryReservationMap();
it = this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().find(const_cast<void *>(ptr));
if (it != this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().end()) {
NEO::VirtualMemoryReservation *virtualMemoryReservation = this->driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().at(const_cast<void *>(ptr));
NEO::VirtualMemoryReservation *virtualMemoryReservation = nullptr;
auto lockVirtual = this->driverHandle->getMemoryManager()->lockVirtualMemoryReservationMap();
virtualMemoryReservation = findSupportedVirtualReservation(ptr, size);
if (virtualMemoryReservation) {
if (virtualMemoryReservation->flags.readWrite) {
*access = ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE;
} else if (virtualMemoryReservation->flags.readOnly) {

View File

@ -162,6 +162,7 @@ struct ContextImp : Context {
this->deviceHandles.push_back(deviceHandle);
this->numDevices = static_cast<uint32_t>(this->deviceHandles.size());
}
NEO::VirtualMemoryReservation *findSupportedVirtualReservation(const void *ptr, size_t size);
protected:
bool isAllocationSuitableForCompression(const StructuresLookupTable &structuresLookupTable, Device &device, size_t allocSize);

View File

@ -128,6 +128,87 @@ TEST_F(MultiDeviceContextTests,
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
}
TEST_F(MultiDeviceContextTests,
whenMappingReservedMemoryOnPhysicalMemoryOnMultiDeviceThenSuccessReturned) {
// Scenario: one 2-page virtual reservation whose first page is backed by
// physical memory from device 0 and whose second page is backed by physical
// memory from device 1, exercised for each access attribute.
ze_context_handle_t hContext;
ze_context_desc_t desc = {ZE_STRUCTURE_TYPE_CONTEXT_DESC, nullptr, 0};
ze_result_t res = driverHandle->createContext(&desc, 0u, nullptr, &hContext);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
// Install mock memory-operations handlers on both root devices so residency
// calls made during mapping succeed without real hardware.
driverHandle->devices[0]->getNEODevice()->getExecutionEnvironment()->rootDeviceEnvironments[0]->memoryOperationsInterface =
std::make_unique<NEO::MockMemoryOperations>();
driverHandle->devices[1]->getNEODevice()->getExecutionEnvironment()->rootDeviceEnvironments[1]->memoryOperationsInterface =
std::make_unique<NEO::MockMemoryOperations>();
ContextImp *contextImp = static_cast<ContextImp *>(L0::Context::fromHandle(hContext));
void *pStart = 0x0;
size_t size = 4096u;
void *ptr = nullptr;
size_t pagesize = 0u;
res = contextImp->queryVirtualMemPageSize(driverHandle->devices[0], size, &pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
// Reserve two pages of virtual address space as a single reservation.
res = contextImp->reserveVirtualMem(pStart, pagesize * 2, &ptr);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
EXPECT_GT(static_cast<int>(driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().size()), 0);
// One page of physical memory per device.
ze_physical_mem_desc_t descMem = {ZE_STRUCTURE_TYPE_PHYSICAL_MEM_DESC, nullptr, 0, pagesize};
ze_physical_mem_handle_t mem = {};
res = contextImp->createPhysicalMem(driverHandle->devices[0], &descMem, &mem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
ze_physical_mem_handle_t secondHalfMem = {};
res = contextImp->createPhysicalMem(driverHandle->devices[1], &descMem, &secondHalfMem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
ze_memory_access_attribute_t access = {ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE};
size_t offset = 0;
std::vector<ze_memory_access_attribute_t> memoryAccessFlags = {
ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE, ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY,
ZE_MEMORY_ACCESS_ATTRIBUTE_NONE};
// Address of the second page inside the reservation.
void *offsetAddr =
reinterpret_cast<void *>(reinterpret_cast<uint64_t>(ptr) + pagesize);
for (auto accessFlags : memoryAccessFlags) {
// Map each half of the reservation to a different device's physical memory.
res = contextImp->mapVirtualMem(ptr, pagesize, mem, offset, accessFlags);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->mapVirtualMem(offsetAddr, pagesize, secondHalfMem, offset, accessFlags);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->setVirtualMemAccessAttribute(ptr, pagesize, access);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->setVirtualMemAccessAttribute(offsetAddr, pagesize, access);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
ze_memory_access_attribute_t outAccess = {};
size_t outSize = 0;
// outSize reports the size of the whole reservation (2 pages), regardless
// of which page inside it was queried.
res = contextImp->getVirtualMemAccessAttribute(ptr, pagesize, &outAccess, &outSize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
EXPECT_EQ(pagesize * 2, outSize);
res = contextImp->getVirtualMemAccessAttribute(offsetAddr, pagesize, &outAccess, &outSize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
EXPECT_EQ(pagesize * 2, outSize);
// Unmap both halves before the next access-flag iteration remaps them.
res = contextImp->unMapVirtualMem(ptr, pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->unMapVirtualMem(offsetAddr, pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
}
// Teardown: release physical memory, the reservation, then the context.
res = contextImp->destroyPhysicalMem(mem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->destroyPhysicalMem(secondHalfMem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->freeVirtualMem(ptr, pagesize * 2);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->destroy();
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
}
TEST_F(MultiDeviceContextTests,
whenAllocatingSharedMemoryWithDeviceNotDefinedForContextThenDeviceLostIsReturned) {
ze_context_handle_t hContext;
@ -1290,6 +1371,147 @@ TEST_F(ContextTest, whenCallingVirtualMemReserveWithPStartWithSuccessfulAllocati
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
}
TEST_F(ContextTest, whenUsingOffsetsIntoReservedVirtualMemoryThenMappingIsSuccessful) {
// Scenario: a 2-page virtual reservation where only the SECOND page (an
// offset into the reservation, not its base address) is mapped to physical
// memory — exercising findSupportedVirtualReservation's offset lookup.
ze_context_handle_t hContext;
ze_context_desc_t desc = {ZE_STRUCTURE_TYPE_CONTEXT_DESC, nullptr, 0};
ze_result_t res = driverHandle->createContext(&desc, 0u, nullptr, &hContext);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
// Mock residency handling so mapVirtualMem's makeResident call succeeds.
device->getNEODevice()->getExecutionEnvironment()->rootDeviceEnvironments[0]->memoryOperationsInterface =
std::make_unique<NEO::MockMemoryOperations>();
ContextImp *contextImp = static_cast<ContextImp *>(L0::Context::fromHandle(hContext));
void *pStart = 0x0;
size_t size = 4096u;
void *ptr = nullptr;
size_t pagesize = 0u;
res = contextImp->queryVirtualMemPageSize(device, size, &pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
// Reserve two pages; only one page of physical memory is created.
res = contextImp->reserveVirtualMem(pStart, pagesize * 2, &ptr);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
EXPECT_GT(static_cast<int>(driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().size()), 0);
ze_physical_mem_desc_t descMem = {ZE_STRUCTURE_TYPE_PHYSICAL_MEM_DESC, nullptr, 0, pagesize};
ze_physical_mem_handle_t mem = {};
res = contextImp->createPhysicalMem(device, &descMem, &mem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
ze_memory_access_attribute_t access = {ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE};
size_t offset = 0;
std::vector<ze_memory_access_attribute_t> memoryAccessFlags = {
ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE, ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY,
ZE_MEMORY_ACCESS_ATTRIBUTE_NONE};
// Second page of the reservation: not a key in the reservation map.
void *offsetAddr =
reinterpret_cast<void *>(reinterpret_cast<uint64_t>(ptr) + pagesize);
for (auto accessFlags : memoryAccessFlags) {
// Map/set/get/unmap all operate on the offset address only.
res = contextImp->mapVirtualMem(offsetAddr, pagesize, mem, offset, accessFlags);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->setVirtualMemAccessAttribute(offsetAddr, pagesize, access);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
ze_memory_access_attribute_t outAccess = {};
size_t outSize = 0;
res = contextImp->getVirtualMemAccessAttribute(offsetAddr, pagesize, &outAccess, &outSize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
// outSize is the full reservation size, not the queried page size.
EXPECT_EQ(pagesize * 2, outSize);
res = contextImp->unMapVirtualMem(offsetAddr, pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
}
res = contextImp->destroyPhysicalMem(mem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->freeVirtualMem(ptr, pagesize * 2);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->destroy();
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
}
TEST_F(ContextTest, whenUsingOffsetsIntoReservedVirtualMemoryWithMultiplePhysicalMemoryThenMappingIsSuccessful) {
// Scenario: a single 2-page virtual reservation with TWO distinct physical
// allocations mapped into it simultaneously (one per page) on one device —
// verifying multiple mappings can coexist within one reservation.
ze_context_handle_t hContext;
ze_context_desc_t desc = {ZE_STRUCTURE_TYPE_CONTEXT_DESC, nullptr, 0};
ze_result_t res = driverHandle->createContext(&desc, 0u, nullptr, &hContext);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
// Mock residency handling so mapVirtualMem's makeResident call succeeds.
device->getNEODevice()->getExecutionEnvironment()->rootDeviceEnvironments[0]->memoryOperationsInterface =
std::make_unique<NEO::MockMemoryOperations>();
ContextImp *contextImp = static_cast<ContextImp *>(L0::Context::fromHandle(hContext));
void *pStart = 0x0;
size_t size = 4096u;
void *ptr = nullptr;
size_t pagesize = 0u;
res = contextImp->queryVirtualMemPageSize(device, size, &pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
// Reserve two pages; create one page of physical memory for each half.
res = contextImp->reserveVirtualMem(pStart, pagesize * 2, &ptr);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
EXPECT_GT(static_cast<int>(driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().size()), 0);
ze_physical_mem_desc_t descMem = {ZE_STRUCTURE_TYPE_PHYSICAL_MEM_DESC, nullptr, 0, pagesize};
ze_physical_mem_handle_t mem = {};
res = contextImp->createPhysicalMem(device, &descMem, &mem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
ze_physical_mem_handle_t secondHalfMem = {};
res = contextImp->createPhysicalMem(device, &descMem, &secondHalfMem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
ze_memory_access_attribute_t access = {ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE};
size_t offset = 0;
std::vector<ze_memory_access_attribute_t> memoryAccessFlags = {
ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE, ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY,
ZE_MEMORY_ACCESS_ATTRIBUTE_NONE};
// Address of the second page inside the reservation.
void *offsetAddr =
reinterpret_cast<void *>(reinterpret_cast<uint64_t>(ptr) + pagesize);
for (auto accessFlags : memoryAccessFlags) {
// Both pages mapped at once, each to its own physical allocation.
res = contextImp->mapVirtualMem(ptr, pagesize, mem, offset, accessFlags);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->mapVirtualMem(offsetAddr, pagesize, secondHalfMem, offset, accessFlags);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->setVirtualMemAccessAttribute(ptr, pagesize, access);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->setVirtualMemAccessAttribute(offsetAddr, pagesize, access);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
ze_memory_access_attribute_t outAccess = {};
size_t outSize = 0;
// outSize is the full reservation size for either queried page.
res = contextImp->getVirtualMemAccessAttribute(ptr, pagesize, &outAccess, &outSize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
EXPECT_EQ(pagesize * 2, outSize);
res = contextImp->getVirtualMemAccessAttribute(offsetAddr, pagesize, &outAccess, &outSize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
EXPECT_EQ(pagesize * 2, outSize);
res = contextImp->unMapVirtualMem(ptr, pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->unMapVirtualMem(offsetAddr, pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
}
res = contextImp->destroyPhysicalMem(mem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->destroyPhysicalMem(secondHalfMem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->freeVirtualMem(ptr, pagesize * 2);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->destroy();
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
}
TEST_F(ContextTest, whenCallingVirtualMemoryReservationWhenOutOfMemoryThenOutOfMemoryReturned) {
ze_context_handle_t hContext;
ze_context_desc_t desc = {ZE_STRUCTURE_TYPE_CONTEXT_DESC, nullptr, 0};
@ -1419,7 +1641,7 @@ TEST_F(ContextTest, whenCallingMapVirtualMemoryWithInvalidValuesThenFailureRetur
size_t pagesize = 0u;
res = contextImp->queryVirtualMemPageSize(device, size, &pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->reserveVirtualMem(pStart, pagesize, &ptr);
res = contextImp->reserveVirtualMem(pStart, pagesize * 2, &ptr);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
EXPECT_GT(static_cast<int>(driverHandle->getMemoryManager()->getVirtualMemoryReservationMap().size()), 0);
@ -1439,9 +1661,20 @@ TEST_F(ContextTest, whenCallingMapVirtualMemoryWithInvalidValuesThenFailureRetur
res = contextImp->mapVirtualMem(nullptr, pagesize, mem, offset, access);
EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, res);
res = contextImp->mapVirtualMem(ptr, pagesize * 4, mem, offset, access);
EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, res);
void *offsetAddr =
reinterpret_cast<void *>(reinterpret_cast<uint64_t>(ptr) + pagesize);
res = contextImp->mapVirtualMem(offsetAddr, pagesize * 2, mem, offset, access);
EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, res);
res = contextImp->unMapVirtualMem(nullptr, pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->unMapVirtualMem(ptr, pagesize);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->mapVirtualMem(ptr, 0u, mem, offset, access);
EXPECT_EQ(ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT, res);
@ -1462,7 +1695,7 @@ TEST_F(ContextTest, whenCallingMapVirtualMemoryWithInvalidValuesThenFailureRetur
res = contextImp->destroyPhysicalMem(mem);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->freeVirtualMem(ptr, pagesize);
res = contextImp->freeVirtualMem(ptr, pagesize * 2);
EXPECT_EQ(ZE_RESULT_SUCCESS, res);
res = contextImp->destroy();

View File

@ -59,9 +59,16 @@ struct AddressRange {
size_t size;
};
// One physical-to-virtual mapping inside a VirtualMemoryReservation: a
// sub-range of the reserved VA space bound to a physical allocation.
struct MemoryMappedRange {
const void *ptr; // base virtual address of this mapped sub-range
size_t size; // size in bytes of this mapped sub-range
struct PhysicalMemoryAllocation *mappedAllocation; // physical backing for this range
};
// A reserved GPU virtual address range; may carry multiple physical
// mappings (one MemoryMappedRange per mapped sub-range), keyed by the
// sub-range's base virtual address.
struct VirtualMemoryReservation {
AddressRange virtualAddressRange; // reserved VA range (base address + size)
MemoryFlags flags; // access attributes (readWrite / readOnly / noAccess)
std::map<void *, MemoryMappedRange *> mappedAllocations; // active mappings keyed by mapped VA
struct PhysicalMemoryAllocation *mappedAllocation; // NOTE(review): appears superseded by mappedAllocations above — confirm this legacy single-mapping field is still needed
uint32_t rootDeviceIndex;
};

View File

@ -78,11 +78,16 @@ void WddmMemoryManager::unMapPhysicalToVirtualMemory(GraphicsAllocation *physica
wddm->reserveGpuVirtualAddress(gpuRange, gfxPartition->getHeapMinimalAddress(HeapIndex::HEAP_STANDARD64KB), gfxPartition->getHeapLimit(HeapIndex::HEAP_STANDARD64KB), bufferSize);
physicalAllocation->setCpuPtrAndGpuAddress(nullptr, 0u);
physicalAllocation->setReservedAddressRange(nullptr, 0u);
WddmAllocation *wddmAllocation = reinterpret_cast<WddmAllocation *>(physicalAllocation);
wddmAllocation->mappedPhysicalMemoryReservation = false;
}
// Maps a physical allocation at the supplied GPU virtual address.
// @param physicalAllocation  physical allocation to back the VA range
// @param gpuRange            target GPU VA (canonical form, as handed to the user)
// @param bufferSize          size in bytes of the range being mapped
// @return true when the WDDM virtual-address mapping succeeded
bool WddmMemoryManager::mapPhysicalToVirtualMemory(GraphicsAllocation *physicalAllocation, uint64_t gpuRange, size_t bufferSize) {
    // static_cast performs the checked derived-class downcast correctly even
    // under multiple inheritance, unlike reinterpret_cast (assumes
    // WddmAllocation derives from GraphicsAllocation, as elsewhere in this file).
    WddmAllocation *wddmAllocation = static_cast<WddmAllocation *>(physicalAllocation);
    // WDDM needs the decanonized (hardware) address for the actual mapping;
    // the canonical gpuRange is what callers see and must be preserved below.
    auto decanonizedAddress = getGmmHelper(physicalAllocation->getRootDeviceIndex())->decanonize(gpuRange);
    wddmAllocation->mappedPhysicalMemoryReservation = mapGpuVirtualAddress(wddmAllocation, reinterpret_cast<void *>(decanonizedAddress));
    // Record the canonical address/range on the allocation so later queries
    // and the eventual unmap use the user-visible form.
    physicalAllocation->setCpuPtrAndGpuAddress(nullptr, gpuRange);
    physicalAllocation->setReservedAddressRange(reinterpret_cast<void *>(gpuRange), bufferSize);
    return wddmAllocation->mappedPhysicalMemoryReservation;
}
@ -823,6 +828,8 @@ AddressRange WddmMemoryManager::reserveGpuAddress(const void *requiredStartAddre
break;
}
}
auto gmmHelper = executionEnvironment.rootDeviceEnvironments[*reservedOnRootDeviceIndex]->getGmmHelper();
gpuVa = gmmHelper->canonize(gpuVa);
return AddressRange{gpuVa, reservedSize};
}