feature: add support for zeMemGet/SetAtomicAccessAttributeExp

Resolves: NEO-8219

Signed-off-by: John Falkowski <john.falkowski@intel.com>
This commit is contained in:
John Falkowski
2023-09-20 22:34:19 +00:00
committed by Compute-Runtime-Automation
parent 1ac37d4a49
commit 56f05303c9
24 changed files with 735 additions and 1 deletions

View File

@@ -657,6 +657,8 @@ zeGetMemExpProcAddrTable(
ze_result_t result = ZE_RESULT_SUCCESS;
pDdiTable->pfnGetIpcHandleFromFileDescriptorExp = L0::zeMemGetIpcHandleFromFileDescriptorExp;
pDdiTable->pfnGetFileDescriptorFromIpcHandleExp = L0::zeMemGetFileDescriptorFromIpcHandleExp;
pDdiTable->pfnSetAtomicAccessAttributeExp = L0::zeMemSetAtomicAccessAttributeExp;
pDdiTable->pfnGetAtomicAccessAttributeExp = L0::zeMemGetAtomicAccessAttributeExp;
driverDdiTable.coreDdiTable.MemExp = *pDdiTable;
return result;
}

View File

@@ -176,6 +176,14 @@ ze_result_t zeRTASParallelOperationDestroyExp(ze_rtas_parallel_operation_exp_han
return L0::RTASParallelOperation::fromHandle(hParallelOperation)->destroy();
}
// Experimental API: records/applies an atomic access attribute for a shared
// allocation by forwarding to the context implementation.
ze_result_t zeMemSetAtomicAccessAttributeExp(ze_context_handle_t hContext, ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t attr) {
    auto context = L0::Context::fromHandle(hContext);
    auto device = L0::Device::fromHandle(hDevice);
    return context->setAtomicAccessAttribute(device, ptr, size, attr);
}
// Experimental API: queries the previously recorded atomic access attribute
// by forwarding to the context implementation.
ze_result_t zeMemGetAtomicAccessAttributeExp(ze_context_handle_t hContext, ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t *pAttr) {
    auto context = L0::Context::fromHandle(hContext);
    auto device = L0::Device::fromHandle(hDevice);
    return context->getAtomicAccessAttribute(device, ptr, size, pAttr);
}
} // namespace L0
extern "C" {
@@ -367,4 +375,24 @@ zeRTASParallelOperationDestroyExp(
return L0::zeRTASParallelOperationDestroyExp(hParallelOperation);
}
// Exported C entry point for zeMemSetAtomicAccessAttributeExp; forwards directly
// to the driver-internal implementation in namespace L0 without any validation.
ZE_APIEXPORT ze_result_t ZE_APICALL
zeMemSetAtomicAccessAttributeExp(
    ze_context_handle_t hContext,
    ze_device_handle_t hDevice,
    const void *ptr,
    size_t size,
    ze_memory_atomic_attr_exp_flags_t attr) {
    return L0::zeMemSetAtomicAccessAttributeExp(hContext, hDevice, ptr, size, attr);
}
// Exported C entry point for zeMemGetAtomicAccessAttributeExp; forwards directly
// to the driver-internal implementation in namespace L0 without any validation.
ZE_APIEXPORT ze_result_t ZE_APICALL
zeMemGetAtomicAccessAttributeExp(
    ze_context_handle_t hContext,
    ze_device_handle_t hDevice,
    const void *ptr,
    size_t size,
    ze_memory_atomic_attr_exp_flags_t *pAttr) {
    return L0::zeMemGetAtomicAccessAttributeExp(hContext, hDevice, ptr, size, pAttr);
}
} // extern "C"

View File

@@ -143,4 +143,18 @@ ze_result_t zeRTASParallelOperationJoinExp(
ze_result_t zeRTASParallelOperationDestroyExp(
ze_rtas_parallel_operation_exp_handle_t hParallelOperation);
// Driver-internal declarations for the experimental atomic-access-attribute
// memory APIs; their definitions forward to Context::set/getAtomicAccessAttribute.
ze_result_t zeMemSetAtomicAccessAttributeExp(
    ze_context_handle_t hContext,
    ze_device_handle_t hDevice,
    const void *ptr,
    size_t size,
    ze_memory_atomic_attr_exp_flags_t attr);
ze_result_t zeMemGetAtomicAccessAttributeExp(
    ze_context_handle_t hContext,
    ze_device_handle_t hDevice,
    const void *ptr,
    size_t size,
    ze_memory_atomic_attr_exp_flags_t *pAttr);
} // namespace L0

View File

@@ -103,6 +103,8 @@ struct Context : _ze_context_handle_t {
ze_device_handle_t *phDevice) = 0;
virtual ze_result_t getImageAllocProperties(Image *image,
ze_image_allocation_ext_properties_t *pAllocProperties) = 0;
// Set/query the atomic access attribute (a ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_*
// bitmask) for the shared allocation containing ptr, on the given device.
virtual ze_result_t setAtomicAccessAttribute(ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t attr) = 0;
virtual ze_result_t getAtomicAccessAttribute(ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t *pAttr) = 0;
virtual ze_result_t createModule(ze_device_handle_t hDevice,
const ze_module_desc_t *desc,
ze_module_handle_t *phModule,

View File

@@ -27,6 +27,10 @@
#include "level_zero/core/source/memory/memory_operations_helper.h"
#include "level_zero/core/source/module/module.h"
namespace NEO {
enum class AtomicAccessMode : uint32_t;
} // namespace NEO
namespace L0 {
ze_result_t ContextImp::destroy() {
@@ -843,6 +847,81 @@ ze_result_t ContextImp::getImageAllocProperties(Image *image, ze_image_allocatio
return handleAllocationExtensions(alloc, ZE_MEMORY_TYPE_DEVICE, pAllocProperties->pNext, driverHandle);
}
// Sets the atomic access attribute for a shared (single-device) USM allocation.
//
// hDevice: device the attribute applies to; must be non-null (multi-device
//          shared allocations are not supported yet).
// ptr/size: range inside an existing shared SVM allocation.
// attr: ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_* bitmask. An empty mask is recorded
//       without touching the allocation; for a non-empty mask the resolved
//       access mode (possibly None when only NO_* flags are set) is applied
//       via the memory manager and the mask is recorded.
//
// Returns ZE_RESULT_ERROR_INVALID_ARGUMENT for a null device, an unknown
// allocation, a non-shared allocation, or flags the device cannot honor.
ze_result_t ContextImp::setAtomicAccessAttribute(ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t attr) {
    if (nullptr == hDevice) {
        // no support for atomics for multi-device shared allocations at the moment
        return ZE_RESULT_ERROR_INVALID_ARGUMENT;
    }
    // The flags type is a plain uint32_t bitmask; a direct cast replaces the
    // original byte-wise memcpy_s copy.
    const uint32_t attrEval = static_cast<uint32_t>(attr);

    auto device = Device::fromHandle(hDevice);
    auto allocData = device->getDriverHandle()->getSvmAllocsManager()->getSVMAlloc(ptr);
    if (allocData == nullptr) {
        return ZE_RESULT_ERROR_INVALID_ARGUMENT;
    }

    // Only shared allocations may carry an atomic access attribute.
    ze_memory_allocation_properties_t memoryProperties = {};
    this->getMemAllocProperties(ptr, &memoryProperties, &hDevice);
    if (memoryProperties.type != ZE_MEMORY_TYPE_SHARED) {
        return ZE_RESULT_ERROR_INVALID_ARGUMENT;
    }

    DeviceImp *deviceImp = static_cast<DeviceImp *>(device);
    if (attrEval == 0) {
        // Empty mask: nothing to apply to the allocation, just record it.
        deviceImp->atomicAccessAllocations[allocData] = attr;
        return ZE_RESULT_SUCCESS;
    }

    ze_device_memory_access_properties_t memProp = {};
    deviceImp->getMemoryAccessProperties(&memProp);

    // Resolve the strongest requested mode; if several positive flags are set,
    // the last matching branch wins (System > Host > Device).
    NEO::AtomicAccessMode mode = NEO::AtomicAccessMode::None;
    if (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_DEVICE_ATOMICS) {
        if (!(memProp.deviceAllocCapabilities & ZE_MEMORY_ACCESS_CAP_FLAG_ATOMIC)) {
            return ZE_RESULT_ERROR_INVALID_ARGUMENT;
        }
        mode = NEO::AtomicAccessMode::Device;
    }
    if (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_HOST_ATOMICS) {
        if (!(memProp.hostAllocCapabilities & ZE_MEMORY_ACCESS_CAP_FLAG_ATOMIC)) {
            return ZE_RESULT_ERROR_INVALID_ARGUMENT;
        }
        mode = NEO::AtomicAccessMode::Host;
    }
    if (attrEval & ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_SYSTEM_ATOMICS) {
        // System atomics need concurrent-atomic support on either shared capability.
        if ((!(memProp.sharedSingleDeviceAllocCapabilities & ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT_ATOMIC)) &&
            (!(memProp.sharedCrossDeviceAllocCapabilities & ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT_ATOMIC))) {
            return ZE_RESULT_ERROR_INVALID_ARGUMENT;
        }
        mode = NEO::AtomicAccessMode::System;
    }

    // Apply the resolved mode to the GPU allocation and record the attribute.
    auto alloc = allocData->gpuAllocations.getGraphicsAllocation(deviceImp->getRootDeviceIndex());
    auto memoryManager = device->getDriverHandle()->getMemoryManager();
    memoryManager->setAtomicAccess(alloc, size, mode, deviceImp->getRootDeviceIndex());
    deviceImp->atomicAccessAllocations[allocData] = attr;
    return ZE_RESULT_SUCCESS;
}
// Queries the last atomic access attribute recorded (via setAtomicAccessAttribute)
// for the SVM allocation containing ptr on the given device.
//
// Returns ZE_RESULT_ERROR_INVALID_ARGUMENT when ptr does not belong to a known
// SVM allocation or when no attribute has been recorded for it on this device.
ze_result_t ContextImp::getAtomicAccessAttribute(ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t *pAttr) {
    auto device = Device::fromHandle(hDevice);
    auto allocData = device->getDriverHandle()->getSvmAllocsManager()->getSVMAlloc(ptr);
    if (allocData == nullptr) {
        return ZE_RESULT_ERROR_INVALID_ARGUMENT;
    }
    DeviceImp *deviceImp = static_cast<DeviceImp *>(device);
    // Single map lookup instead of find() followed by operator[].
    auto entry = deviceImp->atomicAccessAllocations.find(allocData);
    if (entry == deviceImp->atomicAccessAllocations.end()) {
        return ZE_RESULT_ERROR_INVALID_ARGUMENT;
    }
    *pAttr = entry->second;
    return ZE_RESULT_SUCCESS;
}
ze_result_t ContextImp::createModule(ze_device_handle_t hDevice,
const ze_module_desc_t *desc,
ze_module_handle_t *phModule,

View File

@@ -101,6 +101,8 @@ struct ContextImp : Context {
ze_device_handle_t *phDevice) override;
ze_result_t getImageAllocProperties(Image *image,
ze_image_allocation_ext_properties_t *pAllocProperties) override;
// Overrides of Context's atomic-access-attribute interface; state is tracked
// per device in DeviceImp::atomicAccessAllocations.
ze_result_t setAtomicAccessAttribute(ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t attr) override;
ze_result_t getAtomicAccessAttribute(ze_device_handle_t hDevice, const void *ptr, size_t size, ze_memory_atomic_attr_exp_flags_t *pAttr) override;
ze_result_t createModule(ze_device_handle_t hDevice,
const ze_module_desc_t *desc,
ze_module_handle_t *phModule,

View File

@@ -142,6 +142,7 @@ struct DeviceImp : public Device {
std::unordered_map<const void *, L0::Image *> peerImageAllocations;
NEO::SpinLock peerImageAllocationsMutex;
std::map<NEO::SvmAllocationData *, NEO::MemAdviseFlags> memAdviseSharedAllocations;
std::map<NEO::SvmAllocationData *, ze_memory_atomic_attr_exp_flags_t> atomicAccessAllocations;
std::unique_ptr<NEO::AllocationsList> allocationsForReuse;
std::unique_ptr<NEO::DriverInfo> driverInfo;
void createSysmanHandle(bool isSubDevice);

View File

@@ -2339,6 +2339,37 @@ TEST_F(MultipleDevicesTest, givenTheSameDeviceThenCanAccessPeerReturnsTrue) {
EXPECT_TRUE(canAccess);
}
// Verifies SYSTEM_ATOMICS can be set on a cross-P2P-device shared allocation when
// KMD migration, recoverable page faults, and concurrent cross-device access are enabled.
TEST_F(MultipleDevicesTest, whenCallingsetAtomicAccessAttributeForSystemAccessSharedCrossDeviceThenSuccessIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    L0::Device *device0 = driverHandle->devices[0];
    auto &hwInfo = device0->getNEODevice()->getHardwareInfo();
    DebugManagerStateRestore restorer;
    DebugManager.flags.UseKmdMigration.set(true);
    DebugManager.flags.EnableRecoverablePageFaults.set(true);
    DebugManager.flags.EnableConcurrentSharedCrossP2PDeviceAccess.set(true);
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    ze_result_t result = context->allocSharedMem(device0->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_SYSTEM_ATOMICS;
    result = context->setAtomicAccessAttribute(device0->toHandle(), ptr, size, attr);
    // Success is only guaranteed when the platform reports both P2P access and
    // P2P atomic access; otherwise the result is intentionally left unchecked.
    if ((hwInfo.capabilityTable.p2pAccessSupported == true) && (hwInfo.capabilityTable.p2pAtomicAccessSupported == true)) {
        EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    }
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
TEST_F(MultipleDevicesDisabledImplicitScalingTest, givenTwoRootDevicesFromSameFamilyThenCanAccessPeerSuccessfullyCompletes) {
L0::Device *device0 = driverHandle->devices[0];
L0::Device *device1 = driverHandle->devices[1];

View File

@@ -43,6 +43,7 @@
#include "level_zero/core/test/unit_tests/mocks/mock_cmdlist.h"
#include "level_zero/core/test/unit_tests/mocks/mock_context.h"
#include "level_zero/core/test/unit_tests/mocks/mock_kernel.h"
#include "level_zero/core/test/unit_tests/mocks/mock_memory_manager.h"
namespace L0 {
namespace ult {
@@ -957,6 +958,428 @@ TEST_F(MemoryTest, givenNoSupportForDualStorageSharedMemoryWhenAllocatingSharedM
ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// An empty (zero) attribute bitmask on a shared allocation is accepted and recorded.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeWithZeroInputSuccessIsReturned) {
    const size_t allocSize = 10;
    const size_t allocAlignment = 1u;
    ze_device_mem_alloc_desc_t deviceAllocDesc = {};
    ze_host_mem_alloc_desc_t hostAllocDesc = {};

    void *allocation = reinterpret_cast<void *>(0x1234);
    ze_result_t status = context->allocSharedMem(device->toHandle(), &deviceAllocDesc, &hostAllocDesc,
                                                 allocSize, allocAlignment, &allocation);
    EXPECT_EQ(ZE_RESULT_SUCCESS, status);
    EXPECT_NE(nullptr, allocation);

    const ze_memory_atomic_attr_exp_flags_t emptyAttr = 0;
    status = context->setAtomicAccessAttribute(device->toHandle(), allocation, allocSize, emptyAttr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, status);

    status = context->freeMem(allocation);
    ASSERT_EQ(status, ZE_RESULT_SUCCESS);
}
// A null device handle must be rejected even for a valid shared allocation.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeWithDeviceNullptrFailureIsReturned) {
    const size_t allocSize = 10;
    const size_t allocAlignment = 1u;
    ze_device_mem_alloc_desc_t deviceAllocDesc = {};
    ze_host_mem_alloc_desc_t hostAllocDesc = {};

    void *allocation = reinterpret_cast<void *>(0x1234);
    ze_result_t status = context->allocSharedMem(device->toHandle(), &deviceAllocDesc, &hostAllocDesc,
                                                 allocSize, allocAlignment, &allocation);
    EXPECT_EQ(ZE_RESULT_SUCCESS, status);
    EXPECT_NE(nullptr, allocation);

    const ze_memory_atomic_attr_exp_flags_t emptyAttr = 0;
    status = context->setAtomicAccessAttribute(nullptr, allocation, allocSize, emptyAttr);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, status);

    status = context->freeMem(allocation);
    ASSERT_EQ(status, ZE_RESULT_SUCCESS);
}
// Atomic access attributes are only valid for shared allocations: both a host
// allocation and a device allocation must be rejected with INVALID_ARGUMENT.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeForNonSharedMemoryAllocationFailureIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    // Host allocation: not shared, so the attribute call must fail.
    ze_result_t result = context->allocHostMem(&hostDesc,
                                               size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = 0;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
    // Device allocation: also not shared, so the attribute call must fail.
    result = context->allocDeviceMem(device->toHandle(),
                                     &deviceDesc,
                                     size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    attr = 0;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// setAtomicAccessAttribute must fail for a pointer that is not a known SVM allocation.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeWithInvalidAllocationPtrErrorIsReturned) {
    const size_t allocSize = 10;
    const size_t allocAlignment = 1u;
    ze_device_mem_alloc_desc_t deviceAllocDesc = {};
    ze_host_mem_alloc_desc_t hostAllocDesc = {};

    void *allocation = reinterpret_cast<void *>(0x1234);
    ze_result_t status = context->allocSharedMem(device->toHandle(), &deviceAllocDesc, &hostAllocDesc,
                                                 allocSize, allocAlignment, &allocation);
    EXPECT_EQ(ZE_RESULT_SUCCESS, status);
    EXPECT_NE(nullptr, allocation);

    // Pointer that was never allocated through the driver.
    void *unknownPtr = reinterpret_cast<void *>(0xface);
    const ze_memory_atomic_attr_exp_flags_t noAtomicsAttr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_ATOMICS;
    status = context->setAtomicAccessAttribute(device->toHandle(), unknownPtr, allocSize, noAtomicsAttr);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, status);

    status = context->freeMem(allocation);
    ASSERT_EQ(status, ZE_RESULT_SUCCESS);
}
// NO_ATOMICS is a valid non-zero attribute for a shared allocation; it is applied
// and recorded without requiring any atomic capability from the device.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeWithNoAtomicsThenSuccessIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    ze_result_t result = context->allocSharedMem(device->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// With all memory access capabilities mocked to zero and concurrent cross-device
// access disabled, each positive atomic flag (DEVICE/HOST/SYSTEM) must be rejected.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeWithInsufficientCapabilityThenFailureIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    DebugManagerStateRestore restorer;
    DebugManager.flags.EnableConcurrentSharedCrossP2PDeviceAccess.set(false);
    // Product helper that reports no memory access capabilities at all.
    struct MockProductHelperAtomic : NEO::ProductHelperHw<IGFX_UNKNOWN> {
        MockProductHelperAtomic() = default;
        uint64_t getDeviceMemCapabilities() const override {
            return 0;
        }
        uint64_t getHostMemCapabilities(const HardwareInfo *hwInfo) const override {
            return 0;
        }
        uint64_t getSingleDeviceSharedMemCapabilities() const override {
            return 0;
        }
    };
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    auto mockProductHelper = std::make_unique<MockProductHelperAtomic>();
    std::unique_ptr<ProductHelper> productHelper = std::move(mockProductHelper);
    auto &rootDeviceEnvironment = neoDevice->getRootDeviceEnvironmentRef();
    // NOTE(review): the original helper is swapped out but never swapped back before
    // the test ends — presumably safe because the fixture rebuilds the device per
    // test; confirm this cannot leak into other tests.
    std::swap(rootDeviceEnvironment.productHelper, productHelper);
    ze_result_t result = context->allocSharedMem(device->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_DEVICE_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, result);
    attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_HOST_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, result);
    attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_SYSTEM_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// DEVICE_ATOMICS succeeds when the mocked device memory capabilities include
// atomic support.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeForDeviceAccessThenSuccessIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    struct MockProductHelperAtomic : NEO::ProductHelperHw<IGFX_UNKNOWN> {
        MockProductHelperAtomic() = default;
        uint64_t getDeviceMemCapabilities() const override {
            // 3 presumably sets the access + atomic capability bits — TODO confirm bit layout.
            return 3;
        }
    };
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    auto mockProductHelper = std::make_unique<MockProductHelperAtomic>();
    std::unique_ptr<ProductHelper> productHelper = std::move(mockProductHelper);
    auto &rootDeviceEnvironment = neoDevice->getRootDeviceEnvironmentRef();
    std::swap(rootDeviceEnvironment.productHelper, productHelper);
    ze_result_t result = context->allocSharedMem(device->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_DEVICE_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// HOST_ATOMICS succeeds when the mocked host memory capabilities include
// atomic support.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeForHostAccessThenSuccessIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    struct MockProductHelperAtomic : NEO::ProductHelperHw<IGFX_UNKNOWN> {
        MockProductHelperAtomic() = default;
        uint64_t getHostMemCapabilities(const HardwareInfo *hwInfo) const override {
            // 3 presumably sets the access + atomic capability bits — TODO confirm bit layout.
            return 3;
        }
    };
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    auto mockProductHelper = std::make_unique<MockProductHelperAtomic>();
    std::unique_ptr<ProductHelper> productHelper = std::move(mockProductHelper);
    auto &rootDeviceEnvironment = neoDevice->getRootDeviceEnvironmentRef();
    std::swap(rootDeviceEnvironment.productHelper, productHelper);
    ze_result_t result = context->allocSharedMem(device->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_HOST_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// SYSTEM_ATOMICS succeeds when the mocked single-device shared memory
// capabilities include concurrent atomic support.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeForSystemAccessSharedSingleThenSuccessIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    struct MockProductHelperAtomic : NEO::ProductHelperHw<IGFX_UNKNOWN> {
        MockProductHelperAtomic() = default;
        uint64_t getSingleDeviceSharedMemCapabilities() const override {
            // 15 presumably sets all four capability bits, including concurrent
            // atomic — TODO confirm bit layout.
            return 15;
        }
    };
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    auto mockProductHelper = std::make_unique<MockProductHelperAtomic>();
    std::unique_ptr<ProductHelper> productHelper = std::move(mockProductHelper);
    auto &rootDeviceEnvironment = neoDevice->getRootDeviceEnvironmentRef();
    std::swap(rootDeviceEnvironment.productHelper, productHelper);
    ze_result_t result = context->allocSharedMem(device->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_SYSTEM_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// Attributes are tracked per allocation: two shared allocations with different
// attributes each report their own value from getAtomicAccessAttribute.
TEST_F(MemoryTest, whenCallingGetAtomicAccessAttributeThenSuccessIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    ze_result_t result = context->allocSharedMem(device->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_SYSTEM_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    // Second allocation with a different attribute.
    void *ptr2 = reinterpret_cast<void *>(0x5678);
    result = context->allocSharedMem(device->toHandle(),
                                     &deviceDesc,
                                     &hostDesc,
                                     size, alignment, &ptr2);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr2);
    attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_DEVICE_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr2, size, attr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    // Each allocation must report the attribute that was set on it.
    ze_memory_atomic_attr_exp_flags_t attrGet = 0;
    result = context->getAtomicAccessAttribute(device->toHandle(), ptr, size, &attrGet);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_EQ(ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_SYSTEM_ATOMICS, attrGet);
    attrGet = 0;
    result = context->getAtomicAccessAttribute(device->toHandle(), ptr2, size, &attrGet);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_EQ(ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_DEVICE_ATOMICS, attrGet);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
    result = context->freeMem(ptr2);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// The most recent setAtomicAccessAttribute call wins: get returns the last value.
TEST_F(MemoryTest, whenCallingSetAtomicAccessAttributeMoreThanOnceThenGetAtomicAccessAttributeReturnsLastSetting) {
    const size_t allocSize = 10;
    const size_t allocAlignment = 1u;
    ze_device_mem_alloc_desc_t deviceAllocDesc = {};
    ze_host_mem_alloc_desc_t hostAllocDesc = {};

    void *allocation = reinterpret_cast<void *>(0x1234);
    ze_result_t status = context->allocSharedMem(device->toHandle(), &deviceAllocDesc, &hostAllocDesc,
                                                 allocSize, allocAlignment, &allocation);
    EXPECT_EQ(ZE_RESULT_SUCCESS, status);
    EXPECT_NE(nullptr, allocation);

    // Set twice with different values; the second one must stick.
    status = context->setAtomicAccessAttribute(device->toHandle(), allocation, allocSize,
                                               ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_SYSTEM_ATOMICS);
    EXPECT_EQ(ZE_RESULT_SUCCESS, status);
    status = context->setAtomicAccessAttribute(device->toHandle(), allocation, allocSize,
                                               ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_ATOMICS);
    EXPECT_EQ(ZE_RESULT_SUCCESS, status);

    ze_memory_atomic_attr_exp_flags_t reportedAttr = 0;
    status = context->getAtomicAccessAttribute(device->toHandle(), allocation, allocSize, &reportedAttr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, status);
    EXPECT_EQ(ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_ATOMICS, reportedAttr);

    status = context->freeMem(allocation);
    ASSERT_EQ(status, ZE_RESULT_SUCCESS);
}
// getAtomicAccessAttribute must fail for a pointer that is not a known SVM
// allocation, even when an attribute has been set on a different allocation.
TEST_F(MemoryTest, whenCallingGetAtomicAccessAttributeWithInvalidAllocationPtrErrorIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    ze_result_t result = context->allocSharedMem(device->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_SYSTEM_ATOMICS;
    result = context->setAtomicAccessAttribute(device->toHandle(), ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    ze_memory_atomic_attr_exp_flags_t attrGet = 0;
    // Pointer that was never allocated through the driver.
    void *ptr2 = reinterpret_cast<void *>(0xFACE);
    result = context->getAtomicAccessAttribute(device->toHandle(), ptr2, size, &attrGet);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
// When no attribute was ever successfully recorded for the allocation (the set
// call below is rejected for its null device), get must report INVALID_ARGUMENT.
TEST_F(MemoryTest, whenCallingGetAtomicAccessAttributeWithAttributeNotSetErrorIsReturned) {
    size_t size = 10;
    size_t alignment = 1u;
    void *ptr = reinterpret_cast<void *>(0x1234);
    ze_device_mem_alloc_desc_t deviceDesc = {};
    ze_host_mem_alloc_desc_t hostDesc = {};
    ze_result_t result = context->allocSharedMem(device->toHandle(),
                                                 &deviceDesc,
                                                 &hostDesc,
                                                 size, alignment, &ptr);
    EXPECT_EQ(ZE_RESULT_SUCCESS, result);
    EXPECT_NE(nullptr, ptr);
    ze_memory_atomic_attr_exp_flags_t attr = ZE_MEMORY_ATOMIC_ATTR_EXP_FLAG_NO_SYSTEM_ATOMICS;
    // Deliberately rejected: null device means nothing is recorded.
    result = context->setAtomicAccessAttribute(nullptr, ptr, size, attr);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, result);
    ze_memory_atomic_attr_exp_flags_t attrGet = 0;
    result = context->getAtomicAccessAttribute(device->toHandle(), ptr, size, &attrGet);
    EXPECT_EQ(ZE_RESULT_ERROR_INVALID_ARGUMENT, result);
    result = context->freeMem(ptr);
    ASSERT_EQ(result, ZE_RESULT_SUCCESS);
}
TEST_F(MemoryTest, whenAllocatingHostMemoryWithUseHostPtrFlagThenExternalHostPtrIsSet) {
size_t size = 10;
size_t alignment = 1u;