diff --git a/level_zero/sysman/source/shared/linux/sysman_kmd_interface.cpp b/level_zero/sysman/source/shared/linux/sysman_kmd_interface.cpp index 7553f36053..e747ba2db7 100644 --- a/level_zero/sysman/source/shared/linux/sysman_kmd_interface.cpp +++ b/level_zero/sysman/source/shared/linux/sysman_kmd_interface.cpp @@ -224,17 +224,17 @@ int64_t SysmanKmdInterfaceXe::getEngineActivityFd(zes_engine_group_t engineGroup switch (engineGroup) { case ZES_ENGINE_GROUP_ALL: - config = XE_PMU_ANY_ENGINE_GROUP_BUSY(subDeviceId); + config = DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(subDeviceId); break; case ZES_ENGINE_GROUP_COMPUTE_ALL: case ZES_ENGINE_GROUP_RENDER_ALL: - config = XE_PMU_RENDER_GROUP_BUSY(subDeviceId); + config = DRM_XE_PMU_RENDER_GROUP_BUSY(subDeviceId); break; case ZES_ENGINE_GROUP_COPY_ALL: - config = XE_PMU_COPY_GROUP_BUSY(subDeviceId); + config = DRM_XE_PMU_COPY_GROUP_BUSY(subDeviceId); break; case ZES_ENGINE_GROUP_MEDIA_ALL: - config = XE_PMU_MEDIA_GROUP_BUSY(subDeviceId); + config = DRM_XE_PMU_MEDIA_GROUP_BUSY(subDeviceId); break; default: break; diff --git a/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp b/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp index e7e2900b93..fe8566c019 100644 --- a/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp +++ b/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp @@ -86,15 +86,15 @@ const char *IoctlHelperXe::xeGetClassName(int className) { const char *IoctlHelperXe::xeGetBindOperationName(int bindOperation) { switch (bindOperation) { - case XE_VM_BIND_OP_MAP: + case DRM_XE_VM_BIND_OP_MAP: return "MAP"; - case XE_VM_BIND_OP_UNMAP: + case DRM_XE_VM_BIND_OP_UNMAP: return "UNMAP"; - case XE_VM_BIND_OP_MAP_USERPTR: + case DRM_XE_VM_BIND_OP_MAP_USERPTR: return "MAP_USERPTR"; - case XE_VM_BIND_OP_UNMAP_ALL: + case DRM_XE_VM_BIND_OP_UNMAP_ALL: return "UNMAP ALL"; - case XE_VM_BIND_OP_PREFETCH: + case DRM_XE_VM_BIND_OP_PREFETCH: return "PREFETCH"; } return "Unknown operation"; @@ -102,13 +102,13 @@ const char *IoctlHelperXe::xeGetBindOperationName(int bindOperation) { const char *IoctlHelperXe::xeGetBindFlagsName(int bindFlags) { switch (bindFlags) { - case XE_VM_BIND_FLAG_READONLY: + case DRM_XE_VM_BIND_FLAG_READONLY: return "READ_ONLY"; - case XE_VM_BIND_FLAG_ASYNC: + case DRM_XE_VM_BIND_FLAG_ASYNC: return "ASYNC"; - case XE_VM_BIND_FLAG_IMMEDIATE: + case DRM_XE_VM_BIND_FLAG_IMMEDIATE: return "IMMEDIATE"; - case XE_VM_BIND_FLAG_NULL: + case DRM_XE_VM_BIND_FLAG_NULL: return "NULL"; } return "Unknown flag"; @@ -149,27 +149,27 @@ bool IoctlHelperXe::initialize() { struct drm_xe_query_config *config = reinterpret_cast<struct drm_xe_query_config *>(data.data()); queryConfig.data = castToUint64(config); IoctlHelper::ioctl(DrmIoctl::Query, &queryConfig); - xeLog("XE_QUERY_CONFIG_REV_AND_DEVICE_ID\t%#llx\n", - config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID]); + xeLog("DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID\t%#llx\n", + config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID]); xeLog(" REV_ID\t\t\t\t%#llx\n", - (config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff); + (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff); xeLog(" DEVICE_ID\t\t\t\t%#llx\n", - config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff); - xeLog("XE_QUERY_CONFIG_FLAGS\t\t\t%#llx\n", - config->info[XE_QUERY_CONFIG_FLAGS]); - xeLog(" XE_QUERY_CONFIG_FLAGS_HAS_VRAM\t%s\n", - config->info[XE_QUERY_CONFIG_FLAGS] & - XE_QUERY_CONFIG_FLAGS_HAS_VRAM + config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff); + xeLog("DRM_XE_QUERY_CONFIG_FLAGS\t\t\t%#llx\n", + 
config->info[DRM_XE_QUERY_CONFIG_FLAGS]); + xeLog(" DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM\t%s\n", + config->info[DRM_XE_QUERY_CONFIG_FLAGS] & + DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM ? "ON" : "OFF"); - xeLog("XE_QUERY_CONFIG_MIN_ALIGNMENT\t\t%#llx\n", - config->info[XE_QUERY_CONFIG_MIN_ALIGNMENT]); - xeLog("XE_QUERY_CONFIG_VA_BITS\t\t%#llx\n", - config->info[XE_QUERY_CONFIG_VA_BITS]); + xeLog("DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT\t\t%#llx\n", + config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT]); + xeLog("DRM_XE_QUERY_CONFIG_VA_BITS\t\t%#llx\n", + config->info[DRM_XE_QUERY_CONFIG_VA_BITS]); - chipsetId = config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff; - revId = static_cast((config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff); - hasVram = config->info[XE_QUERY_CONFIG_FLAGS] & XE_QUERY_CONFIG_FLAGS_HAS_VRAM ? 1 : 0; + chipsetId = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff; + revId = static_cast((config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff); + hasVram = config->info[DRM_XE_QUERY_CONFIG_FLAGS] & DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM ? 1 : 0; memset(&queryConfig, 0, sizeof(queryConfig)); queryConfig.query = DRM_XE_DEVICE_QUERY_HWCONFIG; @@ -279,7 +279,7 @@ inline MemoryRegion createMemoryRegionFromXeMemRegion(const drm_xe_query_mem_reg } std::unique_ptr<MemoryInfo> IoctlHelperXe::createMemoryInfo() { - auto memUsageData = queryData(DRM_XE_DEVICE_QUERY_MEM_USAGE); + auto memUsageData = queryData(DRM_XE_DEVICE_QUERY_MEM_REGIONS); auto gtListData = queryData(DRM_XE_DEVICE_QUERY_GT_LIST); if (memUsageData.empty() || gtListData.empty()) { @@ -287,15 +287,15 @@ std::unique_ptr<MemoryInfo> IoctlHelperXe::createMemoryInfo() { } MemoryInfo::RegionContainer regionsContainer{}; - auto xeMemUsageData = reinterpret_cast<drm_xe_query_mem_usage *>(memUsageData.data()); + auto xeMemRegionsData = reinterpret_cast<drm_xe_query_mem_regions *>(memUsageData.data()); auto xeGtListData = reinterpret_cast<drm_xe_query_gt_list *>(gtListData.data()); std::array memoryRegionInstances{}; - for (auto i = 0u; i < xeMemUsageData->num_regions; i++) { - auto &region = xeMemUsageData->regions[i]; + for (auto i = 0u; i < xeMemRegionsData->num_regions; i++) { + auto &region = xeMemRegionsData->regions[i]; memoryRegionInstances[region.instance] = &region; - if (region.mem_class == XE_MEM_REGION_CLASS_SYSMEM) { + if (region.mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) { regionsContainer.push_back(createMemoryRegionFromXeMemRegion(region)); } } @@ -305,9 +305,9 @@ for (auto i = 0u; i < xeGtListData->num_gt; i++) { - if (xeGtListData->gt_list[i].type != XE_QUERY_GT_TYPE_MEDIA) { - uint64_t nativeMemRegions = xeGtListData->gt_list[i].native_mem_regions; - auto regionIndex = Math::log2(nativeMemRegions); + if (xeGtListData->gt_list[i].type != DRM_XE_QUERY_GT_TYPE_MEDIA) { + uint64_t nearMemRegions = xeGtListData->gt_list[i].near_mem_regions; + auto regionIndex = Math::log2(nearMemRegions); UNRECOVERABLE_IF(!memoryRegionInstances[regionIndex]); regionsContainer.push_back(createMemoryRegionFromXeMemRegion(*memoryRegionInstances[regionIndex])); xeTimestampFrequency = xeGtListData->gt_list[i].clock_freq; @@ -467,7 +467,7 @@ bool IoctlHelperXe::getTopologyDataAndMap(const HardwareInfo &hwInfo, DrmQueryTo auto tileIndex = 0u; for (auto gt = 0u; gt < gtIdToTile.size(); gt++) { - if (xeGtListData->gt_list[gt].type != XE_QUERY_GT_TYPE_MEDIA) { + if (xeGtListData->gt_list[gt].type != DRM_XE_QUERY_GT_TYPE_MEDIA) { gtIdToTile[gt] = tileIndex++; } } @@ -481,15 +481,15 @@ bool IoctlHelperXe::getTopologyDataAndMap(const HardwareInfo &hwInfo, DrmQueryTo uint32_t 
gtId = topo->gt_id; - if (xeGtListData->gt_list[gtId].type != XE_QUERY_GT_TYPE_MEDIA) { + if (xeGtListData->gt_list[gtId].type != DRM_XE_QUERY_GT_TYPE_MEDIA) { switch (topo->type) { - case XE_TOPO_DSS_GEOMETRY: + case DRM_XE_TOPO_DSS_GEOMETRY: fillMask(geomDss[gtIdToTile[gtId]], topo); break; - case XE_TOPO_DSS_COMPUTE: + case DRM_XE_TOPO_DSS_COMPUTE: fillMask(computeDss[gtIdToTile[gtId]], topo); break; - case XE_TOPO_EU_PER_DSS: + case DRM_XE_TOPO_EU_PER_DSS: fillMask(euDss[gtIdToTile[gtId]], topo); break; default: @@ -612,7 +612,7 @@ int IoctlHelperXe::xeWaitUserFence(uint64_t mask, uint16_t op, uint64_t addr, ui struct drm_xe_wait_user_fence wait = {}; wait.addr = addr; wait.op = op; - wait.flags = DRM_XE_UFENCE_WAIT_SOFT_OP; + wait.flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP; wait.value = value; wait.mask = mask; wait.timeout = timeout; @@ -630,16 +630,16 @@ int IoctlHelperXe::waitUserFence(uint32_t ctxId, uint64_t address, uint64_t mask; switch (dataWidth) { case static_cast<uint32_t>(Drm::ValueWidth::U64): - mask = DRM_XE_UFENCE_WAIT_U64; + mask = DRM_XE_UFENCE_WAIT_MASK_U64; break; case static_cast<uint32_t>(Drm::ValueWidth::U32): - mask = DRM_XE_UFENCE_WAIT_U32; + mask = DRM_XE_UFENCE_WAIT_MASK_U32; break; case static_cast<uint32_t>(Drm::ValueWidth::U16): - mask = DRM_XE_UFENCE_WAIT_U16; + mask = DRM_XE_UFENCE_WAIT_MASK_U16; break; default: - mask = DRM_XE_UFENCE_WAIT_U8; + mask = DRM_XE_UFENCE_WAIT_MASK_U8; break; } if (timeout == -1) { @@ -647,7 +647,7 @@ int IoctlHelperXe::waitUserFence(uint32_t ctxId, uint64_t address, timeout = TimeoutControls::maxTimeout; } if (address) { - return xeWaitUserFence(mask, DRM_XE_UFENCE_WAIT_GTE, address, value, timeout); + return xeWaitUserFence(mask, DRM_XE_UFENCE_WAIT_OP_GTE, address, value, timeout); } return 0; } @@ -714,7 +714,7 @@ int IoctlHelperXe::execBuffer(ExecBuffer *execBuffer, uint64_t completionGpuAddr completionGpuAddress, counterValue, engine); struct drm_xe_sync sync[1] = {}; - sync[0].flags = DRM_XE_SYNC_USER_FENCE | DRM_XE_SYNC_SIGNAL; + sync[0].flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL; sync[0].addr = completionGpuAddress; sync[0].timeline_value = counterValue; struct drm_xe_exec exec = {}; @@ -883,9 +883,9 @@ int IoctlHelperXe::getDrmParamValue(DrmParam drmParam) const { switch (drmParam) { case DrmParam::MemoryClassDevice: - return XE_MEM_REGION_CLASS_VRAM; + return DRM_XE_MEM_REGION_CLASS_VRAM; case DrmParam::MemoryClassSystem: - return XE_MEM_REGION_CLASS_SYSMEM; + return DRM_XE_MEM_REGION_CLASS_SYSMEM; case DrmParam::EngineClassRender: return DRM_XE_ENGINE_CLASS_RENDER; case DrmParam::EngineClassCopy: @@ -1067,10 +1067,10 @@ int IoctlHelperXe::ioctl(DrmIoctl request, void *arg) { case DrmIoctl::GemVmCreate: { GemVmControl *d = static_cast<GemVmControl *>(arg); struct drm_xe_vm_create args = {}; - args.flags = DRM_XE_VM_CREATE_ASYNC_DEFAULT | - DRM_XE_VM_CREATE_COMPUTE_MODE; + args.flags = DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | + DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE; if (drm.hasPageFaultSupport()) { - args.flags |= DRM_XE_VM_CREATE_FAULT_MODE; + args.flags |= DRM_XE_VM_CREATE_FLAG_FAULT_MODE; } ret = IoctlHelper::ioctl(request, &args); d->vmId = ret ? 
0 : args.vm_id; @@ -1238,7 +1238,7 @@ int IoctlHelperXe::xeVmBind(const VmBindParams &vmBindParams, bool isBind) { if (index != invalidIndex) { drm_xe_sync sync[1] = {}; - sync[0].flags = DRM_XE_SYNC_USER_FENCE | DRM_XE_SYNC_SIGNAL; + sync[0].flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL; auto xeBindExtUserFence = reinterpret_cast<UserFenceExtension *>(vmBindParams.extensions); UNRECOVERABLE_IF(!xeBindExtUserFence); UNRECOVERABLE_IF(xeBindExtUserFence->tag != UserFenceExtension::tagValue); @@ -1252,19 +1252,19 @@ int IoctlHelperXe::xeVmBind(const VmBindParams &vmBindParams, bool isBind) { bind.syncs = reinterpret_cast<uintptr_t>(&sync); bind.bind.range = vmBindParams.length; bind.bind.addr = gmmHelper->decanonize(vmBindParams.start); - bind.bind.flags = XE_VM_BIND_FLAG_ASYNC; + bind.bind.flags = DRM_XE_VM_BIND_FLAG_ASYNC; bind.bind.obj_offset = vmBindParams.offset; if (isBind) { - bind.bind.op = XE_VM_BIND_OP_MAP; + bind.bind.op = DRM_XE_VM_BIND_OP_MAP; bind.bind.obj = vmBindParams.handle; if (bindInfo[index].handle & XE_USERPTR_FAKE_FLAG) { - bind.bind.op = XE_VM_BIND_OP_MAP_USERPTR; + bind.bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR; bind.bind.obj = 0; bind.bind.obj_offset = bindInfo[index].userptr; } } else { - bind.bind.op = XE_VM_BIND_OP_UNMAP; + bind.bind.op = DRM_XE_VM_BIND_OP_UNMAP; bind.bind.obj = 0; if (bindInfo[index].handle & XE_USERPTR_FAKE_FLAG) { bind.bind.obj_offset = bindInfo[index].userptr; @@ -1293,7 +1293,7 @@ int IoctlHelperXe::xeVmBind(const VmBindParams &vmBindParams, bool isBind) { return ret; } - return xeWaitUserFence(DRM_XE_UFENCE_WAIT_U64, DRM_XE_UFENCE_WAIT_EQ, + return xeWaitUserFence(DRM_XE_UFENCE_WAIT_MASK_U64, DRM_XE_UFENCE_WAIT_OP_EQ, sync[0].addr, sync[0].timeline_value, XE_ONE_SEC); } diff --git a/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.cpp b/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.cpp index eaa69d4b49..3d22d7d762 100644 --- a/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.cpp +++ b/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.cpp @@ -314,8 +314,8 @@ TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallingAnyMethodThenDummyValueIsRe // Default no translation: verifyDrmGetParamValue(static_cast<int>(DrmParam::ExecRender), DrmParam::ExecRender); // test exception: - verifyDrmGetParamValue(XE_MEM_REGION_CLASS_VRAM, DrmParam::MemoryClassDevice); - verifyDrmGetParamValue(XE_MEM_REGION_CLASS_SYSMEM, DrmParam::MemoryClassSystem); + verifyDrmGetParamValue(DRM_XE_MEM_REGION_CLASS_VRAM, DrmParam::MemoryClassDevice); + verifyDrmGetParamValue(DRM_XE_MEM_REGION_CLASS_SYSMEM, DrmParam::MemoryClassSystem); verifyDrmGetParamValue(DRM_XE_ENGINE_CLASS_RENDER, DrmParam::EngineClassRender); verifyDrmGetParamValue(DRM_XE_ENGINE_CLASS_COPY, DrmParam::EngineClassCopy); verifyDrmGetParamValue(DRM_XE_ENGINE_CLASS_VIDEO_DECODE, DrmParam::EngineClassVideo); @@ -459,17 +459,17 @@ TEST(IoctlHelperXeTest, verifyPublicFunctions) { verifyXeClassName("vecs", DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE); verifyXeClassName("ccs", DRM_XE_ENGINE_CLASS_COMPUTE); - verifyXeOperationBindName("MAP", XE_VM_BIND_OP_MAP); - verifyXeOperationBindName("UNMAP", XE_VM_BIND_OP_UNMAP); - verifyXeOperationBindName("MAP_USERPTR", XE_VM_BIND_OP_MAP_USERPTR); - verifyXeOperationBindName("UNMAP ALL", XE_VM_BIND_OP_UNMAP_ALL); - verifyXeOperationBindName("PREFETCH", XE_VM_BIND_OP_PREFETCH); + verifyXeOperationBindName("MAP", DRM_XE_VM_BIND_OP_MAP); + verifyXeOperationBindName("UNMAP", DRM_XE_VM_BIND_OP_UNMAP); + verifyXeOperationBindName("MAP_USERPTR", 
DRM_XE_VM_BIND_OP_MAP_USERPTR); + verifyXeOperationBindName("UNMAP ALL", DRM_XE_VM_BIND_OP_UNMAP_ALL); + verifyXeOperationBindName("PREFETCH", DRM_XE_VM_BIND_OP_PREFETCH); verifyXeOperationBindName("Unknown operation", -1); - verifyXeFlagsBindName("READ_ONLY", XE_VM_BIND_FLAG_READONLY); - verifyXeFlagsBindName("ASYNC", XE_VM_BIND_FLAG_ASYNC); - verifyXeFlagsBindName("IMMEDIATE", XE_VM_BIND_FLAG_IMMEDIATE); - verifyXeFlagsBindName("NULL", XE_VM_BIND_FLAG_NULL); + verifyXeFlagsBindName("READ_ONLY", DRM_XE_VM_BIND_FLAG_READONLY); + verifyXeFlagsBindName("ASYNC", DRM_XE_VM_BIND_FLAG_ASYNC); + verifyXeFlagsBindName("IMMEDIATE", DRM_XE_VM_BIND_FLAG_IMMEDIATE); + verifyXeFlagsBindName("NULL", DRM_XE_VM_BIND_FLAG_NULL); verifyXeFlagsBindName("Unknown flag", -1); verifyXeEngineClassName("DRM_XE_ENGINE_CLASS_RENDER", DRM_XE_ENGINE_CLASS_RENDER); @@ -539,17 +539,17 @@ TEST(IoctlHelperXeTest, whenCallingIoctlThenProperValueIsReturned) { { GemVmControl test = {}; drm.pageFaultSupported = false; - uint32_t expectedVmCreateFlags = DRM_XE_VM_CREATE_ASYNC_DEFAULT | - DRM_XE_VM_CREATE_COMPUTE_MODE; + uint32_t expectedVmCreateFlags = DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | + DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE; ret = mockXeIoctlHelper->ioctl(DrmIoctl::GemVmCreate, &test); EXPECT_EQ(0, ret); EXPECT_EQ(static_cast<int>(test.vmId), testValueVmId); EXPECT_EQ(test.flags, expectedVmCreateFlags); drm.pageFaultSupported = true; - expectedVmCreateFlags = DRM_XE_VM_CREATE_ASYNC_DEFAULT | - DRM_XE_VM_CREATE_COMPUTE_MODE | - DRM_XE_VM_CREATE_FAULT_MODE; + expectedVmCreateFlags = DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | + DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE | + DRM_XE_VM_CREATE_FLAG_FAULT_MODE; ret = mockXeIoctlHelper->ioctl(DrmIoctl::GemVmCreate, &test); EXPECT_EQ(0, ret); EXPECT_EQ(static_cast<int>(test.vmId), testValueVmId); @@ -686,9 +686,9 @@ TEST(IoctlHelperXeTest, givenGeomDssWhenGetTopologyDataAndMapThenResultsAreCorre uint16_t tileId = 0; for (auto gtId = 0u; gtId < 3u; gtId++) { - drm.addMockedQueryTopologyData(gtId, XE_TOPO_DSS_GEOMETRY, 8, {0b11'1111, 0, 0, 0, 0, 0, 0, 0}); - drm.addMockedQueryTopologyData(gtId, XE_TOPO_DSS_COMPUTE, 8, {0, 0, 0, 0, 0, 0, 0, 0}); - drm.addMockedQueryTopologyData(gtId, XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0b1111'1111, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0b11'1111, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0b1111'1111, 0, 0, 0, 0, 0, 0}); } DrmQueryTopologyData topologyData{}; TopologyMap topologyMap{}; @@ -733,9 +733,9 @@ TEST(IoctlHelperXeTest, givenComputeDssWhenGetTopologyDataAndMapThenResultsAreCo uint16_t tileId = 0; for (auto gtId = 0u; gtId < 3u; gtId++) { - drm.addMockedQueryTopologyData(gtId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); - drm.addMockedQueryTopologyData(gtId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); - drm.addMockedQueryTopologyData(gtId, XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); + drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0, 0, 0, 0, 0, 0, 0}); } DrmQueryTopologyData topologyData{}; @@ -784,20 +784,20 @@ TEST(IoctlHelperXeTest, 
givenOnlyMediaTypeWhenGetTopologyDataAndMapThenSubsliceI auto xeQueryGtList = reinterpret_cast<drm_xe_query_gt_list *>(drm.queryGtList.begin()); xeQueryGtList->num_gt = 1; xeQueryGtList->gt_list[0] = { - XE_QUERY_GT_TYPE_MEDIA, // type - 0, // gt_id - 12500000, // clock freq - 0b100, // native mem regions - 0x011, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MEDIA, // type + 0, // gt_id + 12500000, // clock freq + 0b100, // native mem regions + 0x011, // slow mem regions }; auto &hwInfo = *executionEnvironment->rootDeviceEnvironments[0]->getHardwareInfo(); auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm); uint16_t tileId = 0; - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0, 0, 0, 0, 0, 0, 0}); DrmQueryTopologyData topologyData{}; TopologyMap topologyMap{}; @@ -829,40 +829,40 @@ TEST(IoctlHelperXeTest, givenMainAndMediaTypesWhenGetTopologyDataAndMapThenResul auto xeQueryGtList = reinterpret_cast<drm_xe_query_gt_list *>(drm.queryGtList.begin()); xeQueryGtList->num_gt = 4; xeQueryGtList->gt_list[0] = { - XE_QUERY_GT_TYPE_MAIN, // type - 0, // gt_id - 12500000, // clock freq - 0b100, // native mem regions - 0x011, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MAIN, // type + 0, // gt_id + 12500000, // clock freq + 0b100, // native mem regions + 0x011, // slow mem regions }; xeQueryGtList->gt_list[1] = { - XE_QUERY_GT_TYPE_MEDIA, // type - 0, // gt_id - 12500000, // clock freq - 0b100, // native mem regions - 0x011, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MEDIA, // type + 0, // gt_id + 12500000, // clock freq + 0b100, // native mem regions + 0x011, // slow mem regions }; xeQueryGtList->gt_list[2] = { - XE_QUERY_GT_TYPE_MAIN, // type - 0, // gt_id - 12500000, // clock freq - 0b010, // native mem regions - 0x101, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MAIN, // type + 0, // gt_id + 12500000, // clock freq + 0b010, // native mem regions + 0x101, // slow mem regions }; xeQueryGtList->gt_list[3] = { - XE_QUERY_GT_TYPE_MEDIA, // type - 0, // gt_id - 12500000, // clock freq - 0b001, // native mem regions - 0x100, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MEDIA, // type + 0, // gt_id + 12500000, // clock freq + 0b001, // native mem regions + 0x100, // slow mem regions }; auto &hwInfo = *executionEnvironment->rootDeviceEnvironments[0]->getHardwareInfo(); auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm); for (auto tileId = 0; tileId < 4; tileId++) { - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 8, {0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); } DrmQueryTopologyData topologyData{}; @@ -890,10 +890,10 @@ TEST(IoctlHelperXeTest, givenMainAndMediaTypesWhenGetTopologyDataAndMapThenResul struct DrmMockXe2T : public DrmMockXe { DrmMockXe2T(RootDeviceEnvironment &rootDeviceEnvironment) : DrmMockXe(rootDeviceEnvironment) { - auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_usage *>(queryMemUsage); + auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_regions *>(queryMemUsage); xeQueryMemUsage->num_regions = 3; xeQueryMemUsage->regions[0] = { - XE_MEM_REGION_CLASS_VRAM, // class + DRM_XE_MEM_REGION_CLASS_VRAM, // class 1, // instance 0, // padding MemoryConstants::pageSize, // min page size @@ -901,15 +901,15 @@ struct DrmMockXe2T : public DrmMockXe { MemoryConstants::megaByte // used size }; xeQueryMemUsage->regions[1] = { - XE_MEM_REGION_CLASS_SYSMEM, // class - 0, // instance - 0, // padding - MemoryConstants::pageSize, // min page size - MemoryConstants::gigaByte, // total size - MemoryConstants::kiloByte // used size + DRM_XE_MEM_REGION_CLASS_SYSMEM, // class + 0, // instance + 0, // padding + MemoryConstants::pageSize, // min page size + MemoryConstants::gigaByte, // total size + MemoryConstants::kiloByte // used size }; xeQueryMemUsage->regions[2] = { - XE_MEM_REGION_CLASS_VRAM, // class + DRM_XE_MEM_REGION_CLASS_VRAM, // class 2, // instance 0, // padding MemoryConstants::pageSize, // min page size @@ -920,18 +920,18 @@ struct DrmMockXe2T : public DrmMockXe { auto xeQueryGtList = reinterpret_cast<drm_xe_query_gt_list *>(queryGtList.begin()); xeQueryGtList->num_gt = 2; xeQueryGtList->gt_list[0] = { - XE_QUERY_GT_TYPE_MAIN, // type - 0, // gt_id - 12500000, // clock freq - 0b100, // native mem regions - 0x011, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MAIN, // type + 0, // gt_id + 12500000, // clock freq + 0b100, // native mem regions + 0x011, // slow mem regions }; xeQueryGtList->gt_list[1] = { - XE_QUERY_GT_TYPE_MAIN, // type - 0, // gt_id - 12500000, // clock freq - 0b010, // native mem regions - 0x101, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MAIN, // type + 0, // gt_id + 12500000, // clock freq + 0b010, // native mem regions + 0x101, // slow mem regions }; } }; @@ -945,9 +945,9 @@ TEST(IoctlHelperXeTest, given2TileAndComputeDssWhenGetTopologyDataAndMapThenResu // symetric tiles for (uint16_t tileId = 0; tileId < 2u; tileId++) { - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0}); } DrmQueryTopologyData topologyData{}; @@ -1000,14 +1000,14 @@ TEST(IoctlHelperXeTest, given2TileWithDisabledDssOn1TileAndComputeDssWhenGetTopo // half dss disabled on tile 0 uint16_t tileId = 0; - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, 
DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0}); tileId = 1; - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0}); DrmQueryTopologyData topologyData{}; TopologyMap topologyMap{}; @@ -1068,9 +1068,9 @@ TEST(IoctlHelperXeTest, given2TileWithDisabledEvenDssAndComputeDssWhenGetTopolog // even dss disabled uint8_t data = 0b1010'1010; - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {data, data, data, data, data, data, data, data}); - drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {data, data, data, data, data, data, data, data}); + drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0}); } DrmQueryTopologyData topologyData{}; @@ -1261,17 +1261,17 @@ TEST(IoctlHelperXeTest, whenCreatingMemoryInfoThenProperMemoryBanksAreDiscovered EXPECT_NE(nullptr, memoryInfo); auto memoryClassInstance0 = memoryInfo->getMemoryRegionClassAndInstance(0, *defaultHwInfo); - EXPECT_EQ(static_cast<uint16_t>(XE_MEM_REGION_CLASS_SYSMEM), memoryClassInstance0.memoryClass); + EXPECT_EQ(static_cast<uint16_t>(DRM_XE_MEM_REGION_CLASS_SYSMEM), memoryClassInstance0.memoryClass); EXPECT_EQ(0u, memoryClassInstance0.memoryInstance); EXPECT_EQ(MemoryConstants::gigaByte, memoryInfo->getMemoryRegionSize(0)); auto memoryClassInstance1 = memoryInfo->getMemoryRegionClassAndInstance(0b01, *defaultHwInfo); - EXPECT_EQ(static_cast<uint16_t>(XE_MEM_REGION_CLASS_VRAM), memoryClassInstance1.memoryClass); + EXPECT_EQ(static_cast<uint16_t>(DRM_XE_MEM_REGION_CLASS_VRAM), memoryClassInstance1.memoryClass); EXPECT_EQ(2u, memoryClassInstance1.memoryInstance); EXPECT_EQ(4 * MemoryConstants::gigaByte, memoryInfo->getMemoryRegionSize(0b01)); auto memoryClassInstance2 = memoryInfo->getMemoryRegionClassAndInstance(0b10, *defaultHwInfo); - EXPECT_EQ(static_cast<uint16_t>(XE_MEM_REGION_CLASS_VRAM), memoryClassInstance2.memoryClass); + EXPECT_EQ(static_cast<uint16_t>(DRM_XE_MEM_REGION_CLASS_VRAM), memoryClassInstance2.memoryClass); EXPECT_EQ(1u, memoryClassInstance2.memoryInstance); EXPECT_EQ(2 * MemoryConstants::gigaByte, memoryInfo->getMemoryRegionSize(0b10)); @@ -1308,7 +1308,7 @@ TEST(IoctlHelperXeTest, givenNoMemoryRegionsWhenCreatingMemoryInfoThenMemoryInfo DrmMockXe drm{*executionEnvironment->rootDeviceEnvironments[0]}; auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm); - auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_usage *>(drm.queryMemUsage); + auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_regions *>(drm.queryMemUsage); xeQueryMemUsage->num_regions = 0u; auto memoryInfo = xeIoctlHelper->createMemoryInfo(); EXPECT_EQ(nullptr, memoryInfo); @@ -1402,10 +1402,10 @@ TEST(IoctlHelperXeTest, whenCallingVmBindThenWaitUserFenceIsCalled) { auto 
&waitUserFence = drm.waitUserFenceInputs[0]; EXPECT_EQ(fenceAddress, waitUserFence.addr); - EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_EQ), waitUserFence.op); - EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_SOFT_OP), waitUserFence.flags); + EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_OP_EQ), waitUserFence.op); + EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP), waitUserFence.flags); EXPECT_EQ(fenceValue, waitUserFence.value); - EXPECT_EQ(static_cast<uint64_t>(DRM_XE_UFENCE_WAIT_U64), waitUserFence.mask); + EXPECT_EQ(static_cast<uint64_t>(DRM_XE_UFENCE_WAIT_MASK_U64), waitUserFence.mask); EXPECT_EQ(static_cast<int64_t>(XE_ONE_SEC), waitUserFence.timeout); EXPECT_EQ(0u, waitUserFence.num_engines); EXPECT_EQ(0u, waitUserFence.instances); @@ -1426,10 +1426,10 @@ TEST(IoctlHelperXeTest, whenCallingVmBindThenWaitUserFenceIsCalled) { auto &waitUserFence = drm.waitUserFenceInputs[0]; EXPECT_EQ(fenceAddress, waitUserFence.addr); - EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_EQ), waitUserFence.op); - EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_SOFT_OP), waitUserFence.flags); + EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_OP_EQ), waitUserFence.op); + EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP), waitUserFence.flags); EXPECT_EQ(fenceValue, waitUserFence.value); - EXPECT_EQ(static_cast<uint64_t>(DRM_XE_UFENCE_WAIT_U64), waitUserFence.mask); + EXPECT_EQ(static_cast<uint64_t>(DRM_XE_UFENCE_WAIT_MASK_U64), waitUserFence.mask); EXPECT_EQ(static_cast<int64_t>(XE_ONE_SEC), waitUserFence.timeout); EXPECT_EQ(0u, waitUserFence.num_engines); EXPECT_EQ(0u, waitUserFence.instances); diff --git a/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.h b/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.h index 60c77015b7..e8922b52a7 100644 --- a/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.h +++ b/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.h @@ -47,10 +47,10 @@ inline constexpr uint32_t testValueGemCreate = 0x8273; class DrmMockXe : public DrmMockCustom { public: DrmMockXe(RootDeviceEnvironment &rootDeviceEnvironment) : DrmMockCustom(rootDeviceEnvironment) { - auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_usage *>(queryMemUsage); + auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_regions *>(queryMemUsage); xeQueryMemUsage->num_regions = 3; xeQueryMemUsage->regions[0] = { - XE_MEM_REGION_CLASS_VRAM, // class + DRM_XE_MEM_REGION_CLASS_VRAM, // class 1, // instance 0, // padding MemoryConstants::pageSize, // min page size @@ -58,15 +58,15 @@ class DrmMockXe : public DrmMockCustom { MemoryConstants::megaByte // used size }; xeQueryMemUsage->regions[1] = { - XE_MEM_REGION_CLASS_SYSMEM, // class - 0, // instance - 0, // padding - MemoryConstants::pageSize, // min page size - MemoryConstants::gigaByte, // total size - MemoryConstants::kiloByte // used size + DRM_XE_MEM_REGION_CLASS_SYSMEM, // class + 0, // instance + 0, // padding + MemoryConstants::pageSize, // min page size + MemoryConstants::gigaByte, // total size + MemoryConstants::kiloByte // used size }; xeQueryMemUsage->regions[2] = { - XE_MEM_REGION_CLASS_VRAM, // class + DRM_XE_MEM_REGION_CLASS_VRAM, // class 2, // instance 0, // padding MemoryConstants::pageSize, // min page size @@ -77,25 +77,25 @@ class DrmMockXe : public DrmMockCustom { auto xeQueryGtList = reinterpret_cast<drm_xe_query_gt_list *>(queryGtList.begin()); xeQueryGtList->num_gt = 3; xeQueryGtList->gt_list[0] = { - XE_QUERY_GT_TYPE_MAIN, // type - 0, // gt_id - 12500000, // clock_freq - 0b100, // native mem 
regions + 0x011, // slow mem regions }; xeQueryGtList->gt_list[1] = { - XE_QUERY_GT_TYPE_MEDIA, // type - 1, // gt_id - 12500000, // clock freq - 0b001, // native mem regions - 0x110, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MEDIA, // type + 1, // gt_id + 12500000, // clock freq + 0b001, // native mem regions + 0x110, // slow mem regions }; xeQueryGtList->gt_list[2] = { - XE_QUERY_GT_TYPE_MAIN, // type - 0, // gt_id - 12500000, // clock freq - 0b010, // native mem regions - 0x101, // slow mem regions + DRM_XE_QUERY_GT_TYPE_MAIN, // type + 0, // gt_id + 12500000, // clock freq + 0b010, // native mem regions + 0x101, // slow mem regions }; } @@ -169,7 +169,7 @@ class DrmMockXe : public DrmMockCustom { } deviceQuery->size = sizeof(queryEngines); break; - case DRM_XE_DEVICE_QUERY_MEM_USAGE: + case DRM_XE_DEVICE_QUERY_MEM_REGIONS: if (deviceQuery->data) { memcpy_s(reinterpret_cast<void *>(deviceQuery->data), deviceQuery->size, queryMemUsage, sizeof(queryMemUsage)); } diff --git a/third_party/uapi/drm/xe_drm.h b/third_party/uapi/drm/xe_drm.h index babfaf0fe4..255b360a1c 100644 --- a/third_party/uapi/drm/xe_drm.h +++ b/third_party/uapi/drm/xe_drm.h @@ -19,12 +19,12 @@ extern "C" { /** * DOC: uevent generated by xe on it's pci node. * - * XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt + * DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt * fails. The value supplied with the event is always "NEEDS_RESET". * Additional information supplied is tile id and gt id of the gt unit for * which reset has failed. */ -#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS" +#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS" /** * struct xe_user_extension - Base class for defining a chain of extensions @@ -141,21 +141,22 @@ struct drm_xe_engine_class_instance { __u16 engine_instance; __u16 gt_id; - __u16 rsvd; + /** @pad: MBZ */ + __u16 pad; }; /** * enum drm_xe_memory_class - Supported memory classes. */ enum drm_xe_memory_class { - /** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */ - XE_MEM_REGION_CLASS_SYSMEM = 0, + /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */ + DRM_XE_MEM_REGION_CLASS_SYSMEM = 0, /** - * @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this + * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this * represents the memory that is local to the device, which we * call VRAM. Not valid on integrated platforms. */ - XE_MEM_REGION_CLASS_VRAM + DRM_XE_MEM_REGION_CLASS_VRAM }; /** @@ -215,7 +216,7 @@ struct drm_xe_query_mem_region { * always equal the @total_size, since all of it will be CPU * accessible. * - * Note this is only tracked for XE_MEM_REGION_CLASS_VRAM + * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM * regions (for other types the value here will always equal * zero). */ @@ -227,7 +228,7 @@ struct drm_xe_query_mem_region { * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable * accounting. Without this the value here will always equal * zero. Note this is only currently tracked for - * XE_MEM_REGION_CLASS_VRAM regions (for other types the value + * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value * here will always be zero). 
*/ __u64 cpu_visible_used; @@ -290,13 +291,13 @@ struct drm_xe_query_engine_cycles { }; /** - * struct drm_xe_query_mem_usage - describe memory regions and usage + * struct drm_xe_query_mem_regions - describe memory regions * * If a query is made with a struct drm_xe_device_query where .query - * is equal to DRM_XE_DEVICE_QUERY_MEM_USAGE, then the reply uses - * struct drm_xe_query_mem_usage in .data. + * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses + * struct drm_xe_query_mem_regions in .data. */ -struct drm_xe_query_mem_usage { +struct drm_xe_query_mem_regions { /** @num_regions: number of memory regions returned in @regions */ __u32 num_regions; /** @pad: MBZ */ @@ -320,12 +321,12 @@ struct drm_xe_query_config { /** @pad: MBZ */ __u32 pad; -#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0 -#define XE_QUERY_CONFIG_FLAGS 1 - #define XE_QUERY_CONFIG_FLAGS_HAS_VRAM (0x1 << 0) -#define XE_QUERY_CONFIG_MIN_ALIGNMENT 2 -#define XE_QUERY_CONFIG_VA_BITS 3 -#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4 +#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0 +#define DRM_XE_QUERY_CONFIG_FLAGS 1 + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0) +#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2 +#define DRM_XE_QUERY_CONFIG_VA_BITS 3 +#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4 /** @info: array of elements containing the config info */ __u64 info[]; }; @@ -339,8 +340,8 @@ struct drm_xe_query_config { * implementing graphics and/or media operations. */ struct drm_xe_query_gt { -#define XE_QUERY_GT_TYPE_MAIN 0 -#define XE_QUERY_GT_TYPE_MEDIA 1 +#define DRM_XE_QUERY_GT_TYPE_MAIN 0 +#define DRM_XE_QUERY_GT_TYPE_MEDIA 1 /** @type: GT type: Main or Media */ __u16 type; /** @gt_id: Unique ID of this GT within the PCI Device */ @@ -348,17 +349,19 @@ struct drm_xe_query_gt { /** @clock_freq: A clock frequency for timestamp */ __u32 clock_freq; /** - * @native_mem_regions: Bit mask of instances from - * drm_xe_query_mem_usage that lives on the same GPU/Tile and have - * direct access. + * @near_mem_regions: Bit mask of instances from + * drm_xe_query_mem_regions that are nearest to the current engines + * of this GT. */ - __u64 native_mem_regions; + __u64 near_mem_regions; /** - * @slow_mem_regions: Bit mask of instances from - * drm_xe_query_mem_usage that this GT can indirectly access, although - * they live on a different GPU/Tile. + * @far_mem_regions: Bit mask of instances from + * drm_xe_query_mem_regions that are far from the engines of this GT. + * In general, they have extra indirections when compared to the + * @near_mem_regions. For a discrete device this could mean system + * memory and memory living in a different tile. */ - __u64 slow_mem_regions; + __u64 far_mem_regions; /** @reserved: Reserved */ __u64 reserved[8]; }; @@ -400,7 +403,7 @@ struct drm_xe_query_topology_mask { * DSS_GEOMETRY ff ff ff ff 00 00 00 00 * means 32 DSS are available for geometry. */ -#define XE_TOPO_DSS_GEOMETRY (1 << 0) +#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0) /* * To query the mask of Dual Sub Slices (DSS) available for compute * operations. For example a query response containing the following @@ -408,7 +411,7 @@ struct drm_xe_query_topology_mask { * DSS_COMPUTE ff ff ff ff 00 00 00 00 * means 32 DSS are available for compute. */ -#define XE_TOPO_DSS_COMPUTE (1 << 1) +#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1) /* * To query the mask of Execution Units (EU) available per Dual Sub * Slices (DSS). 
For example a query response containing the following @@ -416,7 +419,7 @@ struct drm_xe_query_topology_mask { * EU_PER_DSS ff ff 00 00 00 00 00 00 * means each DSS has 16 EU. */ -#define XE_TOPO_EU_PER_DSS (1 << 2) +#define DRM_XE_TOPO_EU_PER_DSS (1 << 2) /** @type: type of mask */ __u16 type; @@ -467,7 +470,7 @@ struct drm_xe_device_query { __u64 extensions; #define DRM_XE_DEVICE_QUERY_ENGINES 0 -#define DRM_XE_DEVICE_QUERY_MEM_USAGE 1 +#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1 #define DRM_XE_DEVICE_QUERY_CONFIG 2 #define DRM_XE_DEVICE_QUERY_GT_LIST 3 #define DRM_XE_DEVICE_QUERY_HWCONFIG 4 @@ -497,8 +500,8 @@ struct drm_xe_gem_create { */ __u64 size; -#define XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24) -#define XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25) +#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24) +#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25) /* * When using VRAM as a possible placement, ensure that the corresponding VRAM * allocation will always use the CPU accessible part of VRAM. This is important @@ -514,7 +517,7 @@ struct drm_xe_gem_create { * display surfaces, therefore the kernel requires setting this flag for such * objects, otherwise an error is thrown on small-bar systems. */ -#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26) +#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26) /** * @flags: Flags, currently a mask of memory instances of where BO can * be placed @@ -581,14 +584,14 @@ struct drm_xe_ext_set_property { }; struct drm_xe_vm_create { -#define XE_VM_EXTENSION_SET_PROPERTY 0 +#define DRM_XE_VM_EXTENSION_SET_PROPERTY 0 /** @extensions: Pointer to the first extension struct, if any */ __u64 extensions; -#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0) -#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1) -#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2) -#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3) +#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0) +#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE (1 << 1) +#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT (1 << 2) +#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 3) /** @flags: Flags */ __u32 flags; @@ -644,34 +647,38 @@ struct drm_xe_vm_bind_op { */ __u64 tile_mask; -#define XE_VM_BIND_OP_MAP 0x0 -#define XE_VM_BIND_OP_UNMAP 0x1 -#define XE_VM_BIND_OP_MAP_USERPTR 0x2 -#define XE_VM_BIND_OP_UNMAP_ALL 0x3 -#define XE_VM_BIND_OP_PREFETCH 0x4 +#define DRM_XE_VM_BIND_OP_MAP 0x0 +#define DRM_XE_VM_BIND_OP_UNMAP 0x1 +#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2 +#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3 +#define DRM_XE_VM_BIND_OP_PREFETCH 0x4 /** @op: Bind operation to perform */ __u32 op; -#define XE_VM_BIND_FLAG_READONLY (0x1 << 0) -#define XE_VM_BIND_FLAG_ASYNC (0x1 << 1) +#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0) +#define DRM_XE_VM_BIND_FLAG_ASYNC (1 << 1) /* * Valid on a faulting VM only, do the MAP operation immediately rather * than deferring the MAP to the page fault handler. */ -#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2) +#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 2) /* * When the NULL flag is set, the page tables are setup with a special * bit which indicates writes are dropped and all reads return zero. In - * the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP + * the future, the NULL flags will only be valid for DRM_XE_VM_BIND_OP_MAP * operations, the BO handle MBZ, and the BO offset MBZ. This flag is * intended to implement VK sparse bindings. 
*/ -#define XE_VM_BIND_FLAG_NULL (0x1 << 3) +#define DRM_XE_VM_BIND_FLAG_NULL (1 << 3) /** @flags: Bind flags */ __u32 flags; - /** @mem_region: Memory region to prefetch VMA to, instance not a mask */ - __u32 region; + /** + * @prefetch_mem_region_instance: Memory region to prefetch VMA to. + * It is a region instance, not a mask. + * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation. + */ + __u32 prefetch_mem_region_instance; /** @reserved: Reserved */ __u64 reserved[2]; @@ -721,19 +728,19 @@ struct drm_xe_vm_bind { __u64 reserved[2]; }; -/* For use with XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */ +/* For use with DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */ /* Monitor 128KB contiguous region with 4K sub-granularity */ -#define XE_ACC_GRANULARITY_128K 0 +#define DRM_XE_ACC_GRANULARITY_128K 0 /* Monitor 2MB contiguous region with 64KB sub-granularity */ -#define XE_ACC_GRANULARITY_2M 1 +#define DRM_XE_ACC_GRANULARITY_2M 1 /* Monitor 16MB contiguous region with 512KB sub-granularity */ -#define XE_ACC_GRANULARITY_16M 2 +#define DRM_XE_ACC_GRANULARITY_16M 2 /* Monitor 64MB contiguous region with 2M sub-granularity */ -#define XE_ACC_GRANULARITY_64M 3 +#define DRM_XE_ACC_GRANULARITY_64M 3 /** * struct drm_xe_exec_queue_set_property - exec queue set property @@ -747,14 +754,14 @@ struct drm_xe_exec_queue_set_property { /** @exec_queue_id: Exec queue ID */ __u32 exec_queue_id; -#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 -#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 -#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2 -#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3 -#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4 -#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5 -#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6 -#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7 /** @property: property to set */ __u32 property; @@ -766,7 +773,7 @@ struct drm_xe_exec_queue_set_property { }; struct drm_xe_exec_queue_create { -#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0 +#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0 /** @extensions: Pointer to the first extension struct, if any */ __u64 extensions; @@ -805,7 +812,7 @@ struct drm_xe_exec_queue_get_property { /** @exec_queue_id: Exec queue ID */ __u32 exec_queue_id; -#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 +#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 /** @property: property to get */ __u32 property; @@ -831,11 +838,11 @@ struct drm_xe_sync { /** @extensions: Pointer to the first extension struct, if any */ __u64 extensions; -#define DRM_XE_SYNC_SYNCOBJ 0x0 -#define DRM_XE_SYNC_TIMELINE_SYNCOBJ 0x1 -#define DRM_XE_SYNC_DMA_BUF 0x2 -#define DRM_XE_SYNC_USER_FENCE 0x3 -#define DRM_XE_SYNC_SIGNAL 0x10 +#define DRM_XE_SYNC_FLAG_SYNCOBJ 0x0 +#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ 0x1 +#define DRM_XE_SYNC_FLAG_DMA_BUF 0x2 +#define DRM_XE_SYNC_FLAG_USER_FENCE 0x3 +#define DRM_XE_SYNC_FLAG_SIGNAL 0x10 __u32 flags; /** @pad: MBZ */ @@ -912,17 +919,17 @@ struct drm_xe_wait_user_fence { */ __u64 addr; -#define DRM_XE_UFENCE_WAIT_EQ 0 -#define DRM_XE_UFENCE_WAIT_NEQ 1 -#define DRM_XE_UFENCE_WAIT_GT 2 -#define 
DRM_XE_UFENCE_WAIT_GTE 3 -#define DRM_XE_UFENCE_WAIT_LT 4 -#define DRM_XE_UFENCE_WAIT_LTE 5 +#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0 +#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1 +#define DRM_XE_UFENCE_WAIT_OP_GT 0x2 +#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3 +#define DRM_XE_UFENCE_WAIT_OP_LT 0x4 +#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5 /** @op: wait operation (type of comparison) */ __u16 op; -#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */ -#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1) +#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */ +#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 1) /** @flags: wait flags */ __u16 flags; @@ -932,18 +939,19 @@ struct drm_xe_wait_user_fence { /** @value: compare value */ __u64 value; -#define DRM_XE_UFENCE_WAIT_U8 0xffu -#define DRM_XE_UFENCE_WAIT_U16 0xffffu -#define DRM_XE_UFENCE_WAIT_U32 0xffffffffu -#define DRM_XE_UFENCE_WAIT_U64 0xffffffffffffffffu +#define DRM_XE_UFENCE_WAIT_MASK_U8 0xffu +#define DRM_XE_UFENCE_WAIT_MASK_U16 0xffffu +#define DRM_XE_UFENCE_WAIT_MASK_U32 0xffffffffu +#define DRM_XE_UFENCE_WAIT_MASK_U64 0xffffffffffffffffu /** @mask: comparison mask */ __u64 mask; + /** * @timeout: how long to wait before bailing, value in nanoseconds. - * Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout) + * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout) * it contains timeout expressed in nanoseconds to wait (fence will * expire at now() + timeout). - * When DRM_XE_UFENCE_WAIT_ABSTIME flat is set (absolute timeout) wait + * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout) wait * will end at timeout (uses system MONOTONIC_CLOCK). * Passing negative timeout leads to neverending wait. * @@ -956,13 +964,13 @@ struct drm_xe_wait_user_fence { /** * @num_engines: number of engine instances to wait on, must be zero - * when DRM_XE_UFENCE_WAIT_SOFT_OP set + * when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set */ __u64 num_engines; /** * @instances: user pointer to array of drm_xe_engine_class_instance to - * wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set + * wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set */ __u64 instances; @@ -973,11 +981,11 @@ struct drm_xe_wait_user_fence { /** * DOC: XE PMU event config IDs * - * Check 'man perf_event_open' to use the ID's XE_PMU_XXXX listed in xe_drm.h + * Check 'man perf_event_open' to use the ID's DRM_XE_PMU_XXXX listed in xe_drm.h * in 'struct perf_event_attr' as part of perf_event_open syscall to read a * particular event. * - * For example to open the XE_PMU_RENDER_GROUP_BUSY(0): + * For example to open the DRM_XE_PMU_RENDER_GROUP_BUSY(0): * * .. code-block:: C * * @@ -991,7 +999,7 @@ struct drm_xe_wait_user_fence { * attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED; * attr.use_clockid = 1; * attr.clockid = CLOCK_MONOTONIC; - * attr.config = XE_PMU_RENDER_GROUP_BUSY(0); + * attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0); * * fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0); */ @@ -999,15 +1007,15 @@ /* * Top bits of every counter are GT id. 
*/ -#define __XE_PMU_GT_SHIFT (56) +#define __DRM_XE_PMU_GT_SHIFT (56) -#define ___XE_PMU_OTHER(gt, x) \ - (((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT)) +#define ___DRM_XE_PMU_OTHER(gt, x) \ + (((__u64)(x)) | ((__u64)(gt) << __DRM_XE_PMU_GT_SHIFT)) -#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 0) -#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1) -#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2) -#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3) +#define DRM_XE_PMU_RENDER_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 0) +#define DRM_XE_PMU_COPY_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 1) +#define DRM_XE_PMU_MEDIA_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 2) +#define DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 3) #if defined(__cplusplus) }
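For reference, a minimal sketch of the renamed user-fence wait (not part of the patch): it mirrors what xeVmBind()/xeWaitUserFence() do above. Here fd and fenceAddr are hypothetical placeholders for an open Xe DRM device and a CPU-visible fence word; DRM_XE_UFENCE_WAIT_* and DRM_IOCTL_XE_WAIT_USER_FENCE are the names xe_drm.h defines after this patch.

/* Sketch only (assumptions above): block until *fenceAddr == expected. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "xe_drm.h"

static int waitFenceEq(int fd, uint64_t fenceAddr, uint64_t expected)
{
    struct drm_xe_wait_user_fence wait;
    memset(&wait, 0, sizeof(wait));
    wait.addr = fenceAddr;                        /* user fence location */
    wait.op = DRM_XE_UFENCE_WAIT_OP_EQ;           /* was DRM_XE_UFENCE_WAIT_EQ */
    wait.flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP; /* was DRM_XE_UFENCE_WAIT_SOFT_OP */
    wait.value = expected;                        /* compare value */
    wait.mask = DRM_XE_UFENCE_WAIT_MASK_U64;      /* was DRM_XE_UFENCE_WAIT_U64 */
    wait.timeout = 1000000000;                    /* 1 s, relative (no FLAG_ABSTIME) */
    /* num_engines and instances stay zero/NULL, as required with SOFT_OP */
    return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
}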