intel/compute-runtime commit dc29c08abd (parent a02ac1c140)
committed by Compute-Runtime-Automation

fix: align NEO code to new uAPI header

Related-To: NEO-9566
Signed-off-by: Naklicki, Mateusz <mateusz.naklicki@intel.com>
@@ -224,17 +224,17 @@ int64_t SysmanKmdInterfaceXe::getEngineActivityFd(zes_engine_group_t engineGroup
 
     switch (engineGroup) {
     case ZES_ENGINE_GROUP_ALL:
-        config = XE_PMU_ANY_ENGINE_GROUP_BUSY(subDeviceId);
+        config = DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(subDeviceId);
         break;
     case ZES_ENGINE_GROUP_COMPUTE_ALL:
     case ZES_ENGINE_GROUP_RENDER_ALL:
-        config = XE_PMU_RENDER_GROUP_BUSY(subDeviceId);
+        config = DRM_XE_PMU_RENDER_GROUP_BUSY(subDeviceId);
         break;
     case ZES_ENGINE_GROUP_COPY_ALL:
-        config = XE_PMU_COPY_GROUP_BUSY(subDeviceId);
+        config = DRM_XE_PMU_COPY_GROUP_BUSY(subDeviceId);
         break;
     case ZES_ENGINE_GROUP_MEDIA_ALL:
-        config = XE_PMU_MEDIA_GROUP_BUSY(subDeviceId);
+        config = DRM_XE_PMU_MEDIA_GROUP_BUSY(subDeviceId);
         break;
     default:
         break;
@@ -86,15 +86,15 @@ const char *IoctlHelperXe::xeGetClassName(int className) {
 
 const char *IoctlHelperXe::xeGetBindOperationName(int bindOperation) {
     switch (bindOperation) {
-    case XE_VM_BIND_OP_MAP:
+    case DRM_XE_VM_BIND_OP_MAP:
         return "MAP";
-    case XE_VM_BIND_OP_UNMAP:
+    case DRM_XE_VM_BIND_OP_UNMAP:
         return "UNMAP";
-    case XE_VM_BIND_OP_MAP_USERPTR:
+    case DRM_XE_VM_BIND_OP_MAP_USERPTR:
         return "MAP_USERPTR";
-    case XE_VM_BIND_OP_UNMAP_ALL:
+    case DRM_XE_VM_BIND_OP_UNMAP_ALL:
         return "UNMAP ALL";
-    case XE_VM_BIND_OP_PREFETCH:
+    case DRM_XE_VM_BIND_OP_PREFETCH:
         return "PREFETCH";
     }
     return "Unknown operation";
@@ -102,13 +102,13 @@ const char *IoctlHelperXe::xeGetBindOperationName(int bindOperation) {
 
 const char *IoctlHelperXe::xeGetBindFlagsName(int bindFlags) {
     switch (bindFlags) {
-    case XE_VM_BIND_FLAG_READONLY:
+    case DRM_XE_VM_BIND_FLAG_READONLY:
         return "READ_ONLY";
-    case XE_VM_BIND_FLAG_ASYNC:
+    case DRM_XE_VM_BIND_FLAG_ASYNC:
         return "ASYNC";
-    case XE_VM_BIND_FLAG_IMMEDIATE:
+    case DRM_XE_VM_BIND_FLAG_IMMEDIATE:
         return "IMMEDIATE";
-    case XE_VM_BIND_FLAG_NULL:
+    case DRM_XE_VM_BIND_FLAG_NULL:
         return "NULL";
     }
     return "Unknown flag";
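For orientation, these name helpers are exercised by the verifyPublicFunctions test further down this diff; a minimal usage sketch under the renamed constants (the "helper" instance is illustrative, not from the commit):

    // Sketch: the renamed uAPI constants map to the same debug strings as before.
    const char *opName = helper->xeGetBindOperationName(DRM_XE_VM_BIND_OP_MAP);      // "MAP"
    const char *flagName = helper->xeGetBindFlagsName(DRM_XE_VM_BIND_FLAG_READONLY); // "READ_ONLY"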
@@ -149,27 +149,27 @@ bool IoctlHelperXe::initialize() {
     struct drm_xe_query_config *config = reinterpret_cast<struct drm_xe_query_config *>(data.data());
     queryConfig.data = castToUint64(config);
     IoctlHelper::ioctl(DrmIoctl::Query, &queryConfig);
-    xeLog("XE_QUERY_CONFIG_REV_AND_DEVICE_ID\t%#llx\n",
+    xeLog("DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID\t%#llx\n",
-          config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID]);
+          config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID]);
     xeLog(" REV_ID\t\t\t\t%#llx\n",
-          (config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff);
+          (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff);
     xeLog(" DEVICE_ID\t\t\t\t%#llx\n",
-          config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff);
+          config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff);
-    xeLog("XE_QUERY_CONFIG_FLAGS\t\t\t%#llx\n",
+    xeLog("DRM_XE_QUERY_CONFIG_FLAGS\t\t\t%#llx\n",
-          config->info[XE_QUERY_CONFIG_FLAGS]);
+          config->info[DRM_XE_QUERY_CONFIG_FLAGS]);
-    xeLog(" XE_QUERY_CONFIG_FLAGS_HAS_VRAM\t%s\n",
+    xeLog(" DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM\t%s\n",
-          config->info[XE_QUERY_CONFIG_FLAGS] &
+          config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
-                  XE_QUERY_CONFIG_FLAGS_HAS_VRAM
+                  DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM
               ? "ON"
               : "OFF");
-    xeLog("XE_QUERY_CONFIG_MIN_ALIGNMENT\t\t%#llx\n",
+    xeLog("DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT\t\t%#llx\n",
-          config->info[XE_QUERY_CONFIG_MIN_ALIGNMENT]);
+          config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT]);
-    xeLog("XE_QUERY_CONFIG_VA_BITS\t\t%#llx\n",
+    xeLog("DRM_XE_QUERY_CONFIG_VA_BITS\t\t%#llx\n",
-          config->info[XE_QUERY_CONFIG_VA_BITS]);
+          config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);
 
-    chipsetId = config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
+    chipsetId = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
-    revId = static_cast<int>((config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff);
+    revId = static_cast<int>((config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff);
-    hasVram = config->info[XE_QUERY_CONFIG_FLAGS] & XE_QUERY_CONFIG_FLAGS_HAS_VRAM ? 1 : 0;
+    hasVram = config->info[DRM_XE_QUERY_CONFIG_FLAGS] & DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM ? 1 : 0;
 
     memset(&queryConfig, 0, sizeof(queryConfig));
     queryConfig.query = DRM_XE_DEVICE_QUERY_HWCONFIG;
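For readers of the log lines above: the low word of DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID holds the PCI device id and bits 16-23 hold the revision, which is exactly what the shifts in initialize() extract. A standalone sketch of the same decode (the helper name is illustrative, not part of the commit):

    #include <cstdint>

    // Decode DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID as initialize() does:
    // device id in bits [15:0], revision id in bits [23:16].
    inline void decodeRevAndDeviceId(uint64_t info, uint16_t &deviceId, uint8_t &revId) {
        deviceId = static_cast<uint16_t>(info & 0xffff);
        revId = static_cast<uint8_t>((info >> 16) & 0xff);
    }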
@@ -279,7 +279,7 @@ inline MemoryRegion createMemoryRegionFromXeMemRegion(const drm_xe_query_mem_reg
 }
 
 std::unique_ptr<MemoryInfo> IoctlHelperXe::createMemoryInfo() {
-    auto memUsageData = queryData<uint64_t>(DRM_XE_DEVICE_QUERY_MEM_USAGE);
+    auto memUsageData = queryData<uint64_t>(DRM_XE_DEVICE_QUERY_MEM_REGIONS);
     auto gtListData = queryData<uint64_t>(DRM_XE_DEVICE_QUERY_GT_LIST);
 
     if (memUsageData.empty() || gtListData.empty()) {
@@ -287,15 +287,15 @@ std::unique_ptr<MemoryInfo> IoctlHelperXe::createMemoryInfo() {
     }
 
     MemoryInfo::RegionContainer regionsContainer{};
-    auto xeMemUsageData = reinterpret_cast<drm_xe_query_mem_usage *>(memUsageData.data());
+    auto xeMemRegionsData = reinterpret_cast<drm_xe_query_mem_regions *>(memUsageData.data());
     auto xeGtListData = reinterpret_cast<drm_xe_query_gt_list *>(gtListData.data());
 
     std::array<drm_xe_query_mem_region *, 64> memoryRegionInstances{};
 
-    for (auto i = 0u; i < xeMemUsageData->num_regions; i++) {
+    for (auto i = 0u; i < xeMemRegionsData->num_regions; i++) {
-        auto &region = xeMemUsageData->regions[i];
+        auto &region = xeMemRegionsData->regions[i];
         memoryRegionInstances[region.instance] = &region;
-        if (region.mem_class == XE_MEM_REGION_CLASS_SYSMEM) {
+        if (region.mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
             regionsContainer.push_back(createMemoryRegionFromXeMemRegion(region));
         }
     }
@@ -305,9 +305,9 @@ std::unique_ptr<MemoryInfo> IoctlHelperXe::createMemoryInfo() {
     }
 
     for (auto i = 0u; i < xeGtListData->num_gt; i++) {
-        if (xeGtListData->gt_list[i].type != XE_QUERY_GT_TYPE_MEDIA) {
+        if (xeGtListData->gt_list[i].type != DRM_XE_QUERY_GT_TYPE_MEDIA) {
-            uint64_t nativeMemRegions = xeGtListData->gt_list[i].native_mem_regions;
+            uint64_t nearMemRegions = xeGtListData->gt_list[i].near_mem_regions;
-            auto regionIndex = Math::log2(nativeMemRegions);
+            auto regionIndex = Math::log2(nearMemRegions);
             UNRECOVERABLE_IF(!memoryRegionInstances[regionIndex]);
             regionsContainer.push_back(createMemoryRegionFromXeMemRegion(*memoryRegionInstances[regionIndex]));
             xeTimestampFrequency = xeGtListData->gt_list[i].clock_freq;
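near_mem_regions is a bit mask of memory region instances, so Math::log2 above recovers the instance index of the local region. A self-contained sketch of that mask-to-index step (hypothetical helper, assuming the highest set bit is wanted, as a floor-log2 returns):

    #include <cstdint>

    // Index of the highest set bit: 0b100 -> 2, matching the mocked
    // gt_list entries used by the tests later in this diff.
    inline uint32_t regionIndexFromMask(uint64_t nearMemRegions) {
        uint32_t index = 0;
        while (nearMemRegions >>= 1) {
            ++index;
        }
        return index;
    }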
@@ -467,7 +467,7 @@ bool IoctlHelperXe::getTopologyDataAndMap(const HardwareInfo &hwInfo, DrmQueryTo
 
     auto tileIndex = 0u;
     for (auto gt = 0u; gt < gtIdToTile.size(); gt++) {
-        if (xeGtListData->gt_list[gt].type != XE_QUERY_GT_TYPE_MEDIA) {
+        if (xeGtListData->gt_list[gt].type != DRM_XE_QUERY_GT_TYPE_MEDIA) {
             gtIdToTile[gt] = tileIndex++;
         }
     }
@@ -481,15 +481,15 @@ bool IoctlHelperXe::getTopologyDataAndMap(const HardwareInfo &hwInfo, DrmQueryTo
 
         uint32_t gtId = topo->gt_id;
 
-        if (xeGtListData->gt_list[gtId].type != XE_QUERY_GT_TYPE_MEDIA) {
+        if (xeGtListData->gt_list[gtId].type != DRM_XE_QUERY_GT_TYPE_MEDIA) {
             switch (topo->type) {
-            case XE_TOPO_DSS_GEOMETRY:
+            case DRM_XE_TOPO_DSS_GEOMETRY:
                 fillMask(geomDss[gtIdToTile[gtId]], topo);
                 break;
-            case XE_TOPO_DSS_COMPUTE:
+            case DRM_XE_TOPO_DSS_COMPUTE:
                 fillMask(computeDss[gtIdToTile[gtId]], topo);
                 break;
-            case XE_TOPO_EU_PER_DSS:
+            case DRM_XE_TOPO_EU_PER_DSS:
                 fillMask(euDss[gtIdToTile[gtId]], topo);
                 break;
             default:
@@ -612,7 +612,7 @@ int IoctlHelperXe::xeWaitUserFence(uint64_t mask, uint16_t op, uint64_t addr, ui
     struct drm_xe_wait_user_fence wait = {};
     wait.addr = addr;
     wait.op = op;
-    wait.flags = DRM_XE_UFENCE_WAIT_SOFT_OP;
+    wait.flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP;
     wait.value = value;
     wait.mask = mask;
     wait.timeout = timeout;
@@ -630,16 +630,16 @@ int IoctlHelperXe::waitUserFence(uint32_t ctxId, uint64_t address,
     uint64_t mask;
     switch (dataWidth) {
     case static_cast<uint32_t>(Drm::ValueWidth::U64):
-        mask = DRM_XE_UFENCE_WAIT_U64;
+        mask = DRM_XE_UFENCE_WAIT_MASK_U64;
         break;
     case static_cast<uint32_t>(Drm::ValueWidth::U32):
-        mask = DRM_XE_UFENCE_WAIT_U32;
+        mask = DRM_XE_UFENCE_WAIT_MASK_U32;
         break;
     case static_cast<uint32_t>(Drm::ValueWidth::U16):
-        mask = DRM_XE_UFENCE_WAIT_U16;
+        mask = DRM_XE_UFENCE_WAIT_MASK_U16;
         break;
     default:
-        mask = DRM_XE_UFENCE_WAIT_U8;
+        mask = DRM_XE_UFENCE_WAIT_MASK_U8;
         break;
     }
     if (timeout == -1) {
@@ -647,7 +647,7 @@ int IoctlHelperXe::waitUserFence(uint32_t ctxId, uint64_t address,
         timeout = TimeoutControls::maxTimeout;
     }
     if (address) {
-        return xeWaitUserFence(mask, DRM_XE_UFENCE_WAIT_GTE, address, value, timeout);
+        return xeWaitUserFence(mask, DRM_XE_UFENCE_WAIT_OP_GTE, address, value, timeout);
     }
     return 0;
 }
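Putting the two hunks together, a 64-bit greater-or-equal wait under the renamed constants is assembled roughly as below. This is a sketch only; DRM_IOCTL_XE_WAIT_USER_FENCE and the fd plumbing are assumed from the vendored header and NEO's wrappers rather than shown in this diff:

    #include <cstdint>
    #include <sys/ioctl.h>
    #include "xe_drm.h" // the vendored header patched at the bottom of this diff

    // Sketch: block until *(uint64_t *)addr >= value, or until timeoutNs elapses.
    int waitFence64(int drmFd, uint64_t addr, uint64_t value, int64_t timeoutNs) {
        struct drm_xe_wait_user_fence wait = {};
        wait.addr = addr;
        wait.op = DRM_XE_UFENCE_WAIT_OP_GTE;          // was DRM_XE_UFENCE_WAIT_GTE
        wait.flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP; // was DRM_XE_UFENCE_WAIT_SOFT_OP
        wait.value = value;
        wait.mask = DRM_XE_UFENCE_WAIT_MASK_U64;      // was DRM_XE_UFENCE_WAIT_U64
        wait.timeout = timeoutNs;
        return ioctl(drmFd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
    }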
@@ -714,7 +714,7 @@ int IoctlHelperXe::execBuffer(ExecBuffer *execBuffer, uint64_t completionGpuAddr
                               completionGpuAddress, counterValue, engine);
 
     struct drm_xe_sync sync[1] = {};
-    sync[0].flags = DRM_XE_SYNC_USER_FENCE | DRM_XE_SYNC_SIGNAL;
+    sync[0].flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL;
     sync[0].addr = completionGpuAddress;
     sync[0].timeline_value = counterValue;
     struct drm_xe_exec exec = {};
@@ -883,9 +883,9 @@ int IoctlHelperXe::getDrmParamValue(DrmParam drmParam) const {
 
     switch (drmParam) {
     case DrmParam::MemoryClassDevice:
-        return XE_MEM_REGION_CLASS_VRAM;
+        return DRM_XE_MEM_REGION_CLASS_VRAM;
     case DrmParam::MemoryClassSystem:
-        return XE_MEM_REGION_CLASS_SYSMEM;
+        return DRM_XE_MEM_REGION_CLASS_SYSMEM;
     case DrmParam::EngineClassRender:
         return DRM_XE_ENGINE_CLASS_RENDER;
     case DrmParam::EngineClassCopy:
@@ -1067,10 +1067,10 @@ int IoctlHelperXe::ioctl(DrmIoctl request, void *arg) {
     case DrmIoctl::GemVmCreate: {
         GemVmControl *d = static_cast<GemVmControl *>(arg);
         struct drm_xe_vm_create args = {};
-        args.flags = DRM_XE_VM_CREATE_ASYNC_DEFAULT |
+        args.flags = DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
-                     DRM_XE_VM_CREATE_COMPUTE_MODE;
+                     DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE;
         if (drm.hasPageFaultSupport()) {
-            args.flags |= DRM_XE_VM_CREATE_FAULT_MODE;
+            args.flags |= DRM_XE_VM_CREATE_FLAG_FAULT_MODE;
         }
         ret = IoctlHelper::ioctl(request, &args);
         d->vmId = ret ? 0 : args.vm_id;
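The same flag composition, written directly against the vendored header (a sketch; drmFd and pageFaultsSupported stand in for NEO's wrappers, and DRM_IOCTL_XE_VM_CREATE is assumed from xe_drm.h):

    struct drm_xe_vm_create args = {};
    args.flags = DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
                 DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE;
    if (pageFaultsSupported) {
        args.flags |= DRM_XE_VM_CREATE_FLAG_FAULT_MODE; // only when page faults are supported
    }
    int ret = ioctl(drmFd, DRM_IOCTL_XE_VM_CREATE, &args);
    uint32_t vmId = (ret == 0) ? args.vm_id : 0;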
@@ -1238,7 +1238,7 @@ int IoctlHelperXe::xeVmBind(const VmBindParams &vmBindParams, bool isBind) {
     if (index != invalidIndex) {
 
         drm_xe_sync sync[1] = {};
-        sync[0].flags = DRM_XE_SYNC_USER_FENCE | DRM_XE_SYNC_SIGNAL;
+        sync[0].flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL;
         auto xeBindExtUserFence = reinterpret_cast<UserFenceExtension *>(vmBindParams.extensions);
         UNRECOVERABLE_IF(!xeBindExtUserFence);
         UNRECOVERABLE_IF(xeBindExtUserFence->tag != UserFenceExtension::tagValue);
@@ -1252,19 +1252,19 @@ int IoctlHelperXe::xeVmBind(const VmBindParams &vmBindParams, bool isBind) {
     bind.syncs = reinterpret_cast<uintptr_t>(&sync);
     bind.bind.range = vmBindParams.length;
     bind.bind.addr = gmmHelper->decanonize(vmBindParams.start);
-    bind.bind.flags = XE_VM_BIND_FLAG_ASYNC;
+    bind.bind.flags = DRM_XE_VM_BIND_FLAG_ASYNC;
     bind.bind.obj_offset = vmBindParams.offset;
 
     if (isBind) {
-        bind.bind.op = XE_VM_BIND_OP_MAP;
+        bind.bind.op = DRM_XE_VM_BIND_OP_MAP;
         bind.bind.obj = vmBindParams.handle;
         if (bindInfo[index].handle & XE_USERPTR_FAKE_FLAG) {
-            bind.bind.op = XE_VM_BIND_OP_MAP_USERPTR;
+            bind.bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR;
             bind.bind.obj = 0;
             bind.bind.obj_offset = bindInfo[index].userptr;
         }
     } else {
-        bind.bind.op = XE_VM_BIND_OP_UNMAP;
+        bind.bind.op = DRM_XE_VM_BIND_OP_UNMAP;
         bind.bind.obj = 0;
         if (bindInfo[index].handle & XE_USERPTR_FAKE_FLAG) {
             bind.bind.obj_offset = bindInfo[index].userptr;
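The op selection above reduces to a small table; summarized as a sketch (XE_USERPTR_FAKE_FLAG is a NEO-internal marker, not a uAPI flag):

    // bind?  userptr-backed?  -> op (renamed constants)
    // yes    no               -> DRM_XE_VM_BIND_OP_MAP          (obj = BO handle)
    // yes    yes              -> DRM_XE_VM_BIND_OP_MAP_USERPTR  (obj = 0, obj_offset = userptr)
    // no     either           -> DRM_XE_VM_BIND_OP_UNMAP        (obj = 0)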
@@ -1293,7 +1293,7 @@ int IoctlHelperXe::xeVmBind(const VmBindParams &vmBindParams, bool isBind) {
         return ret;
     }
 
-    return xeWaitUserFence(DRM_XE_UFENCE_WAIT_U64, DRM_XE_UFENCE_WAIT_EQ,
+    return xeWaitUserFence(DRM_XE_UFENCE_WAIT_MASK_U64, DRM_XE_UFENCE_WAIT_OP_EQ,
                            sync[0].addr,
                            sync[0].timeline_value, XE_ONE_SEC);
 }
@@ -314,8 +314,8 @@ TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallingAnyMethodThenDummyValueIsRe
     // Default no translation:
     verifyDrmGetParamValue(static_cast<int>(DrmParam::ExecRender), DrmParam::ExecRender);
     // test exception:
-    verifyDrmGetParamValue(XE_MEM_REGION_CLASS_VRAM, DrmParam::MemoryClassDevice);
+    verifyDrmGetParamValue(DRM_XE_MEM_REGION_CLASS_VRAM, DrmParam::MemoryClassDevice);
-    verifyDrmGetParamValue(XE_MEM_REGION_CLASS_SYSMEM, DrmParam::MemoryClassSystem);
+    verifyDrmGetParamValue(DRM_XE_MEM_REGION_CLASS_SYSMEM, DrmParam::MemoryClassSystem);
     verifyDrmGetParamValue(DRM_XE_ENGINE_CLASS_RENDER, DrmParam::EngineClassRender);
     verifyDrmGetParamValue(DRM_XE_ENGINE_CLASS_COPY, DrmParam::EngineClassCopy);
     verifyDrmGetParamValue(DRM_XE_ENGINE_CLASS_VIDEO_DECODE, DrmParam::EngineClassVideo);
@@ -459,17 +459,17 @@ TEST(IoctlHelperXeTest, verifyPublicFunctions) {
     verifyXeClassName("vecs", DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE);
     verifyXeClassName("ccs", DRM_XE_ENGINE_CLASS_COMPUTE);
 
-    verifyXeOperationBindName("MAP", XE_VM_BIND_OP_MAP);
+    verifyXeOperationBindName("MAP", DRM_XE_VM_BIND_OP_MAP);
-    verifyXeOperationBindName("UNMAP", XE_VM_BIND_OP_UNMAP);
+    verifyXeOperationBindName("UNMAP", DRM_XE_VM_BIND_OP_UNMAP);
-    verifyXeOperationBindName("MAP_USERPTR", XE_VM_BIND_OP_MAP_USERPTR);
+    verifyXeOperationBindName("MAP_USERPTR", DRM_XE_VM_BIND_OP_MAP_USERPTR);
-    verifyXeOperationBindName("UNMAP ALL", XE_VM_BIND_OP_UNMAP_ALL);
+    verifyXeOperationBindName("UNMAP ALL", DRM_XE_VM_BIND_OP_UNMAP_ALL);
-    verifyXeOperationBindName("PREFETCH", XE_VM_BIND_OP_PREFETCH);
+    verifyXeOperationBindName("PREFETCH", DRM_XE_VM_BIND_OP_PREFETCH);
     verifyXeOperationBindName("Unknown operation", -1);
 
-    verifyXeFlagsBindName("READ_ONLY", XE_VM_BIND_FLAG_READONLY);
+    verifyXeFlagsBindName("READ_ONLY", DRM_XE_VM_BIND_FLAG_READONLY);
-    verifyXeFlagsBindName("ASYNC", XE_VM_BIND_FLAG_ASYNC);
+    verifyXeFlagsBindName("ASYNC", DRM_XE_VM_BIND_FLAG_ASYNC);
-    verifyXeFlagsBindName("IMMEDIATE", XE_VM_BIND_FLAG_IMMEDIATE);
+    verifyXeFlagsBindName("IMMEDIATE", DRM_XE_VM_BIND_FLAG_IMMEDIATE);
-    verifyXeFlagsBindName("NULL", XE_VM_BIND_FLAG_NULL);
+    verifyXeFlagsBindName("NULL", DRM_XE_VM_BIND_FLAG_NULL);
     verifyXeFlagsBindName("Unknown flag", -1);
 
     verifyXeEngineClassName("DRM_XE_ENGINE_CLASS_RENDER", DRM_XE_ENGINE_CLASS_RENDER);
@@ -539,17 +539,17 @@ TEST(IoctlHelperXeTest, whenCallingIoctlThenProperValueIsReturned) {
     {
         GemVmControl test = {};
         drm.pageFaultSupported = false;
-        uint32_t expectedVmCreateFlags = DRM_XE_VM_CREATE_ASYNC_DEFAULT |
+        uint32_t expectedVmCreateFlags = DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
-                                         DRM_XE_VM_CREATE_COMPUTE_MODE;
+                                         DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE;
         ret = mockXeIoctlHelper->ioctl(DrmIoctl::GemVmCreate, &test);
         EXPECT_EQ(0, ret);
         EXPECT_EQ(static_cast<int>(test.vmId), testValueVmId);
         EXPECT_EQ(test.flags, expectedVmCreateFlags);
 
         drm.pageFaultSupported = true;
-        expectedVmCreateFlags = DRM_XE_VM_CREATE_ASYNC_DEFAULT |
+        expectedVmCreateFlags = DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
-                                DRM_XE_VM_CREATE_COMPUTE_MODE |
+                                DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE |
-                                DRM_XE_VM_CREATE_FAULT_MODE;
+                                DRM_XE_VM_CREATE_FLAG_FAULT_MODE;
         ret = mockXeIoctlHelper->ioctl(DrmIoctl::GemVmCreate, &test);
         EXPECT_EQ(0, ret);
         EXPECT_EQ(static_cast<int>(test.vmId), testValueVmId);
@@ -686,9 +686,9 @@ TEST(IoctlHelperXeTest, givenGeomDssWhenGetTopologyDataAndMapThenResultsAreCorre
 
     uint16_t tileId = 0;
     for (auto gtId = 0u; gtId < 3u; gtId++) {
-        drm.addMockedQueryTopologyData(gtId, XE_TOPO_DSS_GEOMETRY, 8, {0b11'1111, 0, 0, 0, 0, 0, 0, 0});
+        drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0b11'1111, 0, 0, 0, 0, 0, 0, 0});
-        drm.addMockedQueryTopologyData(gtId, XE_TOPO_DSS_COMPUTE, 8, {0, 0, 0, 0, 0, 0, 0, 0});
+        drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0, 0, 0, 0, 0, 0, 0, 0});
-        drm.addMockedQueryTopologyData(gtId, XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0b1111'1111, 0, 0, 0, 0, 0, 0});
+        drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0b1111'1111, 0, 0, 0, 0, 0, 0});
     }
     DrmQueryTopologyData topologyData{};
     TopologyMap topologyMap{};
@@ -733,9 +733,9 @@ TEST(IoctlHelperXeTest, givenComputeDssWhenGetTopologyDataAndMapThenResultsAreCo
 
     uint16_t tileId = 0;
     for (auto gtId = 0u; gtId < 3u; gtId++) {
-        drm.addMockedQueryTopologyData(gtId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
+        drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
-        drm.addMockedQueryTopologyData(gtId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
+        drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
-        drm.addMockedQueryTopologyData(gtId, XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0, 0, 0, 0, 0, 0, 0});
+        drm.addMockedQueryTopologyData(gtId, DRM_XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0, 0, 0, 0, 0, 0, 0});
     }
 
     DrmQueryTopologyData topologyData{};
@@ -784,20 +784,20 @@ TEST(IoctlHelperXeTest, givenOnlyMediaTypeWhenGetTopologyDataAndMapThenSubsliceI
     auto xeQueryGtList = reinterpret_cast<drm_xe_query_gt_list *>(drm.queryGtList.begin());
     xeQueryGtList->num_gt = 1;
     xeQueryGtList->gt_list[0] = {
-        XE_QUERY_GT_TYPE_MEDIA, // type
+        DRM_XE_QUERY_GT_TYPE_MEDIA, // type
         0,                      // gt_id
         12500000,               // clock freq
         0b100,                  // native mem regions
         0x011,                  // slow mem regions
     };
 
     auto &hwInfo = *executionEnvironment->rootDeviceEnvironments[0]->getHardwareInfo();
     auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm);
 
     uint16_t tileId = 0;
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0, 0, 0, 0, 0, 0, 0});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 8, {0b1111'1111, 0, 0, 0, 0, 0, 0, 0});
 
     DrmQueryTopologyData topologyData{};
     TopologyMap topologyMap{};
@@ -829,40 +829,40 @@ TEST(IoctlHelperXeTest, givenMainAndMediaTypesWhenGetTopologyDataAndMapThenResul
     auto xeQueryGtList = reinterpret_cast<drm_xe_query_gt_list *>(drm.queryGtList.begin());
     xeQueryGtList->num_gt = 4;
     xeQueryGtList->gt_list[0] = {
-        XE_QUERY_GT_TYPE_MAIN, // type
+        DRM_XE_QUERY_GT_TYPE_MAIN, // type
         0,                     // gt_id
         12500000,              // clock freq
         0b100,                 // native mem regions
         0x011,                 // slow mem regions
     };
     xeQueryGtList->gt_list[1] = {
-        XE_QUERY_GT_TYPE_MEDIA, // type
+        DRM_XE_QUERY_GT_TYPE_MEDIA, // type
         0,                      // gt_id
         12500000,               // clock freq
         0b100,                  // native mem regions
         0x011,                  // slow mem regions
     };
     xeQueryGtList->gt_list[2] = {
-        XE_QUERY_GT_TYPE_MAIN, // type
+        DRM_XE_QUERY_GT_TYPE_MAIN, // type
         0,                     // gt_id
         12500000,              // clock freq
         0b010,                 // native mem regions
         0x101,                 // slow mem regions
     };
     xeQueryGtList->gt_list[3] = {
-        XE_QUERY_GT_TYPE_MEDIA, // type
+        DRM_XE_QUERY_GT_TYPE_MEDIA, // type
         0,                      // gt_id
         12500000,               // clock freq
         0b001,                  // native mem regions
         0x100,                  // slow mem regions
     };
 
     auto &hwInfo = *executionEnvironment->rootDeviceEnvironments[0]->getHardwareInfo();
     auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm);
     for (auto tileId = 0; tileId < 4; tileId++) {
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
     }
 
     DrmQueryTopologyData topologyData{};
@@ -890,10 +890,10 @@ TEST(IoctlHelperXeTest, givenMainAndMediaTypesWhenGetTopologyDataAndMapThenResul
 
 struct DrmMockXe2T : public DrmMockXe {
     DrmMockXe2T(RootDeviceEnvironment &rootDeviceEnvironment) : DrmMockXe(rootDeviceEnvironment) {
-        auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_usage *>(queryMemUsage);
+        auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_regions *>(queryMemUsage);
         xeQueryMemUsage->num_regions = 3;
         xeQueryMemUsage->regions[0] = {
-            XE_MEM_REGION_CLASS_VRAM, // class
+            DRM_XE_MEM_REGION_CLASS_VRAM, // class
             1,                         // instance
             0,                         // padding
             MemoryConstants::pageSize, // min page size
@@ -901,15 +901,15 @@ struct DrmMockXe2T : public DrmMockXe {
             MemoryConstants::megaByte  // used size
         };
         xeQueryMemUsage->regions[1] = {
-            XE_MEM_REGION_CLASS_SYSMEM, // class
+            DRM_XE_MEM_REGION_CLASS_SYSMEM, // class
             0,                          // instance
             0,                          // padding
             MemoryConstants::pageSize,  // min page size
             MemoryConstants::gigaByte,  // total size
             MemoryConstants::kiloByte   // used size
         };
         xeQueryMemUsage->regions[2] = {
-            XE_MEM_REGION_CLASS_VRAM, // class
+            DRM_XE_MEM_REGION_CLASS_VRAM, // class
             2,                         // instance
             0,                         // padding
             MemoryConstants::pageSize, // min page size
@@ -920,18 +920,18 @@ struct DrmMockXe2T : public DrmMockXe {
         auto xeQueryGtList = reinterpret_cast<drm_xe_query_gt_list *>(queryGtList.begin());
         xeQueryGtList->num_gt = 2;
         xeQueryGtList->gt_list[0] = {
-            XE_QUERY_GT_TYPE_MAIN, // type
+            DRM_XE_QUERY_GT_TYPE_MAIN, // type
             0,                     // gt_id
             12500000,              // clock freq
             0b100,                 // native mem regions
             0x011,                 // slow mem regions
         };
         xeQueryGtList->gt_list[1] = {
-            XE_QUERY_GT_TYPE_MAIN, // type
+            DRM_XE_QUERY_GT_TYPE_MAIN, // type
             0,                     // gt_id
             12500000,              // clock freq
             0b010,                 // native mem regions
             0x101,                 // slow mem regions
         };
     }
 };
@@ -945,9 +945,9 @@ TEST(IoctlHelperXeTest, given2TileAndComputeDssWhenGetTopologyDataAndMapThenResu
 
     // symetric tiles
     for (uint16_t tileId = 0; tileId < 2u; tileId++) {
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0});
     }
 
     DrmQueryTopologyData topologyData{};
@@ -1000,14 +1000,14 @@ TEST(IoctlHelperXeTest, given2TileWithDisabledDssOn1TileAndComputeDssWhenGetTopo
 
     // half dss disabled on tile 0
     uint16_t tileId = 0;
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0});
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0});
 
     tileId = 1;
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff});
-    drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0});
+    drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0});
 
     DrmQueryTopologyData topologyData{};
     TopologyMap topologyMap{};
@@ -1068,9 +1068,9 @@ TEST(IoctlHelperXeTest, given2TileWithDisabledEvenDssAndComputeDssWhenGetTopolog
         // even dss disabled
         uint8_t data = 0b1010'1010;
 
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_GEOMETRY, 8, {0, 0, 0, 0, 0, 0, 0, 0});
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_DSS_COMPUTE, 8, {data, data, data, data, data, data, data, data});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_DSS_COMPUTE, 8, {data, data, data, data, data, data, data, data});
-        drm.addMockedQueryTopologyData(tileId, XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0});
+        drm.addMockedQueryTopologyData(tileId, DRM_XE_TOPO_EU_PER_DSS, 4, {0b1111'1111, 0, 0, 0});
     }
 
     DrmQueryTopologyData topologyData{};
@@ -1261,17 +1261,17 @@ TEST(IoctlHelperXeTest, whenCreatingMemoryInfoThenProperMemoryBanksAreDiscovered
     EXPECT_NE(nullptr, memoryInfo);
 
     auto memoryClassInstance0 = memoryInfo->getMemoryRegionClassAndInstance(0, *defaultHwInfo);
-    EXPECT_EQ(static_cast<uint16_t>(XE_MEM_REGION_CLASS_SYSMEM), memoryClassInstance0.memoryClass);
+    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_MEM_REGION_CLASS_SYSMEM), memoryClassInstance0.memoryClass);
     EXPECT_EQ(0u, memoryClassInstance0.memoryInstance);
     EXPECT_EQ(MemoryConstants::gigaByte, memoryInfo->getMemoryRegionSize(0));
 
     auto memoryClassInstance1 = memoryInfo->getMemoryRegionClassAndInstance(0b01, *defaultHwInfo);
-    EXPECT_EQ(static_cast<uint16_t>(XE_MEM_REGION_CLASS_VRAM), memoryClassInstance1.memoryClass);
+    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_MEM_REGION_CLASS_VRAM), memoryClassInstance1.memoryClass);
     EXPECT_EQ(2u, memoryClassInstance1.memoryInstance);
     EXPECT_EQ(4 * MemoryConstants::gigaByte, memoryInfo->getMemoryRegionSize(0b01));
 
     auto memoryClassInstance2 = memoryInfo->getMemoryRegionClassAndInstance(0b10, *defaultHwInfo);
-    EXPECT_EQ(static_cast<uint16_t>(XE_MEM_REGION_CLASS_VRAM), memoryClassInstance2.memoryClass);
+    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_MEM_REGION_CLASS_VRAM), memoryClassInstance2.memoryClass);
     EXPECT_EQ(1u, memoryClassInstance2.memoryInstance);
     EXPECT_EQ(2 * MemoryConstants::gigaByte, memoryInfo->getMemoryRegionSize(0b10));
 
@@ -1308,7 +1308,7 @@ TEST(IoctlHelperXeTest, givenNoMemoryRegionsWhenCreatingMemoryInfoThenMemoryInfo
     DrmMockXe drm{*executionEnvironment->rootDeviceEnvironments[0]};
     auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm);
 
-    auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_usage *>(drm.queryMemUsage);
+    auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_regions *>(drm.queryMemUsage);
     xeQueryMemUsage->num_regions = 0u;
     auto memoryInfo = xeIoctlHelper->createMemoryInfo();
     EXPECT_EQ(nullptr, memoryInfo);
@@ -1402,10 +1402,10 @@ TEST(IoctlHelperXeTest, whenCallingVmBindThenWaitUserFenceIsCalled) {
     auto &waitUserFence = drm.waitUserFenceInputs[0];
 
     EXPECT_EQ(fenceAddress, waitUserFence.addr);
-    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_EQ), waitUserFence.op);
+    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_OP_EQ), waitUserFence.op);
-    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_SOFT_OP), waitUserFence.flags);
+    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP), waitUserFence.flags);
     EXPECT_EQ(fenceValue, waitUserFence.value);
-    EXPECT_EQ(static_cast<uint64_t>(DRM_XE_UFENCE_WAIT_U64), waitUserFence.mask);
+    EXPECT_EQ(static_cast<uint64_t>(DRM_XE_UFENCE_WAIT_MASK_U64), waitUserFence.mask);
     EXPECT_EQ(static_cast<int64_t>(XE_ONE_SEC), waitUserFence.timeout);
     EXPECT_EQ(0u, waitUserFence.num_engines);
     EXPECT_EQ(0u, waitUserFence.instances);
@@ -1426,10 +1426,10 @@ TEST(IoctlHelperXeTest, whenCallingVmBindThenWaitUserFenceIsCalled) {
     auto &waitUserFence = drm.waitUserFenceInputs[0];
 
     EXPECT_EQ(fenceAddress, waitUserFence.addr);
-    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_EQ), waitUserFence.op);
+    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_OP_EQ), waitUserFence.op);
-    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_SOFT_OP), waitUserFence.flags);
+    EXPECT_EQ(static_cast<uint16_t>(DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP), waitUserFence.flags);
     EXPECT_EQ(fenceValue, waitUserFence.value);
-    EXPECT_EQ(static_cast<uint64_t>(DRM_XE_UFENCE_WAIT_U64), waitUserFence.mask);
+    EXPECT_EQ(static_cast<uint64_t>(DRM_XE_UFENCE_WAIT_MASK_U64), waitUserFence.mask);
     EXPECT_EQ(static_cast<int64_t>(XE_ONE_SEC), waitUserFence.timeout);
     EXPECT_EQ(0u, waitUserFence.num_engines);
     EXPECT_EQ(0u, waitUserFence.instances);
@@ -47,10 +47,10 @@ inline constexpr uint32_t testValueGemCreate = 0x8273;
 class DrmMockXe : public DrmMockCustom {
   public:
     DrmMockXe(RootDeviceEnvironment &rootDeviceEnvironment) : DrmMockCustom(rootDeviceEnvironment) {
-        auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_usage *>(queryMemUsage);
+        auto xeQueryMemUsage = reinterpret_cast<drm_xe_query_mem_regions *>(queryMemUsage);
         xeQueryMemUsage->num_regions = 3;
         xeQueryMemUsage->regions[0] = {
-            XE_MEM_REGION_CLASS_VRAM, // class
+            DRM_XE_MEM_REGION_CLASS_VRAM, // class
             1,                         // instance
             0,                         // padding
             MemoryConstants::pageSize, // min page size
@@ -58,15 +58,15 @@ class DrmMockXe : public DrmMockCustom {
             MemoryConstants::megaByte  // used size
         };
         xeQueryMemUsage->regions[1] = {
-            XE_MEM_REGION_CLASS_SYSMEM, // class
+            DRM_XE_MEM_REGION_CLASS_SYSMEM, // class
             0,                          // instance
             0,                          // padding
             MemoryConstants::pageSize,  // min page size
             MemoryConstants::gigaByte,  // total size
             MemoryConstants::kiloByte   // used size
         };
         xeQueryMemUsage->regions[2] = {
-            XE_MEM_REGION_CLASS_VRAM, // class
+            DRM_XE_MEM_REGION_CLASS_VRAM, // class
             2,                         // instance
             0,                         // padding
             MemoryConstants::pageSize, // min page size
@@ -77,25 +77,25 @@ class DrmMockXe : public DrmMockCustom {
         auto xeQueryGtList = reinterpret_cast<drm_xe_query_gt_list *>(queryGtList.begin());
         xeQueryGtList->num_gt = 3;
         xeQueryGtList->gt_list[0] = {
-            XE_QUERY_GT_TYPE_MAIN, // type
+            DRM_XE_QUERY_GT_TYPE_MAIN, // type
             0,                     // gt_id
             12500000,              // clock_freq
             0b100,                 // native mem regions
             0x011,                 // slow mem regions
         };
         xeQueryGtList->gt_list[1] = {
-            XE_QUERY_GT_TYPE_MEDIA, // type
+            DRM_XE_QUERY_GT_TYPE_MEDIA, // type
             1,                      // gt_id
             12500000,               // clock freq
             0b001,                  // native mem regions
             0x110,                  // slow mem regions
         };
         xeQueryGtList->gt_list[2] = {
-            XE_QUERY_GT_TYPE_MAIN, // type
+            DRM_XE_QUERY_GT_TYPE_MAIN, // type
             0,                     // gt_id
             12500000,              // clock freq
             0b010,                 // native mem regions
             0x101,                 // slow mem regions
         };
     }
 
@@ -169,7 +169,7 @@ class DrmMockXe : public DrmMockCustom {
         }
         deviceQuery->size = sizeof(queryEngines);
         break;
-    case DRM_XE_DEVICE_QUERY_MEM_USAGE:
+    case DRM_XE_DEVICE_QUERY_MEM_REGIONS:
        if (deviceQuery->data) {
            memcpy_s(reinterpret_cast<void *>(deviceQuery->data), deviceQuery->size, queryMemUsage, sizeof(queryMemUsage));
        }
third_party/uapi/drm/xe_drm.h (vendored): 206 lines changed
@@ -19,12 +19,12 @@ extern "C" {
 /**
  * DOC: uevent generated by xe on it's pci node.
  *
- * XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
+ * DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
  * fails. The value supplied with the event is always "NEEDS_RESET".
  * Additional information supplied is tile id and gt id of the gt unit for
  * which reset has failed.
  */
-#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
+#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
 
 /**
  * struct xe_user_extension - Base class for defining a chain of extensions
@@ -141,21 +141,22 @@ struct drm_xe_engine_class_instance {
 
 	__u16 engine_instance;
 	__u16 gt_id;
-	__u16 rsvd;
+	/** @pad: MBZ */
+	__u16 pad;
 };
 
 /**
  * enum drm_xe_memory_class - Supported memory classes.
  */
 enum drm_xe_memory_class {
-	/** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
-	XE_MEM_REGION_CLASS_SYSMEM = 0,
+	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
 	/**
-	 * @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
	 * represents the memory that is local to the device, which we
	 * call VRAM. Not valid on integrated platforms.
	 */
-	XE_MEM_REGION_CLASS_VRAM
+	DRM_XE_MEM_REGION_CLASS_VRAM
 };
 
 /**
@@ -215,7 +216,7 @@ struct drm_xe_query_mem_region {
	 * always equal the @total_size, since all of it will be CPU
	 * accessible.
	 *
-	 * Note this is only tracked for XE_MEM_REGION_CLASS_VRAM
+	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
	 * regions (for other types the value here will always equal
	 * zero).
	 */
@@ -227,7 +228,7 @@ struct drm_xe_query_mem_region {
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero. Note this is only currently tracked for
-	 * XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
	 * here will always be zero).
	 */
	__u64 cpu_visible_used;
@@ -290,13 +291,13 @@ struct drm_xe_query_engine_cycles {
 };
 
 /**
- * struct drm_xe_query_mem_usage - describe memory regions and usage
+ * struct drm_xe_query_mem_regions - describe memory regions
  *
  * If a query is made with a struct drm_xe_device_query where .query
- * is equal to DRM_XE_DEVICE_QUERY_MEM_USAGE, then the reply uses
+ * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
- * struct drm_xe_query_mem_usage in .data.
+ * struct drm_xe_query_mem_regions in .data.
  */
-struct drm_xe_query_mem_usage {
+struct drm_xe_query_mem_regions {
	/** @num_regions: number of memory regions returned in @regions */
	__u32 num_regions;
	/** @pad: MBZ */
@@ -320,12 +321,12 @@ struct drm_xe_query_config {
	/** @pad: MBZ */
	__u32 pad;
 
-#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
-#define XE_QUERY_CONFIG_FLAGS 1
+#define DRM_XE_QUERY_CONFIG_FLAGS 1
-#define XE_QUERY_CONFIG_FLAGS_HAS_VRAM (0x1 << 0)
+#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
-#define XE_QUERY_CONFIG_MIN_ALIGNMENT 2
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
-#define XE_QUERY_CONFIG_VA_BITS 3
+#define DRM_XE_QUERY_CONFIG_VA_BITS 3
-#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
	/** @info: array of elements containing the config info */
	__u64 info[];
 };
@@ -339,8 +340,8 @@ struct drm_xe_query_config {
  * implementing graphics and/or media operations.
  */
 struct drm_xe_query_gt {
-#define XE_QUERY_GT_TYPE_MAIN 0
+#define DRM_XE_QUERY_GT_TYPE_MAIN 0
-#define XE_QUERY_GT_TYPE_MEDIA 1
+#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
	/** @type: GT type: Main or Media */
	__u16 type;
	/** @gt_id: Unique ID of this GT within the PCI Device */
@@ -348,17 +349,19 @@ struct drm_xe_query_gt {
	/** @clock_freq: A clock frequency for timestamp */
	__u32 clock_freq;
	/**
-	 * @native_mem_regions: Bit mask of instances from
+	 * @near_mem_regions: Bit mask of instances from
-	 * drm_xe_query_mem_usage that lives on the same GPU/Tile and have
+	 * drm_xe_query_mem_regions that are nearest to the current engines
-	 * direct access.
+	 * of this GT.
	 */
-	__u64 native_mem_regions;
+	__u64 near_mem_regions;
	/**
-	 * @slow_mem_regions: Bit mask of instances from
+	 * @far_mem_regions: Bit mask of instances from
-	 * drm_xe_query_mem_usage that this GT can indirectly access, although
+	 * drm_xe_query_mem_regions that are far from the engines of this GT.
-	 * they live on a different GPU/Tile.
+	 * In general, they have extra indirections when compared to the
+	 * @near_mem_regions. For a discrete device this could mean system
+	 * memory and memory living in a different tile.
	 */
-	__u64 slow_mem_regions;
+	__u64 far_mem_regions;
	/** @reserved: Reserved */
	__u64 reserved[8];
 };
@@ -400,7 +403,7 @@ struct drm_xe_query_topology_mask {
 * DSS_GEOMETRY ff ff ff ff 00 00 00 00
 * means 32 DSS are available for geometry.
 */
-#define XE_TOPO_DSS_GEOMETRY (1 << 0)
+#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
 /*
 * To query the mask of Dual Sub Slices (DSS) available for compute
 * operations. For example a query response containing the following
@@ -408,7 +411,7 @@ struct drm_xe_query_topology_mask {
 * DSS_COMPUTE ff ff ff ff 00 00 00 00
 * means 32 DSS are available for compute.
 */
-#define XE_TOPO_DSS_COMPUTE (1 << 1)
+#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
 /*
 * To query the mask of Execution Units (EU) available per Dual Sub
 * Slices (DSS). For example a query response containing the following
@@ -416,7 +419,7 @@ struct drm_xe_query_topology_mask {
 * EU_PER_DSS ff ff 00 00 00 00 00 00
 * means each DSS has 16 EU.
 */
-#define XE_TOPO_EU_PER_DSS (1 << 2)
+#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
	/** @type: type of mask */
	__u16 type;
 
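The doc comments above ("ff ff ff ff 00 00 00 00" meaning 32 DSS) imply that consumers simply count set bits per mask; a hedged sketch of that decode (the helper name is illustrative):

    #include <bitset>
    #include <cstddef>
    #include <cstdint>

    // Count enabled units in a topology mask blob; a DRM_XE_TOPO_DSS_COMPUTE
    // mask of "ff ff ff ff 00 00 00 00" yields 32, as in the comment above.
    inline unsigned countEnabledBits(const uint8_t *mask, size_t numBytes) {
        unsigned count = 0;
        for (size_t i = 0; i < numBytes; ++i) {
            count += std::bitset<8>(mask[i]).count();
        }
        return count;
    }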
@@ -467,7 +470,7 @@ struct drm_xe_device_query {
 	__u64 extensions;

 #define DRM_XE_DEVICE_QUERY_ENGINES 0
-#define DRM_XE_DEVICE_QUERY_MEM_USAGE 1
+#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
 #define DRM_XE_DEVICE_QUERY_CONFIG 2
 #define DRM_XE_DEVICE_QUERY_GT_LIST 3
 #define DRM_XE_DEVICE_QUERY_HWCONFIG 4
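Only the query id changes here (MEM_USAGE becomes MEM_REGIONS); the usual two-pass calling convention is untouched. A hedged sketch, assuming the query/size/data fields of struct drm_xe_device_query and DRM_IOCTL_XE_DEVICE_QUERY from outside this hunk:

    struct drm_xe_device_query query = {
            .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS, /* was ..._MEM_USAGE */
    };

    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); /* first pass fills query.size */
    void *regions = malloc(query.size);
    query.data = (uintptr_t)regions;
    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); /* second pass fills the buffer */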
@@ -497,8 +500,8 @@ struct drm_xe_gem_create {
 	 */
 	__u64 size;

-#define XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
-#define XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
 /*
  * When using VRAM as a possible placement, ensure that the corresponding VRAM
  * allocation will always use the CPU accessible part of VRAM. This is important
@@ -514,7 +517,7 @@ struct drm_xe_gem_create {
  * display surfaces, therefore the kernel requires setting this flag for such
  * objects, otherwise an error is thrown on small-bar systems.
  */
-#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
 	/**
 	 * @flags: Flags, currently a mask of memory instances of where BO can
 	 * be placed
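Per the @flags documentation above, the field combines a placement mask with the renamed creation flags. A minimal sketch, assuming DRM_IOCTL_XE_GEM_CREATE, a handle out-field, and a vram_placement_mask derived from the memory-regions query (all hypothetical here):

    struct drm_xe_gem_create create = {
            .size = 4096,
            .flags = vram_placement_mask /* memory instances */
                   | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM, /* small-bar safe */
    };

    ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create); /* create.handle on success */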
@@ -581,14 +584,14 @@ struct drm_xe_ext_set_property {
 };

 struct drm_xe_vm_create {
-#define XE_VM_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_VM_EXTENSION_SET_PROPERTY 0
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;

-#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0)
-#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1)
-#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2)
-#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3)
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE (1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT (1 << 2)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 3)
 	/** @flags: Flags */
 	__u32 flags;

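The VM-create flags gain a _FLAG_ infix while keeping their bit positions. A sketch of creating a VM under the new names, assuming DRM_IOCTL_XE_VM_CREATE and the vm_id out-field from the surrounding header:

    struct drm_xe_vm_create vm = {
            .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE |
                     DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT,
    };

    ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm); /* vm.vm_id identifies the new VM */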
@@ -644,34 +647,38 @@ struct drm_xe_vm_bind_op {
 	 */
 	__u64 tile_mask;

-#define XE_VM_BIND_OP_MAP 0x0
-#define XE_VM_BIND_OP_UNMAP 0x1
-#define XE_VM_BIND_OP_MAP_USERPTR 0x2
-#define XE_VM_BIND_OP_UNMAP_ALL 0x3
-#define XE_VM_BIND_OP_PREFETCH 0x4
+#define DRM_XE_VM_BIND_OP_MAP 0x0
+#define DRM_XE_VM_BIND_OP_UNMAP 0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
 	/** @op: Bind operation to perform */
 	__u32 op;

-#define XE_VM_BIND_FLAG_READONLY (0x1 << 0)
-#define XE_VM_BIND_FLAG_ASYNC (0x1 << 1)
+#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
+#define DRM_XE_VM_BIND_FLAG_ASYNC (1 << 1)
 	/*
 	 * Valid on a faulting VM only, do the MAP operation immediately rather
 	 * than deferring the MAP to the page fault handler.
 	 */
-#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 2)
 	/*
 	 * When the NULL flag is set, the page tables are setup with a special
 	 * bit which indicates writes are dropped and all reads return zero. In
-	 * the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP
+	 * the future, the NULL flags will only be valid for DRM_XE_VM_BIND_OP_MAP
 	 * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
 	 * intended to implement VK sparse bindings.
 	 */
-#define XE_VM_BIND_FLAG_NULL (0x1 << 3)
+#define DRM_XE_VM_BIND_FLAG_NULL (1 << 3)
 	/** @flags: Bind flags */
 	__u32 flags;

-	/** @mem_region: Memory region to prefetch VMA to, instance not a mask */
-	__u32 region;
+	/**
+	 * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
+	 * It is a region instance, not a mask.
+	 * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
+	 */
+	__u32 prefetch_mem_region_instance;

 	/** @reserved: Reserved */
 	__u64 reserved[2];
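Beyond the DRM_XE_ prefixes, the substantive change is @region becoming @prefetch_mem_region_instance, which spells out that the field only matters for prefetch. A minimal sketch of a prefetch bind op under the new names (addr/range and the containing drm_xe_vm_bind are assumed from outside this hunk):

    struct drm_xe_vm_bind_op op = {
            .op = DRM_XE_VM_BIND_OP_PREFETCH,
            .flags = DRM_XE_VM_BIND_FLAG_ASYNC,
            .prefetch_mem_region_instance = 0, /* one region instance, not a mask */
    };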
@@ -721,19 +728,19 @@ struct drm_xe_vm_bind {
 	__u64 reserved[2];
 };

-/* For use with XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
+/* For use with DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */

 /* Monitor 128KB contiguous region with 4K sub-granularity */
-#define XE_ACC_GRANULARITY_128K 0
+#define DRM_XE_ACC_GRANULARITY_128K 0

 /* Monitor 2MB contiguous region with 64KB sub-granularity */
-#define XE_ACC_GRANULARITY_2M 1
+#define DRM_XE_ACC_GRANULARITY_2M 1

 /* Monitor 16MB contiguous region with 512KB sub-granularity */
-#define XE_ACC_GRANULARITY_16M 2
+#define DRM_XE_ACC_GRANULARITY_16M 2

 /* Monitor 64MB contiguous region with 2M sub-granularity */
-#define XE_ACC_GRANULARITY_64M 3
+#define DRM_XE_ACC_GRANULARITY_64M 3

 /**
  * struct drm_xe_exec_queue_set_property - exec queue set property
@@ -747,14 +754,14 @@ struct drm_xe_exec_queue_set_property {
 	/** @exec_queue_id: Exec queue ID */
 	__u32 exec_queue_id;

-#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
-#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
-#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
-#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
-#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
 	/** @property: property to set */
 	__u32 property;

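The renamed ACC property pairs with the DRM_XE_ACC_GRANULARITY_* values above. A sketch of requesting 2MB/64KB access-counter granularity, assuming a value field in struct drm_xe_exec_queue_set_property (not visible in this hunk):

    struct drm_xe_exec_queue_set_property prop = {
            .exec_queue_id = exec_queue_id, /* hypothetical: a created queue */
            .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY,
            .value = DRM_XE_ACC_GRANULARITY_2M, /* 2MB region, 64KB sub-granularity */
    };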
@@ -766,7 +773,7 @@ struct drm_xe_exec_queue_set_property {
 };

 struct drm_xe_exec_queue_create {
-#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;

@@ -805,7 +812,7 @@ struct drm_xe_exec_queue_get_property {
 	/** @exec_queue_id: Exec queue ID */
 	__u32 exec_queue_id;

-#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
 	/** @property: property to get */
 	__u32 property;

@@ -831,11 +838,11 @@ struct drm_xe_sync {
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;

-#define DRM_XE_SYNC_SYNCOBJ 0x0
-#define DRM_XE_SYNC_TIMELINE_SYNCOBJ 0x1
-#define DRM_XE_SYNC_DMA_BUF 0x2
-#define DRM_XE_SYNC_USER_FENCE 0x3
-#define DRM_XE_SYNC_SIGNAL 0x10
+#define DRM_XE_SYNC_FLAG_SYNCOBJ 0x0
+#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_FLAG_DMA_BUF 0x2
+#define DRM_XE_SYNC_FLAG_USER_FENCE 0x3
+#define DRM_XE_SYNC_FLAG_SIGNAL 0x10
 	__u32 flags;

 	/** @pad: MBZ */
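The sync types and the SIGNAL modifier now uniformly carry _FLAG_, even though the low bits still select a type. A sketch of a signalling syncobj entry, assuming the handle field from the rest of struct drm_xe_sync:

    struct drm_xe_sync sync = {
            .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
            .handle = syncobj_handle, /* hypothetical: an existing syncobj */
    };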
@@ -912,17 +919,17 @@ struct drm_xe_wait_user_fence {
 	 */
 	__u64 addr;

-#define DRM_XE_UFENCE_WAIT_EQ 0
-#define DRM_XE_UFENCE_WAIT_NEQ 1
-#define DRM_XE_UFENCE_WAIT_GT 2
-#define DRM_XE_UFENCE_WAIT_GTE 3
-#define DRM_XE_UFENCE_WAIT_LT 4
-#define DRM_XE_UFENCE_WAIT_LTE 5
+#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
+#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
+#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
+#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
+#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
+#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
 	/** @op: wait operation (type of comparison) */
 	__u16 op;

-#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
-#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1)
+#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 1)
 	/** @flags: wait flags */
 	__u16 flags;

@@ -932,18 +939,19 @@ struct drm_xe_wait_user_fence {
 	/** @value: compare value */
 	__u64 value;

-#define DRM_XE_UFENCE_WAIT_U8 0xffu
-#define DRM_XE_UFENCE_WAIT_U16 0xffffu
-#define DRM_XE_UFENCE_WAIT_U32 0xffffffffu
-#define DRM_XE_UFENCE_WAIT_U64 0xffffffffffffffffu
+#define DRM_XE_UFENCE_WAIT_MASK_U8 0xffu
+#define DRM_XE_UFENCE_WAIT_MASK_U16 0xffffu
+#define DRM_XE_UFENCE_WAIT_MASK_U32 0xffffffffu
+#define DRM_XE_UFENCE_WAIT_MASK_U64 0xffffffffffffffffu
 	/** @mask: comparison mask */
 	__u64 mask;

 	/**
 	 * @timeout: how long to wait before bailing, value in nanoseconds.
-	 * Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout)
+	 * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
 	 * it contains timeout expressed in nanoseconds to wait (fence will
 	 * expire at now() + timeout).
-	 * When DRM_XE_UFENCE_WAIT_ABSTIME flat is set (absolute timeout) wait
+	 * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flat is set (absolute timeout) wait
 	 * will end at timeout (uses system MONOTONIC_CLOCK).
 	 * Passing negative timeout leads to neverending wait.
 	 *
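Putting the renamed ops, flags and masks together: a hedged sketch of a relative-timeout wait on a 64-bit fence value, assuming DRM_IOCTL_XE_WAIT_USER_FENCE and a fence_addr the GPU writes to:

    struct drm_xe_wait_user_fence wait = {
            .addr = fence_addr, /* qword the GPU updates */
            .op = DRM_XE_UFENCE_WAIT_OP_EQ,
            .value = expected_value,
            .mask = DRM_XE_UFENCE_WAIT_MASK_U64, /* compare all 64 bits */
            .timeout = 1000000000, /* 1 s, relative without ..._FLAG_ABSTIME */
    };

    ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);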
@@ -956,13 +964,13 @@ struct drm_xe_wait_user_fence {

 	/**
 	 * @num_engines: number of engine instances to wait on, must be zero
-	 * when DRM_XE_UFENCE_WAIT_SOFT_OP set
+	 * when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
 	 */
 	__u64 num_engines;

 	/**
 	 * @instances: user pointer to array of drm_xe_engine_class_instance to
-	 * wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set
+	 * wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
 	 */
 	__u64 instances;

@@ -973,11 +981,11 @@ struct drm_xe_wait_user_fence {
 /**
  * DOC: XE PMU event config IDs
  *
- * Check 'man perf_event_open' to use the ID's XE_PMU_XXXX listed in xe_drm.h
+ * Check 'man perf_event_open' to use the ID's DRM_XE_PMU_XXXX listed in xe_drm.h
  * in 'struct perf_event_attr' as part of perf_event_open syscall to read a
  * particular event.
  *
- * For example to open the XE_PMU_RENDER_GROUP_BUSY(0):
+ * For example to open the DRMXE_PMU_RENDER_GROUP_BUSY(0):
  *
 * .. code-block:: C
 *
@@ -991,7 +999,7 @@ struct drm_xe_wait_user_fence {
 *	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
 *	attr.use_clockid = 1;
 *	attr.clockid = CLOCK_MONOTONIC;
- *	attr.config = XE_PMU_RENDER_GROUP_BUSY(0);
+ *	attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0);
 *
 *	fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 */
@@ -999,15 +1007,15 @@ struct drm_xe_wait_user_fence {
 /*
  * Top bits of every counter are GT id.
  */
-#define __XE_PMU_GT_SHIFT (56)
+#define __DRM_XE_PMU_GT_SHIFT (56)

-#define ___XE_PMU_OTHER(gt, x) \
-	(((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT))
+#define ___DRM_XE_PMU_OTHER(gt, x) \
+	(((__u64)(x)) | ((__u64)(gt) << __DRM_XE_PMU_GT_SHIFT))

-#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 0)
-#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1)
-#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2)
-#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3)
+#define DRM_XE_PMU_RENDER_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 0)
+#define DRM_XE_PMU_COPY_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 1)
+#define DRM_XE_PMU_MEDIA_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 2)
+#define DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 3)

 #if defined(__cplusplus)
 }
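Expanding the in-header perf example with the renamed macros: a minimal standalone sketch; the sysfs path for the PMU type is the usual event-source convention, not something this header guarantees:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    /* pmu_type comes from /sys/bus/event_source/devices/xe/type (assumed path). */
    int xe_open_render_busy(int pmu_type)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.type = pmu_type;
            attr.size = sizeof(attr);
            attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
            attr.use_clockid = 1;
            attr.clockid = CLOCK_MONOTONIC;
            attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0); /* GT id in top bits */

            return syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, -1, 0);
    }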