fix: align NEO code to new uAPI header

Related-To: NEO-9566
Signed-off-by: Naklicki, Mateusz <mateusz.naklicki@intel.com>
Naklicki, Mateusz authored on 2023-11-27 13:16:31 +00:00, committed by Compute-Runtime-Automation
parent a02ac1c140
commit dc29c08abd
5 changed files with 286 additions and 278 deletions


@@ -19,12 +19,12 @@ extern "C" {
/**
* DOC: uevent generated by xe on its pci node.
*
* XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
* DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
* fails. The value supplied with the event is always "NEEDS_RESET".
* Additional information supplied is tile id and gt id of the gt unit for
* which reset has failed.
*/
#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
/**
* struct xe_user_extension - Base class for defining a chain of extensions
@@ -141,21 +141,22 @@ struct drm_xe_engine_class_instance {
__u16 engine_instance;
__u16 gt_id;
__u16 rsvd;
/** @pad: MBZ */
__u16 pad;
};
/**
* enum drm_xe_memory_class - Supported memory classes.
*/
enum drm_xe_memory_class {
/** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
XE_MEM_REGION_CLASS_SYSMEM = 0,
/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
/**
* @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
* @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
* represents the memory that is local to the device, which we
* call VRAM. Not valid on integrated platforms.
*/
XE_MEM_REGION_CLASS_VRAM
DRM_XE_MEM_REGION_CLASS_VRAM
};
/**
@@ -215,7 +216,7 @@ struct drm_xe_query_mem_region {
* always equal the @total_size, since all of it will be CPU
* accessible.
*
* Note this is only tracked for XE_MEM_REGION_CLASS_VRAM
* Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
* regions (for other types the value here will always equal
* zero).
*/
@@ -227,7 +228,7 @@ struct drm_xe_query_mem_region {
* Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
* accounting. Without this the value here will always equal
* zero. Note this is only currently tracked for
* XE_MEM_REGION_CLASS_VRAM regions (for other types the value
* DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
* here will always be zero).
*/
__u64 cpu_visible_used;
@@ -290,13 +291,13 @@ struct drm_xe_query_engine_cycles {
};
/**
* struct drm_xe_query_mem_usage - describe memory regions and usage
* struct drm_xe_query_mem_regions - describe memory regions
*
* If a query is made with a struct drm_xe_device_query where .query
* is equal to DRM_XE_DEVICE_QUERY_MEM_USAGE, then the reply uses
* struct drm_xe_query_mem_usage in .data.
* is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
* struct drm_xe_query_mem_regions in .data.
*/
struct drm_xe_query_mem_usage {
struct drm_xe_query_mem_regions {
/** @num_regions: number of memory regions returned in @regions */
__u32 num_regions;
/** @pad: MBZ */
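
As a quick reference for how the renamed query is meant to be driven, here is a minimal sketch of the usual two-call pattern (the first call sizes the buffer, the second fills it). It assumes the DRM_IOCTL_XE_DEVICE_QUERY wrapper defined elsewhere in this header; error handling is reduced to returning NULL.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "xe_drm.h"

static struct drm_xe_query_mem_regions *query_mem_regions(int fd)
{
	struct drm_xe_device_query query = {
		.query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
	};
	struct drm_xe_query_mem_regions *regions;

	/* First call with .size == 0: the kernel reports the required size. */
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
		return NULL;

	regions = calloc(1, query.size);
	if (!regions)
		return NULL;

	/* Second call: the kernel copies the payload into the buffer. */
	query.data = (uintptr_t)regions;
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
		free(regions);
		return NULL;
	}

	/* Caller frees; regions->num_regions entries follow in ->regions[]. */
	return regions;
}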
@@ -320,12 +321,12 @@ struct drm_xe_query_config {
/** @pad: MBZ */
__u32 pad;
#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define XE_QUERY_CONFIG_FLAGS 1
#define XE_QUERY_CONFIG_FLAGS_HAS_VRAM (0x1 << 0)
#define XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define XE_QUERY_CONFIG_VA_BITS 3
#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
/** @info: array of elements containing the config info */
__u64 info[];
};
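
To show how the renamed config indices and the HAS_VRAM flag are consumed, a small sketch reusing the two-call query pattern and includes from the mem-regions example above; error checking is omitted for brevity.

static int query_has_vram(int fd, uint32_t *va_bits)
{
	struct drm_xe_device_query query = {
		.query = DRM_XE_DEVICE_QUERY_CONFIG,
	};
	struct drm_xe_query_config *config;
	int has_vram;

	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);	/* size the buffer */
	config = calloc(1, query.size);
	query.data = (uintptr_t)config;
	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);	/* fill it */

	/* The defines above are indices into info[], except the FLAG_* bits. */
	has_vram = !!(config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
		      DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);
	*va_bits = config->info[DRM_XE_QUERY_CONFIG_VA_BITS];

	free(config);
	return has_vram;
}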
@@ -339,8 +340,8 @@ struct drm_xe_query_config {
* implementing graphics and/or media operations.
*/
struct drm_xe_query_gt {
#define XE_QUERY_GT_TYPE_MAIN 0
#define XE_QUERY_GT_TYPE_MEDIA 1
#define DRM_XE_QUERY_GT_TYPE_MAIN 0
#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
/** @type: GT type: Main or Media */
__u16 type;
/** @gt_id: Unique ID of this GT within the PCI Device */
@@ -348,17 +349,19 @@ struct drm_xe_query_gt {
/** @clock_freq: A clock frequency for timestamp */
__u32 clock_freq;
/**
* @native_mem_regions: Bit mask of instances from
* drm_xe_query_mem_usage that lives on the same GPU/Tile and have
* direct access.
* @near_mem_regions: Bit mask of instances from
* drm_xe_query_mem_regions that are nearest to the current engines
* of this GT.
*/
__u64 native_mem_regions;
__u64 near_mem_regions;
/**
* @slow_mem_regions: Bit mask of instances from
* drm_xe_query_mem_usage that this GT can indirectly access, although
* they live on a different GPU/Tile.
* @far_mem_regions: Bit mask of instances from
* drm_xe_query_mem_regions that are far from the engines of this GT.
* In general, they have extra indirections when compared to the
* @near_mem_regions. For a discrete device this could mean system
* memory and memory living in a different tile.
*/
__u64 slow_mem_regions;
__u64 far_mem_regions;
/** @reserved: Reserved */
__u64 reserved[8];
};
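
A short sketch of how the renamed masks can be read when picking a placement for a given GT. It assumes the bit index in @near_mem_regions/@far_mem_regions matches the instance numbering reported by the mem-regions query, which is how the placement mask elsewhere in this header is interpreted.

/* Return a placement mask preferring regions near this GT, falling back
 * to far regions if none of the wanted instances are near. */
static uint64_t preferred_placement(const struct drm_xe_query_gt *gt,
				    uint64_t wanted_instances)
{
	uint64_t near = gt->near_mem_regions & wanted_instances;

	return near ? near : (gt->far_mem_regions & wanted_instances);
}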
@@ -400,7 +403,7 @@ struct drm_xe_query_topology_mask {
* DSS_GEOMETRY ff ff ff ff 00 00 00 00
* means 32 DSS are available for geometry.
*/
#define XE_TOPO_DSS_GEOMETRY (1 << 0)
#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
/*
* To query the mask of Dual Sub Slices (DSS) available for compute
* operations. For example a query response containing the following
@@ -408,7 +411,7 @@ struct drm_xe_query_topology_mask {
* DSS_COMPUTE ff ff ff ff 00 00 00 00
* means 32 DSS are available for compute.
*/
#define XE_TOPO_DSS_COMPUTE (1 << 1)
#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
/*
* To query the mask of Execution Units (EU) available per Dual Sub
* Slices (DSS). For example a query response containing the following
@@ -416,7 +419,7 @@ struct drm_xe_query_topology_mask {
* EU_PER_DSS ff ff 00 00 00 00 00 00
* means each DSS has 16 EU.
*/
#define XE_TOPO_EU_PER_DSS (1 << 2)
#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
/** @type: type of mask */
__u16 type;
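
To illustrate how a returned topology record is decoded with the renamed DRM_XE_TOPO_* values, a minimal sketch that counts the DSS available for geometry; the @num_bytes and @mask fields belong to this struct but sit outside the hunk shown here, so their names are an assumption.

static unsigned int dss_geometry_count(const struct drm_xe_query_topology_mask *topo)
{
	unsigned int count = 0;
	uint32_t i;

	if (topo->type != DRM_XE_TOPO_DSS_GEOMETRY)
		return 0;

	/* Each set bit in the mask is one DSS usable for geometry. */
	for (i = 0; i < topo->num_bytes; i++)
		count += __builtin_popcount(topo->mask[i]);

	return count;
}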
@@ -467,7 +470,7 @@ struct drm_xe_device_query {
__u64 extensions;
#define DRM_XE_DEVICE_QUERY_ENGINES 0
#define DRM_XE_DEVICE_QUERY_MEM_USAGE 1
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
#define DRM_XE_DEVICE_QUERY_CONFIG 2
#define DRM_XE_DEVICE_QUERY_GT_LIST 3
#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
@@ -497,8 +500,8 @@ struct drm_xe_gem_create {
*/
__u64 size;
#define XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
#define XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
/*
* When using VRAM as a possible placement, ensure that the corresponding VRAM
* allocation will always use the CPU accessible part of VRAM. This is important
@@ -514,7 +517,7 @@ struct drm_xe_gem_create {
* display surfaces, therefore the kernel requires setting this flag for such
* objects, otherwise an error is thrown on small-bar systems.
*/
#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
/**
* @flags: Flags, currently a mask of memory instances of where BO can
* be placed
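
A hedged sketch of how the renamed GEM create flags combine with the placement mask carried in @flags; DRM_IOCTL_XE_GEM_CREATE and the .handle output field are assumed from the rest of this header, and vram_instance is assumed to come from the mem-regions query above.

static int create_visible_vram_bo(int fd, uint64_t size,
				  uint32_t vram_instance, uint32_t *handle)
{
	struct drm_xe_gem_create create = {
		.size = size,
		/* low bits: placement mask; high bits: the flags defined above */
		.flags = (1u << vram_instance) |
			 DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
	};

	if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
		return -1;

	*handle = create.handle;
	return 0;
}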
@@ -581,14 +584,14 @@ struct drm_xe_ext_set_property {
};
struct drm_xe_vm_create {
#define XE_VM_EXTENSION_SET_PROPERTY 0
#define DRM_XE_VM_EXTENSION_SET_PROPERTY 0
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0)
#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1)
#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2)
#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3)
#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE (1 << 1)
#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT (1 << 2)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 3)
/** @flags: Flags */
__u32 flags;
@@ -644,34 +647,38 @@ struct drm_xe_vm_bind_op {
*/
__u64 tile_mask;
#define XE_VM_BIND_OP_MAP 0x0
#define XE_VM_BIND_OP_UNMAP 0x1
#define XE_VM_BIND_OP_MAP_USERPTR 0x2
#define XE_VM_BIND_OP_UNMAP_ALL 0x3
#define XE_VM_BIND_OP_PREFETCH 0x4
#define DRM_XE_VM_BIND_OP_MAP 0x0
#define DRM_XE_VM_BIND_OP_UNMAP 0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
/** @op: Bind operation to perform */
__u32 op;
#define XE_VM_BIND_FLAG_READONLY (0x1 << 0)
#define XE_VM_BIND_FLAG_ASYNC (0x1 << 1)
#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
#define DRM_XE_VM_BIND_FLAG_ASYNC (1 << 1)
/*
* Valid on a faulting VM only, do the MAP operation immediately rather
* than deferring the MAP to the page fault handler.
*/
#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 2)
/*
* When the NULL flag is set, the page tables are setup with a special
* bit which indicates writes are dropped and all reads return zero. In
* the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP
* the future, the NULL flags will only be valid for DRM_XE_VM_BIND_OP_MAP
* operations, the BO handle MBZ, and the BO offset MBZ. This flag is
* intended to implement VK sparse bindings.
*/
#define XE_VM_BIND_FLAG_NULL (0x1 << 3)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 3)
/** @flags: Bind flags */
__u32 flags;
/** @mem_region: Memory region to prefetch VMA to, instance not a mask */
__u32 region;
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
* To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
*/
__u32 prefetch_mem_region_instance;
/** @reserved: Reserved */
__u64 reserved[2];
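
For orientation, a minimal sketch of a single-op bind using the renamed DRM_XE_VM_BIND_OP_* names; it assumes DRM_IOCTL_XE_VM_BIND, the single-op .bind member of struct drm_xe_vm_bind, and the .obj/.obj_offset/.range/.addr fields earlier in this struct. @prefetch_mem_region_instance would only be filled for a DRM_XE_VM_BIND_OP_PREFETCH op.

static int bind_bo(int fd, uint32_t vm_id, uint32_t bo_handle,
		   uint64_t bo_size, uint64_t gpu_va)
{
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind = {
			.obj = bo_handle,
			.obj_offset = 0,
			.range = bo_size,
			.addr = gpu_va,
			.op = DRM_XE_VM_BIND_OP_MAP,
			.flags = 0,	/* synchronous bind, no extra flags */
		},
	};

	return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}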
@@ -721,19 +728,19 @@ struct drm_xe_vm_bind {
__u64 reserved[2];
};
/* For use with XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
/* For use with DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
/* Monitor 128KB contiguous region with 4K sub-granularity */
#define XE_ACC_GRANULARITY_128K 0
#define DRM_XE_ACC_GRANULARITY_128K 0
/* Monitor 2MB contiguous region with 64KB sub-granularity */
#define XE_ACC_GRANULARITY_2M 1
#define DRM_XE_ACC_GRANULARITY_2M 1
/* Monitor 16MB contiguous region with 512KB sub-granularity */
#define XE_ACC_GRANULARITY_16M 2
#define DRM_XE_ACC_GRANULARITY_16M 2
/* Monitor 64MB contiguous region with 2M sub-granularity */
#define XE_ACC_GRANULARITY_64M 3
#define DRM_XE_ACC_GRANULARITY_64M 3
/**
* struct drm_xe_exec_queue_set_property - exec queue set property
@@ -747,14 +754,14 @@ struct drm_xe_exec_queue_set_property {
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;
#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
/** @property: property to set */
__u32 property;
@@ -766,7 +773,7 @@ struct drm_xe_exec_queue_set_property {
};
struct drm_xe_exec_queue_create {
#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@@ -805,7 +812,7 @@ struct drm_xe_exec_queue_get_property {
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;
#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
/** @property: property to get */
__u32 property;
@@ -831,11 +838,11 @@ struct drm_xe_sync {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
#define DRM_XE_SYNC_SYNCOBJ 0x0
#define DRM_XE_SYNC_TIMELINE_SYNCOBJ 0x1
#define DRM_XE_SYNC_DMA_BUF 0x2
#define DRM_XE_SYNC_USER_FENCE 0x3
#define DRM_XE_SYNC_SIGNAL 0x10
#define DRM_XE_SYNC_FLAG_SYNCOBJ 0x0
#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ 0x1
#define DRM_XE_SYNC_FLAG_DMA_BUF 0x2
#define DRM_XE_SYNC_FLAG_USER_FENCE 0x3
#define DRM_XE_SYNC_FLAG_SIGNAL 0x10
__u32 flags;
/** @pad: MBZ */
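
As a usage hint for the renamed sync flags, a tiny sketch of a sync entry that signals a syncobj on completion; syncobj_handle is assumed to come from the generic DRM_IOCTL_SYNCOBJ_CREATE ioctl, and the anonymous-union .handle member is taken from the full struct definition outside this hunk.

static struct drm_xe_sync signal_syncobj(uint32_t syncobj_handle)
{
	struct drm_xe_sync sync = {
		.flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
		.handle = syncobj_handle,	/* signalled on completion */
	};

	return sync;
}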
@@ -912,17 +919,17 @@ struct drm_xe_wait_user_fence {
*/
__u64 addr;
#define DRM_XE_UFENCE_WAIT_EQ 0
#define DRM_XE_UFENCE_WAIT_NEQ 1
#define DRM_XE_UFENCE_WAIT_GT 2
#define DRM_XE_UFENCE_WAIT_GTE 3
#define DRM_XE_UFENCE_WAIT_LT 4
#define DRM_XE_UFENCE_WAIT_LTE 5
#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
/** @op: wait operation (type of comparison) */
__u16 op;
#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1)
#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 1)
/** @flags: wait flags */
__u16 flags;
@@ -932,18 +939,19 @@ struct drm_xe_wait_user_fence {
/** @value: compare value */
__u64 value;
#define DRM_XE_UFENCE_WAIT_U8 0xffu
#define DRM_XE_UFENCE_WAIT_U16 0xffffu
#define DRM_XE_UFENCE_WAIT_U32 0xffffffffu
#define DRM_XE_UFENCE_WAIT_U64 0xffffffffffffffffu
#define DRM_XE_UFENCE_WAIT_MASK_U8 0xffu
#define DRM_XE_UFENCE_WAIT_MASK_U16 0xffffu
#define DRM_XE_UFENCE_WAIT_MASK_U32 0xffffffffu
#define DRM_XE_UFENCE_WAIT_MASK_U64 0xffffffffffffffffu
/** @mask: comparison mask */
__u64 mask;
/**
* @timeout: how long to wait before bailing, value in nanoseconds.
* Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout)
* Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
* it contains timeout expressed in nanoseconds to wait (fence will
* expire at now() + timeout).
* When DRM_XE_UFENCE_WAIT_ABSTIME flag is set (absolute timeout) wait
* When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout) wait
* will end at timeout (uses system MONOTONIC_CLOCK).
* Passing negative timeout leads to neverending wait.
*
@@ -956,13 +964,13 @@ struct drm_xe_wait_user_fence {
/**
* @num_engines: number of engine instances to wait on, must be zero
* when DRM_XE_UFENCE_WAIT_SOFT_OP set
* when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
*/
__u64 num_engines;
/**
* @instances: user pointer to array of drm_xe_engine_class_instance to
* wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set
* wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
*/
__u64 instances;
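
Pulling the renamed op, flag and mask names together, a hedged sketch of waiting on a user fence written by a VM bind; DRM_IOCTL_XE_WAIT_USER_FENCE is assumed from the ioctl table of this header, and fence_addr/expected stand in for whatever was supplied with the bind.

static int wait_bind_ufence(int fd, uint64_t fence_addr, uint64_t expected)
{
	struct drm_xe_wait_user_fence wait = {
		.addr = fence_addr,
		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
		.flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP,	/* waiting on a VM bind */
		.value = expected,
		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
		.timeout = -1,		/* negative timeout: wait until signalled */
		.num_engines = 0,	/* must be zero with SOFT_OP */
		.instances = 0,		/* must be NULL with SOFT_OP */
	};

	return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
}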
@@ -973,11 +981,11 @@ struct drm_xe_wait_user_fence {
/**
* DOC: XE PMU event config IDs
*
* Check 'man perf_event_open' to use the ID's XE_PMU_XXXX listed in xe_drm.h
* Check 'man perf_event_open' to use the ID's DRM_XE_PMU_XXXX listed in xe_drm.h
* in 'struct perf_event_attr' as part of perf_event_open syscall to read a
* particular event.
*
* For example to open the XE_PMU_RENDER_GROUP_BUSY(0):
* For example to open the DRM_XE_PMU_RENDER_GROUP_BUSY(0):
*
* .. code-block:: C
*
@@ -991,7 +999,7 @@ struct drm_xe_wait_user_fence {
* attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
* attr.use_clockid = 1;
* attr.clockid = CLOCK_MONOTONIC;
* attr.config = XE_PMU_RENDER_GROUP_BUSY(0);
* attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0);
*
* fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
*/
@@ -999,15 +1007,15 @@ struct drm_xe_wait_user_fence {
/*
* Top bits of every counter are GT id.
*/
#define __XE_PMU_GT_SHIFT (56)
#define __DRM_XE_PMU_GT_SHIFT (56)
#define ___XE_PMU_OTHER(gt, x) \
(((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT))
#define ___DRM_XE_PMU_OTHER(gt, x) \
(((__u64)(x)) | ((__u64)(gt) << __DRM_XE_PMU_GT_SHIFT))
#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 0)
#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1)
#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2)
#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3)
#define DRM_XE_PMU_RENDER_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 0)
#define DRM_XE_PMU_COPY_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 1)
#define DRM_XE_PMU_MEDIA_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 2)
#define DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 3)
#if defined(__cplusplus)
}