build: Update xe debug uapi header

Signed-off-by: Brandon Yates <brandon.yates@intel.com>
commit a7dc19cf9a (parent 7a404e0f52)
Author: Brandon Yates
Date: 2024-06-07 20:43:12 +00:00
Committed-by: Compute-Runtime-Automation

5 changed files with 64 additions and 29 deletions

third_party/uapi-eudebug/drm/.version (vendored, new file)

@@ -0,0 +1,3 @@
git_url: https://gitlab.freedesktop.org/miku/kernel/-/tree/eudebug-dev
git_revision: 48236857dc7f41e5d28c25651428c4394a58966a

@@ -102,7 +102,6 @@ extern "C" {
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_DEBUG_METADATA_CREATE 0x0b
#define DRM_XE_DEBUG_METADATA_DESTROY 0x0c
/* Must be kept compact -- no holes */
#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
@@ -517,7 +516,7 @@ struct drm_xe_query_topology_mask {
#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
/** @type: type of mask */
__u16 type;
@@ -854,10 +853,12 @@ struct drm_xe_vm_bind_op_ext_attach_debug {
* - %DRM_XE_VM_BIND_OP_PREFETCH
*
* and the @flags can be:
* - %DRM_XE_VM_BIND_FLAG_READONLY
* - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - Valid on a faulting VM only, do the
* - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
* to ensure write protection
* - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
* MAP operation immediately rather than deferring the MAP to the page
* fault handler.
* fault handler. This is implied on a non-faulting VM as there is no
* fault handler to defer to.
* - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
* tables are setup with a special bit which indicates writes are
* dropped and all reads return zero. In the future, the NULL flags
@@ -955,7 +956,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_CAPTURE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
/** @flags: Bind flags */
__u32 flags;
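
(Not part of the diff -- orientation only.) The CAPTURE bit at (1 << 3) is renamed to DUMPABLE, so user space that wants a mapping included in post-hang capture now sets DRM_XE_VM_BIND_FLAG_DUMPABLE. A minimal sketch, assuming the drm_xe_vm_bind / drm_xe_vm_bind_op layout matches upstream xe_drm.h and that fd, vm_id and bo_handle already exist:

/* Illustrative sketch only; not taken from the vendored header. */
#include <sys/ioctl.h>
#include "xe_drm.h"   /* the vendored uapi header */

static int bind_dumpable(int fd, __u32 vm_id, __u32 bo_handle,
                         __u64 gpu_addr, __u64 size)
{
        struct drm_xe_vm_bind args = {};

        args.vm_id = vm_id;
        args.num_binds = 1;
        args.bind.obj = bo_handle;
        args.bind.addr = gpu_addr;
        args.bind.range = size;
        args.bind.op = DRM_XE_VM_BIND_OP_MAP;
        /* DUMPABLE is the renamed CAPTURE bit at (1 << 3). */
        args.bind.flags = DRM_XE_VM_BIND_FLAG_DUMPABLE;
        /* pat_index and syncs are left at 0/none for brevity;
         * real code picks a valid PAT index for the device. */

        return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &args);
}
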
@@ -1070,24 +1071,11 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
/* Monitor 128KB contiguous region with 4K sub-granularity */
#define DRM_XE_ACC_GRANULARITY_128K 0
/* Monitor 2MB contiguous region with 64KB sub-granularity */
#define DRM_XE_ACC_GRANULARITY_2M 1
/* Monitor 16MB contiguous region with 512KB sub-granularity */
#define DRM_XE_ACC_GRANULARITY_16M 2
/* Monitor 64MB contiguous region with 2M sub-granularity */
#define DRM_XE_ACC_GRANULARITY_64M 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_EU_DEBUG 8
/* Set eu debug specific flags */
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_EUDEBUG 2
#define DRM_XE_EXEC_QUEUE_EUDEBUG_FLAG_ENABLE (1 << 0)
#define DRM_XE_EXEC_QUEUE_EUDEBUG_FLAG_PAGEFAULT_ENABLE (1 << 1)
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
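
(Not part of the diff -- orientation only.) The per-queue debug knob moves from property 8 (EU_DEBUG) to property 2 (EUDEBUG) with explicit flag bits. A minimal sketch of enabling EU debugging at exec-queue creation, assuming struct drm_xe_ext_set_property, struct drm_xe_exec_queue_create and DRM_IOCTL_XE_EXEC_QUEUE_CREATE match upstream xe_drm.h, and that fd, vm_id and a compute engine instance eci already exist:

/* Illustrative sketch only; not taken from the vendored header. */
#include <stdint.h>
#include <sys/ioctl.h>
#include "xe_drm.h"   /* the vendored uapi header */

static int create_debug_enabled_queue(int fd, __u32 vm_id,
                                      struct drm_xe_engine_class_instance *eci,
                                      __u32 *exec_queue_id)
{
        /* Chain one SET_PROPERTY extension carrying the new EUDEBUG
         * property with its ENABLE flag. */
        struct drm_xe_ext_set_property ext = {
                .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
                .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_EUDEBUG,
                .value = DRM_XE_EXEC_QUEUE_EUDEBUG_FLAG_ENABLE,
        };
        struct drm_xe_exec_queue_create create = {
                .extensions = (uintptr_t)&ext,
                .width = 1,
                .num_placements = 1,
                .vm_id = vm_id,
                .instances = (uintptr_t)eci,
        };
        int ret = ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);

        if (!ret)
                *exec_queue_id = create.exec_queue_id;
        return ret;
}
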
@@ -1387,6 +1375,9 @@ struct drm_xe_debug_metadata_create {
#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_MODULE_AREA 2
#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SBA_AREA 3
#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA 4
#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_NUM (1 + \
WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA)
/** @type: Type of metadata */
__u64 type;
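
(Not part of the diff -- orientation only.) The new METADATA_NUM define simply counts the metadata types (1 + the highest type id). A sketch of registering a module area via DRM_XE_DEBUG_METADATA_CREATE; only @type is shown in the hunk above, so the ioctl macro name and the remaining field names are assumptions that follow the usual uapi pattern:

/* Illustrative sketch only; field names other than .type and the ioctl
 * macro name are assumptions. */
#include <stdint.h>
#include <sys/ioctl.h>
#include "xe_drm.h"   /* the vendored uapi header */

static int register_module_area(int fd, const void *payload,
                                __u64 payload_len, __u32 *metadata_id)
{
        struct drm_xe_debug_metadata_create create = {
                .type = WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_MODULE_AREA,
                .user_addr = (uintptr_t)payload,   /* assumed field name */
                .len = payload_len,                /* assumed field name */
        };
        int ret = ioctl(fd, DRM_IOCTL_XE_DEBUG_METADATA_CREATE, &create);

        if (!ret)
                *metadata_id = create.metadata_id; /* assumed field name */
        return ret;
}
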

@@ -92,6 +92,47 @@ struct drm_xe_eudebug_event_eu_attention {
__u8 bitmask[0];
} __attribute__((packed));
/*
* When the client (debuggee) does a vm_bind_ioctl(), the following event
* sequence will be created (for the debugger):
*
* ┌───────────────────────┐
* │ EVENT_VM_BIND ├───────┬─┬─┐
* └───────────────────────┘ │ │ │
* ┌───────────────────────┐ │ │ │
* │ EVENT_VM_BIND_OP #1 ├───┘ │ │
* └───────────────────────┘ │ │
* ... │ │
* ┌───────────────────────┐ │ │
* │ EVENT_VM_BIND_OP #n ├─────┘ │
* └───────────────────────┘ │
* │
* ┌───────────────────────┐ │
* │ EVENT_UFENCE ├───────┘
* └───────────────────────┘
*
* All the events below VM_BIND will reference the VM_BIND
* they are associated with, via the .vm_bind_ref_seqno field.
* EVENT_UFENCE will only be included if the client attached
* a sync of type UFENCE to its vm_bind_ioctl().
*
* When EVENT_UFENCE is sent by the driver, all the OPs of
* the original VM_BIND are completed and the [addr, range]
* contained in them are present and modifiable through the
* vm accessors. Accessing [addr, range] before the related ufence
* event will lead to undefined results, as the actual bind
* operations are asynchronous and the backing storage might not
* be there at the moment the event is received.
*
* The client's UFENCE sync will be held by the driver: the client's
* drm_xe_wait_ufence will not complete and the value of the ufence
* won't appear until the ufence is acked by the debugger process calling
* DRM_XE_EUDEBUG_IOCTL_ACK_EVENT with the event_ufence.base.seqno.
* This will signal the fence, .value will update, and the wait will
* complete, allowing the client to continue.
*
*/
struct drm_xe_eudebug_event_vm_bind {
struct drm_xe_eudebug_event base;
@@ -242,4 +283,4 @@ struct drm_xe_eudebug_ack_event {
}
#endif
#endif /* _UAPI_XE_DRM_TMP_H_ */
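
(Not part of the diff -- orientation only.) To make the ufence handshake described in the comment block concrete, a debugger-side sketch of acking the event: DRM_XE_EUDEBUG_IOCTL_ACK_EVENT, struct drm_xe_eudebug_ack_event and event_ufence.base.seqno appear above, while the header file name, the base-event field names and the idea of echoing the event type into the ack are assumptions:

/* Illustrative debugger-side sketch; see assumptions above. */
#include <sys/ioctl.h>
#include "xe_drm_tmp.h"   /* assumed name of the header guarded by _UAPI_XE_DRM_TMP_H_ */

static int ack_vm_bind_ufence(int debug_fd,
                              const struct drm_xe_eudebug_event *ev)
{
        struct drm_xe_eudebug_ack_event ack = {
                .type = ev->type,     /* assumed: echo the event type */
                .seqno = ev->seqno,   /* event_ufence.base.seqno */
        };

        /* Once acked, the client's held ufence signals, its value is
         * written and its drm_xe_wait_ufence() completes. */
        return ioctl(debug_fd, DRM_XE_EUDEBUG_IOCTL_ACK_EVENT, &ack);
}
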