diff --git a/shared/source/os_interface/linux/xe/eudebug/eudebug_interface_upstream.cpp b/shared/source/os_interface/linux/xe/eudebug/eudebug_interface_upstream.cpp
index a7ecac2832..24638ee9bd 100644
--- a/shared/source/os_interface/linux/xe/eudebug/eudebug_interface_upstream.cpp
+++ b/shared/source/os_interface/linux/xe/eudebug/eudebug_interface_upstream.cpp
@@ -32,7 +32,7 @@ uint32_t EuDebugInterfaceUpstream::getParamValue(EuDebugParam param) const {
     case EuDebugParam::eventTypeExecQueue:
         return DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE;
     case EuDebugParam::eventTypeExecQueuePlacements:
-        return DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE_PLACEMENTS;
+        return 0;
     case EuDebugParam::eventTypeMetadata:
         return DRM_XE_EUDEBUG_EVENT_METADATA;
     case EuDebugParam::eventTypeOpen:
diff --git a/shared/test/unit_test/os_interface/linux/xe/eudebug/eudebug_interface_upstream_tests.cpp b/shared/test/unit_test/os_interface/linux/xe/eudebug/eudebug_interface_upstream_tests.cpp
index f2a4a93a41..c828b29b96 100644
--- a/shared/test/unit_test/os_interface/linux/xe/eudebug/eudebug_interface_upstream_tests.cpp
+++ b/shared/test/unit_test/os_interface/linux/xe/eudebug/eudebug_interface_upstream_tests.cpp
@@ -24,7 +24,7 @@ TEST(EuDebugInterfaceUpstreamTest, whenGettingParamValueThenCorrectValueIsReturn
     EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_STATE_CHANGE), euDebugInterface.getParamValue(EuDebugParam::eventBitStateChange));
     EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_EU_ATTENTION), euDebugInterface.getParamValue(EuDebugParam::eventTypeEuAttention));
     EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE), euDebugInterface.getParamValue(EuDebugParam::eventTypeExecQueue));
-    EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE_PLACEMENTS), euDebugInterface.getParamValue(EuDebugParam::eventTypeExecQueuePlacements));
+    EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::eventTypeExecQueuePlacements));
     EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_METADATA), euDebugInterface.getParamValue(EuDebugParam::eventTypeMetadata));
     EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_OPEN), euDebugInterface.getParamValue(EuDebugParam::eventTypeOpen));
     EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_PAGEFAULT), euDebugInterface.getParamValue(EuDebugParam::eventTypePagefault));
diff --git a/third_party/uapi-eudebug/drm/.version b/third_party/uapi-eudebug/drm/.version
index 497f800f40..9e60432019 100644
--- a/third_party/uapi-eudebug/drm/.version
+++ b/third_party/uapi-eudebug/drm/.version
@@ -1,3 +1,3 @@
 git_url: https://gitlab.freedesktop.org/miku/kernel/-/tree/eudebug-dev
-git_revision: 0f42548c4fae7021f002867fc0faa2cff8fde7d8
+git_revision: 446269b9ea8d6d08a35437b0b4bc85f64e070ebd
diff --git a/third_party/uapi-eudebug/drm/drm.h b/third_party/uapi-eudebug/drm/drm.h
index e79dd961ad..9a1b18c040 100644
--- a/third_party/uapi-eudebug/drm/drm.h
+++ b/third_party/uapi-eudebug/drm/drm.h
@@ -32,1394 +32,1395 @@
  * OTHER DEALINGS IN THE SOFTWARE.
*/ -#ifndef _DRM_H_ -#define _DRM_H_ - -#if defined(__linux__) - -#include -#include -typedef unsigned int drm_handle_t; - -#else /* One of the BSDs */ - -#include -#include -#include -typedef int8_t __s8; -typedef uint8_t __u8; -typedef int16_t __s16; -typedef uint16_t __u16; -typedef int32_t __s32; -typedef uint32_t __u32; -typedef int64_t __s64; -typedef uint64_t __u64; -typedef size_t __kernel_size_t; -typedef unsigned long drm_handle_t; - -#endif - -#if defined(__cplusplus) -extern "C" { -#endif - -#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ -#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ -#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ -#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ - -#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ -#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ -#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) -#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) -#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) - -typedef unsigned int drm_context_t; -typedef unsigned int drm_drawable_t; -typedef unsigned int drm_magic_t; - -/* - * Cliprect. - * - * \warning: If you change this structure, make sure you change - * XF86DRIClipRectRec in the server as well - * - * \note KW: Actually it's illegal to change either for - * backwards-compatibility reasons. - */ -struct drm_clip_rect { - unsigned short x1; - unsigned short y1; - unsigned short x2; - unsigned short y2; -}; - -/* - * Drawable information. - */ -struct drm_drawable_info { - unsigned int num_rects; - struct drm_clip_rect *rects; -}; - -/* - * Texture region, - */ -struct drm_tex_region { - unsigned char next; - unsigned char prev; - unsigned char in_use; - unsigned char padding; - unsigned int age; -}; - -/* - * Hardware lock. - * - * The lock structure is a simple cache-line aligned integer. To avoid - * processor bus contention on a multiprocessor system, there should not be any - * other data stored in the same cache line. - */ -struct drm_hw_lock { - __volatile__ unsigned int lock; /**< lock variable */ - char padding[60]; /**< Pad to cache line */ -}; - -/* - * DRM_IOCTL_VERSION ioctl argument type. - * - * \sa drmGetVersion(). - */ -struct drm_version { - int version_major; /**< Major version */ - int version_minor; /**< Minor version */ - int version_patchlevel; /**< Patch level */ - __kernel_size_t name_len; /**< Length of name buffer */ - char *name; /**< Name of driver */ - __kernel_size_t date_len; /**< Length of date buffer */ - char *date; /**< User-space buffer to hold date */ - __kernel_size_t desc_len; /**< Length of desc buffer */ - char *desc; /**< User-space buffer to hold desc */ -}; - -/* - * DRM_IOCTL_GET_UNIQUE ioctl argument type. - * - * \sa drmGetBusid() and drmSetBusId(). - */ -struct drm_unique { - __kernel_size_t unique_len; /**< Length of unique */ - char *unique; /**< Unique name for driver instantiation */ -}; - -struct drm_list { - int count; /**< Length of user-space structures */ - struct drm_version *version; -}; - -struct drm_block { - int unused; -}; - -/* - * DRM_IOCTL_CONTROL ioctl argument type. - * - * \sa drmCtlInstHandler() and drmCtlUninstHandler(). - */ -struct drm_control { - enum { - DRM_ADD_COMMAND, - DRM_RM_COMMAND, - DRM_INST_HANDLER, - DRM_UNINST_HANDLER - } func; - int irq; -}; - -/* - * Type of memory to map. 
- */ -enum drm_map_type { - _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ - _DRM_REGISTERS = 1, /**< no caching, no core dump */ - _DRM_SHM = 2, /**< shared, cached */ - _DRM_AGP = 3, /**< AGP/GART */ - _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ - _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */ -}; - -/* - * Memory mapping flags. - */ -enum drm_map_flags { - _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ - _DRM_READ_ONLY = 0x02, - _DRM_LOCKED = 0x04, /**< shared, cached, locked */ - _DRM_KERNEL = 0x08, /**< kernel requires access */ - _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ - _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ - _DRM_REMOVABLE = 0x40, /**< Removable mapping */ - _DRM_DRIVER = 0x80 /**< Managed by driver */ -}; - -struct drm_ctx_priv_map { - unsigned int ctx_id; /**< Context requesting private mapping */ - void *handle; /**< Handle of map */ -}; - -/* - * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls - * argument type. - * - * \sa drmAddMap(). - */ -struct drm_map { - unsigned long offset; /**< Requested physical address (0 for SAREA)*/ - unsigned long size; /**< Requested physical size (bytes) */ - enum drm_map_type type; /**< Type of memory to map */ - enum drm_map_flags flags; /**< Flags */ - void *handle; /**< User-space: "Handle" to pass to mmap() */ - /**< Kernel-space: kernel-virtual address */ - int mtrr; /**< MTRR slot used */ - /* Private data */ -}; - -/* - * DRM_IOCTL_GET_CLIENT ioctl argument type. - */ -struct drm_client { - int idx; /**< Which client desired? */ - int auth; /**< Is client authenticated? */ - unsigned long pid; /**< Process ID */ - unsigned long uid; /**< User ID */ - unsigned long magic; /**< Magic */ - unsigned long iocs; /**< Ioctl count */ -}; - -enum drm_stat_type { - _DRM_STAT_LOCK, - _DRM_STAT_OPENS, - _DRM_STAT_CLOSES, - _DRM_STAT_IOCTLS, - _DRM_STAT_LOCKS, - _DRM_STAT_UNLOCKS, - _DRM_STAT_VALUE, /**< Generic value */ - _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ - _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ - - _DRM_STAT_IRQ, /**< IRQ */ - _DRM_STAT_PRIMARY, /**< Primary DMA bytes */ - _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ - _DRM_STAT_DMA, /**< DMA */ - _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ - _DRM_STAT_MISSED /**< Missed DMA opportunity */ - /* Add to the *END* of the list */ -}; - -/* - * DRM_IOCTL_GET_STATS ioctl argument type. - */ -struct drm_stats { - unsigned long count; - struct { - unsigned long value; - enum drm_stat_type type; - } data[15]; -}; - -/* - * Hardware locking flags. - */ -enum drm_lock_flags { - _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ - _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ - _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ - _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ - /* These *HALT* flags aren't supported yet - -- they will be used to support the - full-screen DGA-like mode. */ - _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ - _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ -}; - -/* - * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. - * - * \sa drmGetLock() and drmUnlock(). - */ -struct drm_lock { - int context; - enum drm_lock_flags flags; -}; - -/* - * DMA flags - * - * \warning - * These values \e must match xf86drm.h. - * - * \sa drm_dma. 
- */ -enum drm_dma_flags { - /* Flags for DMA buffer dispatch */ - _DRM_DMA_BLOCK = 0x01, /**< - * Block until buffer dispatched. - * - * \note The buffer may not yet have - * been processed by the hardware -- - * getting a hardware lock with the - * hardware quiescent will ensure - * that the buffer has been - * processed. - */ - _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ - _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ - - /* Flags for DMA buffer request */ - _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ - _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ - _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ -}; - -/* - * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. - * - * \sa drmAddBufs(). - */ -struct drm_buf_desc { - int count; /**< Number of buffers of this size */ - int size; /**< Size in bytes */ - int low_mark; /**< Low water mark */ - int high_mark; /**< High water mark */ - enum { - _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ - _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ - _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ - _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ - _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ - } flags; - unsigned long agp_start; /**< - * Start address of where the AGP buffers are - * in the AGP aperture - */ -}; - -/* - * DRM_IOCTL_INFO_BUFS ioctl argument type. - */ -struct drm_buf_info { - int count; /**< Entries in list */ - struct drm_buf_desc *list; -}; - -/* - * DRM_IOCTL_FREE_BUFS ioctl argument type. - */ -struct drm_buf_free { - int count; - int *list; -}; - -/* - * Buffer information - * - * \sa drm_buf_map. - */ -struct drm_buf_pub { - int idx; /**< Index into the master buffer list */ - int total; /**< Buffer size */ - int used; /**< Amount of buffer in use (for DMA) */ - void *address; /**< Address of buffer */ -}; - -/* - * DRM_IOCTL_MAP_BUFS ioctl argument type. - */ -struct drm_buf_map { - int count; /**< Length of the buffer list */ -#ifdef __cplusplus - void *virt; -#else - void *virtual; /**< Mmap'd area in user-virtual */ -#endif - struct drm_buf_pub *list; /**< Buffer information */ -}; - -/* - * DRM_IOCTL_DMA ioctl argument type. - * - * Indices here refer to the offset into the buffer list in drm_buf_get. - * - * \sa drmDMA(). - */ -struct drm_dma { - int context; /**< Context handle */ - int send_count; /**< Number of buffers to send */ - int *send_indices; /**< List of handles to buffers */ - int *send_sizes; /**< Lengths of data to send */ - enum drm_dma_flags flags; /**< Flags */ - int request_count; /**< Number of buffers requested */ - int request_size; /**< Desired size for buffers */ - int *request_indices; /**< Buffer information */ - int *request_sizes; - int granted_count; /**< Number of buffers granted */ -}; - -enum drm_ctx_flags { - _DRM_CONTEXT_PRESERVED = 0x01, - _DRM_CONTEXT_2DONLY = 0x02 -}; - -/* - * DRM_IOCTL_ADD_CTX ioctl argument type. - * - * \sa drmCreateContext() and drmDestroyContext(). - */ -struct drm_ctx { - drm_context_t handle; - enum drm_ctx_flags flags; -}; - -/* - * DRM_IOCTL_RES_CTX ioctl argument type. - */ -struct drm_ctx_res { - int count; - struct drm_ctx *contexts; -}; - -/* - * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. - */ -struct drm_draw { - drm_drawable_t handle; -}; - -/* - * DRM_IOCTL_UPDATE_DRAW ioctl argument type. 
- */ -typedef enum { - DRM_DRAWABLE_CLIPRECTS -} drm_drawable_info_type_t; - -struct drm_update_draw { - drm_drawable_t handle; - unsigned int type; - unsigned int num; - unsigned long long data; -}; - -/* - * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. - */ -struct drm_auth { - drm_magic_t magic; -}; - -/* - * DRM_IOCTL_IRQ_BUSID ioctl argument type. - * - * \sa drmGetInterruptFromBusID(). - */ -struct drm_irq_busid { - int irq; /**< IRQ number */ - int busnum; /**< bus number */ - int devnum; /**< device number */ - int funcnum; /**< function number */ -}; - -enum drm_vblank_seq_type { - _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ - _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ - /* bits 1-6 are reserved for high crtcs */ - _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, - _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ - _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ - _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ - _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ - _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ -}; -#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 - -#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) -#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ - _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) - -struct drm_wait_vblank_request { - enum drm_vblank_seq_type type; - unsigned int sequence; - unsigned long signal; -}; - -struct drm_wait_vblank_reply { - enum drm_vblank_seq_type type; - unsigned int sequence; - long tval_sec; - long tval_usec; -}; - -/* - * DRM_IOCTL_WAIT_VBLANK ioctl argument type. - * - * \sa drmWaitVBlank(). - */ -union drm_wait_vblank { - struct drm_wait_vblank_request request; - struct drm_wait_vblank_reply reply; -}; - -#define _DRM_PRE_MODESET 1 -#define _DRM_POST_MODESET 2 - -/* - * DRM_IOCTL_MODESET_CTL ioctl argument type - * - * \sa drmModesetCtl(). - */ -struct drm_modeset_ctl { - __u32 crtc; - __u32 cmd; -}; - -/* - * DRM_IOCTL_AGP_ENABLE ioctl argument type. - * - * \sa drmAgpEnable(). - */ -struct drm_agp_mode { - unsigned long mode; /**< AGP mode */ -}; - -/* - * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. - * - * \sa drmAgpAlloc() and drmAgpFree(). - */ -struct drm_agp_buffer { - unsigned long size; /**< In bytes -- will round to page boundary */ - unsigned long handle; /**< Used for binding / unbinding */ - unsigned long type; /**< Type of memory to allocate */ - unsigned long physical; /**< Physical used by i810 */ -}; - -/* - * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. - * - * \sa drmAgpBind() and drmAgpUnbind(). - */ -struct drm_agp_binding { - unsigned long handle; /**< From drm_agp_buffer */ - unsigned long offset; /**< In bytes -- will round to page boundary */ -}; - -/* - * DRM_IOCTL_AGP_INFO ioctl argument type. - * - * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), - * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), - * drmAgpVendorId() and drmAgpDeviceId(). 
- */ -struct drm_agp_info { - int agp_version_major; - int agp_version_minor; - unsigned long mode; - unsigned long aperture_base; /* physical address */ - unsigned long aperture_size; /* bytes */ - unsigned long memory_allowed; /* bytes */ - unsigned long memory_used; - - /* PCI information */ - unsigned short id_vendor; - unsigned short id_device; -}; - -/* - * DRM_IOCTL_SG_ALLOC ioctl argument type. - */ -struct drm_scatter_gather { - unsigned long size; /**< In bytes -- will round to page boundary */ - unsigned long handle; /**< Used for mapping / unmapping */ -}; - -/* - * DRM_IOCTL_SET_VERSION ioctl argument type. - */ -struct drm_set_version { - int drm_di_major; - int drm_di_minor; - int drm_dd_major; - int drm_dd_minor; -}; - -/* DRM_IOCTL_GEM_CLOSE ioctl argument type */ -struct drm_gem_close { - /** Handle of the object to be closed. */ - __u32 handle; - __u32 pad; -}; - -/* DRM_IOCTL_GEM_FLINK ioctl argument type */ -struct drm_gem_flink { - /** Handle for the object being named */ - __u32 handle; - - /** Returned global name */ - __u32 name; -}; - -/* DRM_IOCTL_GEM_OPEN ioctl argument type */ -struct drm_gem_open { - /** Name of object being opened */ - __u32 name; - - /** Returned handle for the object */ - __u32 handle; - - /** Returned size of the object */ - __u64 size; -}; - -/** - * DRM_CAP_DUMB_BUFFER - * - * If set to 1, the driver supports creating dumb buffers via the - * &DRM_IOCTL_MODE_CREATE_DUMB ioctl. - */ -#define DRM_CAP_DUMB_BUFFER 0x1 -/** - * DRM_CAP_VBLANK_HIGH_CRTC - * - * If set to 1, the kernel supports specifying a :ref:`CRTC index` - * in the high bits of &drm_wait_vblank_request.type. - * - * Starting kernel version 2.6.39, this capability is always set to 1. - */ -#define DRM_CAP_VBLANK_HIGH_CRTC 0x2 -/** - * DRM_CAP_DUMB_PREFERRED_DEPTH - * - * The preferred bit depth for dumb buffers. - * - * The bit depth is the number of bits used to indicate the color of a single - * pixel excluding any padding. This is different from the number of bits per - * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per - * pixel. - * - * Note that this preference only applies to dumb buffers, it's irrelevant for - * other types of buffers. - */ -#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 -/** - * DRM_CAP_DUMB_PREFER_SHADOW - * - * If set to 1, the driver prefers userspace to render to a shadow buffer - * instead of directly rendering to a dumb buffer. For best speed, userspace - * should do streaming ordered memory copies into the dumb buffer and never - * read from it. - * - * Note that this preference only applies to dumb buffers, it's irrelevant for - * other types of buffers. - */ -#define DRM_CAP_DUMB_PREFER_SHADOW 0x4 -/** - * DRM_CAP_PRIME - * - * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT - * and &DRM_PRIME_CAP_EXPORT. - * - * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and - * &DRM_PRIME_CAP_EXPORT are always advertised. - * - * PRIME buffers are exposed as dma-buf file descriptors. - * See :ref:`prime_buffer_sharing`. - */ -#define DRM_CAP_PRIME 0x5 -/** - * DRM_PRIME_CAP_IMPORT - * - * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME - * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl. - * - * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME. 
- */ -#define DRM_PRIME_CAP_IMPORT 0x1 -/** - * DRM_PRIME_CAP_EXPORT - * - * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME - * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl. - * - * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME. - */ -#define DRM_PRIME_CAP_EXPORT 0x2 -/** - * DRM_CAP_TIMESTAMP_MONOTONIC - * - * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in - * struct drm_event_vblank. If set to 1, the kernel will report timestamps with - * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these - * clocks. - * - * Starting from kernel version 2.6.39, the default value for this capability - * is 1. Starting kernel version 4.15, this capability is always set to 1. - */ -#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 -/** - * DRM_CAP_ASYNC_PAGE_FLIP - * - * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy - * page-flips. - */ -#define DRM_CAP_ASYNC_PAGE_FLIP 0x7 -/** - * DRM_CAP_CURSOR_WIDTH - * - * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid - * width x height combination for the hardware cursor. The intention is that a - * hardware agnostic userspace can query a cursor plane size to use. - * - * Note that the cross-driver contract is to merely return a valid size; - * drivers are free to attach another meaning on top, eg. i915 returns the - * maximum plane size. - */ -#define DRM_CAP_CURSOR_WIDTH 0x8 -/** - * DRM_CAP_CURSOR_HEIGHT - * - * See &DRM_CAP_CURSOR_WIDTH. - */ -#define DRM_CAP_CURSOR_HEIGHT 0x9 -/** - * DRM_CAP_ADDFB2_MODIFIERS - * - * If set to 1, the driver supports supplying modifiers in the - * &DRM_IOCTL_MODE_ADDFB2 ioctl. - */ -#define DRM_CAP_ADDFB2_MODIFIERS 0x10 -/** - * DRM_CAP_PAGE_FLIP_TARGET - * - * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and - * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in - * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP - * ioctl. - */ -#define DRM_CAP_PAGE_FLIP_TARGET 0x11 -/** - * DRM_CAP_CRTC_IN_VBLANK_EVENT - * - * If set to 1, the kernel supports reporting the CRTC ID in - * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and - * &DRM_EVENT_FLIP_COMPLETE events. - * - * Starting kernel version 4.12, this capability is always set to 1. - */ -#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 -/** - * DRM_CAP_SYNCOBJ - * - * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`. - */ -#define DRM_CAP_SYNCOBJ 0x13 -/** - * DRM_CAP_SYNCOBJ_TIMELINE - * - * If set to 1, the driver supports timeline operations on sync objects. See - * :ref:`drm_sync_objects`. - */ -#define DRM_CAP_SYNCOBJ_TIMELINE 0x14 -/** - * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP - * - * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic - * commits. - */ -#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15 - -/* DRM_IOCTL_GET_CAP ioctl argument type */ -struct drm_get_cap { - __u64 capability; - __u64 value; -}; - -/** - * DRM_CLIENT_CAP_STEREO_3D - * - * If set to 1, the DRM core will expose the stereo 3D capabilities of the - * monitor by advertising the supported 3D layouts in the flags of struct - * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``. - * - * This capability is always supported for all drivers starting from kernel - * version 3.13. - */ -#define DRM_CLIENT_CAP_STEREO_3D 1 - -/** - * DRM_CLIENT_CAP_UNIVERSAL_PLANES - * - * If set to 1, the DRM core will expose all planes (overlay, primary, and - * cursor) to userspace. 
- * - * This capability has been introduced in kernel version 3.15. Starting from - * kernel version 3.17, this capability is always supported for all drivers. - */ -#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 - -/** - * DRM_CLIENT_CAP_ATOMIC - * - * If set to 1, the DRM core will expose atomic properties to userspace. This - * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and - * &DRM_CLIENT_CAP_ASPECT_RATIO. - * - * If the driver doesn't support atomic mode-setting, enabling this capability - * will fail with -EOPNOTSUPP. - * - * This capability has been introduced in kernel version 4.0. Starting from - * kernel version 4.2, this capability is always supported for atomic-capable - * drivers. - */ -#define DRM_CLIENT_CAP_ATOMIC 3 - -/** - * DRM_CLIENT_CAP_ASPECT_RATIO - * - * If set to 1, the DRM core will provide aspect ratio information in modes. - * See ``DRM_MODE_FLAG_PIC_AR_*``. - * - * This capability is always supported for all drivers starting from kernel - * version 4.18. - */ -#define DRM_CLIENT_CAP_ASPECT_RATIO 4 - -/** - * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS - * - * If set to 1, the DRM core will expose special connectors to be used for - * writing back to memory the scene setup in the commit. The client must enable - * &DRM_CLIENT_CAP_ATOMIC first. - * - * This capability is always supported for atomic-capable drivers starting from - * kernel version 4.19. - */ -#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 - -/** - * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT - * - * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and - * virtualbox) have additional restrictions for cursor planes (thus - * making cursor planes on those drivers not truly universal,) e.g. - * they need cursor planes to act like one would expect from a mouse - * cursor and have correctly set hotspot properties. - * If this client cap is not set the DRM core will hide cursor plane on - * those virtualized drivers because not setting it implies that the - * client is not capable of dealing with those extra restictions. - * Clients which do set cursor hotspot and treat the cursor plane - * like a mouse cursor should set this property. - * The client must enable &DRM_CLIENT_CAP_ATOMIC first. - * - * Setting this property on drivers which do not special case - * cursor planes (i.e. non-virtualized drivers) will return - * EOPNOTSUPP, which can be used by userspace to gauge - * requirements of the hardware/drivers they're running on. - * - * This capability is always supported for atomic-capable virtualized - * drivers starting from kernel version 6.6. - */ -#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6 - -/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ -struct drm_set_client_cap { - __u64 capability; - __u64 value; -}; - -#define DRM_RDWR O_RDWR -#define DRM_CLOEXEC O_CLOEXEC -struct drm_prime_handle { - __u32 handle; - - /** Flags.. 
only applicable for handle->fd */ - __u32 flags; - - /** Returned dmabuf file descriptor */ - __s32 fd; -}; - -struct drm_syncobj_create { - __u32 handle; -#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0) - __u32 flags; -}; - -struct drm_syncobj_destroy { - __u32 handle; - __u32 pad; -}; - -#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0) -#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0) -struct drm_syncobj_handle { - __u32 handle; - __u32 flags; - - __s32 fd; - __u32 pad; -}; - -struct drm_syncobj_transfer { - __u32 src_handle; - __u32 dst_handle; - __u64 src_point; - __u64 dst_point; - __u32 flags; - __u32 pad; -}; - -#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) -#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) -#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */ -#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */ -struct drm_syncobj_wait { - __u64 handles; - /* absolute timeout */ - __s64 timeout_nsec; - __u32 count_handles; - __u32 flags; - __u32 first_signaled; /* only valid when not waiting all */ - __u32 pad; - /** - * @deadline_nsec - fence deadline hint - * - * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing - * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is - * set. - */ - __u64 deadline_nsec; -}; - -struct drm_syncobj_timeline_wait { - __u64 handles; - /* wait on specific timeline point for every handles*/ - __u64 points; - /* absolute timeout */ - __s64 timeout_nsec; - __u32 count_handles; - __u32 flags; - __u32 first_signaled; /* only valid when not waiting all */ - __u32 pad; - /** - * @deadline_nsec - fence deadline hint - * - * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing - * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is - * set. - */ - __u64 deadline_nsec; -}; - -/** - * struct drm_syncobj_eventfd - * @handle: syncobj handle. - * @flags: Zero to wait for the point to be signalled, or - * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be - * available for the point. - * @point: syncobj timeline point (set to zero for binary syncobjs). - * @fd: Existing eventfd to sent events to. - * @pad: Must be zero. - * - * Register an eventfd to be signalled by a syncobj. The eventfd counter will - * be incremented by one. - */ -struct drm_syncobj_eventfd { - __u32 handle; - __u32 flags; - __u64 point; - __s32 fd; - __u32 pad; -}; - - -struct drm_syncobj_array { - __u64 handles; - __u32 count_handles; - __u32 pad; -}; - -#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */ -struct drm_syncobj_timeline_array { - __u64 handles; - __u64 points; - __u32 count_handles; - __u32 flags; -}; - - -/* Query current scanout sequence number */ -struct drm_crtc_get_sequence { - __u32 crtc_id; /* requested crtc_id */ - __u32 active; /* return: crtc output is active */ - __u64 sequence; /* return: most recent vblank sequence */ - __s64 sequence_ns; /* return: most recent time of first pixel out */ -}; - -/* Queue event to be delivered at specified sequence. 
Time stamp marks - * when the first pixel of the refresh cycle leaves the display engine - * for the display - */ -#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */ -#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */ - -struct drm_crtc_queue_sequence { - __u32 crtc_id; - __u32 flags; - __u64 sequence; /* on input, target sequence. on output, actual sequence */ - __u64 user_data; /* user data passed to event */ -}; - -#define DRM_CLIENT_NAME_MAX_LEN 64 -struct drm_set_client_name { - __u64 name_len; - __u64 name; -}; - - -#if defined(__cplusplus) -} -#endif - -#include "drm_mode.h" - -#if defined(__cplusplus) -extern "C" { -#endif - -#define DRM_IOCTL_BASE 'd' -#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) -#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) -#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) -#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) - -#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) -#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) -#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) -#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) -#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) -#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) -#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) -#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) -#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) -/** - * DRM_IOCTL_GEM_CLOSE - Close a GEM handle. - * - * GEM handles are not reference-counted by the kernel. User-space is - * responsible for managing their lifetime. For example, if user-space imports - * the same memory object twice on the same DRM file description, the same GEM - * handle is returned by both imports, and user-space needs to ensure - * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen - * when a memory object is allocated, then exported and imported again on the - * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception - * and always returns fresh new GEM handles even if an existing GEM handle - * already refers to the same memory object before the IOCTL is performed. 
- */ -#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) -#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) -#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) -#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) -#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap) - -#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) -#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) -#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) -#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) -#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) -#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) -#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) -#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) -#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) -#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) -#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) - -#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) - -#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) -#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) - -#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) -#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) - -#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) -#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) -#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) -#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) -#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) -#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) -#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) -#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) -#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) -#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) -#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) -#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) -#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) - -/** - * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD. - * - * User-space sets &drm_prime_handle.handle with the GEM handle to export and - * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in - * &drm_prime_handle.fd. - * - * The export can fail for any driver-specific reason, e.g. because export is - * not supported for this specific GEM handle (but might be for others). - * - * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT. - */ -#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle) -/** - * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle. - * - * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to - * import, and gets back a GEM handle in &drm_prime_handle.handle. - * &drm_prime_handle.flags is unused. - * - * If an existing GEM handle refers to the memory object backing the DMA-BUF, - * that GEM handle is returned. Therefore user-space which needs to handle - * arbitrary DMA-BUFs must have a user-space lookup data structure to manually - * reference-count duplicated GEM handles. For more information see - * &DRM_IOCTL_GEM_CLOSE. - * - * The import can fail for any driver-specific reason, e.g. because import is - * only supported for DMA-BUFs allocated on this DRM device. - * - * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT. 
- */ -#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle) - -#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) -#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) -#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) -#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) -#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) -#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) -#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) -#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) - -#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather) -#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) - -#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) - -#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence) -#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence) - -#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) - -#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) -#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc) -#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc) -#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor) -#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut) -#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) -#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) -#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) -#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */ -#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */ - -#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) -#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) -#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob) -#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) -#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) -/** - * DRM_IOCTL_MODE_RMFB - Remove a framebuffer. - * - * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL - * argument is a framebuffer object ID. - * - * Warning: removing a framebuffer currently in-use on an enabled plane will - * disable that plane. The CRTC the plane is linked to may also be disabled - * (depending on driver capabilities). - */ -#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) -#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) -#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) - -/** - * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object. - * - * KMS dumb buffers provide a very primitive way to allocate a buffer object - * suitable for scanout and map it for software rendering. KMS dumb buffers are - * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb - * buffers are not suitable to be displayed on any other device than the KMS - * device where they were allocated from. Also see - * :ref:`kms_dumb_buffer_objects`. - * - * The IOCTL argument is a struct drm_mode_create_dumb. - * - * User-space is expected to create a KMS dumb buffer via this IOCTL, then add - * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via - * &DRM_IOCTL_MODE_MAP_DUMB. 
- * - * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported. - * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate - * driver preferences for dumb buffers. - */ -#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb) -#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb) -#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb) -#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res) -#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane) -#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane) -#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2) -#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties) -#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) -#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2) -#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic) -#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) -#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob) - -#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create) -#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) -#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) -#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) -#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait) -#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array) -#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array) - -#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease) -#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees) -#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease) -#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease) - -#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait) -#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array) -#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer) -#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array) - -/** - * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata. - * - * This queries metadata about a framebuffer. User-space fills - * &drm_mode_fb_cmd2.fb_id as the input, and the kernels fills the rest of the - * struct as the output. - * - * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles - * will be filled with GEM buffer handles. Fresh new GEM handles are always - * returned, even if another GEM handle referring to the same memory object - * already exists on the DRM file description. The caller is responsible for - * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same - * new handle will be returned for multiple planes in case they use the same - * memory object. Planes are valid until one has a zero handle -- this can be - * used to compute the number of planes. - * - * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid - * until one has a zero &drm_mode_fb_cmd2.pitches. 
- * - * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set - * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the - * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier. - * - * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space - * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately - * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not - * double-close handles which are specified multiple times in the array. - */ -#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) - -#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd) - -/** - * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer. - * - * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL - * argument is a framebuffer object ID. - * - * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable - * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept - * alive. When the plane no longer uses the framebuffer (because the - * framebuffer is replaced with another one, or the plane is disabled), the - * framebuffer is cleaned up. - * - * This is useful to implement flicker-free transitions between two processes. - * - * Depending on the threat model, user-space may want to ensure that the - * framebuffer doesn't expose any sensitive user information: closed - * framebuffers attached to a plane can be read back by the next DRM master. - */ -#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb) - -/** - * DRM_IOCTL_SET_CLIENT_NAME - Attach a name to a drm_file - * - * Having a name allows for easier tracking and debugging. - * The length of the name (without null ending char) must be - * <= DRM_CLIENT_NAME_MAX_LEN. - * The call will fail if the name contains whitespaces or non-printable chars. - */ -#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name) - -/* - * Device specific ioctls should only be in their respective headers - * The device specific ioctl range is from 0x40 to 0x9f. - * Generic IOCTLS restart at 0xA0. - * - * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and - * drmCommandReadWrite(). - */ -#define DRM_COMMAND_BASE 0x40 -#define DRM_COMMAND_END 0xA0 - -/** - * struct drm_event - Header for DRM events - * @type: event type. - * @length: total number of payload bytes (including header). - * - * This struct is a header for events written back to user-space on the DRM FD. - * A read on the DRM FD will always only return complete events: e.g. if the - * read buffer is 100 bytes large and there are two 64 byte events pending, - * only one will be returned. - * - * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and - * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK, - * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE. - */ -struct drm_event { - __u32 type; - __u32 length; -}; - -/** - * DRM_EVENT_VBLANK - vertical blanking event - * - * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the - * &_DRM_VBLANK_EVENT flag set. - * - * The event payload is a struct drm_event_vblank. - */ -#define DRM_EVENT_VBLANK 0x01 -/** - * DRM_EVENT_FLIP_COMPLETE - page-flip completion event - * - * This event is sent in response to an atomic commit or legacy page-flip with - * the &DRM_MODE_PAGE_FLIP_EVENT flag set. - * - * The event payload is a struct drm_event_vblank. 
- */ -#define DRM_EVENT_FLIP_COMPLETE 0x02 -/** - * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event - * - * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE. - * - * The event payload is a struct drm_event_crtc_sequence. - */ -#define DRM_EVENT_CRTC_SEQUENCE 0x03 - -struct drm_event_vblank { - struct drm_event base; - __u64 user_data; - __u32 tv_sec; - __u32 tv_usec; - __u32 sequence; - __u32 crtc_id; /* 0 on older kernels that do not support this */ -}; - -/* Event delivered at sequence. Time stamp marks when the first pixel - * of the refresh cycle leaves the display engine for the display - */ -struct drm_event_crtc_sequence { - struct drm_event base; - __u64 user_data; - __s64 time_ns; - __u64 sequence; -}; - -/* typedef area */ -typedef struct drm_clip_rect drm_clip_rect_t; -typedef struct drm_drawable_info drm_drawable_info_t; -typedef struct drm_tex_region drm_tex_region_t; -typedef struct drm_hw_lock drm_hw_lock_t; -typedef struct drm_version drm_version_t; -typedef struct drm_unique drm_unique_t; -typedef struct drm_list drm_list_t; -typedef struct drm_block drm_block_t; -typedef struct drm_control drm_control_t; -typedef enum drm_map_type drm_map_type_t; -typedef enum drm_map_flags drm_map_flags_t; -typedef struct drm_ctx_priv_map drm_ctx_priv_map_t; -typedef struct drm_map drm_map_t; -typedef struct drm_client drm_client_t; -typedef enum drm_stat_type drm_stat_type_t; -typedef struct drm_stats drm_stats_t; -typedef enum drm_lock_flags drm_lock_flags_t; -typedef struct drm_lock drm_lock_t; -typedef enum drm_dma_flags drm_dma_flags_t; -typedef struct drm_buf_desc drm_buf_desc_t; -typedef struct drm_buf_info drm_buf_info_t; -typedef struct drm_buf_free drm_buf_free_t; -typedef struct drm_buf_pub drm_buf_pub_t; -typedef struct drm_buf_map drm_buf_map_t; -typedef struct drm_dma drm_dma_t; -typedef union drm_wait_vblank drm_wait_vblank_t; -typedef struct drm_agp_mode drm_agp_mode_t; -typedef enum drm_ctx_flags drm_ctx_flags_t; -typedef struct drm_ctx drm_ctx_t; -typedef struct drm_ctx_res drm_ctx_res_t; -typedef struct drm_draw drm_draw_t; -typedef struct drm_update_draw drm_update_draw_t; -typedef struct drm_auth drm_auth_t; -typedef struct drm_irq_busid drm_irq_busid_t; -typedef enum drm_vblank_seq_type drm_vblank_seq_type_t; - -typedef struct drm_agp_buffer drm_agp_buffer_t; -typedef struct drm_agp_binding drm_agp_binding_t; -typedef struct drm_agp_info drm_agp_info_t; -typedef struct drm_scatter_gather drm_scatter_gather_t; -typedef struct drm_set_version drm_set_version_t; - -#if defined(__cplusplus) -} -#endif - -#endif + #ifndef _DRM_H_ + #define _DRM_H_ + + #if defined(__linux__) + + #include + #include + typedef unsigned int drm_handle_t; + + #else /* One of the BSDs */ + + #include + #include + #include + typedef int8_t __s8; + typedef uint8_t __u8; + typedef int16_t __s16; + typedef uint16_t __u16; + typedef int32_t __s32; + typedef uint32_t __u32; + typedef int64_t __s64; + typedef uint64_t __u64; + typedef size_t __kernel_size_t; + typedef unsigned long drm_handle_t; + + #endif + + #if defined(__cplusplus) + extern "C" { + #endif + + #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ + #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ + #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ + #define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? 
*/ + + #define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ + #define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ + #define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) + #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) + #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) + + typedef unsigned int drm_context_t; + typedef unsigned int drm_drawable_t; + typedef unsigned int drm_magic_t; + + /* + * Cliprect. + * + * \warning: If you change this structure, make sure you change + * XF86DRIClipRectRec in the server as well + * + * \note KW: Actually it's illegal to change either for + * backwards-compatibility reasons. + */ + struct drm_clip_rect { + unsigned short x1; + unsigned short y1; + unsigned short x2; + unsigned short y2; + }; + + /* + * Drawable information. + */ + struct drm_drawable_info { + unsigned int num_rects; + struct drm_clip_rect *rects; + }; + + /* + * Texture region, + */ + struct drm_tex_region { + unsigned char next; + unsigned char prev; + unsigned char in_use; + unsigned char padding; + unsigned int age; + }; + + /* + * Hardware lock. + * + * The lock structure is a simple cache-line aligned integer. To avoid + * processor bus contention on a multiprocessor system, there should not be any + * other data stored in the same cache line. + */ + struct drm_hw_lock { + __volatile__ unsigned int lock; /**< lock variable */ + char padding[60]; /**< Pad to cache line */ + }; + + /* + * DRM_IOCTL_VERSION ioctl argument type. + * + * \sa drmGetVersion(). + */ + struct drm_version { + int version_major; /**< Major version */ + int version_minor; /**< Minor version */ + int version_patchlevel; /**< Patch level */ + __kernel_size_t name_len; /**< Length of name buffer */ + char *name; /**< Name of driver */ + __kernel_size_t date_len; /**< Length of date buffer */ + char *date; /**< User-space buffer to hold date */ + __kernel_size_t desc_len; /**< Length of desc buffer */ + char *desc; /**< User-space buffer to hold desc */ + }; + + /* + * DRM_IOCTL_GET_UNIQUE ioctl argument type. + * + * \sa drmGetBusid() and drmSetBusId(). + */ + struct drm_unique { + __kernel_size_t unique_len; /**< Length of unique */ + char *unique; /**< Unique name for driver instantiation */ + }; + + struct drm_list { + int count; /**< Length of user-space structures */ + struct drm_version *version; + }; + + struct drm_block { + int unused; + }; + + /* + * DRM_IOCTL_CONTROL ioctl argument type. + * + * \sa drmCtlInstHandler() and drmCtlUninstHandler(). + */ + struct drm_control { + enum { + DRM_ADD_COMMAND, + DRM_RM_COMMAND, + DRM_INST_HANDLER, + DRM_UNINST_HANDLER + } func; + int irq; + }; + + /* + * Type of memory to map. + */ + enum drm_map_type { + _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ + _DRM_REGISTERS = 1, /**< no caching, no core dump */ + _DRM_SHM = 2, /**< shared, cached */ + _DRM_AGP = 3, /**< AGP/GART */ + _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ + _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */ + }; + + /* + * Memory mapping flags. 
+ */ + enum drm_map_flags { + _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ + _DRM_READ_ONLY = 0x02, + _DRM_LOCKED = 0x04, /**< shared, cached, locked */ + _DRM_KERNEL = 0x08, /**< kernel requires access */ + _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ + _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ + _DRM_REMOVABLE = 0x40, /**< Removable mapping */ + _DRM_DRIVER = 0x80 /**< Managed by driver */ + }; + + struct drm_ctx_priv_map { + unsigned int ctx_id; /**< Context requesting private mapping */ + void *handle; /**< Handle of map */ + }; + + /* + * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls + * argument type. + * + * \sa drmAddMap(). + */ + struct drm_map { + unsigned long offset; /**< Requested physical address (0 for SAREA)*/ + unsigned long size; /**< Requested physical size (bytes) */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ + void *handle; /**< User-space: "Handle" to pass to mmap() */ + /**< Kernel-space: kernel-virtual address */ + int mtrr; /**< MTRR slot used */ + /* Private data */ + }; + + /* + * DRM_IOCTL_GET_CLIENT ioctl argument type. + */ + struct drm_client { + int idx; /**< Which client desired? */ + int auth; /**< Is client authenticated? */ + unsigned long pid; /**< Process ID */ + unsigned long uid; /**< User ID */ + unsigned long magic; /**< Magic */ + unsigned long iocs; /**< Ioctl count */ + }; + + enum drm_stat_type { + _DRM_STAT_LOCK, + _DRM_STAT_OPENS, + _DRM_STAT_CLOSES, + _DRM_STAT_IOCTLS, + _DRM_STAT_LOCKS, + _DRM_STAT_UNLOCKS, + _DRM_STAT_VALUE, /**< Generic value */ + _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ + _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ + + _DRM_STAT_IRQ, /**< IRQ */ + _DRM_STAT_PRIMARY, /**< Primary DMA bytes */ + _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ + _DRM_STAT_DMA, /**< DMA */ + _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ + _DRM_STAT_MISSED /**< Missed DMA opportunity */ + /* Add to the *END* of the list */ + }; + + /* + * DRM_IOCTL_GET_STATS ioctl argument type. + */ + struct drm_stats { + unsigned long count; + struct { + unsigned long value; + enum drm_stat_type type; + } data[15]; + }; + + /* + * Hardware locking flags. + */ + enum drm_lock_flags { + _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ + _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ + _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ + _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ + /* These *HALT* flags aren't supported yet + -- they will be used to support the + full-screen DGA-like mode. */ + _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ + _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ + }; + + /* + * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. + * + * \sa drmGetLock() and drmUnlock(). + */ + struct drm_lock { + int context; + enum drm_lock_flags flags; + }; + + /* + * DMA flags + * + * \warning + * These values \e must match xf86drm.h. + * + * \sa drm_dma. + */ + enum drm_dma_flags { + /* Flags for DMA buffer dispatch */ + _DRM_DMA_BLOCK = 0x01, /**< + * Block until buffer dispatched. + * + * \note The buffer may not yet have + * been processed by the hardware -- + * getting a hardware lock with the + * hardware quiescent will ensure + * that the buffer has been + * processed. 
+ */ + _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ + _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ + + /* Flags for DMA buffer request */ + _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ + _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ + _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ + }; + + /* + * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. + * + * \sa drmAddBufs(). + */ + struct drm_buf_desc { + int count; /**< Number of buffers of this size */ + int size; /**< Size in bytes */ + int low_mark; /**< Low water mark */ + int high_mark; /**< High water mark */ + enum { + _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ + _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ + _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ + _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ + _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ + } flags; + unsigned long agp_start; /**< + * Start address of where the AGP buffers are + * in the AGP aperture + */ + }; + + /* + * DRM_IOCTL_INFO_BUFS ioctl argument type. + */ + struct drm_buf_info { + int count; /**< Entries in list */ + struct drm_buf_desc *list; + }; + + /* + * DRM_IOCTL_FREE_BUFS ioctl argument type. + */ + struct drm_buf_free { + int count; + int *list; + }; + + /* + * Buffer information + * + * \sa drm_buf_map. + */ + struct drm_buf_pub { + int idx; /**< Index into the master buffer list */ + int total; /**< Buffer size */ + int used; /**< Amount of buffer in use (for DMA) */ + void *address; /**< Address of buffer */ + }; + + /* + * DRM_IOCTL_MAP_BUFS ioctl argument type. + */ + struct drm_buf_map { + int count; /**< Length of the buffer list */ + #ifdef __cplusplus + void *virt; + #else + void *virtual; /**< Mmap'd area in user-virtual */ + #endif + struct drm_buf_pub *list; /**< Buffer information */ + }; + + /* + * DRM_IOCTL_DMA ioctl argument type. + * + * Indices here refer to the offset into the buffer list in drm_buf_get. + * + * \sa drmDMA(). + */ + struct drm_dma { + int context; /**< Context handle */ + int send_count; /**< Number of buffers to send */ + int *send_indices; /**< List of handles to buffers */ + int *send_sizes; /**< Lengths of data to send */ + enum drm_dma_flags flags; /**< Flags */ + int request_count; /**< Number of buffers requested */ + int request_size; /**< Desired size for buffers */ + int *request_indices; /**< Buffer information */ + int *request_sizes; + int granted_count; /**< Number of buffers granted */ + }; + + enum drm_ctx_flags { + _DRM_CONTEXT_PRESERVED = 0x01, + _DRM_CONTEXT_2DONLY = 0x02 + }; + + /* + * DRM_IOCTL_ADD_CTX ioctl argument type. + * + * \sa drmCreateContext() and drmDestroyContext(). + */ + struct drm_ctx { + drm_context_t handle; + enum drm_ctx_flags flags; + }; + + /* + * DRM_IOCTL_RES_CTX ioctl argument type. + */ + struct drm_ctx_res { + int count; + struct drm_ctx *contexts; + }; + + /* + * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. + */ + struct drm_draw { + drm_drawable_t handle; + }; + + /* + * DRM_IOCTL_UPDATE_DRAW ioctl argument type. + */ + typedef enum { + DRM_DRAWABLE_CLIPRECTS + } drm_drawable_info_type_t; + + struct drm_update_draw { + drm_drawable_t handle; + unsigned int type; + unsigned int num; + unsigned long long data; + }; + + /* + * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. + */ + struct drm_auth { + drm_magic_t magic; + }; + + /* + * DRM_IOCTL_IRQ_BUSID ioctl argument type. 
+ * + * \sa drmGetInterruptFromBusID(). + */ + struct drm_irq_busid { + int irq; /**< IRQ number */ + int busnum; /**< bus number */ + int devnum; /**< device number */ + int funcnum; /**< function number */ + }; + + enum drm_vblank_seq_type { + _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ + _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ + /* bits 1-6 are reserved for high crtcs */ + _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, + _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ + _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ + _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ + _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ + _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ + }; + #define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 + + #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) + #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ + _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) + + struct drm_wait_vblank_request { + enum drm_vblank_seq_type type; + unsigned int sequence; + unsigned long signal; + }; + + struct drm_wait_vblank_reply { + enum drm_vblank_seq_type type; + unsigned int sequence; + long tval_sec; + long tval_usec; + }; + + /* + * DRM_IOCTL_WAIT_VBLANK ioctl argument type. + * + * \sa drmWaitVBlank(). + */ + union drm_wait_vblank { + struct drm_wait_vblank_request request; + struct drm_wait_vblank_reply reply; + }; + + #define _DRM_PRE_MODESET 1 + #define _DRM_POST_MODESET 2 + + /* + * DRM_IOCTL_MODESET_CTL ioctl argument type + * + * \sa drmModesetCtl(). + */ + struct drm_modeset_ctl { + __u32 crtc; + __u32 cmd; + }; + + /* + * DRM_IOCTL_AGP_ENABLE ioctl argument type. + * + * \sa drmAgpEnable(). + */ + struct drm_agp_mode { + unsigned long mode; /**< AGP mode */ + }; + + /* + * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. + * + * \sa drmAgpAlloc() and drmAgpFree(). + */ + struct drm_agp_buffer { + unsigned long size; /**< In bytes -- will round to page boundary */ + unsigned long handle; /**< Used for binding / unbinding */ + unsigned long type; /**< Type of memory to allocate */ + unsigned long physical; /**< Physical used by i810 */ + }; + + /* + * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. + * + * \sa drmAgpBind() and drmAgpUnbind(). + */ + struct drm_agp_binding { + unsigned long handle; /**< From drm_agp_buffer */ + unsigned long offset; /**< In bytes -- will round to page boundary */ + }; + + /* + * DRM_IOCTL_AGP_INFO ioctl argument type. + * + * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), + * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), + * drmAgpVendorId() and drmAgpDeviceId(). + */ + struct drm_agp_info { + int agp_version_major; + int agp_version_minor; + unsigned long mode; + unsigned long aperture_base; /* physical address */ + unsigned long aperture_size; /* bytes */ + unsigned long memory_allowed; /* bytes */ + unsigned long memory_used; + + /* PCI information */ + unsigned short id_vendor; + unsigned short id_device; + }; + + /* + * DRM_IOCTL_SG_ALLOC ioctl argument type. + */ + struct drm_scatter_gather { + unsigned long size; /**< In bytes -- will round to page boundary */ + unsigned long handle; /**< Used for mapping / unmapping */ + }; + + /* + * DRM_IOCTL_SET_VERSION ioctl argument type. 
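
The request/reply split in union drm_wait_vblank above is how a single ioctl both takes parameters and returns results. A minimal sketch of queuing a vblank event instead of blocking, assuming fd is an open DRM device node (e.g. /dev/dri/card0), that <sys/ioctl.h> and <string.h> plus this header are included, and that my_cookie is an application-chosen value delivered back later in drm_event_vblank.user_data:

    union drm_wait_vblank vbl;
    memset(&vbl, 0, sizeof(vbl));
    vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT; /* send an event, do not block */
    vbl.request.sequence = 1;                                    /* one vblank from now */
    vbl.request.signal = (unsigned long)my_cookie;               /* returned in the event's user_data */
    if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) != 0)
        perror("DRM_IOCTL_WAIT_VBLANK");
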
+ */ + struct drm_set_version { + int drm_di_major; + int drm_di_minor; + int drm_dd_major; + int drm_dd_minor; + }; + + /* DRM_IOCTL_GEM_CLOSE ioctl argument type */ + struct drm_gem_close { + /** Handle of the object to be closed. */ + __u32 handle; + __u32 pad; + }; + + /* DRM_IOCTL_GEM_FLINK ioctl argument type */ + struct drm_gem_flink { + /** Handle for the object being named */ + __u32 handle; + + /** Returned global name */ + __u32 name; + }; + + /* DRM_IOCTL_GEM_OPEN ioctl argument type */ + struct drm_gem_open { + /** Name of object being opened */ + __u32 name; + + /** Returned handle for the object */ + __u32 handle; + + /** Returned size of the object */ + __u64 size; + }; + + /** + * DRM_CAP_DUMB_BUFFER + * + * If set to 1, the driver supports creating dumb buffers via the + * &DRM_IOCTL_MODE_CREATE_DUMB ioctl. + */ + #define DRM_CAP_DUMB_BUFFER 0x1 + /** + * DRM_CAP_VBLANK_HIGH_CRTC + * + * If set to 1, the kernel supports specifying a :ref:`CRTC index` + * in the high bits of &drm_wait_vblank_request.type. + * + * Starting kernel version 2.6.39, this capability is always set to 1. + */ + #define DRM_CAP_VBLANK_HIGH_CRTC 0x2 + /** + * DRM_CAP_DUMB_PREFERRED_DEPTH + * + * The preferred bit depth for dumb buffers. + * + * The bit depth is the number of bits used to indicate the color of a single + * pixel excluding any padding. This is different from the number of bits per + * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per + * pixel. + * + * Note that this preference only applies to dumb buffers, it's irrelevant for + * other types of buffers. + */ + #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 + /** + * DRM_CAP_DUMB_PREFER_SHADOW + * + * If set to 1, the driver prefers userspace to render to a shadow buffer + * instead of directly rendering to a dumb buffer. For best speed, userspace + * should do streaming ordered memory copies into the dumb buffer and never + * read from it. + * + * Note that this preference only applies to dumb buffers, it's irrelevant for + * other types of buffers. + */ + #define DRM_CAP_DUMB_PREFER_SHADOW 0x4 + /** + * DRM_CAP_PRIME + * + * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT + * and &DRM_PRIME_CAP_EXPORT. + * + * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and + * &DRM_PRIME_CAP_EXPORT are always advertised. + * + * PRIME buffers are exposed as dma-buf file descriptors. + * See :ref:`prime_buffer_sharing`. + */ + #define DRM_CAP_PRIME 0x5 + /** + * DRM_PRIME_CAP_IMPORT + * + * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME + * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl. + * + * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME. + */ + #define DRM_PRIME_CAP_IMPORT 0x1 + /** + * DRM_PRIME_CAP_EXPORT + * + * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME + * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl. + * + * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME. + */ + #define DRM_PRIME_CAP_EXPORT 0x2 + /** + * DRM_CAP_TIMESTAMP_MONOTONIC + * + * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in + * struct drm_event_vblank. If set to 1, the kernel will report timestamps with + * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these + * clocks. + * + * Starting from kernel version 2.6.39, the default value for this capability + * is 1. Starting kernel version 4.15, this capability is always set to 1. 
+ */ + #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 + /** + * DRM_CAP_ASYNC_PAGE_FLIP + * + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy + * page-flips. + */ + #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 + /** + * DRM_CAP_CURSOR_WIDTH + * + * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid + * width x height combination for the hardware cursor. The intention is that a + * hardware agnostic userspace can query a cursor plane size to use. + * + * Note that the cross-driver contract is to merely return a valid size; + * drivers are free to attach another meaning on top, eg. i915 returns the + * maximum plane size. + */ + #define DRM_CAP_CURSOR_WIDTH 0x8 + /** + * DRM_CAP_CURSOR_HEIGHT + * + * See &DRM_CAP_CURSOR_WIDTH. + */ + #define DRM_CAP_CURSOR_HEIGHT 0x9 + /** + * DRM_CAP_ADDFB2_MODIFIERS + * + * If set to 1, the driver supports supplying modifiers in the + * &DRM_IOCTL_MODE_ADDFB2 ioctl. + */ + #define DRM_CAP_ADDFB2_MODIFIERS 0x10 + /** + * DRM_CAP_PAGE_FLIP_TARGET + * + * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and + * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in + * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP + * ioctl. + */ + #define DRM_CAP_PAGE_FLIP_TARGET 0x11 + /** + * DRM_CAP_CRTC_IN_VBLANK_EVENT + * + * If set to 1, the kernel supports reporting the CRTC ID in + * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and + * &DRM_EVENT_FLIP_COMPLETE events. + * + * Starting kernel version 4.12, this capability is always set to 1. + */ + #define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 + /** + * DRM_CAP_SYNCOBJ + * + * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`. + */ + #define DRM_CAP_SYNCOBJ 0x13 + /** + * DRM_CAP_SYNCOBJ_TIMELINE + * + * If set to 1, the driver supports timeline operations on sync objects. See + * :ref:`drm_sync_objects`. + */ + #define DRM_CAP_SYNCOBJ_TIMELINE 0x14 + /** + * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP + * + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic + * commits. + */ + #define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15 + + /* DRM_IOCTL_GET_CAP ioctl argument type */ + struct drm_get_cap { + __u64 capability; + __u64 value; + }; + + /** + * DRM_CLIENT_CAP_STEREO_3D + * + * If set to 1, the DRM core will expose the stereo 3D capabilities of the + * monitor by advertising the supported 3D layouts in the flags of struct + * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``. + * + * This capability is always supported for all drivers starting from kernel + * version 3.13. + */ + #define DRM_CLIENT_CAP_STEREO_3D 1 + + /** + * DRM_CLIENT_CAP_UNIVERSAL_PLANES + * + * If set to 1, the DRM core will expose all planes (overlay, primary, and + * cursor) to userspace. + * + * This capability has been introduced in kernel version 3.15. Starting from + * kernel version 3.17, this capability is always supported for all drivers. + */ + #define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 + + /** + * DRM_CLIENT_CAP_ATOMIC + * + * If set to 1, the DRM core will expose atomic properties to userspace. This + * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and + * &DRM_CLIENT_CAP_ASPECT_RATIO. + * + * If the driver doesn't support atomic mode-setting, enabling this capability + * will fail with -EOPNOTSUPP. + * + * This capability has been introduced in kernel version 4.0. Starting from + * kernel version 4.2, this capability is always supported for atomic-capable + * drivers. 
+ */ + #define DRM_CLIENT_CAP_ATOMIC 3 + + /** + * DRM_CLIENT_CAP_ASPECT_RATIO + * + * If set to 1, the DRM core will provide aspect ratio information in modes. + * See ``DRM_MODE_FLAG_PIC_AR_*``. + * + * This capability is always supported for all drivers starting from kernel + * version 4.18. + */ + #define DRM_CLIENT_CAP_ASPECT_RATIO 4 + + /** + * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS + * + * If set to 1, the DRM core will expose special connectors to be used for + * writing back to memory the scene setup in the commit. The client must enable + * &DRM_CLIENT_CAP_ATOMIC first. + * + * This capability is always supported for atomic-capable drivers starting from + * kernel version 4.19. + */ + #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 + + /** + * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT + * + * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and + * virtualbox) have additional restrictions for cursor planes (thus + * making cursor planes on those drivers not truly universal,) e.g. + * they need cursor planes to act like one would expect from a mouse + * cursor and have correctly set hotspot properties. + * If this client cap is not set the DRM core will hide cursor plane on + * those virtualized drivers because not setting it implies that the + * client is not capable of dealing with those extra restictions. + * Clients which do set cursor hotspot and treat the cursor plane + * like a mouse cursor should set this property. + * The client must enable &DRM_CLIENT_CAP_ATOMIC first. + * + * Setting this property on drivers which do not special case + * cursor planes (i.e. non-virtualized drivers) will return + * EOPNOTSUPP, which can be used by userspace to gauge + * requirements of the hardware/drivers they're running on. + * + * This capability is always supported for atomic-capable virtualized + * drivers starting from kernel version 6.6. + */ + #define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6 + + /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ + struct drm_set_client_cap { + __u64 capability; + __u64 value; + }; + + #define DRM_RDWR O_RDWR + #define DRM_CLOEXEC O_CLOEXEC + struct drm_prime_handle { + __u32 handle; + + /** Flags.. only applicable for handle->fd */ + __u32 flags; + + /** Returned dmabuf file descriptor */ + __s32 fd; + }; + + struct drm_syncobj_create { + __u32 handle; + #define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0) + __u32 flags; + }; + + struct drm_syncobj_destroy { + __u32 handle; + __u32 pad; + }; + + #define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0) + #define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0) + struct drm_syncobj_handle { + __u32 handle; + __u32 flags; + + __s32 fd; + __u32 pad; + }; + + struct drm_syncobj_transfer { + __u32 src_handle; + __u32 dst_handle; + __u64 src_point; + __u64 dst_point; + __u32 flags; + __u32 pad; + }; + + #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) + #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) + #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */ + #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */ + struct drm_syncobj_wait { + __u64 handles; + /* absolute timeout */ + __s64 timeout_nsec; + __u32 count_handles; + __u32 flags; + __u32 first_signaled; /* only valid when not waiting all */ + __u32 pad; + /** + * @deadline_nsec - fence deadline hint + * + * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing + * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is + * set. 
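
The DRM_CAP_* and DRM_CLIENT_CAP_* values above are consumed through the two small structs just defined. A minimal sketch of probing a capability and opting in to atomic, under the same assumptions as the earlier sketch (open DRM fd, <sys/ioctl.h> included); error handling is elided:

    struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };
    if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value) {
        /* driver supports DRM_IOCTL_MODE_CREATE_DUMB */
    }

    struct drm_set_client_cap ccap = { .capability = DRM_CLIENT_CAP_ATOMIC, .value = 1 };
    if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &ccap) != 0) {
        /* no atomic support (EOPNOTSUPP): fall back to legacy modesetting */
    }
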
+ */ + __u64 deadline_nsec; + }; + + struct drm_syncobj_timeline_wait { + __u64 handles; + /* wait on specific timeline point for every handles*/ + __u64 points; + /* absolute timeout */ + __s64 timeout_nsec; + __u32 count_handles; + __u32 flags; + __u32 first_signaled; /* only valid when not waiting all */ + __u32 pad; + /** + * @deadline_nsec - fence deadline hint + * + * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing + * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is + * set. + */ + __u64 deadline_nsec; + }; + + /** + * struct drm_syncobj_eventfd + * @handle: syncobj handle. + * @flags: Zero to wait for the point to be signalled, or + * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be + * available for the point. + * @point: syncobj timeline point (set to zero for binary syncobjs). + * @fd: Existing eventfd to sent events to. + * @pad: Must be zero. + * + * Register an eventfd to be signalled by a syncobj. The eventfd counter will + * be incremented by one. + */ + struct drm_syncobj_eventfd { + __u32 handle; + __u32 flags; + __u64 point; + __s32 fd; + __u32 pad; + }; + + + struct drm_syncobj_array { + __u64 handles; + __u32 count_handles; + __u32 pad; + }; + + #define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */ + struct drm_syncobj_timeline_array { + __u64 handles; + __u64 points; + __u32 count_handles; + __u32 flags; + }; + + + /* Query current scanout sequence number */ + struct drm_crtc_get_sequence { + __u32 crtc_id; /* requested crtc_id */ + __u32 active; /* return: crtc output is active */ + __u64 sequence; /* return: most recent vblank sequence */ + __s64 sequence_ns; /* return: most recent time of first pixel out */ + }; + + /* Queue event to be delivered at specified sequence. Time stamp marks + * when the first pixel of the refresh cycle leaves the display engine + * for the display + */ + #define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */ + #define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */ + + struct drm_crtc_queue_sequence { + __u32 crtc_id; + __u32 flags; + __u64 sequence; /* on input, target sequence. on output, actual sequence */ + __u64 user_data; /* user data passed to event */ + }; + + #define DRM_CLIENT_NAME_MAX_LEN 64 + struct drm_set_client_name { + __u64 name_len; + __u64 name; + }; + + + #if defined(__cplusplus) + } + #endif + + #include "drm_mode.h" + + #if defined(__cplusplus) + extern "C" { + #endif + + #define DRM_IOCTL_BASE 'd' + #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) + #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) + #define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) + #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) + + #define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) + #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) + #define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) + #define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) + #define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) + #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) + #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) + #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) + #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) + /** + * DRM_IOCTL_GEM_CLOSE - Close a GEM handle. + * + * GEM handles are not reference-counted by the kernel. User-space is + * responsible for managing their lifetime. 
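
One way to read the syncobj structs above: create a syncobj, then wait on it with an absolute CLOCK_MONOTONIC timeout, letting the kernel wait for a fence to be attached first. A sketch assuming <time.h> and <stdint.h> in addition to the earlier includes:

    struct drm_syncobj_create create = { .flags = 0 };
    if (ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create) != 0)
        return -1;                                   /* create.handle is the new syncobj */

    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    __u32 handle = create.handle;
    struct drm_syncobj_wait wait = {
        .handles = (__u64)(uintptr_t)&handle,
        .count_handles = 1,
        .timeout_nsec = (__s64)now.tv_sec * 1000000000LL + now.tv_nsec + 100000000LL, /* absolute, ~100 ms */
        .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, /* allow the fence to show up later */
    };
    if (ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait) != 0) {
        /* timed out or failed before the backing fence signalled */
    }
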
For example, if user-space imports + * the same memory object twice on the same DRM file description, the same GEM + * handle is returned by both imports, and user-space needs to ensure + * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen + * when a memory object is allocated, then exported and imported again on the + * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception + * and always returns fresh new GEM handles even if an existing GEM handle + * already refers to the same memory object before the IOCTL is performed. + */ + #define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) + #define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) + #define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) + #define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) + #define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap) + + #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) + #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) + #define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) + #define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) + #define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) + #define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) + #define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) + #define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) + #define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) + #define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) + #define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) + + #define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) + + #define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) + #define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) + + #define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) + #define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) + + #define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) + #define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) + #define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) + #define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) + #define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) + #define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) + #define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) + #define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) + #define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) + #define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) + #define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) + #define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) + #define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) + + /** + * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD. + * + * User-space sets &drm_prime_handle.handle with the GEM handle to export and + * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in + * &drm_prime_handle.fd. + * + * The export can fail for any driver-specific reason, e.g. because export is + * not supported for this specific GEM handle (but might be for others). + * + * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT. + */ + #define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle) + /** + * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle. + * + * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to + * import, and gets back a GEM handle in &drm_prime_handle.handle. + * &drm_prime_handle.flags is unused. 
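
The GEM_CLOSE note above (handles are not reference-counted) matters most when exporting. A sketch of exporting a buffer as a dma-buf and then dropping the handle exactly once; gem_handle is a placeholder for a handle obtained elsewhere, not something defined in this header:

    struct drm_prime_handle prime = {
        .handle = gem_handle,
        .flags = DRM_CLOEXEC,          /* flags only apply to the handle->fd direction */
    };
    if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime) == 0) {
        int dmabuf_fd = prime.fd;      /* share with another process/device, close() when done */
        (void)dmabuf_fd;
    }

    struct drm_gem_close gclose = { .handle = gem_handle };
    ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gclose);   /* close once; tracking duplicates is user-space's job */
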
+ * + * If an existing GEM handle refers to the memory object backing the DMA-BUF, + * that GEM handle is returned. Therefore user-space which needs to handle + * arbitrary DMA-BUFs must have a user-space lookup data structure to manually + * reference-count duplicated GEM handles. For more information see + * &DRM_IOCTL_GEM_CLOSE. + * + * The import can fail for any driver-specific reason, e.g. because import is + * only supported for DMA-BUFs allocated on this DRM device. + * + * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT. + */ + #define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle) + + #define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) + #define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) + #define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) + #define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) + #define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) + #define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) + #define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) + #define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) + + #define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather) + #define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) + + #define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) + + #define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence) + #define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence) + + #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) + + #define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) + #define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc) + #define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc) + #define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor) + #define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut) + #define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) + #define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) + #define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) + #define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */ + #define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */ + + #define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) + #define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) + #define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob) + #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) + #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) + /** + * DRM_IOCTL_MODE_RMFB - Remove a framebuffer. + * + * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL + * argument is a framebuffer object ID. + * + * Warning: removing a framebuffer currently in-use on an enabled plane will + * disable that plane. The CRTC the plane is linked to may also be disabled + * (depending on driver capabilities). + */ + #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) + #define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) + #define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) + + /** + * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object. 
+ * + * KMS dumb buffers provide a very primitive way to allocate a buffer object + * suitable for scanout and map it for software rendering. KMS dumb buffers are + * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb + * buffers are not suitable to be displayed on any other device than the KMS + * device where they were allocated from. Also see + * :ref:`kms_dumb_buffer_objects`. + * + * The IOCTL argument is a struct drm_mode_create_dumb. + * + * User-space is expected to create a KMS dumb buffer via this IOCTL, then add + * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via + * &DRM_IOCTL_MODE_MAP_DUMB. + * + * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported. + * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate + * driver preferences for dumb buffers. + */ + #define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb) + #define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb) + #define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb) + #define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res) + #define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane) + #define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane) + #define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2) + #define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties) + #define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) + #define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2) + #define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic) + #define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) + #define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob) + + #define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create) + #define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) + #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) + #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) + #define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait) + #define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array) + #define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array) + + #define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease) + #define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees) + #define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease) + #define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease) + + #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait) + #define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array) + #define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer) + #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array) + + /** + * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata. + * + * This queries metadata about a framebuffer. User-space fills + * &drm_mode_fb_cmd2.fb_id as the input, and the kernels fills the rest of the + * struct as the output. + * + * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles + * will be filled with GEM buffer handles. 
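
The CREATE_DUMB flow described above (create, add as a framebuffer, map) reduces to roughly the following. struct drm_mode_create_dumb and struct drm_mode_map_dumb are declared further down in drm_mode.h; <sys/mman.h> is assumed for mmap:

    struct drm_mode_create_dumb creq = { .width = 1920, .height = 1080, .bpp = 32 };
    if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) != 0)
        return -1;                               /* creq.handle, creq.pitch, creq.size filled in */

    struct drm_mode_map_dumb mreq = { .handle = creq.handle };
    if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) != 0)
        return -1;

    void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mreq.offset);
    /* software-render into 'map', then register creq.handle via DRM_IOCTL_MODE_ADDFB/ADDFB2 for scanout */
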
Fresh new GEM handles are always + * returned, even if another GEM handle referring to the same memory object + * already exists on the DRM file description. The caller is responsible for + * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same + * new handle will be returned for multiple planes in case they use the same + * memory object. Planes are valid until one has a zero handle -- this can be + * used to compute the number of planes. + * + * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid + * until one has a zero &drm_mode_fb_cmd2.pitches. + * + * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set + * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the + * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier. + * + * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space + * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately + * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not + * double-close handles which are specified multiple times in the array. + */ + #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) + + #define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd) + + /** + * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer. + * + * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL + * argument is a framebuffer object ID. + * + * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable + * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept + * alive. When the plane no longer uses the framebuffer (because the + * framebuffer is replaced with another one, or the plane is disabled), the + * framebuffer is cleaned up. + * + * This is useful to implement flicker-free transitions between two processes. + * + * Depending on the threat model, user-space may want to ensure that the + * framebuffer doesn't expose any sensitive user information: closed + * framebuffers attached to a plane can be read back by the next DRM master. + */ + #define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb) + + /** + * DRM_IOCTL_SET_CLIENT_NAME - Attach a name to a drm_file + * + * Having a name allows for easier tracking and debugging. + * The length of the name (without null ending char) must be + * <= DRM_CLIENT_NAME_MAX_LEN. + * The call will fail if the name contains whitespaces or non-printable chars. + */ + #define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name) + + /* + * Device specific ioctls should only be in their respective headers + * The device specific ioctl range is from 0x40 to 0x9f. + * Generic IOCTLS restart at 0xA0. + * + * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and + * drmCommandReadWrite(). + */ + #define DRM_COMMAND_BASE 0x40 + #define DRM_COMMAND_END 0xA0 + + /** + * struct drm_event - Header for DRM events + * @type: event type. + * @length: total number of payload bytes (including header). + * + * This struct is a header for events written back to user-space on the DRM FD. + * A read on the DRM FD will always only return complete events: e.g. if the + * read buffer is 100 bytes large and there are two 64 byte events pending, + * only one will be returned. + * + * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and + * up are chipset specific. 
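
Reading events back works as the struct drm_event header just below describes: read() returns only whole events, and each event's length field (header included) is the stride to the next one. A sketch assuming <unistd.h>:

    char buf[4096];
    ssize_t len = read(fd, buf, sizeof(buf));
    ssize_t off = 0;
    while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
        const struct drm_event *e = (const struct drm_event *)(buf + off);
        if (e->type == DRM_EVENT_VBLANK || e->type == DRM_EVENT_FLIP_COMPLETE) {
            const struct drm_event_vblank *vb = (const struct drm_event_vblank *)e;
            /* vb->user_data, vb->sequence, vb->crtc_id, vb->tv_sec/tv_usec */
            (void)vb;
        }
        off += e->length;                        /* length includes the header */
    }
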
Generic DRM events include &DRM_EVENT_VBLANK, + * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE. + */ + struct drm_event { + __u32 type; + __u32 length; + }; + + /** + * DRM_EVENT_VBLANK - vertical blanking event + * + * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the + * &_DRM_VBLANK_EVENT flag set. + * + * The event payload is a struct drm_event_vblank. + */ + #define DRM_EVENT_VBLANK 0x01 + /** + * DRM_EVENT_FLIP_COMPLETE - page-flip completion event + * + * This event is sent in response to an atomic commit or legacy page-flip with + * the &DRM_MODE_PAGE_FLIP_EVENT flag set. + * + * The event payload is a struct drm_event_vblank. + */ + #define DRM_EVENT_FLIP_COMPLETE 0x02 + /** + * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event + * + * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE. + * + * The event payload is a struct drm_event_crtc_sequence. + */ + #define DRM_EVENT_CRTC_SEQUENCE 0x03 + + struct drm_event_vblank { + struct drm_event base; + __u64 user_data; + __u32 tv_sec; + __u32 tv_usec; + __u32 sequence; + __u32 crtc_id; /* 0 on older kernels that do not support this */ + }; + + /* Event delivered at sequence. Time stamp marks when the first pixel + * of the refresh cycle leaves the display engine for the display + */ + struct drm_event_crtc_sequence { + struct drm_event base; + __u64 user_data; + __s64 time_ns; + __u64 sequence; + }; + + /* typedef area */ + typedef struct drm_clip_rect drm_clip_rect_t; + typedef struct drm_drawable_info drm_drawable_info_t; + typedef struct drm_tex_region drm_tex_region_t; + typedef struct drm_hw_lock drm_hw_lock_t; + typedef struct drm_version drm_version_t; + typedef struct drm_unique drm_unique_t; + typedef struct drm_list drm_list_t; + typedef struct drm_block drm_block_t; + typedef struct drm_control drm_control_t; + typedef enum drm_map_type drm_map_type_t; + typedef enum drm_map_flags drm_map_flags_t; + typedef struct drm_ctx_priv_map drm_ctx_priv_map_t; + typedef struct drm_map drm_map_t; + typedef struct drm_client drm_client_t; + typedef enum drm_stat_type drm_stat_type_t; + typedef struct drm_stats drm_stats_t; + typedef enum drm_lock_flags drm_lock_flags_t; + typedef struct drm_lock drm_lock_t; + typedef enum drm_dma_flags drm_dma_flags_t; + typedef struct drm_buf_desc drm_buf_desc_t; + typedef struct drm_buf_info drm_buf_info_t; + typedef struct drm_buf_free drm_buf_free_t; + typedef struct drm_buf_pub drm_buf_pub_t; + typedef struct drm_buf_map drm_buf_map_t; + typedef struct drm_dma drm_dma_t; + typedef union drm_wait_vblank drm_wait_vblank_t; + typedef struct drm_agp_mode drm_agp_mode_t; + typedef enum drm_ctx_flags drm_ctx_flags_t; + typedef struct drm_ctx drm_ctx_t; + typedef struct drm_ctx_res drm_ctx_res_t; + typedef struct drm_draw drm_draw_t; + typedef struct drm_update_draw drm_update_draw_t; + typedef struct drm_auth drm_auth_t; + typedef struct drm_irq_busid drm_irq_busid_t; + typedef enum drm_vblank_seq_type drm_vblank_seq_type_t; + + typedef struct drm_agp_buffer drm_agp_buffer_t; + typedef struct drm_agp_binding drm_agp_binding_t; + typedef struct drm_agp_info drm_agp_info_t; + typedef struct drm_scatter_gather drm_scatter_gather_t; + typedef struct drm_set_version drm_set_version_t; + + #if defined(__cplusplus) + } + #endif + + #endif + \ No newline at end of file diff --git a/third_party/uapi-eudebug/drm/drm_mode.h b/third_party/uapi-eudebug/drm/drm_mode.h index c082810c08..da3fade41f 100644 --- a/third_party/uapi-eudebug/drm/drm_mode.h +++ 
b/third_party/uapi-eudebug/drm/drm_mode.h @@ -24,1339 +24,1340 @@ * IN THE SOFTWARE. */ -#ifndef _DRM_MODE_H -#define _DRM_MODE_H - -#include "drm.h" - -#if defined(__cplusplus) -extern "C" { -#endif - -/** - * DOC: overview - * - * DRM exposes many UAPI and structure definitions to have a consistent - * and standardized interface with users. - * Userspace can refer to these structure definitions and UAPI formats - * to communicate to drivers. - */ - -#define DRM_CONNECTOR_NAME_LEN 32 -#define DRM_DISPLAY_MODE_LEN 32 -#define DRM_PROP_NAME_LEN 32 - -#define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */ -#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */ -#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */ -#define DRM_MODE_TYPE_PREFERRED (1<<3) -#define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */ -#define DRM_MODE_TYPE_USERDEF (1<<5) -#define DRM_MODE_TYPE_DRIVER (1<<6) - -#define DRM_MODE_TYPE_ALL (DRM_MODE_TYPE_PREFERRED | \ - DRM_MODE_TYPE_USERDEF | \ - DRM_MODE_TYPE_DRIVER) - -/* Video mode flags */ -/* bit compatible with the xrandr RR_ definitions (bits 0-13) - * - * ABI warning: Existing userspace really expects - * the mode flags to match the xrandr definitions. Any - * changes that don't match the xrandr definitions will - * likely need a new client cap or some other mechanism - * to avoid breaking existing userspace. This includes - * allocating new flags in the previously unused bits! - */ -#define DRM_MODE_FLAG_PHSYNC (1<<0) -#define DRM_MODE_FLAG_NHSYNC (1<<1) -#define DRM_MODE_FLAG_PVSYNC (1<<2) -#define DRM_MODE_FLAG_NVSYNC (1<<3) -#define DRM_MODE_FLAG_INTERLACE (1<<4) -#define DRM_MODE_FLAG_DBLSCAN (1<<5) -#define DRM_MODE_FLAG_CSYNC (1<<6) -#define DRM_MODE_FLAG_PCSYNC (1<<7) -#define DRM_MODE_FLAG_NCSYNC (1<<8) -#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ -#define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */ -#define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */ -#define DRM_MODE_FLAG_DBLCLK (1<<12) -#define DRM_MODE_FLAG_CLKDIV2 (1<<13) - /* - * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX - * (define not exposed to user space). + #ifndef _DRM_MODE_H + #define _DRM_MODE_H + + #include "drm.h" + + #if defined(__cplusplus) + extern "C" { + #endif + + /** + * DOC: overview + * + * DRM exposes many UAPI and structure definitions to have a consistent + * and standardized interface with users. + * Userspace can refer to these structure definitions and UAPI formats + * to communicate to drivers. 
*/ -#define DRM_MODE_FLAG_3D_MASK (0x1f<<14) -#define DRM_MODE_FLAG_3D_NONE (0<<14) -#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14) -#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14) -#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14) -#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14) -#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14) -#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14) -#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14) -#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14) - -/* Picture aspect ratio options */ -#define DRM_MODE_PICTURE_ASPECT_NONE 0 -#define DRM_MODE_PICTURE_ASPECT_4_3 1 -#define DRM_MODE_PICTURE_ASPECT_16_9 2 -#define DRM_MODE_PICTURE_ASPECT_64_27 3 -#define DRM_MODE_PICTURE_ASPECT_256_135 4 - -/* Content type options */ -#define DRM_MODE_CONTENT_TYPE_NO_DATA 0 -#define DRM_MODE_CONTENT_TYPE_GRAPHICS 1 -#define DRM_MODE_CONTENT_TYPE_PHOTO 2 -#define DRM_MODE_CONTENT_TYPE_CINEMA 3 -#define DRM_MODE_CONTENT_TYPE_GAME 4 - -/* Aspect ratio flag bitmask (4 bits 22:19) */ -#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19) -#define DRM_MODE_FLAG_PIC_AR_NONE \ - (DRM_MODE_PICTURE_ASPECT_NONE<<19) -#define DRM_MODE_FLAG_PIC_AR_4_3 \ - (DRM_MODE_PICTURE_ASPECT_4_3<<19) -#define DRM_MODE_FLAG_PIC_AR_16_9 \ - (DRM_MODE_PICTURE_ASPECT_16_9<<19) -#define DRM_MODE_FLAG_PIC_AR_64_27 \ - (DRM_MODE_PICTURE_ASPECT_64_27<<19) -#define DRM_MODE_FLAG_PIC_AR_256_135 \ - (DRM_MODE_PICTURE_ASPECT_256_135<<19) - -#define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \ - DRM_MODE_FLAG_NHSYNC | \ - DRM_MODE_FLAG_PVSYNC | \ - DRM_MODE_FLAG_NVSYNC | \ - DRM_MODE_FLAG_INTERLACE | \ - DRM_MODE_FLAG_DBLSCAN | \ - DRM_MODE_FLAG_CSYNC | \ - DRM_MODE_FLAG_PCSYNC | \ - DRM_MODE_FLAG_NCSYNC | \ - DRM_MODE_FLAG_HSKEW | \ - DRM_MODE_FLAG_DBLCLK | \ - DRM_MODE_FLAG_CLKDIV2 | \ - DRM_MODE_FLAG_3D_MASK) - -/* DPMS flags */ -/* bit compatible with the xorg definitions. */ -#define DRM_MODE_DPMS_ON 0 -#define DRM_MODE_DPMS_STANDBY 1 -#define DRM_MODE_DPMS_SUSPEND 2 -#define DRM_MODE_DPMS_OFF 3 - -/* Scaling mode options */ -#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or - software can still scale) */ -#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */ -#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ -#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ - -/* Dithering mode options */ -#define DRM_MODE_DITHERING_OFF 0 -#define DRM_MODE_DITHERING_ON 1 -#define DRM_MODE_DITHERING_AUTO 2 - -/* Dirty info options */ -#define DRM_MODE_DIRTY_OFF 0 -#define DRM_MODE_DIRTY_ON 1 -#define DRM_MODE_DIRTY_ANNOTATE 2 - -/* Link Status options */ -#define DRM_MODE_LINK_STATUS_GOOD 0 -#define DRM_MODE_LINK_STATUS_BAD 1 - -/* - * DRM_MODE_ROTATE_ - * - * Signals that a drm plane is been rotated degrees in counter - * clockwise direction. - * - * This define is provided as a convenience, looking up the property id - * using the name->prop id lookup is the preferred method. - */ -#define DRM_MODE_ROTATE_0 (1<<0) -#define DRM_MODE_ROTATE_90 (1<<1) -#define DRM_MODE_ROTATE_180 (1<<2) -#define DRM_MODE_ROTATE_270 (1<<3) - -/* - * DRM_MODE_ROTATE_MASK - * - * Bitmask used to look for drm plane rotations. - */ -#define DRM_MODE_ROTATE_MASK (\ - DRM_MODE_ROTATE_0 | \ - DRM_MODE_ROTATE_90 | \ - DRM_MODE_ROTATE_180 | \ - DRM_MODE_ROTATE_270) - -/* - * DRM_MODE_REFLECT_ - * - * Signals that the contents of a drm plane is reflected along the axis, - * in the same way as mirroring. - * See kerneldoc chapter "Plane Composition Properties" for more details. 
- * - * This define is provided as a convenience, looking up the property id - * using the name->prop id lookup is the preferred method. - */ -#define DRM_MODE_REFLECT_X (1<<4) -#define DRM_MODE_REFLECT_Y (1<<5) - -/* - * DRM_MODE_REFLECT_MASK - * - * Bitmask used to look for drm plane reflections. - */ -#define DRM_MODE_REFLECT_MASK (\ - DRM_MODE_REFLECT_X | \ - DRM_MODE_REFLECT_Y) - -/* Content Protection Flags */ -#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0 -#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1 -#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2 - -/** - * struct drm_mode_modeinfo - Display mode information. - * @clock: pixel clock in kHz - * @hdisplay: horizontal display size - * @hsync_start: horizontal sync start - * @hsync_end: horizontal sync end - * @htotal: horizontal total size - * @hskew: horizontal skew - * @vdisplay: vertical display size - * @vsync_start: vertical sync start - * @vsync_end: vertical sync end - * @vtotal: vertical total size - * @vscan: vertical scan - * @vrefresh: approximate vertical refresh rate in Hz - * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines - * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines - * @name: string describing the mode resolution - * - * This is the user-space API display mode information structure. For the - * kernel version see struct drm_display_mode. - */ -struct drm_mode_modeinfo { - __u32 clock; - __u16 hdisplay; - __u16 hsync_start; - __u16 hsync_end; - __u16 htotal; - __u16 hskew; - __u16 vdisplay; - __u16 vsync_start; - __u16 vsync_end; - __u16 vtotal; - __u16 vscan; - - __u32 vrefresh; - - __u32 flags; - __u32 type; - char name[DRM_DISPLAY_MODE_LEN]; -}; - -struct drm_mode_card_res { - __u64 fb_id_ptr; - __u64 crtc_id_ptr; - __u64 connector_id_ptr; - __u64 encoder_id_ptr; - __u32 count_fbs; - __u32 count_crtcs; - __u32 count_connectors; - __u32 count_encoders; - __u32 min_width; - __u32 max_width; - __u32 min_height; - __u32 max_height; -}; - -struct drm_mode_crtc { - __u64 set_connectors_ptr; - __u32 count_connectors; - - __u32 crtc_id; /**< Id */ - __u32 fb_id; /**< Id of framebuffer */ - - __u32 x; /**< x Position on the framebuffer */ - __u32 y; /**< y Position on the framebuffer */ - - __u32 gamma_size; - __u32 mode_valid; - struct drm_mode_modeinfo mode; -}; - -#define DRM_MODE_PRESENT_TOP_FIELD (1<<0) -#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1) - -/* Planes blend with or override other bits on the CRTC */ -struct drm_mode_set_plane { - __u32 plane_id; - __u32 crtc_id; - __u32 fb_id; /* fb object contains surface format type */ - __u32 flags; /* see above flags */ - - /* Signed dest location allows it to be partially off screen */ - __s32 crtc_x; - __s32 crtc_y; - __u32 crtc_w; - __u32 crtc_h; - - /* Source values are 16.16 fixed point */ - __u32 src_x; - __u32 src_y; - __u32 src_h; - __u32 src_w; -}; - -/** - * struct drm_mode_get_plane - Get plane metadata. - * - * Userspace can perform a GETPLANE ioctl to retrieve information about a - * plane. - * - * To retrieve the number of formats supported, set @count_format_types to zero - * and call the ioctl. @count_format_types will be updated with the value. - * - * To retrieve these formats, allocate an array with the memory needed to store - * @count_format_types formats. Point @format_type_ptr to this array and call - * the ioctl again (with @count_format_types still set to the value returned in - * the first ioctl call). 
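
The source coordinates in struct drm_mode_set_plane above are 16.16 fixed point while the CRTC destination is in whole pixels, which is easy to get wrong. A sketch of a full-plane, unscaled update; plane_id, crtc_id and fb_id are placeholders:

    struct drm_mode_set_plane sp;
    memset(&sp, 0, sizeof(sp));
    sp.plane_id = plane_id;
    sp.crtc_id = crtc_id;
    sp.fb_id = fb_id;
    sp.crtc_x = 0;                 /* destination rectangle, integer pixels */
    sp.crtc_y = 0;
    sp.crtc_w = 1920;
    sp.crtc_h = 1080;
    sp.src_x = 0;                  /* source rectangle, 16.16 fixed point */
    sp.src_y = 0;
    sp.src_w = 1920 << 16;
    sp.src_h = 1080 << 16;
    ioctl(fd, DRM_IOCTL_MODE_SETPLANE, &sp);
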
- */ -struct drm_mode_get_plane { - /** - * @plane_id: Object ID of the plane whose information should be - * retrieved. Set by caller. - */ - __u32 plane_id; - - /** @crtc_id: Object ID of the current CRTC. */ - __u32 crtc_id; - /** @fb_id: Object ID of the current fb. */ - __u32 fb_id; - - /** - * @possible_crtcs: Bitmask of CRTC's compatible with the plane. CRTC's - * are created and they receive an index, which corresponds to their - * position in the bitmask. Bit N corresponds to - * :ref:`CRTC index` N. - */ - __u32 possible_crtcs; - /** @gamma_size: Never used. */ - __u32 gamma_size; - - /** @count_format_types: Number of formats. */ - __u32 count_format_types; - /** - * @format_type_ptr: Pointer to ``__u32`` array of formats that are - * supported by the plane. These formats do not require modifiers. - */ - __u64 format_type_ptr; -}; - -struct drm_mode_get_plane_res { - __u64 plane_id_ptr; - __u32 count_planes; -}; - -#define DRM_MODE_ENCODER_NONE 0 -#define DRM_MODE_ENCODER_DAC 1 -#define DRM_MODE_ENCODER_TMDS 2 -#define DRM_MODE_ENCODER_LVDS 3 -#define DRM_MODE_ENCODER_TVDAC 4 -#define DRM_MODE_ENCODER_VIRTUAL 5 -#define DRM_MODE_ENCODER_DSI 6 -#define DRM_MODE_ENCODER_DPMST 7 -#define DRM_MODE_ENCODER_DPI 8 - -struct drm_mode_get_encoder { - __u32 encoder_id; - __u32 encoder_type; - - __u32 crtc_id; /**< Id of crtc */ - - __u32 possible_crtcs; - __u32 possible_clones; -}; - -/* This is for connectors with multiple signal types. */ -/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */ -enum drm_mode_subconnector { - DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */ - DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */ - DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */ - DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I DP */ - DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */ - DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */ - DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */ - DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */ - DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */ - DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */ - DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */ - DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */ - DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */ -}; - -#define DRM_MODE_CONNECTOR_Unknown 0 -#define DRM_MODE_CONNECTOR_VGA 1 -#define DRM_MODE_CONNECTOR_DVII 2 -#define DRM_MODE_CONNECTOR_DVID 3 -#define DRM_MODE_CONNECTOR_DVIA 4 -#define DRM_MODE_CONNECTOR_Composite 5 -#define DRM_MODE_CONNECTOR_SVIDEO 6 -#define DRM_MODE_CONNECTOR_LVDS 7 -#define DRM_MODE_CONNECTOR_Component 8 -#define DRM_MODE_CONNECTOR_9PinDIN 9 -#define DRM_MODE_CONNECTOR_DisplayPort 10 -#define DRM_MODE_CONNECTOR_HDMIA 11 -#define DRM_MODE_CONNECTOR_HDMIB 12 -#define DRM_MODE_CONNECTOR_TV 13 -#define DRM_MODE_CONNECTOR_eDP 14 -#define DRM_MODE_CONNECTOR_VIRTUAL 15 -#define DRM_MODE_CONNECTOR_DSI 16 -#define DRM_MODE_CONNECTOR_DPI 17 -#define DRM_MODE_CONNECTOR_WRITEBACK 18 -#define DRM_MODE_CONNECTOR_SPI 19 -#define DRM_MODE_CONNECTOR_USB 20 - -/** - * struct drm_mode_get_connector - Get connector metadata. - * - * User-space can perform a GETCONNECTOR ioctl to retrieve information about a - * connector. User-space is expected to retrieve encoders, modes and properties - * by performing this ioctl at least twice: the first time to retrieve the - * number of elements, the second time to retrieve the elements themselves. - * - * To retrieve the number of elements, set @count_props and @count_encoders to - * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct - * drm_mode_modeinfo element. 
- * - * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr, - * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and - * @count_encoders to their capacity. - * - * Performing the ioctl only twice may be racy: the number of elements may have - * changed with a hotplug event in-between the two ioctls. User-space is - * expected to retry the last ioctl until the number of elements stabilizes. - * The kernel won't fill any array which doesn't have the expected length. - * - * **Force-probing a connector** - * - * If the @count_modes field is set to zero and the DRM client is the current - * DRM master, the kernel will perform a forced probe on the connector to - * refresh the connector status, modes and EDID. A forced-probe can be slow, - * might cause flickering and the ioctl will block. - * - * User-space needs to force-probe connectors to ensure their metadata is - * up-to-date at startup and after receiving a hot-plug event. User-space - * may perform a forced-probe when the user explicitly requests it. User-space - * shouldn't perform a forced-probe in other situations. - */ -struct drm_mode_get_connector { - /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */ - __u64 encoders_ptr; - /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */ - __u64 modes_ptr; - /** @props_ptr: Pointer to ``__u32`` array of property IDs. */ - __u64 props_ptr; - /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */ - __u64 prop_values_ptr; - - /** @count_modes: Number of modes. */ - __u32 count_modes; - /** @count_props: Number of properties. */ - __u32 count_props; - /** @count_encoders: Number of encoders. */ - __u32 count_encoders; - - /** @encoder_id: Object ID of the current encoder. */ - __u32 encoder_id; - /** @connector_id: Object ID of the connector. */ - __u32 connector_id; - /** - * @connector_type: Type of the connector. - * - * See DRM_MODE_CONNECTOR_* defines. - */ - __u32 connector_type; - /** - * @connector_type_id: Type-specific connector number. - * - * This is not an object ID. This is a per-type connector number. Each - * (type, type_id) combination is unique across all connectors of a DRM - * device. - * - * The (type, type_id) combination is not a stable identifier: the - * type_id can change depending on the driver probe order. - */ - __u32 connector_type_id; - - /** - * @connection: Status of the connector. - * - * See enum drm_connector_status. - */ - __u32 connection; - /** @mm_width: Width of the connected sink in millimeters. */ - __u32 mm_width; - /** @mm_height: Height of the connected sink in millimeters. */ - __u32 mm_height; - /** - * @subpixel: Subpixel order of the connected sink. - * - * See enum subpixel_order. - */ - __u32 subpixel; - - /** @pad: Padding, must be zero. */ - __u32 pad; -}; - -#define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */ -#define DRM_MODE_PROP_RANGE (1<<1) -#define DRM_MODE_PROP_IMMUTABLE (1<<2) -#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ -#define DRM_MODE_PROP_BLOB (1<<4) -#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ - -/* non-extended types: legacy bitmask, one bit per type: */ -#define DRM_MODE_PROP_LEGACY_TYPE ( \ - DRM_MODE_PROP_RANGE | \ - DRM_MODE_PROP_ENUM | \ - DRM_MODE_PROP_BLOB | \ - DRM_MODE_PROP_BITMASK) - -/* extended-types: rather than continue to consume a bit per type, - * grab a chunk of the bits to use as integer type id. 
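
The GETCONNECTOR contract above (first call for counts, second call for data, retry if a hotplug changed the counts) looks roughly like this; the retry loop is omitted, connector_id is a placeholder (e.g. from GETRESOURCES), and calloc assumes <stdlib.h>:

    struct drm_mode_get_connector conn;
    struct drm_mode_modeinfo one_mode;
    memset(&conn, 0, sizeof(conn));
    conn.connector_id = connector_id;
    conn.count_modes = 1;                        /* non-zero count avoids a forced probe */
    conn.modes_ptr = (__u64)(uintptr_t)&one_mode;
    if (ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn) != 0)
        return -1;                               /* counts are now filled in */

    struct drm_mode_modeinfo *modes = calloc(conn.count_modes, sizeof(*modes));
    __u32 *encoders = calloc(conn.count_encoders, sizeof(*encoders));
    __u32 *props = calloc(conn.count_props, sizeof(*props));
    __u64 *values = calloc(conn.count_props, sizeof(*values));
    conn.modes_ptr = (__u64)(uintptr_t)modes;
    conn.encoders_ptr = (__u64)(uintptr_t)encoders;
    conn.props_ptr = (__u64)(uintptr_t)props;
    conn.prop_values_ptr = (__u64)(uintptr_t)values;
    ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn); /* real code re-checks the counts and retries */
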
- */ -#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0 -#define DRM_MODE_PROP_TYPE(n) ((n) << 6) -#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1) -#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2) - -/* the PROP_ATOMIC flag is used to hide properties from userspace that - * is not aware of atomic properties. This is mostly to work around - * older userspace (DDX drivers) that read/write each prop they find, - * without being aware that this could be triggering a lengthy modeset. - */ -#define DRM_MODE_PROP_ATOMIC 0x80000000 - -/** - * struct drm_mode_property_enum - Description for an enum/bitfield entry. - * @value: numeric value for this enum entry. - * @name: symbolic name for this enum entry. - * - * See struct drm_property_enum for details. - */ -struct drm_mode_property_enum { - __u64 value; - char name[DRM_PROP_NAME_LEN]; -}; - -/** - * struct drm_mode_get_property - Get property metadata. - * - * User-space can perform a GETPROPERTY ioctl to retrieve information about a - * property. The same property may be attached to multiple objects, see - * "Modeset Base Object Abstraction". - * - * The meaning of the @values_ptr field changes depending on the property type. - * See &drm_property.flags for more details. - * - * The @enum_blob_ptr and @count_enum_blobs fields are only meaningful when the - * property has the type &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK. For - * backwards compatibility, the kernel will always set @count_enum_blobs to - * zero when the property has the type &DRM_MODE_PROP_BLOB. User-space must - * ignore these two fields if the property has a different type. - * - * User-space is expected to retrieve values and enums by performing this ioctl - * at least twice: the first time to retrieve the number of elements, the - * second time to retrieve the elements themselves. - * - * To retrieve the number of elements, set @count_values and @count_enum_blobs - * to zero, then call the ioctl. @count_values will be updated with the number - * of elements. If the property has the type &DRM_MODE_PROP_ENUM or - * &DRM_MODE_PROP_BITMASK, @count_enum_blobs will be updated as well. - * - * To retrieve the elements themselves, allocate an array for @values_ptr and - * set @count_values to its capacity. If the property has the type - * &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK, allocate an array for - * @enum_blob_ptr and set @count_enum_blobs to its capacity. Calling the ioctl - * again will fill the arrays. - */ -struct drm_mode_get_property { - /** @values_ptr: Pointer to a ``__u64`` array. */ - __u64 values_ptr; - /** @enum_blob_ptr: Pointer to a struct drm_mode_property_enum array. */ - __u64 enum_blob_ptr; - - /** - * @prop_id: Object ID of the property which should be retrieved. Set - * by the caller. - */ - __u32 prop_id; - /** - * @flags: ``DRM_MODE_PROP_*`` bitfield. See &drm_property.flags for - * a definition of the flags. - */ - __u32 flags; - /** - * @name: Symbolic property name. User-space should use this field to - * recognize properties. - */ - char name[DRM_PROP_NAME_LEN]; - - /** @count_values: Number of elements in @values_ptr. */ - __u32 count_values; - /** @count_enum_blobs: Number of elements in @enum_blob_ptr. 
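
Property metadata follows the same two-pass pattern, with the extra rule that the enum array is only meaningful for ENUM/BITMASK properties. A sketch, prop_id being a placeholder taken from a props array returned elsewhere:

    struct drm_mode_get_property prop;
    memset(&prop, 0, sizeof(prop));
    prop.prop_id = prop_id;
    if (ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop) != 0)   /* first pass: counts only */
        return -1;

    __u64 *values = calloc(prop.count_values, sizeof(*values));
    struct drm_mode_property_enum *enums = NULL;
    if (prop.flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK))
        enums = calloc(prop.count_enum_blobs, sizeof(*enums));

    prop.values_ptr = (__u64)(uintptr_t)values;
    prop.enum_blob_ptr = (__u64)(uintptr_t)enums;
    ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);            /* second pass fills the arrays */
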
*/ - __u32 count_enum_blobs; -}; - -struct drm_mode_connector_set_property { - __u64 value; - __u32 prop_id; - __u32 connector_id; -}; - -#define DRM_MODE_OBJECT_CRTC 0xcccccccc -#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 -#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 -#define DRM_MODE_OBJECT_MODE 0xdededede -#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 -#define DRM_MODE_OBJECT_FB 0xfbfbfbfb -#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb -#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee -#define DRM_MODE_OBJECT_ANY 0 - -struct drm_mode_obj_get_properties { - __u64 props_ptr; - __u64 prop_values_ptr; - __u32 count_props; - __u32 obj_id; - __u32 obj_type; -}; - -struct drm_mode_obj_set_property { - __u64 value; - __u32 prop_id; - __u32 obj_id; - __u32 obj_type; -}; - -struct drm_mode_get_blob { - __u32 blob_id; - __u32 length; - __u64 data; -}; - -struct drm_mode_fb_cmd { - __u32 fb_id; - __u32 width; - __u32 height; - __u32 pitch; - __u32 bpp; - __u32 depth; - /* driver specific handle */ - __u32 handle; -}; - -#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ -#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */ - -/** - * struct drm_mode_fb_cmd2 - Frame-buffer metadata. - * - * This struct holds frame-buffer metadata. There are two ways to use it: - * - * - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2 - * ioctl to register a new frame-buffer. The new frame-buffer object ID will - * be set by the kernel in @fb_id. - * - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to - * fetch metadata about an existing frame-buffer. - * - * In case of planar formats, this struct allows up to 4 buffer objects with - * offsets and pitches per plane. The pitch and offset order are dictated by - * the format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as: - * - * YUV 4:2:0 image with a plane of 8-bit Y samples followed by an - * interleaved U/V plane containing 8-bit 2x2 subsampled colour difference - * samples. - * - * So it would consist of a Y plane at ``offsets[0]`` and a UV plane at - * ``offsets[1]``. - * - * To accommodate tiled, compressed, etc formats, a modifier can be specified. - * For more information see the "Format Modifiers" section. Note that even - * though it looks like we have a modifier per-plane, we in fact do not. The - * modifier for each plane must be identical. Thus all combinations of - * different data layouts for multi-plane formats must be enumerated as - * separate modifiers. - * - * All of the entries in @handles, @pitches, @offsets and @modifier must be - * zero when unused. Warning, for @offsets and @modifier zero can't be used to - * figure out whether the entry is used or not since it's a valid value (a zero - * offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR). - */ -struct drm_mode_fb_cmd2 { - /** @fb_id: Object ID of the frame-buffer. */ - __u32 fb_id; - /** @width: Width of the frame-buffer. */ - __u32 width; - /** @height: Height of the frame-buffer. */ - __u32 height; - /** - * @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in - * ``drm_fourcc.h``. - */ - __u32 pixel_format; - /** - * @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and - * &DRM_MODE_FB_MODIFIERS). - */ - __u32 flags; - - /** - * @handles: GEM buffer handle, one per plane. Set to 0 if the plane is - * unused. The same handle can be used for multiple planes. - */ - __u32 handles[4]; - /** @pitches: Pitch (aka. stride) in bytes, one per plane. 
*/ - __u32 pitches[4]; - /** @offsets: Offset into the buffer in bytes, one per plane. */ - __u32 offsets[4]; - /** - * @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*`` - * constants in ``drm_fourcc.h``. All planes must use the same - * modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags. - */ - __u64 modifier[4]; -}; - -#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 -#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 -#define DRM_MODE_FB_DIRTY_FLAGS 0x03 - -#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 - -/* - * Mark a region of a framebuffer as dirty. - * - * Some hardware does not automatically update display contents - * as a hardware or software draw to a framebuffer. This ioctl - * allows userspace to tell the kernel and the hardware what - * regions of the framebuffer have changed. - * - * The kernel or hardware is free to update more then just the - * region specified by the clip rects. The kernel or hardware - * may also delay and/or coalesce several calls to dirty into a - * single update. - * - * Userspace may annotate the updates, the annotates are a - * promise made by the caller that the change is either a copy - * of pixels or a fill of a single color in the region specified. - * - * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then - * the number of updated regions are half of num_clips given, - * where the clip rects are paired in src and dst. The width and - * height of each one of the pairs must match. - * - * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller - * promises that the region specified of the clip rects is filled - * completely with a single color as given in the color argument. - */ - -struct drm_mode_fb_dirty_cmd { - __u32 fb_id; - __u32 flags; - __u32 color; - __u32 num_clips; - __u64 clips_ptr; -}; - -struct drm_mode_mode_cmd { - __u32 connector_id; - struct drm_mode_modeinfo mode; -}; - -#define DRM_MODE_CURSOR_BO 0x01 -#define DRM_MODE_CURSOR_MOVE 0x02 -#define DRM_MODE_CURSOR_FLAGS 0x03 - -/* - * depending on the value in flags different members are used. - * - * CURSOR_BO uses - * crtc_id - * width - * height - * handle - if 0 turns the cursor off - * - * CURSOR_MOVE uses - * crtc_id - * x - * y - */ -struct drm_mode_cursor { - __u32 flags; - __u32 crtc_id; - __s32 x; - __s32 y; - __u32 width; - __u32 height; - /* driver specific handle */ - __u32 handle; -}; - -struct drm_mode_cursor2 { - __u32 flags; - __u32 crtc_id; - __s32 x; - __s32 y; - __u32 width; - __u32 height; - /* driver specific handle */ - __u32 handle; - __s32 hot_x; - __s32 hot_y; -}; - -struct drm_mode_crtc_lut { - __u32 crtc_id; - __u32 gamma_size; - - /* pointers to arrays */ - __u64 red; - __u64 green; - __u64 blue; -}; - -struct drm_color_ctm { - /* - * Conversion matrix in S31.32 sign-magnitude - * (not two's complement!) format. - * - * out matrix in - * |R| |0 1 2| |R| - * |G| = |3 4 5| x |G| - * |B| |6 7 8| |B| - */ - __u64 matrix[9]; -}; - -struct drm_color_lut { - /* - * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and - * 0xffff == 1.0. - */ - __u16 red; - __u16 green; - __u16 blue; - __u16 reserved; -}; - -/** - * struct drm_plane_size_hint - Plane size hints - * @width: The width of the plane in pixel - * @height: The height of the plane in pixel - * - * The plane SIZE_HINTS property blob contains an - * array of struct drm_plane_size_hint. - */ -struct drm_plane_size_hint { - __u16 width; - __u16 height; -}; - -/** - * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data. 
- * - * HDR Metadata Infoframe as per CTA 861.G spec. This is expected - * to match exactly with the spec. - * - * Userspace is expected to pass the metadata information as per - * the format described in this structure. - */ -struct hdr_metadata_infoframe { - /** - * @eotf: Electro-Optical Transfer Function (EOTF) - * used in the stream. - */ - __u8 eotf; - /** - * @metadata_type: Static_Metadata_Descriptor_ID. - */ - __u8 metadata_type; - /** - * @display_primaries: Color Primaries of the Data. - * These are coded as unsigned 16-bit values in units of - * 0.00002, where 0x0000 represents zero and 0xC350 - * represents 1.0000. - * @display_primaries.x: X coordinate of color primary. - * @display_primaries.y: Y coordinate of color primary. - */ - struct { - __u16 x, y; - } display_primaries[3]; - /** - * @white_point: White Point of Colorspace Data. - * These are coded as unsigned 16-bit values in units of - * 0.00002, where 0x0000 represents zero and 0xC350 - * represents 1.0000. - * @white_point.x: X coordinate of whitepoint of color primary. - * @white_point.y: Y coordinate of whitepoint of color primary. - */ - struct { - __u16 x, y; - } white_point; - /** - * @max_display_mastering_luminance: Max Mastering Display Luminance. - * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, - * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. - */ - __u16 max_display_mastering_luminance; - /** - * @min_display_mastering_luminance: Min Mastering Display Luminance. - * This value is coded as an unsigned 16-bit value in units of - * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF - * represents 6.5535 cd/m2. - */ - __u16 min_display_mastering_luminance; - /** - * @max_cll: Max Content Light Level. - * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, - * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. - */ - __u16 max_cll; - /** - * @max_fall: Max Frame Average Light Level. - * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, - * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. - */ - __u16 max_fall; -}; - -/** - * struct hdr_output_metadata - HDR output metadata - * - * Metadata Information to be passed from userspace - */ -struct hdr_output_metadata { - /** - * @metadata_type: Static_Metadata_Descriptor_ID. - */ - __u32 metadata_type; - /** - * @hdmi_metadata_type1: HDR Metadata Infoframe. - */ - union { - struct hdr_metadata_infoframe hdmi_metadata_type1; - }; -}; - -/** - * DRM_MODE_PAGE_FLIP_EVENT - * - * Request that the kernel sends back a vblank event (see - * struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the - * page-flip is done. - */ -#define DRM_MODE_PAGE_FLIP_EVENT 0x01 -/** - * DRM_MODE_PAGE_FLIP_ASYNC - * - * Request that the page-flip is performed as soon as possible, ie. with no - * delay due to waiting for vblank. This may cause tearing to be visible on - * the screen. - * - * When used with atomic uAPI, the driver will return an error if the hardware - * doesn't support performing an asynchronous page-flip for this update. - * User-space should handle this, e.g. by falling back to a regular page-flip. - * - * Note, some hardware might need to perform one last synchronous page-flip - * before being able to switch to asynchronous page-flips. As an exception, - * the driver will return success even though that first page-flip is not - * asynchronous. 
- */ -#define DRM_MODE_PAGE_FLIP_ASYNC 0x02 -#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4 -#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8 -#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \ - DRM_MODE_PAGE_FLIP_TARGET_RELATIVE) -/** - * DRM_MODE_PAGE_FLIP_FLAGS - * - * Bitmask of flags suitable for &drm_mode_crtc_page_flip_target.flags. - */ -#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \ - DRM_MODE_PAGE_FLIP_ASYNC | \ - DRM_MODE_PAGE_FLIP_TARGET) - -/* - * Request a page flip on the specified crtc. - * - * This ioctl will ask KMS to schedule a page flip for the specified - * crtc. Once any pending rendering targeting the specified fb (as of - * ioctl time) has completed, the crtc will be reprogrammed to display - * that fb after the next vertical refresh. The ioctl returns - * immediately, but subsequent rendering to the current fb will block - * in the execbuffer ioctl until the page flip happens. If a page - * flip is already pending as the ioctl is called, EBUSY will be - * returned. - * - * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank - * event (see drm.h: struct drm_event_vblank) when the page flip is - * done. The user_data field passed in with this ioctl will be - * returned as the user_data field in the vblank event struct. - * - * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen - * 'as soon as possible', meaning that it not delay waiting for vblank. - * This may cause tearing on the screen. - * - * The reserved field must be zero. - */ - -struct drm_mode_crtc_page_flip { - __u32 crtc_id; - __u32 fb_id; - __u32 flags; - __u32 reserved; - __u64 user_data; -}; - -/* - * Request a page flip on the specified crtc. - * - * Same as struct drm_mode_crtc_page_flip, but supports new flags and - * re-purposes the reserved field: - * - * The sequence field must be zero unless either of the - * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When - * the ABSOLUTE flag is specified, the sequence field denotes the absolute - * vblank sequence when the flip should take effect. When the RELATIVE - * flag is specified, the sequence field denotes the relative (to the - * current one when the ioctl is called) vblank sequence when the flip - * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to - * make sure the vblank sequence before the target one has passed before - * calling this ioctl. The purpose of the - * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify - * the target for when code dealing with a page flip runs during a - * vertical blank period. - */ - -struct drm_mode_crtc_page_flip_target { - __u32 crtc_id; - __u32 fb_id; - __u32 flags; - __u32 sequence; - __u64 user_data; -}; - -/** - * struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout. - * @height: buffer height in pixels - * @width: buffer width in pixels - * @bpp: bits per pixel - * @flags: must be zero - * @handle: buffer object handle - * @pitch: number of bytes between two consecutive lines - * @size: size of the whole buffer in bytes - * - * User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds, - * the kernel fills @handle, @pitch and @size. - */ -struct drm_mode_create_dumb { - __u32 height; - __u32 width; - __u32 bpp; - __u32 flags; - - __u32 handle; - __u32 pitch; - __u64 size; -}; - -/* set up for mmap of a dumb scanout buffer */ -struct drm_mode_map_dumb { - /** Handle for the object being mapped. 
*/ - __u32 handle; - __u32 pad; - /** - * Fake offset to use for subsequent mmap call - * - * This is a fixed-size type for 32/64 compatibility. - */ - __u64 offset; -}; - -struct drm_mode_destroy_dumb { - __u32 handle; -}; - -/** - * DRM_MODE_ATOMIC_TEST_ONLY - * - * Do not apply the atomic commit, instead check whether the hardware supports - * this configuration. - * - * See &drm_mode_config_funcs.atomic_check for more details on test-only - * commits. - */ -#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100 -/** - * DRM_MODE_ATOMIC_NONBLOCK - * - * Do not block while applying the atomic commit. The &DRM_IOCTL_MODE_ATOMIC - * IOCTL returns immediately instead of waiting for the changes to be applied - * in hardware. Note, the driver will still check that the update can be - * applied before retuning. - */ -#define DRM_MODE_ATOMIC_NONBLOCK 0x0200 -/** - * DRM_MODE_ATOMIC_ALLOW_MODESET - * - * Allow the update to result in temporary or transient visible artifacts while - * the update is being applied. Applying the update may also take significantly - * more time than a page flip. All visual artifacts will disappear by the time - * the update is completed, as signalled through the vblank event's timestamp - * (see struct drm_event_vblank). - * - * This flag must be set when the KMS update might cause visible artifacts. - * Without this flag such KMS update will return a EINVAL error. What kind of - * update may cause visible artifacts depends on the driver and the hardware. - * User-space that needs to know beforehand if an update might cause visible - * artifacts can use &DRM_MODE_ATOMIC_TEST_ONLY without - * &DRM_MODE_ATOMIC_ALLOW_MODESET to see if it fails. - * - * To the best of the driver's knowledge, visual artifacts are guaranteed to - * not appear when this flag is not set. Some sinks might display visual - * artifacts outside of the driver's control. - */ -#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400 - -/** - * DRM_MODE_ATOMIC_FLAGS - * - * Bitfield of flags accepted by the &DRM_IOCTL_MODE_ATOMIC IOCTL in - * &drm_mode_atomic.flags. - */ -#define DRM_MODE_ATOMIC_FLAGS (\ - DRM_MODE_PAGE_FLIP_EVENT |\ - DRM_MODE_PAGE_FLIP_ASYNC |\ - DRM_MODE_ATOMIC_TEST_ONLY |\ - DRM_MODE_ATOMIC_NONBLOCK |\ - DRM_MODE_ATOMIC_ALLOW_MODESET) - -struct drm_mode_atomic { - __u32 flags; - __u32 count_objs; - __u64 objs_ptr; - __u64 count_props_ptr; - __u64 props_ptr; - __u64 prop_values_ptr; - __u64 reserved; - __u64 user_data; -}; - -struct drm_format_modifier_blob { -#define FORMAT_BLOB_CURRENT 1 - /* Version of this blob format */ - __u32 version; - - /* Flags */ - __u32 flags; - - /* Number of fourcc formats supported */ - __u32 count_formats; - - /* Where in this blob the formats exist (in bytes) */ - __u32 formats_offset; - - /* Number of drm_format_modifiers */ - __u32 count_modifiers; - - /* Where in this blob the modifiers exist (in bytes) */ - __u32 modifiers_offset; - - /* __u32 formats[] */ - /* struct drm_format_modifier modifiers[] */ -}; - -struct drm_format_modifier { - /* Bitmask of formats in get_plane format list this info applies to. The - * offset allows a sliding window of which 64 formats (bits). 
- * - * Some examples: - * In today's world with < 65 formats, and formats 0, and 2 are - * supported - * 0x0000000000000005 - * ^-offset = 0, formats = 5 - * - * If the number formats grew to 128, and formats 98-102 are - * supported with the modifier: - * - * 0x0000007c00000000 0000000000000000 - * ^ - * |__offset = 64, formats = 0x7c00000000 - * - */ - __u64 formats; - __u32 offset; - __u32 pad; - - /* The modifier that applies to the >get_plane format list bitmask. */ - __u64 modifier; -}; - -/** - * struct drm_mode_create_blob - Create New blob property - * - * Create a new 'blob' data property, copying length bytes from data pointer, - * and returning new blob ID. - */ -struct drm_mode_create_blob { - /** @data: Pointer to data to copy. */ - __u64 data; - /** @length: Length of data to copy. */ - __u32 length; - /** @blob_id: Return: new property ID. */ - __u32 blob_id; -}; - -/** - * struct drm_mode_destroy_blob - Destroy user blob - * @blob_id: blob_id to destroy - * - * Destroy a user-created blob property. - * - * User-space can release blobs as soon as they do not need to refer to them by - * their blob object ID. For instance, if you are using a MODE_ID blob in an - * atomic commit and you will not make another commit re-using the same ID, you - * can destroy the blob as soon as the commit has been issued, without waiting - * for it to complete. - */ -struct drm_mode_destroy_blob { - __u32 blob_id; -}; - -/** - * struct drm_mode_create_lease - Create lease - * - * Lease mode resources, creating another drm_master. - * - * The @object_ids array must reference at least one CRTC, one connector and - * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively, - * the lease can be completely empty. - */ -struct drm_mode_create_lease { - /** @object_ids: Pointer to array of object ids (__u32) */ - __u64 object_ids; - /** @object_count: Number of object ids */ - __u32 object_count; - /** @flags: flags for new FD (O_CLOEXEC, etc) */ - __u32 flags; - - /** @lessee_id: Return: unique identifier for lessee. */ - __u32 lessee_id; - /** @fd: Return: file descriptor to new drm_master file */ - __u32 fd; -}; - -/** - * struct drm_mode_list_lessees - List lessees - * - * List lesses from a drm_master. - */ -struct drm_mode_list_lessees { - /** - * @count_lessees: Number of lessees. - * - * On input, provides length of the array. - * On output, provides total number. No - * more than the input number will be written - * back, so two calls can be used to get - * the size and then the data. - */ - __u32 count_lessees; - /** @pad: Padding. */ - __u32 pad; - - /** - * @lessees_ptr: Pointer to lessees. - * - * Pointer to __u64 array of lessee ids - */ - __u64 lessees_ptr; -}; - -/** - * struct drm_mode_get_lease - Get Lease - * - * Get leased objects. - */ -struct drm_mode_get_lease { - /** - * @count_objects: Number of leased objects. - * - * On input, provides length of the array. - * On output, provides total number. No - * more than the input number will be written - * back, so two calls can be used to get - * the size and then the data. - */ - __u32 count_objects; - /** @pad: Padding. */ - __u32 pad; - - /** - * @objects_ptr: Pointer to objects. - * - * Pointer to __u32 array of object ids. - */ - __u64 objects_ptr; -}; - -/** - * struct drm_mode_revoke_lease - Revoke lease - */ -struct drm_mode_revoke_lease { - /** @lessee_id: Unique ID of lessee */ - __u32 lessee_id; -}; - -/** - * struct drm_mode_rect - Two dimensional rectangle. 
- * @x1: Horizontal starting coordinate (inclusive). - * @y1: Vertical starting coordinate (inclusive). - * @x2: Horizontal ending coordinate (exclusive). - * @y2: Vertical ending coordinate (exclusive). - * - * With drm subsystem using struct drm_rect to manage rectangular area this - * export it to user-space. - * - * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS. - */ -struct drm_mode_rect { - __s32 x1; - __s32 y1; - __s32 x2; - __s32 y2; -}; - -/** - * struct drm_mode_closefb - * @fb_id: Framebuffer ID. - * @pad: Must be zero. - */ -struct drm_mode_closefb { - __u32 fb_id; - __u32 pad; -}; - -#if defined(__cplusplus) -} -#endif - -#endif + + #define DRM_CONNECTOR_NAME_LEN 32 + #define DRM_DISPLAY_MODE_LEN 32 + #define DRM_PROP_NAME_LEN 32 + + #define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */ + #define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */ + #define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */ + #define DRM_MODE_TYPE_PREFERRED (1<<3) + #define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */ + #define DRM_MODE_TYPE_USERDEF (1<<5) + #define DRM_MODE_TYPE_DRIVER (1<<6) + + #define DRM_MODE_TYPE_ALL (DRM_MODE_TYPE_PREFERRED | \ + DRM_MODE_TYPE_USERDEF | \ + DRM_MODE_TYPE_DRIVER) + + /* Video mode flags */ + /* bit compatible with the xrandr RR_ definitions (bits 0-13) + * + * ABI warning: Existing userspace really expects + * the mode flags to match the xrandr definitions. Any + * changes that don't match the xrandr definitions will + * likely need a new client cap or some other mechanism + * to avoid breaking existing userspace. This includes + * allocating new flags in the previously unused bits! + */ + #define DRM_MODE_FLAG_PHSYNC (1<<0) + #define DRM_MODE_FLAG_NHSYNC (1<<1) + #define DRM_MODE_FLAG_PVSYNC (1<<2) + #define DRM_MODE_FLAG_NVSYNC (1<<3) + #define DRM_MODE_FLAG_INTERLACE (1<<4) + #define DRM_MODE_FLAG_DBLSCAN (1<<5) + #define DRM_MODE_FLAG_CSYNC (1<<6) + #define DRM_MODE_FLAG_PCSYNC (1<<7) + #define DRM_MODE_FLAG_NCSYNC (1<<8) + #define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ + #define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */ + #define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */ + #define DRM_MODE_FLAG_DBLCLK (1<<12) + #define DRM_MODE_FLAG_CLKDIV2 (1<<13) + /* + * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX + * (define not exposed to user space). 
+ */ + #define DRM_MODE_FLAG_3D_MASK (0x1f<<14) + #define DRM_MODE_FLAG_3D_NONE (0<<14) + #define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14) + #define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14) + #define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14) + #define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14) + #define DRM_MODE_FLAG_3D_L_DEPTH (5<<14) + #define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14) + #define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14) + #define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14) + + /* Picture aspect ratio options */ + #define DRM_MODE_PICTURE_ASPECT_NONE 0 + #define DRM_MODE_PICTURE_ASPECT_4_3 1 + #define DRM_MODE_PICTURE_ASPECT_16_9 2 + #define DRM_MODE_PICTURE_ASPECT_64_27 3 + #define DRM_MODE_PICTURE_ASPECT_256_135 4 + + /* Content type options */ + #define DRM_MODE_CONTENT_TYPE_NO_DATA 0 + #define DRM_MODE_CONTENT_TYPE_GRAPHICS 1 + #define DRM_MODE_CONTENT_TYPE_PHOTO 2 + #define DRM_MODE_CONTENT_TYPE_CINEMA 3 + #define DRM_MODE_CONTENT_TYPE_GAME 4 + + /* Aspect ratio flag bitmask (4 bits 22:19) */ + #define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19) + #define DRM_MODE_FLAG_PIC_AR_NONE \ + (DRM_MODE_PICTURE_ASPECT_NONE<<19) + #define DRM_MODE_FLAG_PIC_AR_4_3 \ + (DRM_MODE_PICTURE_ASPECT_4_3<<19) + #define DRM_MODE_FLAG_PIC_AR_16_9 \ + (DRM_MODE_PICTURE_ASPECT_16_9<<19) + #define DRM_MODE_FLAG_PIC_AR_64_27 \ + (DRM_MODE_PICTURE_ASPECT_64_27<<19) + #define DRM_MODE_FLAG_PIC_AR_256_135 \ + (DRM_MODE_PICTURE_ASPECT_256_135<<19) + + #define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \ + DRM_MODE_FLAG_NHSYNC | \ + DRM_MODE_FLAG_PVSYNC | \ + DRM_MODE_FLAG_NVSYNC | \ + DRM_MODE_FLAG_INTERLACE | \ + DRM_MODE_FLAG_DBLSCAN | \ + DRM_MODE_FLAG_CSYNC | \ + DRM_MODE_FLAG_PCSYNC | \ + DRM_MODE_FLAG_NCSYNC | \ + DRM_MODE_FLAG_HSKEW | \ + DRM_MODE_FLAG_DBLCLK | \ + DRM_MODE_FLAG_CLKDIV2 | \ + DRM_MODE_FLAG_3D_MASK) + + /* DPMS flags */ + /* bit compatible with the xorg definitions. */ + #define DRM_MODE_DPMS_ON 0 + #define DRM_MODE_DPMS_STANDBY 1 + #define DRM_MODE_DPMS_SUSPEND 2 + #define DRM_MODE_DPMS_OFF 3 + + /* Scaling mode options */ + #define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or + software can still scale) */ + #define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */ + #define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ + #define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ + + /* Dithering mode options */ + #define DRM_MODE_DITHERING_OFF 0 + #define DRM_MODE_DITHERING_ON 1 + #define DRM_MODE_DITHERING_AUTO 2 + + /* Dirty info options */ + #define DRM_MODE_DIRTY_OFF 0 + #define DRM_MODE_DIRTY_ON 1 + #define DRM_MODE_DIRTY_ANNOTATE 2 + + /* Link Status options */ + #define DRM_MODE_LINK_STATUS_GOOD 0 + #define DRM_MODE_LINK_STATUS_BAD 1 + + /* + * DRM_MODE_ROTATE_ + * + * Signals that a drm plane is been rotated degrees in counter + * clockwise direction. + * + * This define is provided as a convenience, looking up the property id + * using the name->prop id lookup is the preferred method. + */ + #define DRM_MODE_ROTATE_0 (1<<0) + #define DRM_MODE_ROTATE_90 (1<<1) + #define DRM_MODE_ROTATE_180 (1<<2) + #define DRM_MODE_ROTATE_270 (1<<3) + + /* + * DRM_MODE_ROTATE_MASK + * + * Bitmask used to look for drm plane rotations. + */ + #define DRM_MODE_ROTATE_MASK (\ + DRM_MODE_ROTATE_0 | \ + DRM_MODE_ROTATE_90 | \ + DRM_MODE_ROTATE_180 | \ + DRM_MODE_ROTATE_270) + + /* + * DRM_MODE_REFLECT_ + * + * Signals that the contents of a drm plane is reflected along the axis, + * in the same way as mirroring. 
+ * See kerneldoc chapter "Plane Composition Properties" for more details. + * + * This define is provided as a convenience, looking up the property id + * using the name->prop id lookup is the preferred method. + */ + #define DRM_MODE_REFLECT_X (1<<4) + #define DRM_MODE_REFLECT_Y (1<<5) + + /* + * DRM_MODE_REFLECT_MASK + * + * Bitmask used to look for drm plane reflections. + */ + #define DRM_MODE_REFLECT_MASK (\ + DRM_MODE_REFLECT_X | \ + DRM_MODE_REFLECT_Y) + + /* Content Protection Flags */ + #define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0 + #define DRM_MODE_CONTENT_PROTECTION_DESIRED 1 + #define DRM_MODE_CONTENT_PROTECTION_ENABLED 2 + + /** + * struct drm_mode_modeinfo - Display mode information. + * @clock: pixel clock in kHz + * @hdisplay: horizontal display size + * @hsync_start: horizontal sync start + * @hsync_end: horizontal sync end + * @htotal: horizontal total size + * @hskew: horizontal skew + * @vdisplay: vertical display size + * @vsync_start: vertical sync start + * @vsync_end: vertical sync end + * @vtotal: vertical total size + * @vscan: vertical scan + * @vrefresh: approximate vertical refresh rate in Hz + * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines + * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines + * @name: string describing the mode resolution + * + * This is the user-space API display mode information structure. For the + * kernel version see struct drm_display_mode. + */ + struct drm_mode_modeinfo { + __u32 clock; + __u16 hdisplay; + __u16 hsync_start; + __u16 hsync_end; + __u16 htotal; + __u16 hskew; + __u16 vdisplay; + __u16 vsync_start; + __u16 vsync_end; + __u16 vtotal; + __u16 vscan; + + __u32 vrefresh; + + __u32 flags; + __u32 type; + char name[DRM_DISPLAY_MODE_LEN]; + }; + + struct drm_mode_card_res { + __u64 fb_id_ptr; + __u64 crtc_id_ptr; + __u64 connector_id_ptr; + __u64 encoder_id_ptr; + __u32 count_fbs; + __u32 count_crtcs; + __u32 count_connectors; + __u32 count_encoders; + __u32 min_width; + __u32 max_width; + __u32 min_height; + __u32 max_height; + }; + + struct drm_mode_crtc { + __u64 set_connectors_ptr; + __u32 count_connectors; + + __u32 crtc_id; /**< Id */ + __u32 fb_id; /**< Id of framebuffer */ + + __u32 x; /**< x Position on the framebuffer */ + __u32 y; /**< y Position on the framebuffer */ + + __u32 gamma_size; + __u32 mode_valid; + struct drm_mode_modeinfo mode; + }; + + #define DRM_MODE_PRESENT_TOP_FIELD (1<<0) + #define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1) + + /* Planes blend with or override other bits on the CRTC */ + struct drm_mode_set_plane { + __u32 plane_id; + __u32 crtc_id; + __u32 fb_id; /* fb object contains surface format type */ + __u32 flags; /* see above flags */ + + /* Signed dest location allows it to be partially off screen */ + __s32 crtc_x; + __s32 crtc_y; + __u32 crtc_w; + __u32 crtc_h; + + /* Source values are 16.16 fixed point */ + __u32 src_x; + __u32 src_y; + __u32 src_h; + __u32 src_w; + }; + + /** + * struct drm_mode_get_plane - Get plane metadata. + * + * Userspace can perform a GETPLANE ioctl to retrieve information about a + * plane. + * + * To retrieve the number of formats supported, set @count_format_types to zero + * and call the ioctl. @count_format_types will be updated with the value. + * + * To retrieve these formats, allocate an array with the memory needed to store + * @count_format_types formats. Point @format_type_ptr to this array and call + * the ioctl again (with @count_format_types still set to the value returned in + * the first ioctl call). 
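A minimal user-space sketch of the count-then-fetch pattern this kernel-doc describes (illustrative only, not part of the vendored header; assumes libdrm's drmIoctl(), its include paths, and a valid DRM file descriptor):

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>   /* drmIoctl(); pulls in the drm uapi types and ioctl numbers */

/* Two-call GETPLANE: first call with count_format_types == 0 to learn the
 * number of formats, second call with an array of that capacity. */
static int get_plane_formats(int fd, uint32_t plane_id,
                             uint32_t **formats, uint32_t *count)
{
        struct drm_mode_get_plane gp = { .plane_id = plane_id };

        /* First call: kernel fills count_format_types. */
        if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &gp))
                return -1;

        *formats = calloc(gp.count_format_types, sizeof(**formats));
        if (!*formats)
                return -1;

        /* Second call: count_format_types is still the returned value,
         * format_type_ptr now points at the allocated array. */
        gp.format_type_ptr = (uint64_t)(uintptr_t)*formats;
        if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &gp)) {
                free(*formats);
                return -1;
        }

        *count = gp.count_format_types;
        return 0;
}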
+ */ + struct drm_mode_get_plane { + /** + * @plane_id: Object ID of the plane whose information should be + * retrieved. Set by caller. + */ + __u32 plane_id; + + /** @crtc_id: Object ID of the current CRTC. */ + __u32 crtc_id; + /** @fb_id: Object ID of the current fb. */ + __u32 fb_id; + + /** + * @possible_crtcs: Bitmask of CRTC's compatible with the plane. CRTC's + * are created and they receive an index, which corresponds to their + * position in the bitmask. Bit N corresponds to + * :ref:`CRTC index` N. + */ + __u32 possible_crtcs; + /** @gamma_size: Never used. */ + __u32 gamma_size; + + /** @count_format_types: Number of formats. */ + __u32 count_format_types; + /** + * @format_type_ptr: Pointer to ``__u32`` array of formats that are + * supported by the plane. These formats do not require modifiers. + */ + __u64 format_type_ptr; + }; + + struct drm_mode_get_plane_res { + __u64 plane_id_ptr; + __u32 count_planes; + }; + + #define DRM_MODE_ENCODER_NONE 0 + #define DRM_MODE_ENCODER_DAC 1 + #define DRM_MODE_ENCODER_TMDS 2 + #define DRM_MODE_ENCODER_LVDS 3 + #define DRM_MODE_ENCODER_TVDAC 4 + #define DRM_MODE_ENCODER_VIRTUAL 5 + #define DRM_MODE_ENCODER_DSI 6 + #define DRM_MODE_ENCODER_DPMST 7 + #define DRM_MODE_ENCODER_DPI 8 + + struct drm_mode_get_encoder { + __u32 encoder_id; + __u32 encoder_type; + + __u32 crtc_id; /**< Id of crtc */ + + __u32 possible_crtcs; + __u32 possible_clones; + }; + + /* This is for connectors with multiple signal types. */ + /* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */ + enum drm_mode_subconnector { + DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */ + DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */ + DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */ + DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I DP */ + DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */ + DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */ + DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */ + DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */ + DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */ + DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */ + DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */ + DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */ + DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */ + }; + + #define DRM_MODE_CONNECTOR_Unknown 0 + #define DRM_MODE_CONNECTOR_VGA 1 + #define DRM_MODE_CONNECTOR_DVII 2 + #define DRM_MODE_CONNECTOR_DVID 3 + #define DRM_MODE_CONNECTOR_DVIA 4 + #define DRM_MODE_CONNECTOR_Composite 5 + #define DRM_MODE_CONNECTOR_SVIDEO 6 + #define DRM_MODE_CONNECTOR_LVDS 7 + #define DRM_MODE_CONNECTOR_Component 8 + #define DRM_MODE_CONNECTOR_9PinDIN 9 + #define DRM_MODE_CONNECTOR_DisplayPort 10 + #define DRM_MODE_CONNECTOR_HDMIA 11 + #define DRM_MODE_CONNECTOR_HDMIB 12 + #define DRM_MODE_CONNECTOR_TV 13 + #define DRM_MODE_CONNECTOR_eDP 14 + #define DRM_MODE_CONNECTOR_VIRTUAL 15 + #define DRM_MODE_CONNECTOR_DSI 16 + #define DRM_MODE_CONNECTOR_DPI 17 + #define DRM_MODE_CONNECTOR_WRITEBACK 18 + #define DRM_MODE_CONNECTOR_SPI 19 + #define DRM_MODE_CONNECTOR_USB 20 + + /** + * struct drm_mode_get_connector - Get connector metadata. + * + * User-space can perform a GETCONNECTOR ioctl to retrieve information about a + * connector. User-space is expected to retrieve encoders, modes and properties + * by performing this ioctl at least twice: the first time to retrieve the + * number of elements, the second time to retrieve the elements themselves. 
+ * + * To retrieve the number of elements, set @count_props and @count_encoders to + * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct + * drm_mode_modeinfo element. + * + * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr, + * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and + * @count_encoders to their capacity. + * + * Performing the ioctl only twice may be racy: the number of elements may have + * changed with a hotplug event in-between the two ioctls. User-space is + * expected to retry the last ioctl until the number of elements stabilizes. + * The kernel won't fill any array which doesn't have the expected length. + * + * **Force-probing a connector** + * + * If the @count_modes field is set to zero and the DRM client is the current + * DRM master, the kernel will perform a forced probe on the connector to + * refresh the connector status, modes and EDID. A forced-probe can be slow, + * might cause flickering and the ioctl will block. + * + * User-space needs to force-probe connectors to ensure their metadata is + * up-to-date at startup and after receiving a hot-plug event. User-space + * may perform a forced-probe when the user explicitly requests it. User-space + * shouldn't perform a forced-probe in other situations. + */ + struct drm_mode_get_connector { + /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */ + __u64 encoders_ptr; + /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */ + __u64 modes_ptr; + /** @props_ptr: Pointer to ``__u32`` array of property IDs. */ + __u64 props_ptr; + /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */ + __u64 prop_values_ptr; + + /** @count_modes: Number of modes. */ + __u32 count_modes; + /** @count_props: Number of properties. */ + __u32 count_props; + /** @count_encoders: Number of encoders. */ + __u32 count_encoders; + + /** @encoder_id: Object ID of the current encoder. */ + __u32 encoder_id; + /** @connector_id: Object ID of the connector. */ + __u32 connector_id; + /** + * @connector_type: Type of the connector. + * + * See DRM_MODE_CONNECTOR_* defines. + */ + __u32 connector_type; + /** + * @connector_type_id: Type-specific connector number. + * + * This is not an object ID. This is a per-type connector number. Each + * (type, type_id) combination is unique across all connectors of a DRM + * device. + * + * The (type, type_id) combination is not a stable identifier: the + * type_id can change depending on the driver probe order. + */ + __u32 connector_type_id; + + /** + * @connection: Status of the connector. + * + * See enum drm_connector_status. + */ + __u32 connection; + /** @mm_width: Width of the connected sink in millimeters. */ + __u32 mm_width; + /** @mm_height: Height of the connected sink in millimeters. */ + __u32 mm_height; + /** + * @subpixel: Subpixel order of the connected sink. + * + * See enum subpixel_order. + */ + __u32 subpixel; + + /** @pad: Padding, must be zero. 
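A short sketch of the "count without forcing a probe" trick described above (illustrative only, not part of the vendored header; assumes libdrm's drmIoctl() and a valid DRM fd):

#include <stdint.h>
#include <xf86drm.h>

/* Query a connector's element counts without triggering a forced probe:
 * count_modes is set to 1 and modes_ptr to a scratch element, as the
 * kernel-doc above recommends. */
static int count_connector_elements(int fd, uint32_t connector_id,
                                    struct drm_mode_get_connector *out)
{
        struct drm_mode_modeinfo scratch;
        struct drm_mode_get_connector conn = {
                .connector_id = connector_id,
                .count_modes  = 1,
                .modes_ptr    = (uint64_t)(uintptr_t)&scratch,
        };

        if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
                return -1;

        /* conn now reports count_modes/count_props/count_encoders; the
         * caller allocates arrays of that capacity, repeats the ioctl, and
         * retries if a hotplug in between changed any of the counts. */
        *out = conn;
        return 0;
}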
*/ + __u32 pad; + }; + + #define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */ + #define DRM_MODE_PROP_RANGE (1<<1) + #define DRM_MODE_PROP_IMMUTABLE (1<<2) + #define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ + #define DRM_MODE_PROP_BLOB (1<<4) + #define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ + + /* non-extended types: legacy bitmask, one bit per type: */ + #define DRM_MODE_PROP_LEGACY_TYPE ( \ + DRM_MODE_PROP_RANGE | \ + DRM_MODE_PROP_ENUM | \ + DRM_MODE_PROP_BLOB | \ + DRM_MODE_PROP_BITMASK) + + /* extended-types: rather than continue to consume a bit per type, + * grab a chunk of the bits to use as integer type id. + */ + #define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0 + #define DRM_MODE_PROP_TYPE(n) ((n) << 6) + #define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1) + #define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2) + + /* the PROP_ATOMIC flag is used to hide properties from userspace that + * is not aware of atomic properties. This is mostly to work around + * older userspace (DDX drivers) that read/write each prop they find, + * without being aware that this could be triggering a lengthy modeset. + */ + #define DRM_MODE_PROP_ATOMIC 0x80000000 + + /** + * struct drm_mode_property_enum - Description for an enum/bitfield entry. + * @value: numeric value for this enum entry. + * @name: symbolic name for this enum entry. + * + * See struct drm_property_enum for details. + */ + struct drm_mode_property_enum { + __u64 value; + char name[DRM_PROP_NAME_LEN]; + }; + + /** + * struct drm_mode_get_property - Get property metadata. + * + * User-space can perform a GETPROPERTY ioctl to retrieve information about a + * property. The same property may be attached to multiple objects, see + * "Modeset Base Object Abstraction". + * + * The meaning of the @values_ptr field changes depending on the property type. + * See &drm_property.flags for more details. + * + * The @enum_blob_ptr and @count_enum_blobs fields are only meaningful when the + * property has the type &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK. For + * backwards compatibility, the kernel will always set @count_enum_blobs to + * zero when the property has the type &DRM_MODE_PROP_BLOB. User-space must + * ignore these two fields if the property has a different type. + * + * User-space is expected to retrieve values and enums by performing this ioctl + * at least twice: the first time to retrieve the number of elements, the + * second time to retrieve the elements themselves. + * + * To retrieve the number of elements, set @count_values and @count_enum_blobs + * to zero, then call the ioctl. @count_values will be updated with the number + * of elements. If the property has the type &DRM_MODE_PROP_ENUM or + * &DRM_MODE_PROP_BITMASK, @count_enum_blobs will be updated as well. + * + * To retrieve the elements themselves, allocate an array for @values_ptr and + * set @count_values to its capacity. If the property has the type + * &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK, allocate an array for + * @enum_blob_ptr and set @count_enum_blobs to its capacity. Calling the ioctl + * again will fill the arrays. + */ + struct drm_mode_get_property { + /** @values_ptr: Pointer to a ``__u64`` array. */ + __u64 values_ptr; + /** @enum_blob_ptr: Pointer to a struct drm_mode_property_enum array. */ + __u64 enum_blob_ptr; + + /** + * @prop_id: Object ID of the property which should be retrieved. Set + * by the caller. + */ + __u32 prop_id; + /** + * @flags: ``DRM_MODE_PROP_*`` bitfield. 
See &drm_property.flags for + * a definition of the flags. + */ + __u32 flags; + /** + * @name: Symbolic property name. User-space should use this field to + * recognize properties. + */ + char name[DRM_PROP_NAME_LEN]; + + /** @count_values: Number of elements in @values_ptr. */ + __u32 count_values; + /** @count_enum_blobs: Number of elements in @enum_blob_ptr. */ + __u32 count_enum_blobs; + }; + + struct drm_mode_connector_set_property { + __u64 value; + __u32 prop_id; + __u32 connector_id; + }; + + #define DRM_MODE_OBJECT_CRTC 0xcccccccc + #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 + #define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 + #define DRM_MODE_OBJECT_MODE 0xdededede + #define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 + #define DRM_MODE_OBJECT_FB 0xfbfbfbfb + #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb + #define DRM_MODE_OBJECT_PLANE 0xeeeeeeee + #define DRM_MODE_OBJECT_ANY 0 + + struct drm_mode_obj_get_properties { + __u64 props_ptr; + __u64 prop_values_ptr; + __u32 count_props; + __u32 obj_id; + __u32 obj_type; + }; + + struct drm_mode_obj_set_property { + __u64 value; + __u32 prop_id; + __u32 obj_id; + __u32 obj_type; + }; + + struct drm_mode_get_blob { + __u32 blob_id; + __u32 length; + __u64 data; + }; + + struct drm_mode_fb_cmd { + __u32 fb_id; + __u32 width; + __u32 height; + __u32 pitch; + __u32 bpp; + __u32 depth; + /* driver specific handle */ + __u32 handle; + }; + + #define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ + #define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */ + + /** + * struct drm_mode_fb_cmd2 - Frame-buffer metadata. + * + * This struct holds frame-buffer metadata. There are two ways to use it: + * + * - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2 + * ioctl to register a new frame-buffer. The new frame-buffer object ID will + * be set by the kernel in @fb_id. + * - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to + * fetch metadata about an existing frame-buffer. + * + * In case of planar formats, this struct allows up to 4 buffer objects with + * offsets and pitches per plane. The pitch and offset order are dictated by + * the format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as: + * + * YUV 4:2:0 image with a plane of 8-bit Y samples followed by an + * interleaved U/V plane containing 8-bit 2x2 subsampled colour difference + * samples. + * + * So it would consist of a Y plane at ``offsets[0]`` and a UV plane at + * ``offsets[1]``. + * + * To accommodate tiled, compressed, etc formats, a modifier can be specified. + * For more information see the "Format Modifiers" section. Note that even + * though it looks like we have a modifier per-plane, we in fact do not. The + * modifier for each plane must be identical. Thus all combinations of + * different data layouts for multi-plane formats must be enumerated as + * separate modifiers. + * + * All of the entries in @handles, @pitches, @offsets and @modifier must be + * zero when unused. Warning, for @offsets and @modifier zero can't be used to + * figure out whether the entry is used or not since it's a valid value (a zero + * offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR). + */ + struct drm_mode_fb_cmd2 { + /** @fb_id: Object ID of the frame-buffer. */ + __u32 fb_id; + /** @width: Width of the frame-buffer. */ + __u32 width; + /** @height: Height of the frame-buffer. */ + __u32 height; + /** + * @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in + * ``drm_fourcc.h``. 
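As an illustration of the planar layout described in the struct-level comment, a two-plane NV12 frame-buffer backed by a single GEM handle could be registered roughly like this (a sketch only, not part of the vendored header; assumes libdrm's drmIoctl() and a tightly packed buffer object `bo`):

#include <stdint.h>
#include <xf86drm.h>
#include <drm_fourcc.h>   /* DRM_FORMAT_NV12; libdrm include path assumed */

/* NV12: Y plane at offsets[0], interleaved UV plane at offsets[1]. */
static uint32_t add_nv12_fb(int fd, uint32_t bo, uint32_t width, uint32_t height)
{
        struct drm_mode_fb_cmd2 fb = {
                .width        = width,
                .height       = height,
                .pixel_format = DRM_FORMAT_NV12,
                .handles      = { bo, bo },            /* same BO for both planes   */
                .pitches      = { width, width },      /* 1 Bpp Y, interleaved UV   */
                .offsets      = { 0, width * height }, /* UV plane follows Y plane  */
        };

        if (drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &fb))
                return 0;

        return fb.fb_id;   /* kernel-assigned frame-buffer object ID */
}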
+ */ + __u32 pixel_format; + /** + * @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and + * &DRM_MODE_FB_MODIFIERS). + */ + __u32 flags; + + /** + * @handles: GEM buffer handle, one per plane. Set to 0 if the plane is + * unused. The same handle can be used for multiple planes. + */ + __u32 handles[4]; + /** @pitches: Pitch (aka. stride) in bytes, one per plane. */ + __u32 pitches[4]; + /** @offsets: Offset into the buffer in bytes, one per plane. */ + __u32 offsets[4]; + /** + * @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*`` + * constants in ``drm_fourcc.h``. All planes must use the same + * modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags. + */ + __u64 modifier[4]; + }; + + #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 + #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 + #define DRM_MODE_FB_DIRTY_FLAGS 0x03 + + #define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 + + /* + * Mark a region of a framebuffer as dirty. + * + * Some hardware does not automatically update display contents + * as a hardware or software draw to a framebuffer. This ioctl + * allows userspace to tell the kernel and the hardware what + * regions of the framebuffer have changed. + * + * The kernel or hardware is free to update more then just the + * region specified by the clip rects. The kernel or hardware + * may also delay and/or coalesce several calls to dirty into a + * single update. + * + * Userspace may annotate the updates, the annotates are a + * promise made by the caller that the change is either a copy + * of pixels or a fill of a single color in the region specified. + * + * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then + * the number of updated regions are half of num_clips given, + * where the clip rects are paired in src and dst. The width and + * height of each one of the pairs must match. + * + * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller + * promises that the region specified of the clip rects is filled + * completely with a single color as given in the color argument. + */ + + struct drm_mode_fb_dirty_cmd { + __u32 fb_id; + __u32 flags; + __u32 color; + __u32 num_clips; + __u64 clips_ptr; + }; + + struct drm_mode_mode_cmd { + __u32 connector_id; + struct drm_mode_modeinfo mode; + }; + + #define DRM_MODE_CURSOR_BO 0x01 + #define DRM_MODE_CURSOR_MOVE 0x02 + #define DRM_MODE_CURSOR_FLAGS 0x03 + + /* + * depending on the value in flags different members are used. + * + * CURSOR_BO uses + * crtc_id + * width + * height + * handle - if 0 turns the cursor off + * + * CURSOR_MOVE uses + * crtc_id + * x + * y + */ + struct drm_mode_cursor { + __u32 flags; + __u32 crtc_id; + __s32 x; + __s32 y; + __u32 width; + __u32 height; + /* driver specific handle */ + __u32 handle; + }; + + struct drm_mode_cursor2 { + __u32 flags; + __u32 crtc_id; + __s32 x; + __s32 y; + __u32 width; + __u32 height; + /* driver specific handle */ + __u32 handle; + __s32 hot_x; + __s32 hot_y; + }; + + struct drm_mode_crtc_lut { + __u32 crtc_id; + __u32 gamma_size; + + /* pointers to arrays */ + __u64 red; + __u64 green; + __u64 blue; + }; + + struct drm_color_ctm { + /* + * Conversion matrix in S31.32 sign-magnitude + * (not two's complement!) format. + * + * out matrix in + * |R| |0 1 2| |R| + * |G| = |3 4 5| x |G| + * |B| |6 7 8| |B| + */ + __u64 matrix[9]; + }; + + struct drm_color_lut { + /* + * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and + * 0xffff == 1.0. 
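A tiny sketch of the mapping stated above (illustrative only): converting a normalized channel value to the 16-bit LUT encoding.

#include <stdint.h>

/* Map a [0.0, 1.0] channel value to the 0x0000..0xffff range used by
 * struct drm_color_lut, rounding to the nearest step. */
static uint16_t to_lut16(double v)
{
        if (v < 0.0) v = 0.0;
        if (v > 1.0) v = 1.0;
        return (uint16_t)(v * 0xffff + 0.5);
}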
+ */ + __u16 red; + __u16 green; + __u16 blue; + __u16 reserved; + }; + + /** + * struct drm_plane_size_hint - Plane size hints + * @width: The width of the plane in pixel + * @height: The height of the plane in pixel + * + * The plane SIZE_HINTS property blob contains an + * array of struct drm_plane_size_hint. + */ + struct drm_plane_size_hint { + __u16 width; + __u16 height; + }; + + /** + * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data. + * + * HDR Metadata Infoframe as per CTA 861.G spec. This is expected + * to match exactly with the spec. + * + * Userspace is expected to pass the metadata information as per + * the format described in this structure. + */ + struct hdr_metadata_infoframe { + /** + * @eotf: Electro-Optical Transfer Function (EOTF) + * used in the stream. + */ + __u8 eotf; + /** + * @metadata_type: Static_Metadata_Descriptor_ID. + */ + __u8 metadata_type; + /** + * @display_primaries: Color Primaries of the Data. + * These are coded as unsigned 16-bit values in units of + * 0.00002, where 0x0000 represents zero and 0xC350 + * represents 1.0000. + * @display_primaries.x: X coordinate of color primary. + * @display_primaries.y: Y coordinate of color primary. + */ + struct { + __u16 x, y; + } display_primaries[3]; + /** + * @white_point: White Point of Colorspace Data. + * These are coded as unsigned 16-bit values in units of + * 0.00002, where 0x0000 represents zero and 0xC350 + * represents 1.0000. + * @white_point.x: X coordinate of whitepoint of color primary. + * @white_point.y: Y coordinate of whitepoint of color primary. + */ + struct { + __u16 x, y; + } white_point; + /** + * @max_display_mastering_luminance: Max Mastering Display Luminance. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + __u16 max_display_mastering_luminance; + /** + * @min_display_mastering_luminance: Min Mastering Display Luminance. + * This value is coded as an unsigned 16-bit value in units of + * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF + * represents 6.5535 cd/m2. + */ + __u16 min_display_mastering_luminance; + /** + * @max_cll: Max Content Light Level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + __u16 max_cll; + /** + * @max_fall: Max Frame Average Light Level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + __u16 max_fall; + }; + + /** + * struct hdr_output_metadata - HDR output metadata + * + * Metadata Information to be passed from userspace + */ + struct hdr_output_metadata { + /** + * @metadata_type: Static_Metadata_Descriptor_ID. + */ + __u32 metadata_type; + /** + * @hdmi_metadata_type1: HDR Metadata Infoframe. + */ + union { + struct hdr_metadata_infoframe hdmi_metadata_type1; + }; + }; + + /** + * DRM_MODE_PAGE_FLIP_EVENT + * + * Request that the kernel sends back a vblank event (see + * struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the + * page-flip is done. + */ + #define DRM_MODE_PAGE_FLIP_EVENT 0x01 + /** + * DRM_MODE_PAGE_FLIP_ASYNC + * + * Request that the page-flip is performed as soon as possible, ie. with no + * delay due to waiting for vblank. This may cause tearing to be visible on + * the screen. 
+ * + * When used with atomic uAPI, the driver will return an error if the hardware + * doesn't support performing an asynchronous page-flip for this update. + * User-space should handle this, e.g. by falling back to a regular page-flip. + * + * Note, some hardware might need to perform one last synchronous page-flip + * before being able to switch to asynchronous page-flips. As an exception, + * the driver will return success even though that first page-flip is not + * asynchronous. + */ + #define DRM_MODE_PAGE_FLIP_ASYNC 0x02 + #define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4 + #define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8 + #define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \ + DRM_MODE_PAGE_FLIP_TARGET_RELATIVE) + /** + * DRM_MODE_PAGE_FLIP_FLAGS + * + * Bitmask of flags suitable for &drm_mode_crtc_page_flip_target.flags. + */ + #define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \ + DRM_MODE_PAGE_FLIP_ASYNC | \ + DRM_MODE_PAGE_FLIP_TARGET) + + /* + * Request a page flip on the specified crtc. + * + * This ioctl will ask KMS to schedule a page flip for the specified + * crtc. Once any pending rendering targeting the specified fb (as of + * ioctl time) has completed, the crtc will be reprogrammed to display + * that fb after the next vertical refresh. The ioctl returns + * immediately, but subsequent rendering to the current fb will block + * in the execbuffer ioctl until the page flip happens. If a page + * flip is already pending as the ioctl is called, EBUSY will be + * returned. + * + * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank + * event (see drm.h: struct drm_event_vblank) when the page flip is + * done. The user_data field passed in with this ioctl will be + * returned as the user_data field in the vblank event struct. + * + * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen + * 'as soon as possible', meaning that it not delay waiting for vblank. + * This may cause tearing on the screen. + * + * The reserved field must be zero. + */ + + struct drm_mode_crtc_page_flip { + __u32 crtc_id; + __u32 fb_id; + __u32 flags; + __u32 reserved; + __u64 user_data; + }; + + /* + * Request a page flip on the specified crtc. + * + * Same as struct drm_mode_crtc_page_flip, but supports new flags and + * re-purposes the reserved field: + * + * The sequence field must be zero unless either of the + * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When + * the ABSOLUTE flag is specified, the sequence field denotes the absolute + * vblank sequence when the flip should take effect. When the RELATIVE + * flag is specified, the sequence field denotes the relative (to the + * current one when the ioctl is called) vblank sequence when the flip + * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to + * make sure the vblank sequence before the target one has passed before + * calling this ioctl. The purpose of the + * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify + * the target for when code dealing with a page flip runs during a + * vertical blank period. + */ + + struct drm_mode_crtc_page_flip_target { + __u32 crtc_id; + __u32 fb_id; + __u32 flags; + __u32 sequence; + __u64 user_data; + }; + + /** + * struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout. 
+ * @height: buffer height in pixels + * @width: buffer width in pixels + * @bpp: bits per pixel + * @flags: must be zero + * @handle: buffer object handle + * @pitch: number of bytes between two consecutive lines + * @size: size of the whole buffer in bytes + * + * User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds, + * the kernel fills @handle, @pitch and @size. + */ + struct drm_mode_create_dumb { + __u32 height; + __u32 width; + __u32 bpp; + __u32 flags; + + __u32 handle; + __u32 pitch; + __u64 size; + }; + + /* set up for mmap of a dumb scanout buffer */ + struct drm_mode_map_dumb { + /** Handle for the object being mapped. */ + __u32 handle; + __u32 pad; + /** + * Fake offset to use for subsequent mmap call + * + * This is a fixed-size type for 32/64 compatibility. + */ + __u64 offset; + }; + + struct drm_mode_destroy_dumb { + __u32 handle; + }; + + /** + * DRM_MODE_ATOMIC_TEST_ONLY + * + * Do not apply the atomic commit, instead check whether the hardware supports + * this configuration. + * + * See &drm_mode_config_funcs.atomic_check for more details on test-only + * commits. + */ + #define DRM_MODE_ATOMIC_TEST_ONLY 0x0100 + /** + * DRM_MODE_ATOMIC_NONBLOCK + * + * Do not block while applying the atomic commit. The &DRM_IOCTL_MODE_ATOMIC + * IOCTL returns immediately instead of waiting for the changes to be applied + * in hardware. Note, the driver will still check that the update can be + * applied before retuning. + */ + #define DRM_MODE_ATOMIC_NONBLOCK 0x0200 + /** + * DRM_MODE_ATOMIC_ALLOW_MODESET + * + * Allow the update to result in temporary or transient visible artifacts while + * the update is being applied. Applying the update may also take significantly + * more time than a page flip. All visual artifacts will disappear by the time + * the update is completed, as signalled through the vblank event's timestamp + * (see struct drm_event_vblank). + * + * This flag must be set when the KMS update might cause visible artifacts. + * Without this flag such KMS update will return a EINVAL error. What kind of + * update may cause visible artifacts depends on the driver and the hardware. + * User-space that needs to know beforehand if an update might cause visible + * artifacts can use &DRM_MODE_ATOMIC_TEST_ONLY without + * &DRM_MODE_ATOMIC_ALLOW_MODESET to see if it fails. + * + * To the best of the driver's knowledge, visual artifacts are guaranteed to + * not appear when this flag is not set. Some sinks might display visual + * artifacts outside of the driver's control. + */ + #define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400 + + /** + * DRM_MODE_ATOMIC_FLAGS + * + * Bitfield of flags accepted by the &DRM_IOCTL_MODE_ATOMIC IOCTL in + * &drm_mode_atomic.flags. 
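A compact sketch of the TEST_ONLY probe mentioned above, checking whether a prepared update could be applied without ALLOW_MODESET (illustrative only, not part of the vendored header; assumes libdrm's drmIoctl(), a client that has enabled DRM_CLIENT_CAP_ATOMIC, and object/property IDs the caller has already looked up):

#include <stdint.h>
#include <xf86drm.h>

/* One object, one property: TEST_ONLY and deliberately no ALLOW_MODESET,
 * so failure means the real commit would need a full modeset. */
static int can_apply_without_modeset(int fd, uint32_t obj_id,
                                     uint32_t prop_id, uint64_t value)
{
        uint32_t objs[]        = { obj_id };
        uint32_t prop_counts[] = { 1 };
        uint32_t props[]       = { prop_id };
        uint64_t values[]      = { value };

        struct drm_mode_atomic req = {
                .flags           = DRM_MODE_ATOMIC_TEST_ONLY,
                .count_objs      = 1,
                .objs_ptr        = (uint64_t)(uintptr_t)objs,
                .count_props_ptr = (uint64_t)(uintptr_t)prop_counts,
                .props_ptr       = (uint64_t)(uintptr_t)props,
                .prop_values_ptr = (uint64_t)(uintptr_t)values,
        };

        return drmIoctl(fd, DRM_IOCTL_MODE_ATOMIC, &req) == 0;
}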
+ */ + #define DRM_MODE_ATOMIC_FLAGS (\ + DRM_MODE_PAGE_FLIP_EVENT |\ + DRM_MODE_PAGE_FLIP_ASYNC |\ + DRM_MODE_ATOMIC_TEST_ONLY |\ + DRM_MODE_ATOMIC_NONBLOCK |\ + DRM_MODE_ATOMIC_ALLOW_MODESET) + + struct drm_mode_atomic { + __u32 flags; + __u32 count_objs; + __u64 objs_ptr; + __u64 count_props_ptr; + __u64 props_ptr; + __u64 prop_values_ptr; + __u64 reserved; + __u64 user_data; + }; + + struct drm_format_modifier_blob { + #define FORMAT_BLOB_CURRENT 1 + /* Version of this blob format */ + __u32 version; + + /* Flags */ + __u32 flags; + + /* Number of fourcc formats supported */ + __u32 count_formats; + + /* Where in this blob the formats exist (in bytes) */ + __u32 formats_offset; + + /* Number of drm_format_modifiers */ + __u32 count_modifiers; + + /* Where in this blob the modifiers exist (in bytes) */ + __u32 modifiers_offset; + + /* __u32 formats[] */ + /* struct drm_format_modifier modifiers[] */ + }; + + struct drm_format_modifier { + /* Bitmask of formats in get_plane format list this info applies to. The + * offset allows a sliding window of which 64 formats (bits). + * + * Some examples: + * In today's world with < 65 formats, and formats 0, and 2 are + * supported + * 0x0000000000000005 + * ^-offset = 0, formats = 5 + * + * If the number formats grew to 128, and formats 98-102 are + * supported with the modifier: + * + * 0x0000007c00000000 0000000000000000 + * ^ + * |__offset = 64, formats = 0x7c00000000 + * + */ + __u64 formats; + __u32 offset; + __u32 pad; + + /* The modifier that applies to the >get_plane format list bitmask. */ + __u64 modifier; + }; + + /** + * struct drm_mode_create_blob - Create New blob property + * + * Create a new 'blob' data property, copying length bytes from data pointer, + * and returning new blob ID. + */ + struct drm_mode_create_blob { + /** @data: Pointer to data to copy. */ + __u64 data; + /** @length: Length of data to copy. */ + __u32 length; + /** @blob_id: Return: new property ID. */ + __u32 blob_id; + }; + + /** + * struct drm_mode_destroy_blob - Destroy user blob + * @blob_id: blob_id to destroy + * + * Destroy a user-created blob property. + * + * User-space can release blobs as soon as they do not need to refer to them by + * their blob object ID. For instance, if you are using a MODE_ID blob in an + * atomic commit and you will not make another commit re-using the same ID, you + * can destroy the blob as soon as the commit has been issued, without waiting + * for it to complete. + */ + struct drm_mode_destroy_blob { + __u32 blob_id; + }; + + /** + * struct drm_mode_create_lease - Create lease + * + * Lease mode resources, creating another drm_master. + * + * The @object_ids array must reference at least one CRTC, one connector and + * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively, + * the lease can be completely empty. + */ + struct drm_mode_create_lease { + /** @object_ids: Pointer to array of object ids (__u32) */ + __u64 object_ids; + /** @object_count: Number of object ids */ + __u32 object_count; + /** @flags: flags for new FD (O_CLOEXEC, etc) */ + __u32 flags; + + /** @lessee_id: Return: unique identifier for lessee. */ + __u32 lessee_id; + /** @fd: Return: file descriptor to new drm_master file */ + __u32 fd; + }; + + /** + * struct drm_mode_list_lessees - List lessees + * + * List lesses from a drm_master. + */ + struct drm_mode_list_lessees { + /** + * @count_lessees: Number of lessees. + * + * On input, provides length of the array. + * On output, provides total number. 
No + * more than the input number will be written + * back, so two calls can be used to get + * the size and then the data. + */ + __u32 count_lessees; + /** @pad: Padding. */ + __u32 pad; + + /** + * @lessees_ptr: Pointer to lessees. + * + * Pointer to __u64 array of lessee ids + */ + __u64 lessees_ptr; + }; + + /** + * struct drm_mode_get_lease - Get Lease + * + * Get leased objects. + */ + struct drm_mode_get_lease { + /** + * @count_objects: Number of leased objects. + * + * On input, provides length of the array. + * On output, provides total number. No + * more than the input number will be written + * back, so two calls can be used to get + * the size and then the data. + */ + __u32 count_objects; + /** @pad: Padding. */ + __u32 pad; + + /** + * @objects_ptr: Pointer to objects. + * + * Pointer to __u32 array of object ids. + */ + __u64 objects_ptr; + }; + + /** + * struct drm_mode_revoke_lease - Revoke lease + */ + struct drm_mode_revoke_lease { + /** @lessee_id: Unique ID of lessee */ + __u32 lessee_id; + }; + + /** + * struct drm_mode_rect - Two dimensional rectangle. + * @x1: Horizontal starting coordinate (inclusive). + * @y1: Vertical starting coordinate (inclusive). + * @x2: Horizontal ending coordinate (exclusive). + * @y2: Vertical ending coordinate (exclusive). + * + * With drm subsystem using struct drm_rect to manage rectangular area this + * export it to user-space. + * + * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS. + */ + struct drm_mode_rect { + __s32 x1; + __s32 y1; + __s32 x2; + __s32 y2; + }; + + /** + * struct drm_mode_closefb + * @fb_id: Framebuffer ID. + * @pad: Must be zero. + */ + struct drm_mode_closefb { + __u32 fb_id; + __u32 pad; + }; + + #if defined(__cplusplus) + } + #endif + + #endif + \ No newline at end of file diff --git a/third_party/uapi-eudebug/drm/xe_drm.h b/third_party/uapi-eudebug/drm/xe_drm.h index ce20f8998e..27ffd5598f 100644 --- a/third_party/uapi-eudebug/drm/xe_drm.h +++ b/third_party/uapi-eudebug/drm/xe_drm.h @@ -3,1810 +3,1947 @@ * Copyright © 2023 Intel Corporation */ -#ifndef _XE_DRM_H_ -#define _XE_DRM_H_ - -#include "drm.h" - -#if defined(__cplusplus) -extern "C" { -#endif - -/* - * Please note that modifications to all structs defined here are - * subject to backwards-compatibility constraints. - * Sections in this file are organized as follows: - * 1. IOCTL definition - * 2. Extension definition and helper structs - * 3. IOCTL's Query structs in the order of the Query's entries. - * 4. The rest of IOCTL structs in the order of IOCTL declaration. + #ifndef _XE_DRM_H_ + #define _XE_DRM_H_ + + #include "drm.h" + + #if defined(__cplusplus) + extern "C" { + #endif + + /* + * Please note that modifications to all structs defined here are + * subject to backwards-compatibility constraints. + * Sections in this file are organized as follows: + * 1. IOCTL definition + * 2. Extension definition and helper structs + * 3. IOCTL's Query structs in the order of the Query's entries. + * 4. The rest of IOCTL structs in the order of IOCTL declaration. + */ + + /** + * DOC: Xe Device Block Diagram + * + * The diagram below represents a high-level simplification of a discrete + * GPU supported by the Xe driver. It shows some device components which + * are necessary to understand this API, as well as how their relations + * to each other. 
This diagram does not represent real hardware:: + * + * ┌──────────────────────────────────────────────────────────────────┐ + * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │ + * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │ + * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │ + * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │ + * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │ + * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │ + * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │ + * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │ + * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │ + * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │ + * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │ + * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │ + * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │ + * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │ + * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │ + * └─────────────────────────────Device0───────┬──────────────────────┘ + * │ + * ───────────────────────┴────────── PCI bus + */ + + /** + * DOC: Xe uAPI Overview + * + * This section aims to describe the Xe's IOCTL entries, its structs, and other + * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related + * entries and usage. + * + * List of supported IOCTLs: + * - &DRM_IOCTL_XE_DEVICE_QUERY + * - &DRM_IOCTL_XE_GEM_CREATE + * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET + * - &DRM_IOCTL_XE_VM_CREATE + * - &DRM_IOCTL_XE_VM_DESTROY + * - &DRM_IOCTL_XE_VM_BIND + * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE + * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY + * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY + * - &DRM_IOCTL_XE_EXEC + * - &DRM_IOCTL_XE_WAIT_USER_FENCE + * - &DRM_IOCTL_XE_OBSERVATION + */ + + /* + * xe specific ioctls. + * + * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie + * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset + * against DRM_COMMAND_BASE and should be between [0x0, 0x60). 
+ */ + #define DRM_XE_DEVICE_QUERY 0x00 + #define DRM_XE_GEM_CREATE 0x01 + #define DRM_XE_GEM_MMAP_OFFSET 0x02 + #define DRM_XE_VM_CREATE 0x03 + #define DRM_XE_VM_DESTROY 0x04 + #define DRM_XE_VM_BIND 0x05 + #define DRM_XE_EXEC_QUEUE_CREATE 0x06 + #define DRM_XE_EXEC_QUEUE_DESTROY 0x07 + #define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08 + #define DRM_XE_EXEC 0x09 + #define DRM_XE_WAIT_USER_FENCE 0x0a + #define DRM_XE_OBSERVATION 0x0b + #define DRM_XE_EUDEBUG_CONNECT 0x0c + #define DRM_XE_DEBUG_METADATA_CREATE 0x0d + #define DRM_XE_DEBUG_METADATA_DESTROY 0x0e + /* Must be kept compact -- no holes */ + + #define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query) + #define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create) + #define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset) + #define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create) + #define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy) + #define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind) + #define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create) + #define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy) + #define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property) + #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec) + #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence) + #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param) + #define DRM_IOCTL_XE_EUDEBUG_CONNECT DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EUDEBUG_CONNECT, struct drm_xe_eudebug_connect) + #define DRM_IOCTL_XE_DEBUG_METADATA_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEBUG_METADATA_CREATE, struct drm_xe_debug_metadata_create) + #define DRM_IOCTL_XE_DEBUG_METADATA_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_DEBUG_METADATA_DESTROY, struct drm_xe_debug_metadata_destroy) + + /** + * DOC: Xe IOCTL Extensions + * + * Before detailing the IOCTLs and its structs, it is important to highlight + * that every IOCTL in Xe is extensible. + * + * Many interfaces need to grow over time. In most cases we can simply + * extend the struct and have userspace pass in more data. Another option, + * as demonstrated by Vulkan's approach to providing extensions for forward + * and backward compatibility, is to use a list of optional structs to + * provide those extra details. + * + * The key advantage to using an extension chain is that it allows us to + * redefine the interface more easily than an ever growing struct of + * increasing complexity, and for large parts of that interface to be + * entirely optional. The downside is more pointer chasing; chasing across + * the boundary with pointers encapsulated inside u64. + * + * Example chaining: + * + * .. 
code-block:: C + * + * struct drm_xe_user_extension ext3 { + * .next_extension = 0, // end + * .name = ..., + * }; + * struct drm_xe_user_extension ext2 { + * .next_extension = (uintptr_t)&ext3, + * .name = ..., + * }; + * struct drm_xe_user_extension ext1 { + * .next_extension = (uintptr_t)&ext2, + * .name = ..., + * }; + * + * Typically the struct drm_xe_user_extension would be embedded in some uAPI + * struct, and in this case we would feed it the head of the chain(i.e ext1), + * which would then apply all of the above extensions. */ - -/** - * DOC: Xe Device Block Diagram - * - * The diagram below represents a high-level simplification of a discrete - * GPU supported by the Xe driver. It shows some device components which - * are necessary to understand this API, as well as how their relations - * to each other. This diagram does not represent real hardware:: - * - * ┌──────────────────────────────────────────────────────────────────┐ - * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │ - * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │ - * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │ - * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │ - * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │ - * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │ - * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ - * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │ - * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ - * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ - * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │ - * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ - * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ - * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │ - * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ - * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ - * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │ - * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ - * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ - * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │ - * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ - * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │ - * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │ - * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │ - * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │ - * └─────────────────────────────Device0───────┬──────────────────────┘ - * │ - * ───────────────────────┴────────── PCI bus - */ - -/** - * DOC: Xe uAPI Overview - * - * This section aims to describe the Xe's IOCTL entries, its structs, and other - * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related - * entries and usage. - * - * List of supported IOCTLs: - * - &DRM_IOCTL_XE_DEVICE_QUERY - * - &DRM_IOCTL_XE_GEM_CREATE - * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET - * - &DRM_IOCTL_XE_VM_CREATE - * - &DRM_IOCTL_XE_VM_DESTROY - * - &DRM_IOCTL_XE_VM_BIND - * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE - * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY - * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY - * - &DRM_IOCTL_XE_EXEC - * - &DRM_IOCTL_XE_WAIT_USER_FENCE - * - &DRM_IOCTL_XE_OBSERVATION - */ - -/* - * xe specific ioctls. - * - * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie - * [0x40, 0xa0) (a0 is excluded). 
The numbers below are defined as offset - * against DRM_COMMAND_BASE and should be between [0x0, 0x60). - */ -#define DRM_XE_DEVICE_QUERY 0x00 -#define DRM_XE_GEM_CREATE 0x01 -#define DRM_XE_GEM_MMAP_OFFSET 0x02 -#define DRM_XE_VM_CREATE 0x03 -#define DRM_XE_VM_DESTROY 0x04 -#define DRM_XE_VM_BIND 0x05 -#define DRM_XE_EXEC_QUEUE_CREATE 0x06 -#define DRM_XE_EXEC_QUEUE_DESTROY 0x07 -#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08 -#define DRM_XE_EXEC 0x09 -#define DRM_XE_WAIT_USER_FENCE 0x0a -#define DRM_XE_OBSERVATION 0x0b -#define DRM_XE_EUDEBUG_CONNECT 0x0c -#define DRM_XE_DEBUG_METADATA_CREATE 0x0d -#define DRM_XE_DEBUG_METADATA_DESTROY 0x0e -/* Must be kept compact -- no holes */ - -#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query) -#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create) -#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset) -#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create) -#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy) -#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind) -#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create) -#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy) -#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property) -#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec) -#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence) -#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param) -#define DRM_IOCTL_XE_EUDEBUG_CONNECT DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EUDEBUG_CONNECT, struct drm_xe_eudebug_connect) -#define DRM_IOCTL_XE_DEBUG_METADATA_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEBUG_METADATA_CREATE, struct drm_xe_debug_metadata_create) -#define DRM_IOCTL_XE_DEBUG_METADATA_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_DEBUG_METADATA_DESTROY, struct drm_xe_debug_metadata_destroy) - -/** - * DOC: Xe IOCTL Extensions - * - * Before detailing the IOCTLs and its structs, it is important to highlight - * that every IOCTL in Xe is extensible. - * - * Many interfaces need to grow over time. In most cases we can simply - * extend the struct and have userspace pass in more data. Another option, - * as demonstrated by Vulkan's approach to providing extensions for forward - * and backward compatibility, is to use a list of optional structs to - * provide those extra details. - * - * The key advantage to using an extension chain is that it allows us to - * redefine the interface more easily than an ever growing struct of - * increasing complexity, and for large parts of that interface to be - * entirely optional. The downside is more pointer chasing; chasing across - * the boundary with pointers encapsulated inside u64. - * - * Example chaining: - * - * .. 
code-block:: C - * - * struct drm_xe_user_extension ext3 { - * .next_extension = 0, // end - * .name = ..., - * }; - * struct drm_xe_user_extension ext2 { - * .next_extension = (uintptr_t)&ext3, - * .name = ..., - * }; - * struct drm_xe_user_extension ext1 { - * .next_extension = (uintptr_t)&ext2, - * .name = ..., - * }; - * - * Typically the struct drm_xe_user_extension would be embedded in some uAPI - * struct, and in this case we would feed it the head of the chain(i.e ext1), - * which would then apply all of the above extensions. -*/ - -/** - * struct drm_xe_user_extension - Base class for defining a chain of extensions - */ -struct drm_xe_user_extension { - /** - * @next_extension: - * - * Pointer to the next struct drm_xe_user_extension, or zero if the end. - */ - __u64 next_extension; - - /** - * @name: Name of the extension. - * - * Note that the name here is just some integer. - * - * Also note that the name space for this is not global for the whole - * driver, but rather its scope/meaning is limited to the specific piece - * of uAPI which has embedded the struct drm_xe_user_extension. - */ - __u32 name; - - /** - * @pad: MBZ - * - * All undefined bits must be zero. - */ - __u32 pad; -}; - -/** - * struct drm_xe_ext_set_property - Generic set property extension - * - * A generic struct that allows any of the Xe's IOCTL to be extended - * with a set_property operation. - */ -struct drm_xe_ext_set_property { - /** @base: base user extension */ - struct drm_xe_user_extension base; - - /** @property: property to set */ - __u32 property; - - /** @pad: MBZ */ - __u32 pad; - - /** @value: property value */ - __u64 value; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_engine_class_instance - instance of an engine class - * - * It is returned as part of the @drm_xe_engine, but it also is used as - * the input of engine selection for both @drm_xe_exec_queue_create and - * @drm_xe_query_engine_cycles - * - * The @engine_class can be: - * - %DRM_XE_ENGINE_CLASS_RENDER - * - %DRM_XE_ENGINE_CLASS_COPY - * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE - * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE - * - %DRM_XE_ENGINE_CLASS_COMPUTE - * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual - * hardware engine class). Used for creating ordered queues of VM - * bind operations. - */ -struct drm_xe_engine_class_instance { -#define DRM_XE_ENGINE_CLASS_RENDER 0 -#define DRM_XE_ENGINE_CLASS_COPY 1 -#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2 -#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3 -#define DRM_XE_ENGINE_CLASS_COMPUTE 4 -#define DRM_XE_ENGINE_CLASS_VM_BIND 5 - /** @engine_class: engine class id */ - __u16 engine_class; - /** @engine_instance: engine instance id */ - __u16 engine_instance; - /** @gt_id: Unique ID of this GT within the PCI Device */ - __u16 gt_id; - /** @pad: MBZ */ - __u16 pad; -}; - -/** - * struct drm_xe_engine - describe hardware engine - */ -struct drm_xe_engine { - /** @instance: The @drm_xe_engine_class_instance */ - struct drm_xe_engine_class_instance instance; - - /** @reserved: Reserved */ - __u64 reserved[3]; -}; - -/** - * struct drm_xe_query_engines - describe engines - * - * If a query is made with a struct @drm_xe_device_query where .query - * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of - * struct @drm_xe_query_engines in .data. 
- */ -struct drm_xe_query_engines { - /** @num_engines: number of engines returned in @engines */ - __u32 num_engines; - /** @pad: MBZ */ - __u32 pad; - /** @engines: The returned engines for this device */ - struct drm_xe_engine engines[]; -}; - -/** - * enum drm_xe_memory_class - Supported memory classes. - */ -enum drm_xe_memory_class { - /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */ - DRM_XE_MEM_REGION_CLASS_SYSMEM = 0, - /** - * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this - * represents the memory that is local to the device, which we - * call VRAM. Not valid on integrated platforms. - */ - DRM_XE_MEM_REGION_CLASS_VRAM -}; - -/** - * struct drm_xe_mem_region - Describes some region as known to - * the driver. - */ -struct drm_xe_mem_region { - /** - * @mem_class: The memory class describing this region. - * - * See enum drm_xe_memory_class for supported values. - */ - __u16 mem_class; - /** - * @instance: The unique ID for this region, which serves as the - * index in the placement bitmask used as argument for - * &DRM_IOCTL_XE_GEM_CREATE - */ - __u16 instance; - /** - * @min_page_size: Min page-size in bytes for this region. - * - * When the kernel allocates memory for this region, the - * underlying pages will be at least @min_page_size in size. - * Buffer objects with an allowable placement in this region must be - * created with a size aligned to this value. - * GPU virtual address mappings of (parts of) buffer objects that - * may be placed in this region must also have their GPU virtual - * address and range aligned to this value. - * Affected IOCTLS will return %-EINVAL if alignment restrictions are - * not met. - */ - __u32 min_page_size; - /** - * @total_size: The usable size in bytes for this region. - */ - __u64 total_size; - /** - * @used: Estimate of the memory used in bytes for this region. - * - * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable - * accounting. Without this the value here will always equal - * zero. - */ - __u64 used; - /** - * @cpu_visible_size: How much of this region can be CPU - * accessed, in bytes. - * - * This will always be <= @total_size, and the remainder (if - * any) will not be CPU accessible. If the CPU accessible part - * is smaller than @total_size then this is referred to as a - * small BAR system. - * - * On systems without small BAR (full BAR), the probed_size will - * always equal the @total_size, since all of it will be CPU - * accessible. - * - * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM - * regions (for other types the value here will always equal - * zero). - */ - __u64 cpu_visible_size; - /** - * @cpu_visible_used: Estimate of CPU visible memory used, in - * bytes. - * - * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable - * accounting. Without this the value here will always equal - * zero. Note this is only currently tracked for - * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value - * here will always be zero). - */ - __u64 cpu_visible_used; - /** @reserved: Reserved */ - __u64 reserved[6]; -}; - -/** - * struct drm_xe_query_mem_regions - describe memory regions - * - * If a query is made with a struct drm_xe_device_query where .query - * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses - * struct drm_xe_query_mem_regions in .data. 
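A minimal sketch of how struct drm_xe_query_mem_regions is typically retrieved: the same two-call size/data pattern used for every &DRM_IOCTL_XE_DEVICE_QUERY type applies. Here `fd` is assumed to be an open Xe device file descriptor; includes and error handling are omitted, as in the other snippets in this header.

.. code-block:: C

    struct drm_xe_device_query query = {
        .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
    };
    struct drm_xe_query_mem_regions *regions;

    /* First call with .size == 0: the driver fills in the required size */
    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
    regions = malloc(query.size);
    query.data = (uintptr_t)regions;
    /* Second call with the matching size: the driver copies the data */
    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);

    for (int i = 0; i < regions->num_mem_regions; i++)
        printf("region instance %u: class %u, %llu bytes\n",
               regions->mem_regions[i].instance,
               regions->mem_regions[i].mem_class,
               (unsigned long long)regions->mem_regions[i].total_size);
    free(regions);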
- */ -struct drm_xe_query_mem_regions { - /** @num_mem_regions: number of memory regions returned in @mem_regions */ - __u32 num_mem_regions; - /** @pad: MBZ */ - __u32 pad; - /** @mem_regions: The returned memory regions for this device */ - struct drm_xe_mem_region mem_regions[]; -}; - -/** - * struct drm_xe_query_config - describe the device configuration - * - * If a query is made with a struct drm_xe_device_query where .query - * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses - * struct drm_xe_query_config in .data. - * - * The index in @info can be: - * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits) - * and the device revision (next 8 bits) - * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device - * configuration, see list below - * - * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device - * has usable VRAM - * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment - * required by this device, typically SZ_4K or SZ_64K - * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address - * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest - * available exec queue priority - */ -struct drm_xe_query_config { - /** @num_params: number of parameters returned in info */ - __u32 num_params; - - /** @pad: MBZ */ - __u32 pad; - -#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0 -#define DRM_XE_QUERY_CONFIG_FLAGS 1 - #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0) -#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2 -#define DRM_XE_QUERY_CONFIG_VA_BITS 3 -#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4 - /** @info: array of elements containing the config info */ - __u64 info[]; -}; - -/** - * struct drm_xe_gt - describe an individual GT. - * - * To be used with drm_xe_query_gt_list, which will return a list with all the - * existing GT individual descriptions. - * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for - * implementing graphics and/or media operations. - * - * The index in @type can be: - * - %DRM_XE_QUERY_GT_TYPE_MAIN - * - %DRM_XE_QUERY_GT_TYPE_MEDIA - */ -struct drm_xe_gt { -#define DRM_XE_QUERY_GT_TYPE_MAIN 0 -#define DRM_XE_QUERY_GT_TYPE_MEDIA 1 - /** @type: GT type: Main or Media */ - __u16 type; - /** @tile_id: Tile ID where this GT lives (Information only) */ - __u16 tile_id; - /** @gt_id: Unique ID of this GT within the PCI Device */ - __u16 gt_id; - /** @pad: MBZ */ - __u16 pad[3]; - /** @reference_clock: A clock frequency for timestamp */ - __u32 reference_clock; - /** - * @near_mem_regions: Bit mask of instances from - * drm_xe_query_mem_regions that are nearest to the current engines - * of this GT. - * Each index in this mask refers directly to the struct - * drm_xe_query_mem_regions' instance, no assumptions should - * be made about order. The type of each region is described - * by struct drm_xe_query_mem_regions' mem_class. - */ - __u64 near_mem_regions; - /** - * @far_mem_regions: Bit mask of instances from - * drm_xe_query_mem_regions that are far from the engines of this GT. - * In general, they have extra indirections when compared to the - * @near_mem_regions. For a discrete device this could mean system - * memory and memory living in a different tile. - * Each index in this mask refers directly to the struct - * drm_xe_query_mem_regions' instance, no assumptions should - * be made about order. The type of each region is described - * by struct drm_xe_query_mem_regions' mem_class. 
- */ - __u64 far_mem_regions; - /** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */ - __u16 ip_ver_major; - /** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */ - __u16 ip_ver_minor; - /** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */ - __u16 ip_ver_rev; - /** @pad2: MBZ */ - __u16 pad2; - /** @reserved: Reserved */ - __u64 reserved[7]; -}; - -/** - * struct drm_xe_query_gt_list - A list with GT description items. - * - * If a query is made with a struct drm_xe_device_query where .query - * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct - * drm_xe_query_gt_list in .data. - */ -struct drm_xe_query_gt_list { - /** @num_gt: number of GT items returned in gt_list */ - __u32 num_gt; - /** @pad: MBZ */ - __u32 pad; - /** @gt_list: The GT list returned for this device */ - struct drm_xe_gt gt_list[]; -}; - -/** - * struct drm_xe_query_topology_mask - describe the topology mask of a GT - * - * This is the hardware topology which reflects the internal physical - * structure of the GPU. - * - * If a query is made with a struct drm_xe_device_query where .query - * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses - * struct drm_xe_query_topology_mask in .data. - * - * The @type can be: - * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices - * (DSS) available for geometry operations. For example a query response - * containing the following in mask: - * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00`` - * means 32 DSS are available for geometry. - * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices - * (DSS) available for compute operations. For example a query response - * containing the following in mask: - * ``DSS_COMPUTE ff ff ff ff 00 00 00 00`` - * means 32 DSS are available for compute. - * - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type - * may be omitted if the driver is unable to query the mask from the - * hardware. - * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU) - * available per Dual Sub Slices (DSS). For example a query response - * containing the following in mask: - * ``EU_PER_DSS ff ff 00 00 00 00 00 00`` - * means each DSS has 16 SIMD8 EUs. This type may be omitted if device - * doesn't have SIMD8 EUs. - * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution - * Units (EU) available per Dual Sub Slices (DSS). For example a query - * response containing the following in mask: - * ``SIMD16_EU_PER_DSS ff ff 00 00 00 00 00 00`` - * means each DSS has 16 SIMD16 EUs. This type may be omitted if device - * doesn't have SIMD16 EUs. - */ -struct drm_xe_query_topology_mask { - /** @gt_id: GT ID the mask is associated with */ - __u16 gt_id; - -#define DRM_XE_TOPO_DSS_GEOMETRY 1 -#define DRM_XE_TOPO_DSS_COMPUTE 2 -#define DRM_XE_TOPO_L3_BANK 3 -#define DRM_XE_TOPO_EU_PER_DSS 4 -#define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5 - /** @type: type of mask */ - __u16 type; - - /** @num_bytes: number of bytes in requested mask */ - __u32 num_bytes; - - /** @mask: little-endian mask of @num_bytes */ - __u8 mask[]; -}; - -/** - * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps - * - * If a query is made with a struct drm_xe_device_query where .query is equal to - * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles - * in .data. struct drm_xe_query_engine_cycles is allocated by the user and - * .data points to this allocated structure. 
- * - * The query returns the engine cycles, which along with GT's @reference_clock, - * can be used to calculate the engine timestamp. In addition the - * query returns a set of cpu timestamps that indicate when the command - * streamer cycle count was captured. - */ -struct drm_xe_query_engine_cycles { - /** - * @eci: This is input by the user and is the engine for which command - * streamer cycles is queried. - */ - struct drm_xe_engine_class_instance eci; - - /** - * @clockid: This is input by the user and is the reference clock id for - * CPU timestamp. For definition, see clock_gettime(2) and - * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC, - * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI. - */ - __s32 clockid; - - /** @width: Width of the engine cycle counter in bits. */ - __u32 width; - - /** - * @engine_cycles: Engine cycles as read from its register - * at 0x358 offset. - */ - __u64 engine_cycles; - - /** - * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before - * reading the engine_cycles register using the reference clockid set by the - * user. - */ - __u64 cpu_timestamp; - - /** - * @cpu_delta: Time delta in ns captured around reading the lower dword - * of the engine_cycles register. - */ - __u64 cpu_delta; -}; - -/** - * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version - * - * Given a uc_type this will return the branch, major, minor and patch version - * of the micro-controller firmware. - */ -struct drm_xe_query_uc_fw_version { - /** @uc_type: The micro-controller type to query firmware version */ -#define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0 -#define XE_QUERY_UC_TYPE_HUC 1 - __u16 uc_type; - - /** @pad: MBZ */ - __u16 pad; - - /** @branch_ver: branch uc fw version */ - __u32 branch_ver; - /** @major_ver: major uc fw version */ - __u32 major_ver; - /** @minor_ver: minor uc fw version */ - __u32 minor_ver; - /** @patch_ver: patch uc fw version */ - __u32 patch_ver; - - /** @pad2: MBZ */ - __u32 pad2; - - /** @reserved: Reserved */ - __u64 reserved; -}; - -/** - * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main - * structure to query device information - * - * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_* - * and sets the value in the query member. This determines the type of - * the structure provided by the driver in data, among struct drm_xe_query_*. - * - * The @query can be: - * - %DRM_XE_DEVICE_QUERY_ENGINES - * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS - * - %DRM_XE_DEVICE_QUERY_CONFIG - * - %DRM_XE_DEVICE_QUERY_GT_LIST - * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware - * configuration of the device such as information on slices, memory, - * caches, and so on. It is provided as a table of key / value - * attributes. - * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY - * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES - * - * If size is set to 0, the driver fills it with the required size for - * the requested type of data to query. If size is equal to the required - * size, the queried information is copied into data. If size is set to - * a value different from 0 and different from the required size, the - * IOCTL call returns -EINVAL. - * - * For example the following code snippet allows retrieving and printing - * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES: - * - * .. 
code-block:: C - * - * struct drm_xe_query_engines *engines; - * struct drm_xe_device_query query = { - * .extensions = 0, - * .query = DRM_XE_DEVICE_QUERY_ENGINES, - * .size = 0, - * .data = 0, - * }; - * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); - * engines = malloc(query.size); - * query.data = (uintptr_t)engines; - * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); - * for (int i = 0; i < engines->num_engines; i++) { - * printf("Engine %d: %s\n", i, - * engines->engines[i].instance.engine_class == - * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER": - * engines->engines[i].instance.engine_class == - * DRM_XE_ENGINE_CLASS_COPY ? "COPY": - * engines->engines[i].instance.engine_class == - * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE": - * engines->engines[i].instance.engine_class == - * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE": - * engines->engines[i].instance.engine_class == - * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE": - * "UNKNOWN"); - * } - * free(engines); - */ -struct drm_xe_device_query { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - -#define DRM_XE_DEVICE_QUERY_ENGINES 0 -#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1 -#define DRM_XE_DEVICE_QUERY_CONFIG 2 -#define DRM_XE_DEVICE_QUERY_GT_LIST 3 -#define DRM_XE_DEVICE_QUERY_HWCONFIG 4 -#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5 -#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6 -#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7 -#define DRM_XE_DEVICE_QUERY_OA_UNITS 8 - /** @query: The type of data to query */ - __u32 query; - - /** @size: Size of the queried data */ - __u32 size; - - /** @data: Queried data is placed here */ - __u64 data; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for - * gem creation - * - * The @flags can be: - * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT - * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a - * possible placement, ensure that the corresponding VRAM allocation - * will always use the CPU accessible part of VRAM. This is important - * for small-bar systems (on full-bar systems this gets turned into a - * noop). - * Note1: System memory can be used as an extra placement if the kernel - * should spill the allocation to system memory, if space can't be made - * available in the CPU accessible part of VRAM (giving the same - * behaviour as the i915 interface, see - * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS). - * Note2: For clear-color CCS surfaces the kernel needs to read the - * clear-color value stored in the buffer, and on discrete platforms we - * need to use VRAM for display surfaces, therefore the kernel requires - * setting this flag for such objects, otherwise an error is thrown on - * small-bar systems. - * - * @cpu_caching supports the following values: - * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back - * caching. On iGPU this can't be used for scanout surfaces. Currently - * not allowed for objects placed in VRAM. - * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This - * is uncached. Scanout surfaces should likely use this. All objects - * that can be placed in VRAM must use this. - */ -struct drm_xe_gem_create { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** - * @size: Size of the object to be created, must match region - * (system or vram) minimum alignment (&min_page_size). 
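A minimal sketch of object creation followed by CPU mapping through &DRM_IOCTL_XE_GEM_MMAP_OFFSET. `fd` is an open Xe device file descriptor, `sysmem_instance` is assumed to have been taken from a prior DRM_XE_DEVICE_QUERY_MEM_REGIONS query, and includes and error handling are omitted.

.. code-block:: C

    struct drm_xe_gem_create create = {
        .size = 0x10000,                    /* must respect min_page_size */
        .placement = 1 << sysmem_instance,  /* mask of region instances */
        .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
    };
    ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);

    struct drm_xe_gem_mmap_offset mmo = {
        .handle = create.handle,
    };
    ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);

    /* mmap() the object through the returned fake offset */
    void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, mmo.offset);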
- */ - __u64 size; - - /** - * @placement: A mask of memory instances of where BO can be placed. - * Each index in this mask refers directly to the struct - * drm_xe_query_mem_regions' instance, no assumptions should - * be made about order. The type of each region is described - * by struct drm_xe_query_mem_regions' mem_class. - */ - __u32 placement; - -#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0) -#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1) -#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2) - /** - * @flags: Flags, currently a mask of memory instances of where BO can - * be placed - */ - __u32 flags; - - /** - * @vm_id: Attached VM, if any - * - * If a VM is specified, this BO must: - * - * 1. Only ever be bound to that VM. - * 2. Cannot be exported as a PRIME fd. - */ - __u32 vm_id; - - /** - * @handle: Returned handle for the object. - * - * Object handles are nonzero. - */ - __u32 handle; - -#define DRM_XE_GEM_CPU_CACHING_WB 1 -#define DRM_XE_GEM_CPU_CACHING_WC 2 - /** - * @cpu_caching: The CPU caching mode to select for this object. If - * mmaping the object the mode selected here will also be used. The - * exception is when mapping system memory (including data evicted - * to system) on discrete GPUs. The caching mode selected will - * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency - * between GPU- and CPU is guaranteed. The caching mode of - * existing CPU-mappings will be updated transparently to - * user-space clients. - */ - __u16 cpu_caching; - /** @pad: MBZ */ - __u16 pad[3]; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET - */ -struct drm_xe_gem_mmap_offset { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @handle: Handle for the object being mapped. */ - __u32 handle; - - /** @flags: Must be zero */ - __u32 flags; - - /** @offset: The fake offset to use for subsequent mmap call */ - __u64 offset; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE - * - * The @flags can be: - * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts - * exec submissions to its exec_queues that don't have an upper time - * limit on the job execution time. But exec submissions to these - * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ, - * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF, - * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL. - * LR VMs can be created in recoverable page-fault mode using - * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it. - * If that flag is omitted, the UMD can not rely on the slightly - * different per-VM overcommit semantics that are enabled by - * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may - * still enable recoverable pagefaults if supported by the device. - * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also - * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on - * demand when accessed, and also allows per-VM overcommit of memory. - * The xe driver internally uses recoverable pagefaults to implement - * this. 
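A minimal sketch of creating, and later destroying, a long-running VM in recoverable page-fault mode, assuming the device supports it; `fd` is an open Xe device file descriptor and error handling is omitted.

.. code-block:: C

    struct drm_xe_vm_create vm_create = {
        .flags = DRM_XE_VM_CREATE_FLAG_LR_MODE |
                 DRM_XE_VM_CREATE_FLAG_FAULT_MODE,
    };
    ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_create);
    /* vm_create.vm_id now refers to the long-running, faultable VM */

    struct drm_xe_vm_destroy vm_destroy = {
        .vm_id = vm_create.vm_id,
    };
    ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &vm_destroy);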
- */ -struct drm_xe_vm_create { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - -#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0) -#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1) -#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2) - /** @flags: Flags */ - __u32 flags; - - /** @vm_id: Returned VM ID */ - __u32 vm_id; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY - */ -struct drm_xe_vm_destroy { - /** @vm_id: VM ID */ - __u32 vm_id; - - /** @pad: MBZ */ - __u32 pad; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -struct drm_xe_vm_bind_op_ext_attach_debug { - /** @base: base user extension */ - struct drm_xe_user_extension base; - - /** @id: Debug object id from create metadata */ - __u64 metadata_id; - - /** @flags: Flags */ - __u64 flags; - - /** @cookie: Cookie */ - __u64 cookie; - - /** @reserved: Reserved */ - __u64 reserved; -}; - -/** - * struct drm_xe_vm_bind_op - run bind operations - * - * The @op can be: - * - %DRM_XE_VM_BIND_OP_MAP - * - %DRM_XE_VM_BIND_OP_UNMAP - * - %DRM_XE_VM_BIND_OP_MAP_USERPTR - * - %DRM_XE_VM_BIND_OP_UNMAP_ALL - * - %DRM_XE_VM_BIND_OP_PREFETCH - * - * and the @flags can be: - * - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only - * to ensure write protection - * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the - * MAP operation immediately rather than deferring the MAP to the page - * fault handler. This is implied on a non-faulting VM as there is no - * fault handler to defer to. - * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page - * tables are setup with a special bit which indicates writes are - * dropped and all reads return zero. In the future, the NULL flags - * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO - * handle MBZ, and the BO offset MBZ. This flag is intended to - * implement VK sparse bindings. - */ - -struct drm_xe_vm_bind_op { -#define XE_VM_BIND_OP_EXTENSIONS_ATTACH_DEBUG 0 - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** - * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP - */ - __u32 obj; - - /** - * @pat_index: The platform defined @pat_index to use for this mapping. - * The index basically maps to some predefined memory attributes, - * including things like caching, coherency, compression etc. The exact - * meaning of the pat_index is platform specific and defined in the - * Bspec and PRMs. When the KMD sets up the binding the index here is - * encoded into the ppGTT PTE. - * - * For coherency the @pat_index needs to be at least 1way coherent when - * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD - * will extract the coherency mode from the @pat_index and reject if - * there is a mismatch (see note below for pre-MTL platforms). - * - * Note: On pre-MTL platforms there is only a caching mode and no - * explicit coherency mode, but on such hardware there is always a - * shared-LLC (or is dgpu) so all GT memory accesses are coherent with - * CPU caches even with the caching mode set as uncached. It's only the - * display engine that is incoherent (on dgpu it must be in VRAM which - * is always mapped as WC on the CPU). 
However to keep the uapi somewhat - * consistent with newer platforms the KMD groups the different cache - * levels into the following coherency buckets on all pre-MTL platforms: - * - * ppGTT UC -> COH_NONE - * ppGTT WC -> COH_NONE - * ppGTT WT -> COH_NONE - * ppGTT WB -> COH_AT_LEAST_1WAY - * - * In practice UC/WC/WT should only ever used for scanout surfaces on - * such platforms (or perhaps in general for dma-buf if shared with - * another device) since it is only the display engine that is actually - * incoherent. Everything else should typically use WB given that we - * have a shared-LLC. On MTL+ this completely changes and the HW - * defines the coherency mode as part of the @pat_index, where - * incoherent GT access is possible. - * - * Note: For userptr and externally imported dma-buf the kernel expects - * either 1WAY or 2WAY for the @pat_index. - * - * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions - * on the @pat_index. For such mappings there is no actual memory being - * mapped (the address in the PTE is invalid), so the various PAT memory - * attributes likely do not apply. Simply leaving as zero is one - * option (still a valid pat_index). - */ - __u16 pat_index; - - /** @pad: MBZ */ - __u16 pad; - - union { - /** - * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE, - * ignored for unbind - */ - __u64 obj_offset; - - /** @userptr: user pointer to bind on */ - __u64 userptr; - }; - - /** - * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL - */ - __u64 range; - - /** @addr: Address to operate on, MBZ for UNMAP_ALL */ - __u64 addr; - -#define DRM_XE_VM_BIND_OP_MAP 0x0 -#define DRM_XE_VM_BIND_OP_UNMAP 0x1 -#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2 -#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3 -#define DRM_XE_VM_BIND_OP_PREFETCH 0x4 - /** @op: Bind operation to perform */ - __u32 op; - -#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0) -#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1) -#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2) -#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3) - /** @flags: Bind flags */ - __u32 flags; - - /** - * @prefetch_mem_region_instance: Memory region to prefetch VMA to. - * It is a region instance, not a mask. - * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation. - */ - __u32 prefetch_mem_region_instance; - - /** @pad2: MBZ */ - __u32 pad2; - - /** @reserved: Reserved */ - __u64 reserved[3]; -}; - -/** - * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND - * - * Below is an example of a minimal use of @drm_xe_vm_bind to - * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to - * illustrate `userptr`. It can be synchronized by using the example - * provided for @drm_xe_sync. - * - * .. code-block:: C - * - * data = aligned_alloc(ALIGNMENT, BO_SIZE); - * struct drm_xe_vm_bind bind = { - * .vm_id = vm, - * .num_binds = 1, - * .bind.obj = 0, - * .bind.obj_offset = to_user_pointer(data), - * .bind.range = BO_SIZE, - * .bind.addr = BIND_ADDRESS, - * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR, - * .bind.flags = 0, - * .num_syncs = 1, - * .syncs = &sync, - * .exec_queue_id = 0, - * }; - * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind); - * - */ -struct drm_xe_vm_bind { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @vm_id: The ID of the VM to bind to */ - __u32 vm_id; - - /** - * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND - * and exec queue must have same vm_id. If zero, the default VM bind engine - * is used. 
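One way to obtain a non-zero exec_queue_id for ordered bind operations is to create an exec queue of class DRM_XE_ENGINE_CLASS_VM_BIND, as sketched below; `vm` is assumed to be an existing VM ID and error handling is omitted.

.. code-block:: C

    struct drm_xe_engine_class_instance bind_engine = {
        .engine_class = DRM_XE_ENGINE_CLASS_VM_BIND,
    };
    struct drm_xe_exec_queue_create queue_create = {
        .width = 1,
        .num_placements = 1,
        .vm_id = vm,
        .instances = (uintptr_t)&bind_engine,
    };
    ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &queue_create);
    /* queue_create.exec_queue_id can be passed as exec_queue_id in
     * struct drm_xe_vm_bind so binds on this queue complete in order */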
- */ - __u32 exec_queue_id; - - /** @pad: MBZ */ - __u32 pad; - - /** @num_binds: number of binds in this IOCTL */ - __u32 num_binds; - - union { - /** @bind: used if num_binds == 1 */ - struct drm_xe_vm_bind_op bind; - - /** - * @vector_of_binds: userptr to array of struct - * drm_xe_vm_bind_op if num_binds > 1 - */ - __u64 vector_of_binds; - }; - - /** @pad2: MBZ */ - __u32 pad2; - - /** @num_syncs: amount of syncs to wait on */ - __u32 num_syncs; - - /** @syncs: pointer to struct drm_xe_sync array */ - __u64 syncs; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE - * - * The example below shows how to use @drm_xe_exec_queue_create to create - * a simple exec_queue (no parallel submission) of class - * &DRM_XE_ENGINE_CLASS_RENDER. - * - * .. code-block:: C - * - * struct drm_xe_engine_class_instance instance = { - * .engine_class = DRM_XE_ENGINE_CLASS_RENDER, - * }; - * struct drm_xe_exec_queue_create exec_queue_create = { - * .extensions = 0, - * .vm_id = vm, - * .num_bb_per_exec = 1, - * .num_eng_per_bb = 1, - * .instances = to_user_pointer(&instance), - * }; - * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create); - * - */ -struct drm_xe_exec_queue_create { -#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0 -#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 -#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 -#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_EUDEBUG 2 -#define DRM_XE_EXEC_QUEUE_EUDEBUG_FLAG_ENABLE (1 << 0) - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @width: submission width (number BB per exec) for this exec queue */ - __u16 width; - - /** @num_placements: number of valid placements for this exec queue */ - __u16 num_placements; - - /** @vm_id: VM to use for this exec queue */ - __u32 vm_id; - - /** @flags: MBZ */ - __u32 flags; - - /** @exec_queue_id: Returned exec queue ID */ - __u32 exec_queue_id; - - /** - * @instances: user pointer to a 2-d array of struct - * drm_xe_engine_class_instance - * - * length = width (i) * num_placements (j) - * index = j + i * width - */ - __u64 instances; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY - */ -struct drm_xe_exec_queue_destroy { - /** @exec_queue_id: Exec queue ID */ - __u32 exec_queue_id; - - /** @pad: MBZ */ - __u32 pad; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY - * - * The @property can be: - * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN - */ -struct drm_xe_exec_queue_get_property { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @exec_queue_id: Exec queue ID */ - __u32 exec_queue_id; - -#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 - /** @property: property to get */ - __u32 property; - - /** @value: property value */ - __u64 value; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_sync - sync object - * - * The @type can be: - * - %DRM_XE_SYNC_TYPE_SYNCOBJ - * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ - * - %DRM_XE_SYNC_TYPE_USER_FENCE - * - * and the @flags can be: - * - %DRM_XE_SYNC_FLAG_SIGNAL - * - * A minimal use of @drm_xe_sync looks like this: - * - * .. 
code-block:: C - * - * struct drm_xe_sync sync = { - * .flags = DRM_XE_SYNC_FLAG_SIGNAL, - * .type = DRM_XE_SYNC_TYPE_SYNCOBJ, - * }; - * struct drm_syncobj_create syncobj_create = { 0 }; - * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create); - * sync.handle = syncobj_create.handle; - * ... - * use of &sync in drm_xe_exec or drm_xe_vm_bind - * ... - * struct drm_syncobj_wait wait = { - * .handles = &sync.handle, - * .timeout_nsec = INT64_MAX, - * .count_handles = 1, - * .flags = 0, - * .first_signaled = 0, - * .pad = 0, - * }; - * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait); - */ -struct drm_xe_sync { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - -#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0 -#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1 -#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2 - /** @type: Type of the this sync object */ - __u32 type; - -#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0) - /** @flags: Sync Flags */ - __u32 flags; - - union { - /** @handle: Handle for the object */ - __u32 handle; - - /** - * @addr: Address of user fence. When sync is passed in via exec - * IOCTL this is a GPU address in the VM. When sync passed in via - * VM bind IOCTL this is a user pointer. In either case, it is - * the users responsibility that this address is present and - * mapped when the user fence is signalled. Must be qword - * aligned. - */ - __u64 addr; - }; - - /** - * @timeline_value: Input for the timeline sync object. Needs to be - * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ. - */ - __u64 timeline_value; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC - * - * This is an example to use @drm_xe_exec for execution of the object - * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue - * (see example in @drm_xe_exec_queue_create). It can be synchronized - * by using the example provided for @drm_xe_sync. - * - * .. code-block:: C - * - * struct drm_xe_exec exec = { - * .exec_queue_id = exec_queue, - * .syncs = &sync, - * .num_syncs = 1, - * .address = BIND_ADDRESS, - * .num_batch_buffer = 1, - * }; - * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec); - * - */ -struct drm_xe_exec { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @exec_queue_id: Exec queue ID for the batch buffer */ - __u32 exec_queue_id; - - /** @num_syncs: Amount of struct drm_xe_sync in array. */ - __u32 num_syncs; - - /** @syncs: Pointer to struct drm_xe_sync array. */ - __u64 syncs; - - /** - * @address: address of batch buffer if num_batch_buffer == 1 or an - * array of batch buffer addresses - */ - __u64 address; - - /** - * @num_batch_buffer: number of batch buffer in this exec, must match - * the width of the engine - */ - __u16 num_batch_buffer; - - /** @pad: MBZ */ - __u16 pad[3]; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE - * - * Wait on user fence, XE will wake-up on every HW engine interrupt in the - * instances list and check if user fence is complete:: - * - * (*addr & MASK) OP (VALUE & MASK) - * - * Returns to user on user fence completion or timeout. 
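A minimal sketch of the wait described above, blocking until the quadword at `fence_addr` equals an expected value. `fence_addr` is assumed to be a qword-aligned user pointer that a previously submitted DRM_XE_SYNC_TYPE_USER_FENCE sync will write, `exec_queue` is a queue returned by &DRM_IOCTL_XE_EXEC_QUEUE_CREATE, and error handling is omitted.

.. code-block:: C

    struct drm_xe_wait_user_fence wait = {
        .addr = fence_addr,                /* qword-aligned user pointer */
        .op = DRM_XE_UFENCE_WAIT_OP_EQ,
        .value = 0xc0ffee,
        .mask = 0xffffffffffffffffull,     /* compare the full 64 bits */
        .timeout = 1000 * 1000 * 1000,     /* relative timeout, 1 s in ns */
        .exec_queue_id = exec_queue,
    };
    ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);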
- * - * The @op can be: - * - %DRM_XE_UFENCE_WAIT_OP_EQ - * - %DRM_XE_UFENCE_WAIT_OP_NEQ - * - %DRM_XE_UFENCE_WAIT_OP_GT - * - %DRM_XE_UFENCE_WAIT_OP_GTE - * - %DRM_XE_UFENCE_WAIT_OP_LT - * - %DRM_XE_UFENCE_WAIT_OP_LTE - * - * and the @flags can be: - * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME - * - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP - * - * The @mask values can be for example: - * - 0xffu for u8 - * - 0xffffu for u16 - * - 0xffffffffu for u32 - * - 0xffffffffffffffffu for u64 - */ -struct drm_xe_wait_user_fence { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** - * @addr: user pointer address to wait on, must qword aligned - */ - __u64 addr; - -#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0 -#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1 -#define DRM_XE_UFENCE_WAIT_OP_GT 0x2 -#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3 -#define DRM_XE_UFENCE_WAIT_OP_LT 0x4 -#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5 - /** @op: wait operation (type of comparison) */ - __u16 op; - -#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0) - /** @flags: wait flags */ - __u16 flags; - - /** @pad: MBZ */ - __u32 pad; - - /** @value: compare value */ - __u64 value; - - /** @mask: comparison mask */ - __u64 mask; - - /** - * @timeout: how long to wait before bailing, value in nanoseconds. - * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout) - * it contains timeout expressed in nanoseconds to wait (fence will - * expire at now() + timeout). - * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flat is set (absolute timeout) wait - * will end at timeout (uses system MONOTONIC_CLOCK). - * Passing negative timeout leads to neverending wait. - * - * On relative timeout this value is updated with timeout left - * (for restarting the call in case of signal delivery). - * On absolute timeout this value stays intact (restarted call still - * expire at the same point of time). - */ - __s64 timeout; - - /** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */ - __u32 exec_queue_id; - - /** @pad2: MBZ */ - __u32 pad2; - - /** @reserved: Reserved */ - __u64 reserved[2]; -}; - -/** - * enum drm_xe_observation_type - Observation stream types - */ -enum drm_xe_observation_type { - /** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */ - DRM_XE_OBSERVATION_TYPE_OA, -}; - -/** - * enum drm_xe_observation_op - Observation stream ops - */ -enum drm_xe_observation_op { - /** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */ - DRM_XE_OBSERVATION_OP_STREAM_OPEN, - - /** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */ - DRM_XE_OBSERVATION_OP_ADD_CONFIG, - - /** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */ - DRM_XE_OBSERVATION_OP_REMOVE_CONFIG, -}; - -/** - * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION - * - * The observation layer enables multiplexing observation streams of - * multiple types. The actual params for a particular stream operation are - * supplied via the @param pointer (use __copy_from_user to get these - * params). 
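For the OA stream type, the per-stream parameters are passed as a chain of struct drm_xe_ext_set_property entries, as sketched below. `config_id` is assumed to come from an earlier DRM_XE_OBSERVATION_OP_ADD_CONFIG, a real open would normally chain further properties (OA unit, format, period exponent), the stream-open op is assumed to return the new observation stream fd, and error handling is omitted.

.. code-block:: C

    struct drm_xe_ext_set_property metric_set = {
        .base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
        .property = DRM_XE_OA_PROPERTY_OA_METRIC_SET,
        .value = config_id,                /* from OP_ADD_CONFIG */
    };
    struct drm_xe_ext_set_property sample_oa = {
        .base.next_extension = (uintptr_t)&metric_set,
        .base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
        .property = DRM_XE_OA_PROPERTY_SAMPLE_OA,
        .value = 1,                        /* include raw OA reports */
    };
    struct drm_xe_observation_param param = {
        .observation_type = DRM_XE_OBSERVATION_TYPE_OA,
        .observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
        .param = (uintptr_t)&sample_oa,
    };
    int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &param);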
- */ -struct drm_xe_observation_param { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - /** @observation_type: observation stream type, of enum @drm_xe_observation_type */ - __u64 observation_type; - /** @observation_op: observation stream op, of enum @drm_xe_observation_op */ - __u64 observation_op; - /** @param: Pointer to actual stream params */ - __u64 param; -}; - -/** - * enum drm_xe_observation_ioctls - Observation stream fd ioctl's - * - * Information exchanged between userspace and kernel for observation fd - * ioctl's is stream type specific - */ -enum drm_xe_observation_ioctls { - /** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */ - DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0), - - /** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for a observation stream */ - DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1), - - /** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */ - DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2), - - /** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */ - DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3), - - /** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */ - DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4), -}; - -/** - * enum drm_xe_oa_unit_type - OA unit types - */ -enum drm_xe_oa_unit_type { - /** - * @DRM_XE_OA_UNIT_TYPE_OAG: OAG OA unit. OAR/OAC are considered - * sub-types of OAG. For OAR/OAC, use OAG. - */ - DRM_XE_OA_UNIT_TYPE_OAG, - - /** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */ - DRM_XE_OA_UNIT_TYPE_OAM, -}; - -/** - * struct drm_xe_oa_unit - describe OA unit - */ -struct drm_xe_oa_unit { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @oa_unit_id: OA unit ID */ - __u32 oa_unit_id; - - /** @oa_unit_type: OA unit type of @drm_xe_oa_unit_type */ - __u32 oa_unit_type; - - /** @capabilities: OA capabilities bit-mask */ - __u64 capabilities; -#define DRM_XE_OA_CAPS_BASE (1 << 0) -#define DRM_XE_OA_CAPS_SYNCS (1 << 1) - - /** @oa_timestamp_freq: OA timestamp freq */ - __u64 oa_timestamp_freq; - - /** @reserved: MBZ */ - __u64 reserved[4]; - - /** @num_engines: number of engines in @eci array */ - __u64 num_engines; - - /** @eci: engines attached to this OA unit */ - struct drm_xe_engine_class_instance eci[]; -}; - -/** - * struct drm_xe_query_oa_units - describe OA units - * - * If a query is made with a struct drm_xe_device_query where .query - * is equal to DRM_XE_DEVICE_QUERY_OA_UNITS, then the reply uses struct - * drm_xe_query_oa_units in .data. - * - * OA unit properties for all OA units can be accessed using a code block - * such as the one below: - * - * .. code-block:: C - * - * struct drm_xe_query_oa_units *qoa; - * struct drm_xe_oa_unit *oau; - * u8 *poau; - * - * // malloc qoa and issue DRM_XE_DEVICE_QUERY_OA_UNITS. Then: - * poau = (u8 *)&qoa->oa_units[0]; - * for (int i = 0; i < qoa->num_oa_units; i++) { - * oau = (struct drm_xe_oa_unit *)poau; - * // Access 'struct drm_xe_oa_unit' fields here - * poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]); - * } - */ -struct drm_xe_query_oa_units { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - /** @num_oa_units: number of OA units returned in oau[] */ - __u32 num_oa_units; - /** @pad: MBZ */ - __u32 pad; - /** - * @oa_units: struct @drm_xe_oa_unit array returned for this device. 
- * Written below as a u64 array to avoid problems with nested flexible - * arrays with some compilers - */ - __u64 oa_units[]; -}; - -/** - * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec - * 52198/60942 - */ -enum drm_xe_oa_format_type { - /** @DRM_XE_OA_FMT_TYPE_OAG: OAG report format */ - DRM_XE_OA_FMT_TYPE_OAG, - /** @DRM_XE_OA_FMT_TYPE_OAR: OAR report format */ - DRM_XE_OA_FMT_TYPE_OAR, - /** @DRM_XE_OA_FMT_TYPE_OAM: OAM report format */ - DRM_XE_OA_FMT_TYPE_OAM, - /** @DRM_XE_OA_FMT_TYPE_OAC: OAC report format */ - DRM_XE_OA_FMT_TYPE_OAC, - /** @DRM_XE_OA_FMT_TYPE_OAM_MPEC: OAM SAMEDIA or OAM MPEC report format */ - DRM_XE_OA_FMT_TYPE_OAM_MPEC, - /** @DRM_XE_OA_FMT_TYPE_PEC: PEC report format */ - DRM_XE_OA_FMT_TYPE_PEC, -}; - -/** - * enum drm_xe_oa_property_id - OA stream property id's - * - * Stream params are specified as a chain of @drm_xe_ext_set_property - * struct's, with @property values from enum @drm_xe_oa_property_id and - * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY. - * @param field in struct @drm_xe_observation_param points to the first - * @drm_xe_ext_set_property struct. - * - * Exactly the same mechanism is also used for stream reconfiguration using the - * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a - * subset of properties below can be specified for stream reconfiguration. - */ -enum drm_xe_oa_property_id { -#define DRM_XE_OA_EXTENSION_SET_PROPERTY 0 - /** - * @DRM_XE_OA_PROPERTY_OA_UNIT_ID: ID of the OA unit on which to open - * the OA stream, see @oa_unit_id in 'struct - * drm_xe_query_oa_units'. Defaults to 0 if not provided. - */ - DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1, - - /** - * @DRM_XE_OA_PROPERTY_SAMPLE_OA: A value of 1 requests inclusion of raw - * OA unit reports or stream samples in a global buffer attached to an - * OA unit. - */ - DRM_XE_OA_PROPERTY_SAMPLE_OA, - - /** - * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA - * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG. - */ - DRM_XE_OA_PROPERTY_OA_METRIC_SET, - - /** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */ - DRM_XE_OA_PROPERTY_OA_FORMAT, - /* - * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942, - * in terms of the following quantities: a. enum @drm_xe_oa_format_type - * b. Counter select c. Counter size and d. BC report. Also refer to the - * oa_formats array in drivers/gpu/drm/xe/xe_oa.c. - */ -#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0) -#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8) -#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16) -#define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24) - - /** - * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit - * sampling with sampling frequency proportional to 2^(period_exponent + 1) - */ - DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT, - - /** - * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA - * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE). - */ - DRM_XE_OA_PROPERTY_OA_DISABLED, - - /** - * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific - * @exec_queue_id. OA queries can be executed on this exec queue. - */ - DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID, - - /** - * @DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE: Optional engine instance to - * pass along with @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID or will default to 0. 
- */ - DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE, - - /** - * @DRM_XE_OA_PROPERTY_NO_PREEMPT: Allow preemption and timeslicing - * to be disabled for the stream exec queue. - */ - DRM_XE_OA_PROPERTY_NO_PREEMPT, - - /** - * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array - * specified in @DRM_XE_OA_PROPERTY_SYNCS - */ - DRM_XE_OA_PROPERTY_NUM_SYNCS, - - /** - * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array - * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA - * configuration will wait till input fences signal. Output fences - * will signal after the new OA configuration takes effect. For - * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar - * to the VM bind case. - */ - DRM_XE_OA_PROPERTY_SYNCS, -}; - -/** - * struct drm_xe_oa_config - OA metric configuration - * - * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A - * particular config can be specified when opening an OA stream using - * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property. - */ -struct drm_xe_oa_config { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */ - char uuid[36]; - - /** @n_regs: Number of regs in @regs_ptr */ - __u32 n_regs; - - /** - * @regs_ptr: Pointer to (register address, value) pairs for OA config - * registers. Expected length of buffer is: (2 * sizeof(u32) * @n_regs). - */ - __u64 regs_ptr; -}; - -/** - * struct drm_xe_oa_stream_status - OA stream status returned from - * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can - * call the ioctl to query stream status in response to EIO errno from - * observation fd read(). - */ -struct drm_xe_oa_stream_status { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @oa_status: OA stream status (see Bspec 46717/61226) */ - __u64 oa_status; -#define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL (1 << 3) -#define DRM_XE_OASTATUS_COUNTER_OVERFLOW (1 << 2) -#define DRM_XE_OASTATUS_BUFFER_OVERFLOW (1 << 1) -#define DRM_XE_OASTATUS_REPORT_LOST (1 << 0) - - /** @reserved: reserved for future use */ - __u64 reserved[3]; -}; - -/** - * struct drm_xe_oa_stream_info - OA stream info returned from - * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl - */ -struct drm_xe_oa_stream_info { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @oa_buf_size: OA buffer size */ - __u64 oa_buf_size; - - /** @reserved: reserved for future use */ - __u64 reserved[3]; -}; - -/* - * Debugger ABI (ioctl and events) Version History: - * 0 - No debugger available - * 1 - Initial version - */ -#define DRM_XE_EUDEBUG_VERSION 1 - -struct drm_xe_eudebug_connect { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - __u64 pid; /* input: Target process ID */ - __u32 flags; /* MBZ */ - - __u32 version; /* output: current ABI (ioctl / events) version */ -}; - -/* - * struct drm_xe_debug_metadata_create - Create debug metadata - * - * Add a region of user memory to be marked as debug metadata. - * When the debugger attaches, the metadata regions will be delivered - * for debugger. Debugger can then map these regions to help decode - * the program state. - * - * Returns handle to created metadata entry. 
- */ -struct drm_xe_debug_metadata_create { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - -#define DRM_XE_DEBUG_METADATA_ELF_BINARY 0 -#define DRM_XE_DEBUG_METADATA_PROGRAM_MODULE 1 -#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_MODULE_AREA 2 -#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SBA_AREA 3 -#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA 4 -#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_NUM (1 + \ - WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA) - - /** @type: Type of metadata */ - __u64 type; - - /** @user_addr: pointer to start of the metadata */ - __u64 user_addr; - - /** @len: length, in bytes of the medata */ - __u64 len; - - /** @metadata_id: created metadata handle (out) */ - __u32 metadata_id; -}; - -/** - * struct drm_xe_debug_metadata_destroy - Destroy debug metadata - * - * Destroy debug metadata. - */ -struct drm_xe_debug_metadata_destroy { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @metadata_id: metadata handle to destroy */ - __u32 metadata_id; -}; - -#include "xe_drm_eudebug.h" - -#if defined(__cplusplus) -} -#endif - -#endif /* _XE_DRM_H_ */ + + /** + * struct drm_xe_user_extension - Base class for defining a chain of extensions + */ + struct drm_xe_user_extension { + /** + * @next_extension: + * + * Pointer to the next struct drm_xe_user_extension, or zero if the end. + */ + __u64 next_extension; + + /** + * @name: Name of the extension. + * + * Note that the name here is just some integer. + * + * Also note that the name space for this is not global for the whole + * driver, but rather its scope/meaning is limited to the specific piece + * of uAPI which has embedded the struct drm_xe_user_extension. + */ + __u32 name; + + /** + * @pad: MBZ + * + * All undefined bits must be zero. + */ + __u32 pad; + }; + + /** + * struct drm_xe_ext_set_property - Generic set property extension + * + * A generic struct that allows any of the Xe's IOCTL to be extended + * with a set_property operation. + */ + struct drm_xe_ext_set_property { + /** @base: base user extension */ + struct drm_xe_user_extension base; + + /** @property: property to set */ + __u32 property; + + /** @pad: MBZ */ + __u32 pad; + + /** @value: property value */ + __u64 value; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_engine_class_instance - instance of an engine class + * + * It is returned as part of the @drm_xe_engine, but it also is used as + * the input of engine selection for both @drm_xe_exec_queue_create and + * @drm_xe_query_engine_cycles + * + * The @engine_class can be: + * - %DRM_XE_ENGINE_CLASS_RENDER + * - %DRM_XE_ENGINE_CLASS_COPY + * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE + * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE + * - %DRM_XE_ENGINE_CLASS_COMPUTE + * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual + * hardware engine class). Used for creating ordered queues of VM + * bind operations. 
+ */ + struct drm_xe_engine_class_instance { + #define DRM_XE_ENGINE_CLASS_RENDER 0 + #define DRM_XE_ENGINE_CLASS_COPY 1 + #define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2 + #define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3 + #define DRM_XE_ENGINE_CLASS_COMPUTE 4 + #define DRM_XE_ENGINE_CLASS_VM_BIND 5 + /** @engine_class: engine class id */ + __u16 engine_class; + /** @engine_instance: engine instance id */ + __u16 engine_instance; + /** @gt_id: Unique ID of this GT within the PCI Device */ + __u16 gt_id; + /** @pad: MBZ */ + __u16 pad; + }; + + /** + * struct drm_xe_engine - describe hardware engine + */ + struct drm_xe_engine { + /** @instance: The @drm_xe_engine_class_instance */ + struct drm_xe_engine_class_instance instance; + + /** @reserved: Reserved */ + __u64 reserved[3]; + }; + + /** + * struct drm_xe_query_engines - describe engines + * + * If a query is made with a struct @drm_xe_device_query where .query + * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of + * struct @drm_xe_query_engines in .data. + */ + struct drm_xe_query_engines { + /** @num_engines: number of engines returned in @engines */ + __u32 num_engines; + /** @pad: MBZ */ + __u32 pad; + /** @engines: The returned engines for this device */ + struct drm_xe_engine engines[]; + }; + + /** + * enum drm_xe_memory_class - Supported memory classes. + */ + enum drm_xe_memory_class { + /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */ + DRM_XE_MEM_REGION_CLASS_SYSMEM = 0, + /** + * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this + * represents the memory that is local to the device, which we + * call VRAM. Not valid on integrated platforms. + */ + DRM_XE_MEM_REGION_CLASS_VRAM + }; + + /** + * struct drm_xe_mem_region - Describes some region as known to + * the driver. + */ + struct drm_xe_mem_region { + /** + * @mem_class: The memory class describing this region. + * + * See enum drm_xe_memory_class for supported values. + */ + __u16 mem_class; + /** + * @instance: The unique ID for this region, which serves as the + * index in the placement bitmask used as argument for + * &DRM_IOCTL_XE_GEM_CREATE + */ + __u16 instance; + /** + * @min_page_size: Min page-size in bytes for this region. + * + * When the kernel allocates memory for this region, the + * underlying pages will be at least @min_page_size in size. + * Buffer objects with an allowable placement in this region must be + * created with a size aligned to this value. + * GPU virtual address mappings of (parts of) buffer objects that + * may be placed in this region must also have their GPU virtual + * address and range aligned to this value. + * Affected IOCTLS will return %-EINVAL if alignment restrictions are + * not met. + */ + __u32 min_page_size; + /** + * @total_size: The usable size in bytes for this region. + */ + __u64 total_size; + /** + * @used: Estimate of the memory used in bytes for this region. + * + * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable + * accounting. Without this the value here will always equal + * zero. + */ + __u64 used; + /** + * @cpu_visible_size: How much of this region can be CPU + * accessed, in bytes. + * + * This will always be <= @total_size, and the remainder (if + * any) will not be CPU accessible. If the CPU accessible part + * is smaller than @total_size then this is referred to as a + * small BAR system. + * + * On systems without small BAR (full BAR), the probed_size will + * always equal the @total_size, since all of it will be CPU + * accessible. 
+ * + * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM + * regions (for other types the value here will always equal + * zero). + */ + __u64 cpu_visible_size; + /** + * @cpu_visible_used: Estimate of CPU visible memory used, in + * bytes. + * + * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable + * accounting. Without this the value here will always equal + * zero. Note this is only currently tracked for + * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value + * here will always be zero). + */ + __u64 cpu_visible_used; + /** @reserved: Reserved */ + __u64 reserved[6]; + }; + + /** + * struct drm_xe_query_mem_regions - describe memory regions + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses + * struct drm_xe_query_mem_regions in .data. + */ + struct drm_xe_query_mem_regions { + /** @num_mem_regions: number of memory regions returned in @mem_regions */ + __u32 num_mem_regions; + /** @pad: MBZ */ + __u32 pad; + /** @mem_regions: The returned memory regions for this device */ + struct drm_xe_mem_region mem_regions[]; + }; + + /** + * struct drm_xe_query_config - describe the device configuration + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses + * struct drm_xe_query_config in .data. + * + * The index in @info can be: + * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits) + * and the device revision (next 8 bits) + * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device + * configuration, see list below + * + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device + * has usable VRAM + * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment + * required by this device, typically SZ_4K or SZ_64K + * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address + * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest + * available exec queue priority + */ + struct drm_xe_query_config { + /** @num_params: number of parameters returned in info */ + __u32 num_params; + + /** @pad: MBZ */ + __u32 pad; + + #define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0 + #define DRM_XE_QUERY_CONFIG_FLAGS 1 + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0) + #define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2 + #define DRM_XE_QUERY_CONFIG_VA_BITS 3 + #define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4 + /** @info: array of elements containing the config info */ + __u64 info[]; + }; + + /** + * struct drm_xe_gt - describe an individual GT. + * + * To be used with drm_xe_query_gt_list, which will return a list with all the + * existing GT individual descriptions. + * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for + * implementing graphics and/or media operations. 
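As a worked complement to the memory-region and config structs above, the following sketch (error handling omitted, ``fd`` assumed to be an open Xe device node) uses %DRM_XE_DEVICE_QUERY_MEM_REGIONS with the usual two-call size/data pattern of &DRM_IOCTL_XE_DEVICE_QUERY and folds every VRAM instance into a placement bitmask suitable for &DRM_IOCTL_XE_GEM_CREATE:

.. code-block:: C

    struct drm_xe_query_mem_regions *regions;
    struct drm_xe_device_query query = {
        .extensions = 0,
        .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
        .size = 0,
        .data = 0,
    };
    __u32 vram_placement = 0;

    /* First call with size == 0: the kernel reports the required size */
    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
    regions = malloc(query.size);
    query.data = (uintptr_t)regions;
    /* Second call: the kernel copies the region list into @data */
    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);

    for (int i = 0; i < regions->num_mem_regions; i++)
        if (regions->mem_regions[i].mem_class == DRM_XE_MEM_REGION_CLASS_VRAM)
            vram_placement |= 1u << regions->mem_regions[i].instance;
    free(regions);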
+ * + * The index in @type can be: + * - %DRM_XE_QUERY_GT_TYPE_MAIN + * - %DRM_XE_QUERY_GT_TYPE_MEDIA + */ + struct drm_xe_gt { + #define DRM_XE_QUERY_GT_TYPE_MAIN 0 + #define DRM_XE_QUERY_GT_TYPE_MEDIA 1 + /** @type: GT type: Main or Media */ + __u16 type; + /** @tile_id: Tile ID where this GT lives (Information only) */ + __u16 tile_id; + /** @gt_id: Unique ID of this GT within the PCI Device */ + __u16 gt_id; + /** @pad: MBZ */ + __u16 pad[3]; + /** @reference_clock: A clock frequency for timestamp */ + __u32 reference_clock; + /** + * @near_mem_regions: Bit mask of instances from + * drm_xe_query_mem_regions that are nearest to the current engines + * of this GT. + * Each index in this mask refers directly to the struct + * drm_xe_query_mem_regions' instance, no assumptions should + * be made about order. The type of each region is described + * by struct drm_xe_query_mem_regions' mem_class. + */ + __u64 near_mem_regions; + /** + * @far_mem_regions: Bit mask of instances from + * drm_xe_query_mem_regions that are far from the engines of this GT. + * In general, they have extra indirections when compared to the + * @near_mem_regions. For a discrete device this could mean system + * memory and memory living in a different tile. + * Each index in this mask refers directly to the struct + * drm_xe_query_mem_regions' instance, no assumptions should + * be made about order. The type of each region is described + * by struct drm_xe_query_mem_regions' mem_class. + */ + __u64 far_mem_regions; + /** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */ + __u16 ip_ver_major; + /** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */ + __u16 ip_ver_minor; + /** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */ + __u16 ip_ver_rev; + /** @pad2: MBZ */ + __u16 pad2; + /** @reserved: Reserved */ + __u64 reserved[7]; + }; + + /** + * struct drm_xe_query_gt_list - A list with GT description items. + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct + * drm_xe_query_gt_list in .data. + */ + struct drm_xe_query_gt_list { + /** @num_gt: number of GT items returned in gt_list */ + __u32 num_gt; + /** @pad: MBZ */ + __u32 pad; + /** @gt_list: The GT list returned for this device */ + struct drm_xe_gt gt_list[]; + }; + + /** + * struct drm_xe_query_topology_mask - describe the topology mask of a GT + * + * This is the hardware topology which reflects the internal physical + * structure of the GPU. + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses + * struct drm_xe_query_topology_mask in .data. + * + * The @type can be: + * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices + * (DSS) available for geometry operations. For example a query response + * containing the following in mask: + * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00`` + * means 32 DSS are available for geometry. + * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices + * (DSS) available for compute operations. For example a query response + * containing the following in mask: + * ``DSS_COMPUTE ff ff ff ff 00 00 00 00`` + * means 32 DSS are available for compute. + * - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type + * may be omitted if the driver is unable to query the mask from the + * hardware. 
+ * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU) + * available per Dual Sub Slices (DSS). For example a query response + * containing the following in mask: + * ``EU_PER_DSS ff ff 00 00 00 00 00 00`` + * means each DSS has 16 SIMD8 EUs. This type may be omitted if device + * doesn't have SIMD8 EUs. + * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution + * Units (EU) available per Dual Sub Slices (DSS). For example a query + * response containing the following in mask: + * ``SIMD16_EU_PER_DSS ff ff 00 00 00 00 00 00`` + * means each DSS has 16 SIMD16 EUs. This type may be omitted if device + * doesn't have SIMD16 EUs. + */ + struct drm_xe_query_topology_mask { + /** @gt_id: GT ID the mask is associated with */ + __u16 gt_id; + + #define DRM_XE_TOPO_DSS_GEOMETRY 1 + #define DRM_XE_TOPO_DSS_COMPUTE 2 + #define DRM_XE_TOPO_L3_BANK 3 + #define DRM_XE_TOPO_EU_PER_DSS 4 + #define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5 + /** @type: type of mask */ + __u16 type; + + /** @num_bytes: number of bytes in requested mask */ + __u32 num_bytes; + + /** @mask: little-endian mask of @num_bytes */ + __u8 mask[]; + }; + + /** + * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps + * + * If a query is made with a struct drm_xe_device_query where .query is equal to + * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles + * in .data. struct drm_xe_query_engine_cycles is allocated by the user and + * .data points to this allocated structure. + * + * The query returns the engine cycles, which along with GT's @reference_clock, + * can be used to calculate the engine timestamp. In addition the + * query returns a set of cpu timestamps that indicate when the command + * streamer cycle count was captured. + */ + struct drm_xe_query_engine_cycles { + /** + * @eci: This is input by the user and is the engine for which command + * streamer cycles is queried. + */ + struct drm_xe_engine_class_instance eci; + + /** + * @clockid: This is input by the user and is the reference clock id for + * CPU timestamp. For definition, see clock_gettime(2) and + * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC, + * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI. + */ + __s32 clockid; + + /** @width: Width of the engine cycle counter in bits. */ + __u32 width; + + /** + * @engine_cycles: Engine cycles as read from its register + * at 0x358 offset. + */ + __u64 engine_cycles; + + /** + * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before + * reading the engine_cycles register using the reference clockid set by the + * user. + */ + __u64 cpu_timestamp; + + /** + * @cpu_delta: Time delta in ns captured around reading the lower dword + * of the engine_cycles register. + */ + __u64 cpu_delta; + }; + + /** + * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version + * + * Given a uc_type this will return the branch, major, minor and patch version + * of the micro-controller firmware. 
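A minimal sketch of the engine-cycles query described above. Since struct drm_xe_query_engine_cycles is allocated by the caller, @size is set directly to the structure size here (an assumption based on the "allocated by the user" note) rather than using the two-call pattern; ``fd`` is an open Xe device node and error handling is omitted:

.. code-block:: C

    struct drm_xe_query_engine_cycles cycles = {
        .eci = {
            .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
            .engine_instance = 0,
            .gt_id = 0,
        },
        .clockid = CLOCK_MONOTONIC,
    };
    struct drm_xe_device_query query = {
        .query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
        .size = sizeof(cycles),
        .data = (uintptr_t)&cycles,
    };

    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
    /*
     * cycles.engine_cycles now holds a @width-bit counter ticking at the
     * GT's @reference_clock, while cycles.cpu_timestamp holds the
     * CLOCK_MONOTONIC time captured just before the register read.
     */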
+ */ + struct drm_xe_query_uc_fw_version { + /** @uc_type: The micro-controller type to query firmware version */ + #define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0 + #define XE_QUERY_UC_TYPE_HUC 1 + __u16 uc_type; + + /** @pad: MBZ */ + __u16 pad; + + /** @branch_ver: branch uc fw version */ + __u32 branch_ver; + /** @major_ver: major uc fw version */ + __u32 major_ver; + /** @minor_ver: minor uc fw version */ + __u32 minor_ver; + /** @patch_ver: patch uc fw version */ + __u32 patch_ver; + + /** @pad2: MBZ */ + __u32 pad2; + + /** @reserved: Reserved */ + __u64 reserved; + }; + + /** + * struct drm_xe_query_pxp_status - query if PXP is ready + * + * If PXP is enabled and no fatal error has occurred, the status will be set to + * one of the following values: + * 0: PXP init still in progress + * 1: PXP init complete + * + * If PXP is not enabled or something has gone wrong, the query will be failed + * with one of the following error codes: + * -ENODEV: PXP not supported or disabled; + * -EIO: fatal error occurred during init, so PXP will never be enabled; + * -EINVAL: incorrect value provided as part of the query; + * -EFAULT: error copying the memory between kernel and userspace. + * + * The status can only be 0 in the first few seconds after driver load. If + * everything works as expected, the status will transition to init complete in + * less than 1 second, while in case of errors the driver might take longer to + * start returning an error code, but it should still take less than 10 seconds. + * + * The supported session type bitmask is based on the values in + * enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore + * is not reported in the bitmask. + * + */ + struct drm_xe_query_pxp_status { + /** @status: current PXP status */ + __u32 status; + + /** @supported_session_types: bitmask of supported PXP session types */ + __u32 supported_session_types; + }; + + /** + * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main + * structure to query device information + * + * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_* + * and sets the value in the query member. This determines the type of + * the structure provided by the driver in data, among struct drm_xe_query_*. + * + * The @query can be: + * - %DRM_XE_DEVICE_QUERY_ENGINES + * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS + * - %DRM_XE_DEVICE_QUERY_CONFIG + * - %DRM_XE_DEVICE_QUERY_GT_LIST + * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware + * configuration of the device such as information on slices, memory, + * caches, and so on. It is provided as a table of key / value + * attributes. + * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY + * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES + * - %DRM_XE_DEVICE_QUERY_PXP_STATUS + * + * If size is set to 0, the driver fills it with the required size for + * the requested type of data to query. If size is equal to the required + * size, the queried information is copied into data. If size is set to + * a value different from 0 and different from the required size, the + * IOCTL call returns -EINVAL. + * + * For example the following code snippet allows retrieving and printing + * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES: + * + * .. 
code-block:: C + * + * struct drm_xe_query_engines *engines; + * struct drm_xe_device_query query = { + * .extensions = 0, + * .query = DRM_XE_DEVICE_QUERY_ENGINES, + * .size = 0, + * .data = 0, + * }; + * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); + * engines = malloc(query.size); + * query.data = (uintptr_t)engines; + * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); + * for (int i = 0; i < engines->num_engines; i++) { + * printf("Engine %d: %s\n", i, + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER": + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_COPY ? "COPY": + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE": + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE": + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE": + * "UNKNOWN"); + * } + * free(engines); + */ + struct drm_xe_device_query { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + #define DRM_XE_DEVICE_QUERY_ENGINES 0 + #define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1 + #define DRM_XE_DEVICE_QUERY_CONFIG 2 + #define DRM_XE_DEVICE_QUERY_GT_LIST 3 + #define DRM_XE_DEVICE_QUERY_HWCONFIG 4 + #define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5 + #define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6 + #define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7 + #define DRM_XE_DEVICE_QUERY_OA_UNITS 8 + #define DRM_XE_DEVICE_QUERY_PXP_STATUS 9 + /** @query: The type of data to query */ + __u32 query; + + /** @size: Size of the queried data */ + __u32 size; + + /** @data: Queried data is placed here */ + __u64 data; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for + * gem creation + * + * The @flags can be: + * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING + * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT + * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a + * possible placement, ensure that the corresponding VRAM allocation + * will always use the CPU accessible part of VRAM. This is important + * for small-bar systems (on full-bar systems this gets turned into a + * noop). + * Note1: System memory can be used as an extra placement if the kernel + * should spill the allocation to system memory, if space can't be made + * available in the CPU accessible part of VRAM (giving the same + * behaviour as the i915 interface, see + * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS). + * Note2: For clear-color CCS surfaces the kernel needs to read the + * clear-color value stored in the buffer, and on discrete platforms we + * need to use VRAM for display surfaces, therefore the kernel requires + * setting this flag for such objects, otherwise an error is thrown on + * small-bar systems. + * + * @cpu_caching supports the following values: + * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back + * caching. On iGPU this can't be used for scanout surfaces. Currently + * not allowed for objects placed in VRAM. + * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This + * is uncached. Scanout surfaces should likely use this. All objects + * that can be placed in VRAM must use this. 
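Along the same lines, a sketch (same assumptions: open ``fd``, no error handling) that retrieves the config array with %DRM_XE_DEVICE_QUERY_CONFIG and decodes a few of the indices listed for struct drm_xe_query_config:

.. code-block:: C

    struct drm_xe_query_config *config;
    struct drm_xe_device_query query = {
        .query = DRM_XE_DEVICE_QUERY_CONFIG,
    };
    unsigned long long devid;

    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
    config = malloc(query.size);
    query.data = (uintptr_t)config;
    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);

    devid = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];
    printf("device id 0x%llx rev %llu\n", devid & 0xffff, (devid >> 16) & 0xff);
    printf("va bits %llu, min alignment %llu, has VRAM: %s\n",
           (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_VA_BITS],
           (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT],
           config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
               DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM ? "yes" : "no");
    free(config);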
+ * + * This ioctl supports setting the following properties via the + * %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the + * generic @drm_xe_ext_set_property struct: + * + * - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session + * this object will be used with. Valid values are listed in enum + * drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so + * there is no need to explicitly set that. Objects used with session of type + * %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation + * event occurs after their creation. Attempting to flip an invalid object + * will cause a black frame to be displayed instead. Submissions with invalid + * objects mapped in the VM will be rejected. + */ + struct drm_xe_gem_create { + #define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY 0 + #define DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE 0 + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** + * @size: Size of the object to be created, must match region + * (system or vram) minimum alignment (&min_page_size). + */ + __u64 size; + + /** + * @placement: A mask of memory instances of where BO can be placed. + * Each index in this mask refers directly to the struct + * drm_xe_query_mem_regions' instance, no assumptions should + * be made about order. The type of each region is described + * by struct drm_xe_query_mem_regions' mem_class. + */ + __u32 placement; + + #define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0) + #define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1) + #define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2) + /** + * @flags: Flags, currently a mask of memory instances of where BO can + * be placed + */ + __u32 flags; + + /** + * @vm_id: Attached VM, if any + * + * If a VM is specified, this BO must: + * + * 1. Only ever be bound to that VM. + * 2. Cannot be exported as a PRIME fd. + */ + __u32 vm_id; + + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero. + */ + __u32 handle; + + #define DRM_XE_GEM_CPU_CACHING_WB 1 + #define DRM_XE_GEM_CPU_CACHING_WC 2 + /** + * @cpu_caching: The CPU caching mode to select for this object. If + * mmaping the object the mode selected here will also be used. The + * exception is when mapping system memory (including data evicted + * to system) on discrete GPUs. The caching mode selected will + * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency + * between GPU- and CPU is guaranteed. The caching mode of + * existing CPU-mappings will be updated transparently to + * user-space clients. + */ + __u16 cpu_caching; + /** @pad: MBZ */ + __u16 pad[3]; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET + * + * The @flags can be: + * - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For user to query special offset + * for use in mmap ioctl. Writing to the returned mmap address will generate a + * PCI memory barrier with low overhead (avoiding IOCTL call as well as writing + * to VRAM which would also add overhead), acting like an MI_MEM_FENCE + * instruction. + * + * Note: The mmap size can be at most 4K, due to HW limitations. As a result + * this interface is only supported on CPU architectures that support 4K page + * size. The mmap_offset ioctl will detect this and gracefully return an + * error, where userspace is expected to have a different fallback method for + * triggering a barrier. 
+ * + * Roughly the usage would be as follows: + * + * .. code-block:: C + * + * struct drm_xe_gem_mmap_offset mmo = { + * .handle = 0, // must be set to 0 + * .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER, + * }; + * + * err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo); + * map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset); + * map[i] = 0xdeadbeaf; // issue barrier + */ + struct drm_xe_gem_mmap_offset { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @handle: Handle for the object being mapped. */ + __u32 handle; + + #define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER (1 << 0) + /** @flags: Flags */ + __u32 flags; + + /** @offset: The fake offset to use for subsequent mmap call */ + __u64 offset; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE + * + * The @flags can be: + * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE + * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts + * exec submissions to its exec_queues that don't have an upper time + * limit on the job execution time. But exec submissions to these + * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ, + * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF, + * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL. + * LR VMs can be created in recoverable page-fault mode using + * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it. + * If that flag is omitted, the UMD can not rely on the slightly + * different per-VM overcommit semantics that are enabled by + * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may + * still enable recoverable pagefaults if supported by the device. + * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also + * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on + * demand when accessed, and also allows per-VM overcommit of memory. + * The xe driver internally uses recoverable pagefaults to implement + * this. 
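Putting the GEM pieces together, a sketch that creates a system-memory buffer and CPU-maps it through the regular mmap-offset path (as opposed to the PCI-barrier path shown above). ``bo_size`` and ``sysmem_instance`` are placeholders: the instance would come from the %DRM_XE_DEVICE_QUERY_MEM_REGIONS sketch earlier, and the size must respect that region's @min_page_size. Error handling is omitted:

.. code-block:: C

    struct drm_xe_gem_create create = {
        .size = bo_size,
        .placement = 1u << sysmem_instance,
        .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB, /* allowed for sysmem-only BOs */
    };
    struct drm_xe_gem_mmap_offset mmo = { .flags = 0 };
    void *map;

    ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);

    mmo.handle = create.handle;
    ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
    map = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmo.offset);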
+ */ + struct drm_xe_vm_create { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + #define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0) + #define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1) + #define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2) + /** @flags: Flags */ + __u32 flags; + + /** @vm_id: Returned VM ID */ + __u32 vm_id; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY + */ + struct drm_xe_vm_destroy { + /** @vm_id: VM ID */ + __u32 vm_id; + + /** @pad: MBZ */ + __u32 pad; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + struct drm_xe_vm_bind_op_ext_attach_debug { + /** @base: base user extension */ + struct drm_xe_user_extension base; + + /** @id: Debug object id from create metadata */ + __u64 metadata_id; + + /** @flags: Flags */ + __u64 flags; + + /** @cookie: Cookie */ + __u64 cookie; + + /** @reserved: Reserved */ + __u64 reserved; + }; + + /** + * struct drm_xe_vm_bind_op - run bind operations + * + * The @op can be: + * - %DRM_XE_VM_BIND_OP_MAP + * - %DRM_XE_VM_BIND_OP_UNMAP + * - %DRM_XE_VM_BIND_OP_MAP_USERPTR + * - %DRM_XE_VM_BIND_OP_UNMAP_ALL + * - %DRM_XE_VM_BIND_OP_PREFETCH + * + * and the @flags can be: + * - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only + * to ensure write protection + * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the + * MAP operation immediately rather than deferring the MAP to the page + * fault handler. This is implied on a non-faulting VM as there is no + * fault handler to defer to. + * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page + * tables are setup with a special bit which indicates writes are + * dropped and all reads return zero. In the future, the NULL flags + * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO + * handle MBZ, and the BO offset MBZ. This flag is intended to + * implement VK sparse bindings. + * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP, + * reject the binding if the encryption key is no longer valid. This + * flag has no effect on BOs that are not marked as using PXP. + */ + + struct drm_xe_vm_bind_op { + #define XE_VM_BIND_OP_EXTENSIONS_ATTACH_DEBUG 0 + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** + * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP + */ + __u32 obj; + + /** + * @pat_index: The platform defined @pat_index to use for this mapping. + * The index basically maps to some predefined memory attributes, + * including things like caching, coherency, compression etc. The exact + * meaning of the pat_index is platform specific and defined in the + * Bspec and PRMs. When the KMD sets up the binding the index here is + * encoded into the ppGTT PTE. + * + * For coherency the @pat_index needs to be at least 1way coherent when + * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD + * will extract the coherency mode from the @pat_index and reject if + * there is a mismatch (see note below for pre-MTL platforms). + * + * Note: On pre-MTL platforms there is only a caching mode and no + * explicit coherency mode, but on such hardware there is always a + * shared-LLC (or is dgpu) so all GT memory accesses are coherent with + * CPU caches even with the caching mode set as uncached. It's only the + * display engine that is incoherent (on dgpu it must be in VRAM which + * is always mapped as WC on the CPU). 
However to keep the uapi somewhat + * consistent with newer platforms the KMD groups the different cache + * levels into the following coherency buckets on all pre-MTL platforms: + * + * ppGTT UC -> COH_NONE + * ppGTT WC -> COH_NONE + * ppGTT WT -> COH_NONE + * ppGTT WB -> COH_AT_LEAST_1WAY + * + * In practice UC/WC/WT should only ever used for scanout surfaces on + * such platforms (or perhaps in general for dma-buf if shared with + * another device) since it is only the display engine that is actually + * incoherent. Everything else should typically use WB given that we + * have a shared-LLC. On MTL+ this completely changes and the HW + * defines the coherency mode as part of the @pat_index, where + * incoherent GT access is possible. + * + * Note: For userptr and externally imported dma-buf the kernel expects + * either 1WAY or 2WAY for the @pat_index. + * + * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions + * on the @pat_index. For such mappings there is no actual memory being + * mapped (the address in the PTE is invalid), so the various PAT memory + * attributes likely do not apply. Simply leaving as zero is one + * option (still a valid pat_index). + */ + __u16 pat_index; + + /** @pad: MBZ */ + __u16 pad; + + union { + /** + * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE, + * ignored for unbind + */ + __u64 obj_offset; + + /** @userptr: user pointer to bind on */ + __u64 userptr; + }; + + /** + * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL + */ + __u64 range; + + /** @addr: Address to operate on, MBZ for UNMAP_ALL */ + __u64 addr; + + #define DRM_XE_VM_BIND_OP_MAP 0x0 + #define DRM_XE_VM_BIND_OP_UNMAP 0x1 + #define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2 + #define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3 + #define DRM_XE_VM_BIND_OP_PREFETCH 0x4 + /** @op: Bind operation to perform */ + __u32 op; + + #define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0) + #define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1) + #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2) + #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3) + #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4) + /** @flags: Bind flags */ + __u32 flags; + + /** + * @prefetch_mem_region_instance: Memory region to prefetch VMA to. + * It is a region instance, not a mask. + * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation. + */ + __u32 prefetch_mem_region_instance; + + /** @pad2: MBZ */ + __u32 pad2; + + /** @reserved: Reserved */ + __u64 reserved[3]; + }; + + /** + * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND + * + * Below is an example of a minimal use of @drm_xe_vm_bind to + * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to + * illustrate `userptr`. It can be synchronized by using the example + * provided for @drm_xe_sync. + * + * .. 
code-block:: C + * + * data = aligned_alloc(ALIGNMENT, BO_SIZE); + * struct drm_xe_vm_bind bind = { + * .vm_id = vm, + * .num_binds = 1, + * .bind.obj = 0, + * .bind.obj_offset = to_user_pointer(data), + * .bind.range = BO_SIZE, + * .bind.addr = BIND_ADDRESS, + * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR, + * .bind.flags = 0, + * .num_syncs = 1, + * .syncs = &sync, + * .exec_queue_id = 0, + * }; + * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind); + * + */ + struct drm_xe_vm_bind { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @vm_id: The ID of the VM to bind to */ + __u32 vm_id; + + /** + * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND + * and exec queue must have same vm_id. If zero, the default VM bind engine + * is used. + */ + __u32 exec_queue_id; + + /** @pad: MBZ */ + __u32 pad; + + /** @num_binds: number of binds in this IOCTL */ + __u32 num_binds; + + union { + /** @bind: used if num_binds == 1 */ + struct drm_xe_vm_bind_op bind; + + /** + * @vector_of_binds: userptr to array of struct + * drm_xe_vm_bind_op if num_binds > 1 + */ + __u64 vector_of_binds; + }; + + /** @pad2: MBZ */ + __u32 pad2; + + /** @num_syncs: amount of syncs to wait on */ + __u32 num_syncs; + + /** @syncs: pointer to struct drm_xe_sync array */ + __u64 syncs; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE + * + * This ioctl supports setting the following properties via the + * %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the + * generic @drm_xe_ext_set_property struct: + * + * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority. + * CAP_SYS_NICE is required to set a value above normal. + * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice + * duration in microseconds. + * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session + * this queue will be used with. Valid values are listed in enum + * drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so + * there is no need to explicitly set that. When a queue of type + * %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session + * (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if isn't already running. + * Given that going into a power-saving state kills PXP HWDRM sessions, + * runtime PM will be blocked while queues of this type are alive. + * All PXP queues will be killed if a PXP invalidation event occurs. + * + * The example below shows how to use @drm_xe_exec_queue_create to create + * a simple exec_queue (no parallel submission) of class + * &DRM_XE_ENGINE_CLASS_RENDER. + * + * .. 
code-block:: C + * + * struct drm_xe_engine_class_instance instance = { + * .engine_class = DRM_XE_ENGINE_CLASS_RENDER, + * }; + * struct drm_xe_exec_queue_create exec_queue_create = { + * .extensions = 0, + * .vm_id = vm, + * .num_bb_per_exec = 1, + * .num_eng_per_bb = 1, + * .instances = to_user_pointer(&instance), + * }; + * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create); + * + */ + struct drm_xe_exec_queue_create { + #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0 + #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 + #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 + #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2 + #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_EUDEBUG 3 + #define DRM_XE_EXEC_QUEUE_EUDEBUG_FLAG_ENABLE (1 << 0) + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @width: submission width (number BB per exec) for this exec queue */ + __u16 width; + + /** @num_placements: number of valid placements for this exec queue */ + __u16 num_placements; + + /** @vm_id: VM to use for this exec queue */ + __u32 vm_id; + + /** @flags: MBZ */ + __u32 flags; + + /** @exec_queue_id: Returned exec queue ID */ + __u32 exec_queue_id; + + /** + * @instances: user pointer to a 2-d array of struct + * drm_xe_engine_class_instance + * + * length = width (i) * num_placements (j) + * index = j + i * width + */ + __u64 instances; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY + */ + struct drm_xe_exec_queue_destroy { + /** @exec_queue_id: Exec queue ID */ + __u32 exec_queue_id; + + /** @pad: MBZ */ + __u32 pad; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY + * + * The @property can be: + * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN + */ + struct drm_xe_exec_queue_get_property { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @exec_queue_id: Exec queue ID */ + __u32 exec_queue_id; + + #define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 + /** @property: property to get */ + __u32 property; + + /** @value: property value */ + __u64 value; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_sync - sync object + * + * The @type can be: + * - %DRM_XE_SYNC_TYPE_SYNCOBJ + * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ + * - %DRM_XE_SYNC_TYPE_USER_FENCE + * + * and the @flags can be: + * - %DRM_XE_SYNC_FLAG_SIGNAL + * + * A minimal use of @drm_xe_sync looks like this: + * + * .. code-block:: C + * + * struct drm_xe_sync sync = { + * .flags = DRM_XE_SYNC_FLAG_SIGNAL, + * .type = DRM_XE_SYNC_TYPE_SYNCOBJ, + * }; + * struct drm_syncobj_create syncobj_create = { 0 }; + * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create); + * sync.handle = syncobj_create.handle; + * ... + * use of &sync in drm_xe_exec or drm_xe_vm_bind + * ... 
+ * struct drm_syncobj_wait wait = { + * .handles = &sync.handle, + * .timeout_nsec = INT64_MAX, + * .count_handles = 1, + * .flags = 0, + * .first_signaled = 0, + * .pad = 0, + * }; + * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait); + */ + struct drm_xe_sync { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + #define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0 + #define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1 + #define DRM_XE_SYNC_TYPE_USER_FENCE 0x2 + /** @type: Type of the this sync object */ + __u32 type; + + #define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0) + /** @flags: Sync Flags */ + __u32 flags; + + union { + /** @handle: Handle for the object */ + __u32 handle; + + /** + * @addr: Address of user fence. When sync is passed in via exec + * IOCTL this is a GPU address in the VM. When sync passed in via + * VM bind IOCTL this is a user pointer. In either case, it is + * the users responsibility that this address is present and + * mapped when the user fence is signalled. Must be qword + * aligned. + */ + __u64 addr; + }; + + /** + * @timeline_value: Input for the timeline sync object. Needs to be + * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ. + */ + __u64 timeline_value; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC + * + * This is an example to use @drm_xe_exec for execution of the object + * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue + * (see example in @drm_xe_exec_queue_create). It can be synchronized + * by using the example provided for @drm_xe_sync. + * + * .. code-block:: C + * + * struct drm_xe_exec exec = { + * .exec_queue_id = exec_queue, + * .syncs = &sync, + * .num_syncs = 1, + * .address = BIND_ADDRESS, + * .num_batch_buffer = 1, + * }; + * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec); + * + */ + struct drm_xe_exec { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @exec_queue_id: Exec queue ID for the batch buffer */ + __u32 exec_queue_id; + + /** @num_syncs: Amount of struct drm_xe_sync in array. */ + __u32 num_syncs; + + /** @syncs: Pointer to struct drm_xe_sync array. */ + __u64 syncs; + + /** + * @address: address of batch buffer if num_batch_buffer == 1 or an + * array of batch buffer addresses + */ + __u64 address; + + /** + * @num_batch_buffer: number of batch buffer in this exec, must match + * the width of the engine + */ + __u16 num_batch_buffer; + + /** @pad: MBZ */ + __u16 pad[3]; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE + * + * Wait on user fence, XE will wake-up on every HW engine interrupt in the + * instances list and check if user fence is complete:: + * + * (*addr & MASK) OP (VALUE & MASK) + * + * Returns to user on user fence completion or timeout. 
+ * + * The @op can be: + * - %DRM_XE_UFENCE_WAIT_OP_EQ + * - %DRM_XE_UFENCE_WAIT_OP_NEQ + * - %DRM_XE_UFENCE_WAIT_OP_GT + * - %DRM_XE_UFENCE_WAIT_OP_GTE + * - %DRM_XE_UFENCE_WAIT_OP_LT + * - %DRM_XE_UFENCE_WAIT_OP_LTE + * + * and the @flags can be: + * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME + * - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP + * + * The @mask values can be for example: + * - 0xffu for u8 + * - 0xffffu for u16 + * - 0xffffffffu for u32 + * - 0xffffffffffffffffu for u64 + */ + struct drm_xe_wait_user_fence { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** + * @addr: user pointer address to wait on, must qword aligned + */ + __u64 addr; + + #define DRM_XE_UFENCE_WAIT_OP_EQ 0x0 + #define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1 + #define DRM_XE_UFENCE_WAIT_OP_GT 0x2 + #define DRM_XE_UFENCE_WAIT_OP_GTE 0x3 + #define DRM_XE_UFENCE_WAIT_OP_LT 0x4 + #define DRM_XE_UFENCE_WAIT_OP_LTE 0x5 + /** @op: wait operation (type of comparison) */ + __u16 op; + + #define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0) + /** @flags: wait flags */ + __u16 flags; + + /** @pad: MBZ */ + __u32 pad; + + /** @value: compare value */ + __u64 value; + + /** @mask: comparison mask */ + __u64 mask; + + /** + * @timeout: how long to wait before bailing, value in nanoseconds. + * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout) + * it contains timeout expressed in nanoseconds to wait (fence will + * expire at now() + timeout). + * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flat is set (absolute timeout) wait + * will end at timeout (uses system MONOTONIC_CLOCK). + * Passing negative timeout leads to neverending wait. + * + * On relative timeout this value is updated with timeout left + * (for restarting the call in case of signal delivery). + * On absolute timeout this value stays intact (restarted call still + * expire at the same point of time). + */ + __s64 timeout; + + /** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */ + __u32 exec_queue_id; + + /** @pad2: MBZ */ + __u32 pad2; + + /** @reserved: Reserved */ + __u64 reserved[2]; + }; + + /** + * enum drm_xe_observation_type - Observation stream types + */ + enum drm_xe_observation_type { + /** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */ + DRM_XE_OBSERVATION_TYPE_OA, + }; + + /** + * enum drm_xe_observation_op - Observation stream ops + */ + enum drm_xe_observation_op { + /** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */ + DRM_XE_OBSERVATION_OP_STREAM_OPEN, + + /** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */ + DRM_XE_OBSERVATION_OP_ADD_CONFIG, + + /** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */ + DRM_XE_OBSERVATION_OP_REMOVE_CONFIG, + }; + + /** + * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION + * + * The observation layer enables multiplexing observation streams of + * multiple types. The actual params for a particular stream operation are + * supplied via the @param pointer (use __copy_from_user to get these + * params). 
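A sketch of the user-fence wait described above: it blocks until the qword at ``fence`` (a qword-aligned u64 in user memory) equals ``expected`` under a full-width mask, or until a one second relative timeout expires. ``fence``, ``expected`` and ``exec_queue`` are placeholders from earlier setup and error handling is omitted:

.. code-block:: C

    struct drm_xe_wait_user_fence wait = {
        .addr = (uintptr_t)&fence,          /* qword-aligned user pointer */
        .op = DRM_XE_UFENCE_WAIT_OP_EQ,
        .flags = 0,                         /* relative timeout */
        .value = expected,
        .mask = 0xffffffffffffffffull,      /* compare the full u64 */
        .timeout = 1000 * 1000 * 1000,      /* 1 s in nanoseconds */
        .exec_queue_id = exec_queue,
    };

    ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
    /* On signal delivery, wait.timeout holds the remaining time for a restart. */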
+ */ + struct drm_xe_observation_param { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + /** @observation_type: observation stream type, of enum @drm_xe_observation_type */ + __u64 observation_type; + /** @observation_op: observation stream op, of enum @drm_xe_observation_op */ + __u64 observation_op; + /** @param: Pointer to actual stream params */ + __u64 param; + }; + + /** + * enum drm_xe_observation_ioctls - Observation stream fd ioctl's + * + * Information exchanged between userspace and kernel for observation fd + * ioctl's is stream type specific + */ + enum drm_xe_observation_ioctls { + /** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */ + DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0), + + /** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for a observation stream */ + DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1), + + /** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */ + DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2), + + /** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */ + DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3), + + /** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */ + DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4), + }; + + /** + * enum drm_xe_oa_unit_type - OA unit types + */ + enum drm_xe_oa_unit_type { + /** + * @DRM_XE_OA_UNIT_TYPE_OAG: OAG OA unit. OAR/OAC are considered + * sub-types of OAG. For OAR/OAC, use OAG. + */ + DRM_XE_OA_UNIT_TYPE_OAG, + + /** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */ + DRM_XE_OA_UNIT_TYPE_OAM, + }; + + /** + * struct drm_xe_oa_unit - describe OA unit + */ + struct drm_xe_oa_unit { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @oa_unit_id: OA unit ID */ + __u32 oa_unit_id; + + /** @oa_unit_type: OA unit type of @drm_xe_oa_unit_type */ + __u32 oa_unit_type; + + /** @capabilities: OA capabilities bit-mask */ + __u64 capabilities; + #define DRM_XE_OA_CAPS_BASE (1 << 0) + #define DRM_XE_OA_CAPS_SYNCS (1 << 1) + #define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2) + #define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3) + + /** @oa_timestamp_freq: OA timestamp freq */ + __u64 oa_timestamp_freq; + + /** @reserved: MBZ */ + __u64 reserved[4]; + + /** @num_engines: number of engines in @eci array */ + __u64 num_engines; + + /** @eci: engines attached to this OA unit */ + struct drm_xe_engine_class_instance eci[]; + }; + + /** + * struct drm_xe_query_oa_units - describe OA units + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_OA_UNITS, then the reply uses struct + * drm_xe_query_oa_units in .data. + * + * OA unit properties for all OA units can be accessed using a code block + * such as the one below: + * + * .. code-block:: C + * + * struct drm_xe_query_oa_units *qoa; + * struct drm_xe_oa_unit *oau; + * u8 *poau; + * + * // malloc qoa and issue DRM_XE_DEVICE_QUERY_OA_UNITS. 
Then: + * poau = (u8 *)&qoa->oa_units[0]; + * for (int i = 0; i < qoa->num_oa_units; i++) { + * oau = (struct drm_xe_oa_unit *)poau; + * // Access 'struct drm_xe_oa_unit' fields here + * poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]); + * } + */ + struct drm_xe_query_oa_units { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + /** @num_oa_units: number of OA units returned in oau[] */ + __u32 num_oa_units; + /** @pad: MBZ */ + __u32 pad; + /** + * @oa_units: struct @drm_xe_oa_unit array returned for this device. + * Written below as a u64 array to avoid problems with nested flexible + * arrays with some compilers + */ + __u64 oa_units[]; + }; + + /** + * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec + * 52198/60942 + */ + enum drm_xe_oa_format_type { + /** @DRM_XE_OA_FMT_TYPE_OAG: OAG report format */ + DRM_XE_OA_FMT_TYPE_OAG, + /** @DRM_XE_OA_FMT_TYPE_OAR: OAR report format */ + DRM_XE_OA_FMT_TYPE_OAR, + /** @DRM_XE_OA_FMT_TYPE_OAM: OAM report format */ + DRM_XE_OA_FMT_TYPE_OAM, + /** @DRM_XE_OA_FMT_TYPE_OAC: OAC report format */ + DRM_XE_OA_FMT_TYPE_OAC, + /** @DRM_XE_OA_FMT_TYPE_OAM_MPEC: OAM SAMEDIA or OAM MPEC report format */ + DRM_XE_OA_FMT_TYPE_OAM_MPEC, + /** @DRM_XE_OA_FMT_TYPE_PEC: PEC report format */ + DRM_XE_OA_FMT_TYPE_PEC, + }; + + /** + * enum drm_xe_oa_property_id - OA stream property id's + * + * Stream params are specified as a chain of @drm_xe_ext_set_property + * struct's, with @property values from enum @drm_xe_oa_property_id and + * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY. + * @param field in struct @drm_xe_observation_param points to the first + * @drm_xe_ext_set_property struct. + * + * Exactly the same mechanism is also used for stream reconfiguration using the + * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a + * subset of properties below can be specified for stream reconfiguration. + */ + enum drm_xe_oa_property_id { + #define DRM_XE_OA_EXTENSION_SET_PROPERTY 0 + /** + * @DRM_XE_OA_PROPERTY_OA_UNIT_ID: ID of the OA unit on which to open + * the OA stream, see @oa_unit_id in 'struct + * drm_xe_query_oa_units'. Defaults to 0 if not provided. + */ + DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1, + + /** + * @DRM_XE_OA_PROPERTY_SAMPLE_OA: A value of 1 requests inclusion of raw + * OA unit reports or stream samples in a global buffer attached to an + * OA unit. + */ + DRM_XE_OA_PROPERTY_SAMPLE_OA, + + /** + * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA + * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG. + */ + DRM_XE_OA_PROPERTY_OA_METRIC_SET, + + /** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */ + DRM_XE_OA_PROPERTY_OA_FORMAT, + /* + * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942, + * in terms of the following quantities: a. enum @drm_xe_oa_format_type + * b. Counter select c. Counter size and d. BC report. Also refer to the + * oa_formats array in drivers/gpu/drm/xe/xe_oa.c. 
+ */ + #define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0) + #define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8) + #define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16) + #define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24) + + /** + * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit + * sampling with sampling frequency proportional to 2^(period_exponent + 1) + */ + DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT, + + /** + * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA + * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE). + */ + DRM_XE_OA_PROPERTY_OA_DISABLED, + + /** + * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific + * @exec_queue_id. OA queries can be executed on this exec queue. + */ + DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID, + + /** + * @DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE: Optional engine instance to + * pass along with @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID or will default to 0. + */ + DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE, + + /** + * @DRM_XE_OA_PROPERTY_NO_PREEMPT: Allow preemption and timeslicing + * to be disabled for the stream exec queue. + */ + DRM_XE_OA_PROPERTY_NO_PREEMPT, + + /** + * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array + * specified in @DRM_XE_OA_PROPERTY_SYNCS + */ + DRM_XE_OA_PROPERTY_NUM_SYNCS, + + /** + * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array + * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA + * configuration will wait till input fences signal. Output fences + * will signal after the new OA configuration takes effect. For + * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar + * to the VM bind case. + */ + DRM_XE_OA_PROPERTY_SYNCS, + + /** + * @DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE: Size of OA buffer to be + * allocated by the driver in bytes. Supported sizes are powers of + * 2 from 128 KiB to 128 MiB. When not specified, a 16 MiB OA + * buffer is allocated by default. + */ + DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE, + + /** + * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait + * for before unblocking poll or read + */ + DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS, + }; + + /** + * struct drm_xe_oa_config - OA metric configuration + * + * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A + * particular config can be specified when opening an OA stream using + * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property. + */ + struct drm_xe_oa_config { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */ + char uuid[36]; + + /** @n_regs: Number of regs in @regs_ptr */ + __u32 n_regs; + + /** + * @regs_ptr: Pointer to (register address, value) pairs for OA config + * registers. Expected length of buffer is: (2 * sizeof(u32) * @n_regs). + */ + __u64 regs_ptr; + }; + + /** + * struct drm_xe_oa_stream_status - OA stream status returned from + * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can + * call the ioctl to query stream status in response to EIO errno from + * observation fd read(). 
+ */ + struct drm_xe_oa_stream_status { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @oa_status: OA stream status (see Bspec 46717/61226) */ + __u64 oa_status; + #define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL (1 << 3) + #define DRM_XE_OASTATUS_COUNTER_OVERFLOW (1 << 2) + #define DRM_XE_OASTATUS_BUFFER_OVERFLOW (1 << 1) + #define DRM_XE_OASTATUS_REPORT_LOST (1 << 0) + + /** @reserved: reserved for future use */ + __u64 reserved[3]; + }; + + /** + * struct drm_xe_oa_stream_info - OA stream info returned from + * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl + */ + struct drm_xe_oa_stream_info { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @oa_buf_size: OA buffer size */ + __u64 oa_buf_size; + + /** @reserved: reserved for future use */ + __u64 reserved[3]; + }; + + /** + * enum drm_xe_pxp_session_type - Supported PXP session types. + * + * We currently only support HWDRM sessions, which are used for protected + * content that ends up being displayed, but the HW supports multiple types, so + * we might extend support in the future. + */ + enum drm_xe_pxp_session_type { + /** @DRM_XE_PXP_TYPE_NONE: PXP not used */ + DRM_XE_PXP_TYPE_NONE = 0, + /** + * @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends + * up on the display. + */ + DRM_XE_PXP_TYPE_HWDRM = 1, + }; + + /* ID of the protected content session managed by Xe when PXP is active */ + #define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf + + /* + * Debugger ABI (ioctl and events) Version History: + * 0 - No debugger available + * 1 - Initial version + */ + #define DRM_XE_EUDEBUG_VERSION 1 + + struct drm_xe_eudebug_connect { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + __u64 pid; /* input: Target process ID */ + __u32 flags; /* MBZ */ + + __u32 version; /* output: current ABI (ioctl / events) version */ + }; + + /* + * struct drm_xe_debug_metadata_create - Create debug metadata + * + * Add a region of user memory to be marked as debug metadata. + * When the debugger attaches, the metadata regions will be delivered + * for debugger. Debugger can then map these regions to help decode + * the program state. + * + * Returns handle to created metadata entry. + */ + struct drm_xe_debug_metadata_create { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + #define DRM_XE_DEBUG_METADATA_ELF_BINARY 0 + #define DRM_XE_DEBUG_METADATA_PROGRAM_MODULE 1 + #define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_MODULE_AREA 2 + #define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SBA_AREA 3 + #define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA 4 + #define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_NUM (1 + \ + WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA) + + /** @type: Type of metadata */ + __u64 type; + + /** @user_addr: pointer to start of the metadata */ + __u64 user_addr; + + /** @len: length, in bytes of the medata */ + __u64 len; + + /** @metadata_id: created metadata handle (out) */ + __u32 metadata_id; + }; + + /** + * struct drm_xe_debug_metadata_destroy - Destroy debug metadata + * + * Destroy debug metadata. 
+ */ + struct drm_xe_debug_metadata_destroy { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @metadata_id: metadata handle to destroy */ + __u32 metadata_id; + }; + + #include "xe_drm_eudebug.h" + + #if defined(__cplusplus) + } + #endif + + #endif /* _XE_DRM_H_ */ + \ No newline at end of file diff --git a/third_party/uapi-eudebug/drm/xe_drm_eudebug.h b/third_party/uapi-eudebug/drm/xe_drm_eudebug.h index e9adcca84e..395c277a40 100644 --- a/third_party/uapi-eudebug/drm/xe_drm_eudebug.h +++ b/third_party/uapi-eudebug/drm/xe_drm_eudebug.h @@ -3,254 +3,238 @@ * Copyright © 2023 Intel Corporation */ -#ifndef _XE_DRM_EUDEBUG_H_ -#define _XE_DRM_EUDEBUG_H_ - -#if defined(__cplusplus) -extern "C" { -#endif - -/** - * Do a eudebug event read for a debugger connection. - * - * This ioctl is available in debug version 1. - */ -#define DRM_XE_EUDEBUG_IOCTL_READ_EVENT _IO('j', 0x0) -#define DRM_XE_EUDEBUG_IOCTL_EU_CONTROL _IOWR('j', 0x2, struct drm_xe_eudebug_eu_control) -#define DRM_XE_EUDEBUG_IOCTL_ACK_EVENT _IOW('j', 0x4, struct drm_xe_eudebug_ack_event) -#define DRM_XE_EUDEBUG_IOCTL_VM_OPEN _IOW('j', 0x1, struct drm_xe_eudebug_vm_open) -#define DRM_XE_EUDEBUG_IOCTL_READ_METADATA _IOWR('j', 0x3, struct drm_xe_eudebug_read_metadata) - -/* XXX: Document events to match their internal counterparts when moved to xe_drm.h */ -struct drm_xe_eudebug_event { - __u32 len; - - __u16 type; -#define DRM_XE_EUDEBUG_EVENT_NONE 0 -#define DRM_XE_EUDEBUG_EVENT_READ 1 -#define DRM_XE_EUDEBUG_EVENT_OPEN 2 -#define DRM_XE_EUDEBUG_EVENT_VM 3 -#define DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE 4 -#define DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE_PLACEMENTS 5 -#define DRM_XE_EUDEBUG_EVENT_EU_ATTENTION 6 -#define DRM_XE_EUDEBUG_EVENT_VM_BIND 7 -#define DRM_XE_EUDEBUG_EVENT_VM_BIND_OP 8 -#define DRM_XE_EUDEBUG_EVENT_VM_BIND_UFENCE 9 -#define DRM_XE_EUDEBUG_EVENT_METADATA 10 -#define DRM_XE_EUDEBUG_EVENT_VM_BIND_OP_METADATA 11 -#define DRM_XE_EUDEBUG_EVENT_PAGEFAULT 12 - - __u16 flags; -#define DRM_XE_EUDEBUG_EVENT_CREATE (1 << 0) -#define DRM_XE_EUDEBUG_EVENT_DESTROY (1 << 1) -#define DRM_XE_EUDEBUG_EVENT_STATE_CHANGE (1 << 2) -#define DRM_XE_EUDEBUG_EVENT_NEED_ACK (1 << 3) - - __u64 seqno; - __u64 reserved; -}; - -struct drm_xe_eudebug_event_client { - struct drm_xe_eudebug_event base; - - __u64 client_handle; /* This is unique per debug connection */ -}; - -struct drm_xe_eudebug_event_vm { - struct drm_xe_eudebug_event base; - - __u64 client_handle; - __u64 vm_handle; -}; - -struct drm_xe_eudebug_event_exec_queue { - struct drm_xe_eudebug_event base; - - __u64 client_handle; - __u64 vm_handle; - __u64 exec_queue_handle; - __u32 engine_class; - __u32 width; - __u64 lrc_handle[]; -}; - -struct drm_xe_eudebug_event_exec_queue_placements { - struct drm_xe_eudebug_event base; - - __u64 client_handle; - __u64 vm_handle; - __u64 exec_queue_handle; - __u64 lrc_handle; - __u32 num_placements; - __u32 pad; - /** - * @instances: user pointer to num_placements sized array of struct - * drm_xe_engine_class_instance - */ - __u64 instances[]; -}; - -struct drm_xe_eudebug_event_eu_attention { - struct drm_xe_eudebug_event base; - - __u64 client_handle; - __u64 exec_queue_handle; - __u64 lrc_handle; - __u32 flags; - __u32 bitmask_size; - __u8 bitmask[]; -}; - -struct drm_xe_eudebug_eu_control { - __u64 client_handle; - -#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_INTERRUPT_ALL 0 -#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_STOPPED 1 -#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_RESUME 2 - __u32 cmd; - __u32 flags; - - __u64 
seqno; - - __u64 exec_queue_handle; - __u64 lrc_handle; - __u32 reserved; - __u32 bitmask_size; - __u64 bitmask_ptr; -}; - -/* - * When client (debuggee) does vm_bind_ioctl() following event - * sequence will be created (for the debugger): - * - * ┌───────────────────────┐ - * │ EVENT_VM_BIND ├───────┬─┬─┐ - * └───────────────────────┘ │ │ │ - * ┌───────────────────────┐ │ │ │ - * │ EVENT_VM_BIND_OP #1 ├───┘ │ │ - * └───────────────────────┘ │ │ - * ... │ │ - * ┌───────────────────────┐ │ │ - * │ EVENT_VM_BIND_OP #n ├─────┘ │ - * └───────────────────────┘ │ - * │ - * ┌───────────────────────┐ │ - * │ EVENT_UFENCE ├───────┘ - * └───────────────────────┘ - * - * All the events below VM_BIND will reference the VM_BIND - * they associate with, by field .vm_bind_ref_seqno. - * event_ufence will only be included if the client did - * attach sync of type UFENCE into its vm_bind_ioctl(). - * - * When EVENT_UFENCE is sent by the driver, all the OPs of - * the original VM_BIND are completed and the [addr,range] - * contained in them are present and modifiable through the - * vm accessors. Accessing [addr, range] before related ufence - * event will lead to undefined results as the actual bind - * operations are async and the backing storage might not - * be there on a moment of receiving the event. - * - * Client's UFENCE sync will be held by the driver: client's - * drm_xe_wait_ufence will not complete and the value of the ufence - * won't appear until ufence is acked by the debugger process calling - * DRM_XE_EUDEBUG_IOCTL_ACK_EVENT with the event_ufence.base.seqno. - * This will signal the fence, .value will update and the wait will - * complete allowing the client to continue. - * - */ - -struct drm_xe_eudebug_event_vm_bind { - struct drm_xe_eudebug_event base; - - __u64 client_handle; - __u64 vm_handle; - - __u32 flags; -#define DRM_XE_EUDEBUG_EVENT_VM_BIND_FLAG_UFENCE (1 << 0) - - __u32 num_binds; -}; - -struct drm_xe_eudebug_event_vm_bind_op { - struct drm_xe_eudebug_event base; - __u64 vm_bind_ref_seqno; /* *_event_vm_bind.base.seqno */ - __u64 num_extensions; - - __u64 addr; /* XXX: Zero for unmap all? */ - __u64 range; /* XXX: Zero for unmap all? 
*/ -}; - -struct drm_xe_eudebug_event_vm_bind_ufence { - struct drm_xe_eudebug_event base; - __u64 vm_bind_ref_seqno; /* *_event_vm_bind.base.seqno */ -}; - -struct drm_xe_eudebug_ack_event { - __u32 type; - __u32 flags; /* MBZ */ - __u64 seqno; -}; - -struct drm_xe_eudebug_vm_open { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @client_handle: id of client */ - __u64 client_handle; - - /** @vm_handle: id of vm */ - __u64 vm_handle; - - /** @flags: flags */ - __u64 flags; - -#define DRM_XE_EUDEBUG_VM_SYNC_MAX_TIMEOUT_NSECS (10ULL * NSEC_PER_SEC) - /** @timeout_ns: Timeout value in nanoseconds operations (fsync) */ - __u64 timeout_ns; -}; - -struct drm_xe_eudebug_read_metadata { - __u64 client_handle; - __u64 metadata_handle; - __u32 flags; - __u32 reserved; - __u64 ptr; - __u64 size; -}; - -struct drm_xe_eudebug_event_metadata { - struct drm_xe_eudebug_event base; - - __u64 client_handle; - __u64 metadata_handle; - /* XXX: Refer to xe_drm.h for fields */ - __u64 type; - __u64 len; -}; - -struct drm_xe_eudebug_event_vm_bind_op_metadata { - struct drm_xe_eudebug_event base; - __u64 vm_bind_op_ref_seqno; /* *_event_vm_bind_op.base.seqno */ - - __u64 metadata_handle; - __u64 metadata_cookie; -}; - -struct drm_xe_eudebug_event_pagefault { - struct drm_xe_eudebug_event base; - - __u64 client_handle; - __u64 exec_queue_handle; - __u64 lrc_handle; - __u32 flags; - __u32 bitmask_size; - __u64 pagefault_address; - __u8 bitmask[]; -}; - -#if defined(__cplusplus) -} -#endif - -#endif + #ifndef _XE_DRM_EUDEBUG_H_ + #define _XE_DRM_EUDEBUG_H_ + + #if defined(__cplusplus) + extern "C" { + #endif + + /** + * Do a eudebug event read for a debugger connection. + * + * This ioctl is available in debug version 1. + */ + #define DRM_XE_EUDEBUG_IOCTL_READ_EVENT _IO('j', 0x0) + #define DRM_XE_EUDEBUG_IOCTL_EU_CONTROL _IOWR('j', 0x2, struct drm_xe_eudebug_eu_control) + #define DRM_XE_EUDEBUG_IOCTL_ACK_EVENT _IOW('j', 0x4, struct drm_xe_eudebug_ack_event) + #define DRM_XE_EUDEBUG_IOCTL_VM_OPEN _IOW('j', 0x1, struct drm_xe_eudebug_vm_open) + #define DRM_XE_EUDEBUG_IOCTL_READ_METADATA _IOWR('j', 0x3, struct drm_xe_eudebug_read_metadata) + + /* XXX: Document events to match their internal counterparts when moved to xe_drm.h */ + struct drm_xe_eudebug_event { + __u32 len; + + __u16 type; + #define DRM_XE_EUDEBUG_EVENT_NONE 0 + #define DRM_XE_EUDEBUG_EVENT_READ 1 + #define DRM_XE_EUDEBUG_EVENT_OPEN 2 + #define DRM_XE_EUDEBUG_EVENT_VM 3 + #define DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE 4 + #define DRM_XE_EUDEBUG_EVENT_EU_ATTENTION 5 + #define DRM_XE_EUDEBUG_EVENT_VM_BIND 6 + #define DRM_XE_EUDEBUG_EVENT_VM_BIND_OP 7 + #define DRM_XE_EUDEBUG_EVENT_VM_BIND_UFENCE 8 + #define DRM_XE_EUDEBUG_EVENT_METADATA 9 + #define DRM_XE_EUDEBUG_EVENT_VM_BIND_OP_METADATA 10 + #define DRM_XE_EUDEBUG_EVENT_PAGEFAULT 11 + + __u16 flags; + #define DRM_XE_EUDEBUG_EVENT_CREATE (1 << 0) + #define DRM_XE_EUDEBUG_EVENT_DESTROY (1 << 1) + #define DRM_XE_EUDEBUG_EVENT_STATE_CHANGE (1 << 2) + #define DRM_XE_EUDEBUG_EVENT_NEED_ACK (1 << 3) + + __u64 seqno; + __u64 reserved; + }; + + struct drm_xe_eudebug_event_client { + struct drm_xe_eudebug_event base; + + __u64 client_handle; /* This is unique per debug connection */ + }; + + struct drm_xe_eudebug_event_vm { + struct drm_xe_eudebug_event base; + + __u64 client_handle; + __u64 vm_handle; + }; + + struct drm_xe_eudebug_event_exec_queue { + struct drm_xe_eudebug_event base; + + __u64 client_handle; + __u64 vm_handle; + __u64 exec_queue_handle; + __u32 
engine_class; + __u32 width; + __u64 lrc_handle[]; + }; + + struct drm_xe_eudebug_event_eu_attention { + struct drm_xe_eudebug_event base; + + __u64 client_handle; + __u64 exec_queue_handle; + __u64 lrc_handle; + __u32 flags; + __u32 bitmask_size; + __u8 bitmask[]; + }; + + struct drm_xe_eudebug_eu_control { + __u64 client_handle; + + #define DRM_XE_EUDEBUG_EU_CONTROL_CMD_INTERRUPT_ALL 0 + #define DRM_XE_EUDEBUG_EU_CONTROL_CMD_STOPPED 1 + #define DRM_XE_EUDEBUG_EU_CONTROL_CMD_RESUME 2 + __u32 cmd; + __u32 flags; + + __u64 seqno; + + __u64 exec_queue_handle; + __u64 lrc_handle; + __u32 reserved; + __u32 bitmask_size; + __u64 bitmask_ptr; + }; + + /* + * When client (debuggee) does vm_bind_ioctl() following event + * sequence will be created (for the debugger): + * + * ┌───────────────────────┐ + * │ EVENT_VM_BIND ├───────┬─┬─┐ + * └───────────────────────┘ │ │ │ + * ┌───────────────────────┐ │ │ │ + * │ EVENT_VM_BIND_OP #1 ├───┘ │ │ + * └───────────────────────┘ │ │ + * ... │ │ + * ┌───────────────────────┐ │ │ + * │ EVENT_VM_BIND_OP #n ├─────┘ │ + * └───────────────────────┘ │ + * │ + * ┌───────────────────────┐ │ + * │ EVENT_UFENCE ├───────┘ + * └───────────────────────┘ + * + * All the events below VM_BIND will reference the VM_BIND + * they associate with, by field .vm_bind_ref_seqno. + * event_ufence will only be included if the client did + * attach sync of type UFENCE into its vm_bind_ioctl(). + * + * When EVENT_UFENCE is sent by the driver, all the OPs of + * the original VM_BIND are completed and the [addr,range] + * contained in them are present and modifiable through the + * vm accessors. Accessing [addr, range] before related ufence + * event will lead to undefined results as the actual bind + * operations are async and the backing storage might not + * be there on a moment of receiving the event. + * + * Client's UFENCE sync will be held by the driver: client's + * drm_xe_wait_ufence will not complete and the value of the ufence + * won't appear until ufence is acked by the debugger process calling + * DRM_XE_EUDEBUG_IOCTL_ACK_EVENT with the event_ufence.base.seqno. + * This will signal the fence, .value will update and the wait will + * complete allowing the client to continue. + * + */ + + struct drm_xe_eudebug_event_vm_bind { + struct drm_xe_eudebug_event base; + + __u64 client_handle; + __u64 vm_handle; + + __u32 flags; + #define DRM_XE_EUDEBUG_EVENT_VM_BIND_FLAG_UFENCE (1 << 0) + + __u32 num_binds; + }; + + struct drm_xe_eudebug_event_vm_bind_op { + struct drm_xe_eudebug_event base; + __u64 vm_bind_ref_seqno; /* *_event_vm_bind.base.seqno */ + __u64 num_extensions; + + __u64 addr; /* XXX: Zero for unmap all? */ + __u64 range; /* XXX: Zero for unmap all? 
*/ + }; + + struct drm_xe_eudebug_event_vm_bind_ufence { + struct drm_xe_eudebug_event base; + __u64 vm_bind_ref_seqno; /* *_event_vm_bind.base.seqno */ + }; + + struct drm_xe_eudebug_ack_event { + __u32 type; + __u32 flags; /* MBZ */ + __u64 seqno; + }; + + struct drm_xe_eudebug_vm_open { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @client_handle: id of client */ + __u64 client_handle; + + /** @vm_handle: id of vm */ + __u64 vm_handle; + + /** @flags: flags */ + __u64 flags; + + #define DRM_XE_EUDEBUG_VM_SYNC_MAX_TIMEOUT_NSECS (10ULL * NSEC_PER_SEC) + /** @timeout_ns: Timeout value in nanoseconds operations (fsync) */ + __u64 timeout_ns; + }; + + struct drm_xe_eudebug_read_metadata { + __u64 client_handle; + __u64 metadata_handle; + __u32 flags; + __u32 reserved; + __u64 ptr; + __u64 size; + }; + + struct drm_xe_eudebug_event_metadata { + struct drm_xe_eudebug_event base; + + __u64 client_handle; + __u64 metadata_handle; + /* XXX: Refer to xe_drm.h for fields */ + __u64 type; + __u64 len; + }; + + struct drm_xe_eudebug_event_vm_bind_op_metadata { + struct drm_xe_eudebug_event base; + __u64 vm_bind_op_ref_seqno; /* *_event_vm_bind_op.base.seqno */ + + __u64 metadata_handle; + __u64 metadata_cookie; + }; + + struct drm_xe_eudebug_event_pagefault { + struct drm_xe_eudebug_event base; + + __u64 client_handle; + __u64 exec_queue_handle; + __u64 lrc_handle; + __u32 flags; + __u32 bitmask_size; + __u64 pagefault_address; + __u8 bitmask[]; + }; + + #if defined(__cplusplus) + } + #endif + + #endif /* _XE_DRM_EUDEBUG_H_ */ + \ No newline at end of file
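
The OA documentation in this hunk says stream parameters are passed as a chain of drm_xe_ext_set_property structs, with each base.name set to DRM_XE_OA_EXTENSION_SET_PROPERTY and the chain referenced by the param field of struct drm_xe_observation_param. A minimal sketch of opening an OA stream that way follows; struct drm_xe_ext_set_property, DRM_XE_OBSERVATION_TYPE_OA, DRM_XE_OBSERVATION_OP_STREAM_OPEN and DRM_IOCTL_XE_OBSERVATION are assumed to come from the earlier part of xe_drm.h that is not shown in this diff, so treat those names as assumptions rather than something this patch guarantees.

.. code-block:: C

   /* Sketch only: assumes DRM_IOCTL_XE_OBSERVATION, DRM_XE_OBSERVATION_TYPE_OA,
    * DRM_XE_OBSERVATION_OP_STREAM_OPEN and struct drm_xe_ext_set_property from
    * the unshown part of xe_drm.h. */
   #include <stdint.h>
   #include <string.h>
   #include <sys/ioctl.h>
   #include "xe_drm.h"

   static int xe_oa_stream_open(int drm_fd, uint32_t oa_unit_id, uint64_t metric_set)
   {
           struct drm_xe_ext_set_property props[3];

           memset(props, 0, sizeof(props));

           /* Each element is one link of the extension chain: base.name selects the
            * OA "set property" extension, property/value carry the actual setting. */
           props[0].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
           props[0].base.next_extension = (uintptr_t)&props[1];
           props[0].property = DRM_XE_OA_PROPERTY_OA_UNIT_ID;
           props[0].value = oa_unit_id;

           props[1].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
           props[1].base.next_extension = (uintptr_t)&props[2];
           props[1].property = DRM_XE_OA_PROPERTY_SAMPLE_OA;
           props[1].value = 1; /* include raw OA reports in the stream samples */

           props[2].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
           props[2].property = DRM_XE_OA_PROPERTY_OA_METRIC_SET;
           props[2].value = metric_set; /* config added via DRM_XE_OBSERVATION_OP_ADD_CONFIG */

           struct drm_xe_observation_param param = {
                   .observation_type = DRM_XE_OBSERVATION_TYPE_OA,      /* assumed name */
                   .observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN, /* assumed name */
                   .param = (uintptr_t)&props[0],
           };

           /* On success the return value is expected to be the observation stream fd. */
           return ioctl(drm_fd, DRM_IOCTL_XE_OBSERVATION, &param);
   }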
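
struct drm_xe_debug_metadata_create above registers a region of user memory, tagged with a type, and returns a metadata handle that is delivered to the debugger on attach. A short sketch of registering an ELF binary is below, assuming a DRM_IOCTL_XE_DEBUG_METADATA_CREATE ioctl wrapper defined in the unshown part of xe_drm.h; that ioctl name is an assumption, only the struct and type defines come from this hunk.

.. code-block:: C

   /* Sketch only: DRM_IOCTL_XE_DEBUG_METADATA_CREATE is assumed, not shown here. */
   #include <stdint.h>
   #include <sys/ioctl.h>
   #include "xe_drm.h"

   static int register_elf_metadata(int drm_fd, const void *elf, uint64_t elf_len,
                                    uint32_t *metadata_id)
   {
           struct drm_xe_debug_metadata_create create = {
                   .type = DRM_XE_DEBUG_METADATA_ELF_BINARY,
                   .user_addr = (uintptr_t)elf,
                   .len = elf_len,
           };

           if (ioctl(drm_fd, DRM_IOCTL_XE_DEBUG_METADATA_CREATE, &create)) /* assumed ioctl */
                   return -1;

           /* Handle the debugger will see when it attaches and maps the region. */
           *metadata_id = create.metadata_id;
           return 0;
   }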
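
The VM_BIND event-sequence comment in xe_drm_eudebug.h notes that the client's user fence is held by the driver until the debugger acks the VM_BIND_UFENCE event via DRM_XE_EUDEBUG_IOCTL_ACK_EVENT. A minimal sketch of that handshake on the debugger side follows; it assumes the eudebug fd was obtained from the connect ioctl declared in xe_drm.h, and it assumes DRM_XE_EUDEBUG_IOCTL_READ_EVENT takes a caller-allocated drm_xe_eudebug_event buffer with .len set to the buffer size, since the header does not spell out the read-event calling convention.

.. code-block:: C

   /* Sketch only: the READ_EVENT buffer convention is an assumption. */
   #include <string.h>
   #include <sys/ioctl.h>
   #include "xe_drm_eudebug.h"

   #define EVENT_BUF_SIZE 4096 /* illustrative; events are variable length */

   static int handle_one_event(int eudebug_fd)
   {
           union {
                   struct drm_xe_eudebug_event base;
                   char raw[EVENT_BUF_SIZE];
           } ev;

           memset(&ev, 0, sizeof(ev));
           ev.base.len = sizeof(ev); /* assumed: room available for the next event */

           if (ioctl(eudebug_fd, DRM_XE_EUDEBUG_IOCTL_READ_EVENT, &ev))
                   return -1;

           if (ev.base.type == DRM_XE_EUDEBUG_EVENT_VM_BIND_UFENCE &&
               (ev.base.flags & DRM_XE_EUDEBUG_EVENT_NEED_ACK)) {
                   /* Debugger is done inspecting [addr, range] of the bind; ack so the
                    * client's drm_xe_wait_ufence completes and its .value updates. */
                   struct drm_xe_eudebug_ack_event ack = {
                           .type = ev.base.type,
                           .flags = 0, /* MBZ */
                           .seqno = ev.base.seqno,
                   };

                   return ioctl(eudebug_fd, DRM_XE_EUDEBUG_IOCTL_ACK_EVENT, &ack);
           }

           return 0;
   }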