build: use drm-next headers instead of upstream kernel headers

pick drm-next uapi headers as of b60301774a8fe6c30b14a95104ec099290a2e904

Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
Mateusz Jablonski <mateusz.jablonski@intel.com>, 2025-04-17 15:49:25 +00:00
committed by Compute-Runtime-Automation
parent e981e85c31
commit 27d7b633e4
10 changed files with 334 additions and 12 deletions

CMake configuration (uapi header directories):

@@ -1,17 +1,17 @@
#
-# Copyright (C) 2024 Intel Corporation
+# Copyright (C) 2024-2025 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
if(NOT DEFINED NEO_DRM_HEADERS_DIR OR NEO_DRM_HEADERS_DIR STREQUAL "")
-get_filename_component(NEO_DRM_HEADERS_DIR "${NEO_SOURCE_DIR}/third_party/uapi/upstream/drm" ABSOLUTE)
+get_filename_component(NEO_DRM_HEADERS_DIR "${NEO_SOURCE_DIR}/third_party/uapi/drm-next/drm" ABSOLUTE)
endif()
message(STATUS "drm includes dir: ${NEO_DRM_HEADERS_DIR}")
if(NOT DEFINED NEO_I915_HEADERS_DIR OR NEO_I915_HEADERS_DIR STREQUAL "")
-get_filename_component(NEO_I915_HEADERS_DIR "${NEO_SOURCE_DIR}/third_party/uapi/upstream/i915" ABSOLUTE)
+get_filename_component(NEO_I915_HEADERS_DIR "${NEO_SOURCE_DIR}/third_party/uapi/drm-next/i915" ABSOLUTE)
endif()
message(STATUS "i915 includes dir: ${NEO_I915_HEADERS_DIR}")
@@ -23,7 +23,7 @@ endif()
message(STATUS "i915 prelim includes dir: ${NEO_I915_PRELIM_HEADERS_DIR}")
if(NOT DEFINED NEO_XE_HEADERS_DIR OR NEO_XE_HEADERS_DIR STREQUAL "")
-get_filename_component(NEO_XE_HEADERS_DIR "${NEO_SOURCE_DIR}/third_party/uapi/upstream/xe" ABSOLUTE)
+get_filename_component(NEO_XE_HEADERS_DIR "${NEO_SOURCE_DIR}/third_party/uapi/drm-next/xe" ABSOLUTE)
endif()
message(STATUS "xe includes dir: ${NEO_XE_HEADERS_DIR}")

GL sharing source (drm_fourcc.h include):

@@ -27,7 +27,7 @@
#include "CL/cl_gl.h"
#include "config.h"
#include "third_party/uapi/upstream/drm/drm_fourcc.h"
#include "drm_fourcc.h"
#include <GL/gl.h>
namespace NEO {

third_party/uapi README:

@@ -11,5 +11,5 @@ Source locations of Linux kernel interface headers:
1. dg1: https://repositories.intel.com/graphics/kernel-api/index.html
2. drm-uapi-helper: https://github.com/intel-gpu/drm-uapi-helper
-3. upstream: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
+3. drm-next: https://gitlab.freedesktop.org/drm/kernel.git

third_party/uapi/drm-next/.version (new file):

@@ -0,0 +1,2 @@
git_revision: b60301774a8fe6c30b14a95104ec099290a2e904
git_url: https://gitlab.freedesktop.org/drm/kernel.git

third_party/uapi/drm-next/drm/drm.h:

@@ -899,13 +899,17 @@ struct drm_syncobj_destroy {
};
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
__u64 point;
};
struct drm_syncobj_transfer {

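The new _TIMELINE flags and @point field above extend sync-file export to specific timeline points. A minimal sketch of exporting one point of a timeline syncobj as a sync-file fd, assuming an open DRM device fd and a valid syncobj handle (the helper name and include paths are illustrative; error handling mostly omitted):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Export the sync point at @point as a sync-file fd. */
    int export_timeline_point(int drm_fd, uint32_t syncobj, uint64_t point)
    {
        struct drm_syncobj_handle args = {
            .handle = syncobj,
            .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE |
                     DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE,
            .fd = -1,
            .point = point, /* new field: which timeline point to export */
        };
        if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
            return -1;
        return args.fd; /* signaled when the timeline point signals */
    }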
third_party/uapi/drm-next/drm/drm_fourcc.h:

@@ -421,6 +421,8 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
#define DRM_FORMAT_MOD_VENDOR_MTK 0x0b
#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c
/* add more to the end as needed */
@@ -1453,6 +1455,90 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
/* MediaTek modifiers
* Bits    Parameter       Notes
* -----   -------------   --------------------------------------
*  7: 0   TILE LAYOUT     Values are MTK_FMT_MOD_TILE_*
* 15: 8   COMPRESSION     Values are MTK_FMT_MOD_COMPRESS_*
* 23:16   10 BIT LAYOUT   Values are MTK_FMT_MOD_10BIT_LAYOUT_*
*/
#define DRM_FORMAT_MOD_MTK(__flags) fourcc_mod_code(MTK, __flags)
/*
* MediaTek Tiled Modifier
* The lowest 8 bits of the modifier are used to specify the tiling
* layout. Only the 16L_32S tiling is used for now, but we define an
* "untiled" version and leave room for future expansion.
*/
#define MTK_FMT_MOD_TILE_MASK 0xf
#define MTK_FMT_MOD_TILE_NONE 0x0
#define MTK_FMT_MOD_TILE_16L32S 0x1
/*
* Bits 8-15 specify compression options
*/
#define MTK_FMT_MOD_COMPRESS_MASK (0xf << 8)
#define MTK_FMT_MOD_COMPRESS_NONE (0x0 << 8)
#define MTK_FMT_MOD_COMPRESS_V1 (0x1 << 8)
/*
* Bits 16-23 specify how the bits of 10 bit formats are
* stored out in memory
*/
#define MTK_FMT_MOD_10BIT_LAYOUT_MASK (0xf << 16)
#define MTK_FMT_MOD_10BIT_LAYOUT_PACKED (0x0 << 16)
#define MTK_FMT_MOD_10BIT_LAYOUT_LSBTILED (0x1 << 16)
#define MTK_FMT_MOD_10BIT_LAYOUT_LSBRASTER (0x2 << 16)
/* alias for the most common tiling format */
#define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S)
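Since the MediaTek modifier is a plain bitfield, composing and decoding one is mechanical. A small illustrative snippet using only the macros above (function names are placeholders):

    #include <drm/drm_fourcc.h>

    /* compose: 16L_32S tiling with V1 compression, packed 10-bit layout */
    static __u64 mtk_example_modifier(void)
    {
        return DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S |
                                  MTK_FMT_MOD_COMPRESS_V1 |
                                  MTK_FMT_MOD_10BIT_LAYOUT_PACKED);
    }

    /* decode: each parameter is recovered with its field mask */
    static int mtk_mod_is_compressed(__u64 mod)
    {
        return (mod & MTK_FMT_MOD_COMPRESS_MASK) != MTK_FMT_MOD_COMPRESS_NONE;
    }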
/*
* Apple GPU-tiled layouts.
*
* Apple GPUs support nonlinear tilings with optional lossless compression.
*
* GPU-tiled images are divided into 16KiB tiles:
*
* Bytes per pixel   Tile size
* ---------------   ---------
*  1                128x128
*  2                128x64
*  4                64x64
*  8                64x32
* 16                32x32
*
* Tiles are stored in raster order. Pixels within a tile are interleaved (Morton order).
*
* Compressed images pad the body to 128 bytes and are immediately followed by a
* metadata section. The metadata section rounds the image dimensions to
* powers of two and contains 8 bytes for each 16x16 compression subtile.
* Subtiles are interleaved (Morton order).
*
* All images are 128-byte aligned.
*
* These layouts fundamentally do not have meaningful strides. No matter how we
* specify strides for these layouts, userspace unaware of Apple image layouts
* will be unable to correctly use the specified stride for any purpose.
* Userspace aware of the image layouts does not use strides. The most "correct"
* convention would be setting the image stride to 0. Unfortunately, some
* software assumes the stride is at least (width * bytes per pixel). We
* therefore require that the stride equals (width * bytes per pixel). Since the
* stride is arbitrary here, we pick the simplest convention.
*
* Although containing two sections, compressed image layouts are treated in
* software as a single plane. This is modelled after AFBC, a similar
* scheme. Attempting to separate the sections to be "explicit" in DRM would
* only generate more confusion, as software does not treat the image this way.
*
* For detailed information on the hardware image layouts, see
* https://docs.mesa3d.org/drivers/asahi.html#image-layouts
*/
#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1)
#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2)
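A short sketch of the two conventions above: the tile size follows from bytes per pixel, and the stride, while meaningless for the layout itself, must equal width * bytes per pixel (helper names are illustrative):

    /* tile dimensions for a given bytes-per-pixel, per the table above */
    static void apple_tile_size(unsigned bpp, unsigned *w, unsigned *h)
    {
        switch (bpp) {
        case 1:  *w = 128; *h = 128; break;
        case 2:  *w = 128; *h = 64;  break;
        case 4:  *w = 64;  *h = 64;  break;
        case 8:  *w = 64;  *h = 32;  break;
        case 16: *w = 32;  *h = 32;  break;
        default: *w = 0;   *h = 0;   break;
        }
    }

    /* the only stride value the convention permits */
    static unsigned apple_required_stride(unsigned width, unsigned bpp)
    {
        return width * bpp;
    }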
/*
* AMD modifiers
*

third_party/uapi/drm-next/xe/xe_drm.h:

@@ -393,6 +393,10 @@ struct drm_xe_query_mem_regions {
*
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
* has usable VRAM
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
* has low latency hint support
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
* device has CPU address mirroring support
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -409,6 +413,8 @@ struct drm_xe_query_config {
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
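The new flags are read back through the config array of DRM_XE_DEVICE_QUERY_CONFIG. A hedged sketch of the usual two-call query pattern (probe the size, then fetch), assuming an open Xe device fd; the helper name is illustrative and error handling is omitted:

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    int xe_has_low_latency_hint(int xe_fd)
    {
        struct drm_xe_device_query q = {
            .query = DRM_XE_DEVICE_QUERY_CONFIG, /* .size == 0: size probe */
        };
        ioctl(xe_fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);

        struct drm_xe_query_config *cfg = malloc(q.size);
        q.data = (uintptr_t)cfg;
        ioctl(xe_fd, DRM_IOCTL_XE_DEVICE_QUERY, &q); /* fill the data */

        int has = !!(cfg->info[DRM_XE_QUERY_CONFIG_FLAGS] &
                     DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY);
        free(cfg);
        return has;
    }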
@@ -629,6 +635,39 @@ struct drm_xe_query_uc_fw_version {
__u64 reserved;
};
/**
* struct drm_xe_query_pxp_status - query if PXP is ready
*
* If PXP is enabled and no fatal error has occurred, the status will be set to
* one of the following values:
* 0: PXP init still in progress
* 1: PXP init complete
*
* If PXP is not enabled or something has gone wrong, the query will fail
* with one of the following error codes:
* -ENODEV: PXP not supported or disabled;
* -EIO: fatal error occurred during init, so PXP will never be enabled;
* -EINVAL: incorrect value provided as part of the query;
* -EFAULT: error copying the memory between kernel and userspace.
*
* The status can only be 0 in the first few seconds after driver load. If
* everything works as expected, the status will transition to init complete in
* less than 1 second, while in case of errors the driver might take longer to
* start returning an error code, but it should still take less than 10 seconds.
*
* The supported session type bitmask is based on the values in
* enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore
* is not reported in the bitmask.
*
*/
struct drm_xe_query_pxp_status {
/** @status: current PXP status */
__u32 status;
/** @supported_session_types: bitmask of supported PXP session types */
__u32 supported_session_types;
};
/**
* struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
* structure to query device information
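The PXP status above is retrieved through this device query. Since the reply size is fixed, the size probe can be skipped by passing sizeof() up front. A sketch of interpreting the result per the rules above (helper name is illustrative; xe_fd assumed open):

    #include <errno.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* 1 if PXP HWDRM sessions are ready, 0 if init is still in progress,
     * negative errno if PXP is unavailable */
    int xe_pxp_hwdrm_ready(int xe_fd)
    {
        struct drm_xe_query_pxp_status st = { 0 };
        struct drm_xe_device_query q = {
            .query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
            .size = sizeof(st),
            .data = (uintptr_t)&st,
        };
        if (ioctl(xe_fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
            return -errno; /* ENODEV: unsupported/disabled, EIO: fatal */
        if (!(st.supported_session_types & (1 << DRM_XE_PXP_TYPE_HWDRM)))
            return -ENODEV;
        return st.status == 1; /* 0: init in progress, 1: init complete */
    }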
@@ -648,6 +687,7 @@ struct drm_xe_query_uc_fw_version {
* attributes.
* - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
* - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
* - %DRM_XE_DEVICE_QUERY_PXP_STATUS
*
* If size is set to 0, the driver fills it with the required size for
* the requested type of data to query. If size is equal to the required
@@ -700,6 +740,8 @@ struct drm_xe_device_query {
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7
#define DRM_XE_DEVICE_QUERY_OA_UNITS 8
#define DRM_XE_DEVICE_QUERY_PXP_STATUS 9
#define DRM_XE_DEVICE_QUERY_EU_STALL 10
/** @query: The type of data to query */
__u32 query;
@@ -743,8 +785,23 @@ struct drm_xe_device_query {
* - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
* is uncached. Scanout surfaces should likely use this. All objects
* that can be placed in VRAM must use this.
*
* This ioctl supports setting the following properties via the
* %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the
* generic @drm_xe_ext_set_property struct:
*
* - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
* this object will be used with. Valid values are listed in enum
* drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
* there is no need to explicitly set that. Objects used with sessions of type
* %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation
* event occurs after their creation. Attempting to flip an invalid object
* will cause a black frame to be displayed instead. Submissions with invalid
* objects mapped in the VM will be rejected.
*/
struct drm_xe_gem_create {
#define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE 0
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
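A minimal sketch of attaching the PXP property described above via the set-property extension (the helper name is illustrative; size and placement are caller-provided; error handling omitted):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    uint32_t xe_create_pxp_bo(int xe_fd, uint64_t size, uint32_t placement)
    {
        struct drm_xe_ext_set_property pxp = {
            .base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
            .property = DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE,
            .value = DRM_XE_PXP_TYPE_HWDRM, /* mark for HWDRM sessions */
        };
        struct drm_xe_gem_create create = {
            .extensions = (uintptr_t)&pxp, /* chain of one extension */
            .size = size,
            .placement = placement,
            .cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
        };
        if (ioctl(xe_fd, DRM_IOCTL_XE_GEM_CREATE, &create))
            return 0;
        return create.handle;
    }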
@@ -811,6 +868,32 @@ struct drm_xe_gem_create {
/**
* struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
*
* The @flags can be:
* - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For user to query special offset
* for use in mmap ioctl. Writing to the returned mmap address will generate a
* PCI memory barrier with low overhead (avoiding IOCTL call as well as writing
* to VRAM which would also add overhead), acting like an MI_MEM_FENCE
* instruction.
*
* Note: The mmap size can be at most 4K, due to HW limitations. As a result
* this interface is only supported on CPU architectures that support 4K page
* size. The mmap_offset ioctl will detect this and gracefully return an
* error, where userspace is expected to have a different fallback method for
* triggering a barrier.
*
* Roughly the usage would be as follows:
*
* .. code-block:: C
*
* struct drm_xe_gem_mmap_offset mmo = {
* .handle = 0, // must be set to 0
* .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
* };
*
* err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
* map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
* map[i] = 0xdeadbeaf; // issue barrier
*/
struct drm_xe_gem_mmap_offset {
/** @extensions: Pointer to the first extension struct, if any */
@@ -819,7 +902,8 @@ struct drm_xe_gem_mmap_offset {
/** @handle: Handle for the object being mapped. */
__u32 handle;
-/** @flags: Must be zero */
+#define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER (1 << 0)
+/** @flags: Flags */
__u32 flags;
/** @offset: The fake offset to use for subsequent mmap call */
@@ -906,6 +990,15 @@ struct drm_xe_vm_destroy {
* will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ. This flag is intended to
* implement VK sparse bindings.
* - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
* reject the binding if the encryption key is no longer valid. This
* flag has no effect on BOs that are not marked as using PXP.
* - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
* set, no mappings are created; rather, the range is reserved for CPU address
* mirroring, to be populated on GPU page faults or prefetches. Only
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag is only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -958,7 +1051,9 @@ struct drm_xe_vm_bind_op {
* on the @pat_index. For such mappings there is no actual memory being
* mapped (the address in the PTE is invalid), so the various PAT memory
* attributes likely do not apply. Simply leaving as zero is one
-* option (still a valid pat_index).
+* option (still a valid pat_index). The same applies to
+* DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings, as for such mappings
+* there is no actual memory being mapped.
*/
__u16 pat_index;
@@ -974,6 +1069,14 @@ struct drm_xe_vm_bind_op {
/** @userptr: user pointer to bind on */
__u64 userptr;
/**
* @cpu_addr_mirror_offset: Offset from GPU @addr at which to create
* CPU address mirror mappings. MBZ with the current level of
* support (only a 1:1 mapping between GPU and CPU addresses is
* supported).
*/
__s64 cpu_addr_mirror_offset;
};
/**
@@ -996,6 +1099,8 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
/** @flags: Bind flags */
__u32 flags;
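A hedged sketch of reserving a CPU-address-mirror range with the new flag described above (the VM must have been created with DRM_XE_VM_CREATE_FLAG_FAULT_MODE; the helper name is illustrative):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    int xe_reserve_cpu_mirror(int xe_fd, uint32_t vm_id,
                              uint64_t addr, uint64_t range)
    {
        struct drm_xe_vm_bind bind = {
            .vm_id = vm_id,
            .num_binds = 1,
            .bind = {
                .op = DRM_XE_VM_BIND_OP_MAP,
                .flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
                .addr = addr,   /* GPU VA; mirrors the same CPU VA */
                .range = range,
                /* BO handle and offset MBZ; nothing is mapped yet */
                .cpu_addr_mirror_offset = 0, /* MBZ: 1:1 mapping only */
                .pat_index = 0, /* valid; no PAT attributes apply */
            },
        };
        return ioctl(xe_fd, DRM_IOCTL_XE_VM_BIND, &bind);
    }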
@@ -1087,6 +1192,24 @@ struct drm_xe_vm_bind {
/**
* struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
*
* This ioctl supports setting the following properties via the
* %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the
* generic @drm_xe_ext_set_property struct:
*
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
* CAP_SYS_NICE is required to set a value above normal.
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
* duration in microseconds.
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
* this queue will be used with. Valid values are listed in enum
* drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
* there is no need to explicitly set that. When a queue of type
* %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
* (%DRM_XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if it isn't already running.
* Given that going into a power-saving state kills PXP HWDRM sessions,
* runtime PM will be blocked while queues of this type are alive.
* All PXP queues will be killed if a PXP invalidation event occurs.
*
* The example below shows how to use @drm_xe_exec_queue_create to create
* a simple exec_queue (no parallel submission) of class
* &DRM_XE_ENGINE_CLASS_RENDER.
@@ -1105,12 +1228,27 @@ struct drm_xe_vm_bind {
* };
* ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
*
* Users can provide a hint to the kernel for cases demanding a low-latency
* profile. Note that this will have an impact on power consumption. The
* low-latency hint is indicated with a flag while creating the exec queue,
* as shown below:
*
* struct drm_xe_exec_queue_create exec_queue_create = {
* .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
* .extensions = 0,
* .vm_id = vm,
* .num_bb_per_exec = 1,
* .num_eng_per_bb = 1,
* .instances = to_user_pointer(&instance),
* };
* ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
*
*/
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@@ -1123,7 +1261,8 @@ struct drm_xe_exec_queue_create {
/** @vm_id: VM to use for this exec queue */
__u32 vm_id;
-/** @flags: MBZ */
+#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
+/** @flags: flags to use for this exec queue */
__u32 flags;
/** @exec_queue_id: Returned exec queue ID */
@@ -1397,6 +1536,8 @@ struct drm_xe_wait_user_fence {
enum drm_xe_observation_type {
/** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
DRM_XE_OBSERVATION_TYPE_OA,
/** @DRM_XE_OBSERVATION_TYPE_EU_STALL: EU stall sampling observation stream type */
DRM_XE_OBSERVATION_TYPE_EU_STALL,
};
/**
@@ -1729,6 +1870,97 @@ struct drm_xe_oa_stream_info {
__u64 reserved[3];
};
/**
* enum drm_xe_pxp_session_type - Supported PXP session types.
*
* We currently only support HWDRM sessions, which are used for protected
* content that ends up being displayed, but the HW supports multiple types, so
* we might extend support in the future.
*/
enum drm_xe_pxp_session_type {
/** @DRM_XE_PXP_TYPE_NONE: PXP not used */
DRM_XE_PXP_TYPE_NONE = 0,
/**
* @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends
* up on the display.
*/
DRM_XE_PXP_TYPE_HWDRM = 1,
};
/* ID of the protected content session managed by Xe when PXP is active */
#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf
/**
* enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
*
* These properties are passed to the driver at open as a chain of
* @drm_xe_ext_set_property structures with @property set to these
* properties' enums and @value set to the corresponding values of these
* properties. @drm_xe_user_extension base.name should be set to
* @DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY.
*
* With the file descriptor obtained from open, user space must enable
* the EU stall stream fd with @DRM_XE_OBSERVATION_IOCTL_ENABLE before
* calling read(). An EIO errno from read() indicates that the HW dropped
* data due to a full buffer.
*/
enum drm_xe_eu_stall_property_id {
#define DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY 0
/**
* @DRM_XE_EU_STALL_PROP_GT_ID: @gt_id of the GT on which
* EU stall data will be captured.
*/
DRM_XE_EU_STALL_PROP_GT_ID = 1,
/**
* @DRM_XE_EU_STALL_PROP_SAMPLE_RATE: Sampling rate in
* GPU cycles from @sampling_rates in struct @drm_xe_query_eu_stall
*/
DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
/**
* @DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS: Minimum number of
* EU stall data reports to be present in the kernel buffer
* before unblocking a blocked poll or read.
*/
DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
};
/**
* struct drm_xe_query_eu_stall - Information about EU stall sampling.
*
* If a query is made with a struct @drm_xe_device_query where .query
* is equal to @DRM_XE_DEVICE_QUERY_EU_STALL, then the reply uses
* struct @drm_xe_query_eu_stall in .data.
*/
struct drm_xe_query_eu_stall {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @capabilities: EU stall capabilities bit-mask */
__u64 capabilities;
#define DRM_XE_EU_STALL_CAPS_BASE (1 << 0)
/** @record_size: size of each EU stall data record */
__u64 record_size;
/** @per_xecore_buf_size: internal per XeCore buffer size */
__u64 per_xecore_buf_size;
/** @reserved: Reserved */
__u64 reserved[5];
/** @num_sampling_rates: Number of sampling rates in @sampling_rates array */
__u64 num_sampling_rates;
/**
* @sampling_rates: Flexible array of sampling rates
* sorted in the fastest to slowest order.
* Sampling rates are specified in GPU clock cycles.
*/
__u64 sampling_rates[];
};
#if defined(__cplusplus)
}
#endif

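A sketch of the EU stall open/enable/read flow described above, going through the observation interface already present in this header (the helper name is illustrative; error handling omitted):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    int xe_open_eu_stall_stream(int xe_fd, uint64_t gt_id, uint64_t rate)
    {
        /* chain of properties, base.name set per the kernel-doc above */
        struct drm_xe_ext_set_property props[2] = {
            { .base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY,
              .property = DRM_XE_EU_STALL_PROP_GT_ID,
              .value = gt_id },
            { .base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY,
              .property = DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
              .value = rate }, /* from drm_xe_query_eu_stall.sampling_rates */
        };
        props[0].base.next_extension = (uintptr_t)&props[1];

        struct drm_xe_observation_param param = {
            .observation_type = DRM_XE_OBSERVATION_TYPE_EU_STALL,
            .observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
            .param = (uintptr_t)&props[0],
        };
        int stream_fd = ioctl(xe_fd, DRM_IOCTL_XE_OBSERVATION, &param);

        /* enable before read(); EIO from read() means HW dropped data */
        ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_ENABLE, 0);
        return stream_fd;
    }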
third_party/uapi/upstream/.version (deleted):

@@ -1,2 +0,0 @@
git_revision: v6.14
git_url: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/