ci: update uapi headers to 6.10.1

Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
 4 files changed, 61 insertions(+), 8 deletions(-)

@@ -1,2 +1,2 @@
-git_revision: v6.9.9
+git_revision: v6.10.1
 git_url: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/

@@ -865,6 +865,17 @@ struct drm_color_lut {
 	__u16 reserved;
 };
 
+/**
+ * struct drm_plane_size_hint - Plane size hints
+ *
+ * The plane SIZE_HINTS property blob contains an
+ * array of struct drm_plane_size_hint.
+ */
+struct drm_plane_size_hint {
+	__u16 width;
+	__u16 height;
+};
+
 /**
  * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
  *
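
The new SIZE_HINTS blob can be read from userspace with stock libdrm property calls. Below is a minimal sketch, assuming a libdrm build and an open DRM fd; plane_id, the local size_hint mirror struct, and print_size_hints are illustrative names, not part of this change.

/* Sketch only: dump a plane's SIZE_HINTS blob via libdrm. The local
 * struct mirrors the new drm_plane_size_hint layout so this also
 * builds against older installed headers. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

struct size_hint { uint16_t width, height; }; /* mirrors drm_plane_size_hint */

static void print_size_hints(int fd, uint32_t plane_id)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);

	for (uint32_t i = 0; props && i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "SIZE_HINTS")) {
			drmModePropertyBlobRes *blob =
				drmModeGetPropertyBlob(fd, props->prop_values[i]);

			if (blob) {
				const struct size_hint *hint = blob->data;

				for (size_t n = 0; n < blob->length / sizeof(*hint); n++)
					printf("hint %zu: %ux%u\n", n,
					       (unsigned)hint[n].width,
					       (unsigned)hint[n].height);
				drmModeFreePropertyBlob(blob);
			}
		}
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
}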

@@ -806,6 +806,12 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_PXP_STATUS 58
 
+/*
+ * Query if kernel allows marking a context to send a Freq hint to SLPC. This
+ * will enable use of the strategies allowed by the SLPC algorithm.
+ */
+#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59
+
 /* Must be kept compact -- no holes and well documented */
 
 /**
@@ -2148,6 +2154,15 @@ struct drm_i915_gem_context_param {
  * -EIO: The firmware did not succeed in creating the protected context.
  */
 #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
 
+/*
+ * I915_CONTEXT_PARAM_LOW_LATENCY:
+ *
+ * Mark this context as a low latency workload which requires aggressive GT
+ * frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel
+ * supports this per context flag.
+ */
+#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
+
 /* Must be kept compact -- no holes and well documented */
 
 	/** @value: Context parameter value to be set or queried */
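
Taken together with I915_PARAM_HAS_CONTEXT_FREQ_HINT above, the intended usage pattern looks roughly like the sketch below; set_low_latency, fd, and ctx_id are placeholders, and treating a non-zero value as "enable" is this example's assumption.

/* Sketch only: probe for the freq-hint param, then flag one context. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_low_latency(int fd, uint32_t ctx_id)
{
	int has_hint = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_CONTEXT_FREQ_HINT,
		.value = &has_hint,
	};

	/* Older kernels reject the unknown param; both paths mean "no". */
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || !has_hint)
		return -1;

	struct drm_i915_gem_context_param cp = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_LOW_LATENCY,
		.value = 1, /* assumed: non-zero enables the hint */
	};
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &cp);
}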
@@ -2623,19 +2638,29 @@ struct drm_i915_reg_read {
  *
  */
 
+/*
+ * struct drm_i915_reset_stats - Return global reset and other context stats
+ *
+ * Driver keeps few stats for each contexts and also global reset count.
+ * This struct can be used to query those stats.
+ */
 struct drm_i915_reset_stats {
+	/** @ctx_id: ID of the requested context */
 	__u32 ctx_id;
 
+	/** @flags: MBZ */
 	__u32 flags;
 
-	/* All resets since boot/module reload, for all contexts */
+	/** @reset_count: All resets since boot/module reload, for all contexts */
 	__u32 reset_count;
 
-	/* Number of batches lost when active in GPU, for this context */
+	/** @batch_active: Number of batches lost when active in GPU, for this context */
 	__u32 batch_active;
 
-	/* Number of batches lost pending for execution, for this context */
+	/** @batch_pending: Number of batches lost pending for execution, for this context */
 	__u32 batch_pending;
 
+	/** @pad: MBZ */
 	__u32 pad;
 };
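
The ioctl behind this struct is unchanged; only its comments moved to kernel-doc. For reference, a minimal query sketch (fd and ctx_id are placeholders):

/* Sketch only: read reset counters for one context. */
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void dump_reset_stats(int fd, uint32_t ctx_id)
{
	struct drm_i915_reset_stats rs = { .ctx_id = ctx_id };

	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &rs) == 0)
		printf("global resets %u, active lost %u, pending lost %u\n",
		       rs.reset_count, rs.batch_active, rs.batch_pending);
}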

@@ -459,8 +459,16 @@ struct drm_xe_gt {
  * by struct drm_xe_query_mem_regions' mem_class.
  */
 	__u64 far_mem_regions;
+	/** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */
+	__u16 ip_ver_major;
+	/** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */
+	__u16 ip_ver_minor;
+	/** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */
+	__u16 ip_ver_rev;
+	/** @pad2: MBZ */
+	__u16 pad2;
 	/** @reserved: Reserved */
-	__u64 reserved[8];
+	__u64 reserved[7];
 };
 
 /**
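
The new IP version fields come back through the existing GT-list query, using the usual two-call pattern (the first ioctl sizes the buffer, the second fills it). A minimal sketch; print_gt_ip_versions and fd are illustrative:

/* Sketch only: print per-GT IP versions via DRM_XE_DEVICE_QUERY_GT_LIST. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static void print_gt_ip_versions(int fd)
{
	struct drm_xe_device_query q = { .query = DRM_XE_DEVICE_QUERY_GT_LIST };

	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) /* sizing call */
		return;

	struct drm_xe_query_gt_list *gts = calloc(1, q.size);

	q.data = (uintptr_t)gts;
	if (gts && !ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
		for (uint32_t i = 0; i < gts->num_gt; i++)
			printf("gt%u: IP %u.%u rev %u\n", gts->gt_list[i].gt_id,
			       gts->gt_list[i].ip_ver_major,
			       gts->gt_list[i].ip_ver_minor,
			       gts->gt_list[i].ip_ver_rev);
	free(gts);
}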
@@ -510,9 +518,9 @@ struct drm_xe_query_topology_mask {
 	/** @gt_id: GT ID the mask is associated with */
 	__u16 gt_id;
 
-#define DRM_XE_TOPO_DSS_GEOMETRY	(1 << 0)
-#define DRM_XE_TOPO_DSS_COMPUTE		(1 << 1)
-#define DRM_XE_TOPO_EU_PER_DSS		(1 << 2)
+#define DRM_XE_TOPO_DSS_GEOMETRY	1
+#define DRM_XE_TOPO_DSS_COMPUTE		2
+#define DRM_XE_TOPO_EU_PER_DSS		4
 	/** @type: type of mask */
 	__u16 type;
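
After this renumbering, @type carries exactly one of these values per record, so userspace compares with == rather than testing bits; the numeric values themselves are unchanged, keeping the ABI stable and leaving 3 free for a future mask type. A sketch of walking the query result; the buffer is assumed to come from a DRM_XE_DEVICE_QUERY_GT_TOPOLOGY query, and the header-plus-num_bytes record packing is this example's assumption:

/* Sketch only: decode DRM_XE_TOPO_* records from a topology query buffer. */
#include <stdio.h>
#include <drm/xe_drm.h>

static const char *topo_name(__u16 type)
{
	switch (type) {
	case DRM_XE_TOPO_DSS_GEOMETRY: return "geometry DSS";
	case DRM_XE_TOPO_DSS_COMPUTE:  return "compute DSS";
	case DRM_XE_TOPO_EU_PER_DSS:   return "EUs per DSS";
	default:                       return "unknown";
	}
}

/* Each record is a variable-length drm_xe_query_topology_mask. */
static void walk_topology(const void *buf, size_t len)
{
	const char *p = buf, *end = p + len;

	while (p + sizeof(struct drm_xe_query_topology_mask) <= end) {
		const struct drm_xe_query_topology_mask *t = (const void *)p;

		printf("gt%u %s: %u mask bytes\n", t->gt_id,
		       topo_name(t->type), t->num_bytes);
		p += sizeof(*t) + t->num_bytes;
	}
}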
@@ -583,6 +591,7 @@ struct drm_xe_query_engine_cycles {
 struct drm_xe_query_uc_fw_version {
 	/** @uc_type: The micro-controller type to query firmware version */
 #define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0
+#define XE_QUERY_UC_TYPE_HUC 1
 	__u16 uc_type;
 
 	/** @pad: MBZ */
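
With the HuC type added, the same fixed-size query now covers both firmwares; @uc_type is an input field and the version fields come back filled in. A hedged sketch (the version field names are taken from the 6.10 header; fd is a placeholder):

/* Sketch only: ask for the HuC firmware version. */
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static void print_huc_version(int fd)
{
	struct drm_xe_query_uc_fw_version v = { .uc_type = XE_QUERY_UC_TYPE_HUC };
	struct drm_xe_device_query q = {
		.query = DRM_XE_DEVICE_QUERY_UC_FW_VERSION,
		.size = sizeof(v),
		.data = (uintptr_t)&v,
	};

	if (!ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
		printf("HuC %u.%u.%u (branch %u)\n",
		       v.major_ver, v.minor_ver, v.patch_ver, v.branch_ver);
}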
@@ -862,6 +871,12 @@ struct drm_xe_vm_destroy {
  * - %DRM_XE_VM_BIND_OP_PREFETCH
  *
  * and the @flags can be:
+ * - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
+ *   to ensure write protection
+ * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
+ *   MAP operation immediately rather than deferring the MAP to the page
+ *   fault handler. This is implied on a non-faulting VM as there is no
+ *   fault handler to defer to.
  * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
  *   tables are setup with a special bit which indicates writes are
  *   dropped and all reads return zero. In the future, the NULL flags
@@ -954,6 +969,8 @@ struct drm_xe_vm_bind_op {
 	/** @op: Bind operation to perform */
 	__u32 op;
 
+#define DRM_XE_VM_BIND_FLAG_READONLY	(1 << 0)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 1)
 #define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
 #define DRM_XE_VM_BIND_FLAG_DUMPABLE	(1 << 3)
 	/** @flags: Bind flags */
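
A minimal sketch of a single MAP bind using the re-added flags; vm_id, the BO handle, address, and size are placeholders, and sync handling plus pat_index selection are omitted for brevity:

/* Sketch only: one read-only, immediate MAP bind. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int bind_readonly(int fd, uint32_t vm_id, uint32_t bo,
			 uint64_t addr, uint64_t size)
{
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind = {
			.obj = bo,
			.range = size,
			.addr = addr,
			.op = DRM_XE_VM_BIND_OP_MAP,
			/* IMMEDIATE matters on faulting VMs; implied otherwise. */
			.flags = DRM_XE_VM_BIND_FLAG_READONLY |
				 DRM_XE_VM_BIND_FLAG_IMMEDIATE,
		},
	};
	return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}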