refactor: Update eudebug header

Resolves: NEO-16069

Signed-off-by: Jemale Lockett <jemale.lockett@intel.com>
Author: Jemale Lockett
Date: 2025-11-05 16:32:30 +00:00
Committed by: Compute-Runtime-Automation
Commit: 5677d4b9ab (parent: 3d8a19cc13)

15 changed files with 5453 additions and 5104 deletions


@@ -72,6 +72,15 @@ DebugSession *DebugSessionLinuxXe::createLinuxSession(const zet_debug_config_t &
return nullptr;
}
ze_result_t DebugSessionLinuxXe::initialize() {
if (euDebugInterface->getInterfaceType() == NEO::EuDebugInterfaceType::upstream) {
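// The upstream eudebug interface reports no client-open events (eventTypeOpen is 0 and
// toEuDebugEventClient is unreachable), so seed the connection map with the default client handle.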
clientHandleToConnection[euDebugInterface->getDefaultClientHandle()].reset(new ClientConnectionXe);
clientHandleToConnection[euDebugInterface->getDefaultClientHandle()]->client = NEO::EuDebugEventClient{};
clientHandleToConnection[euDebugInterface->getDefaultClientHandle()]->client.clientHandle = euDebugInterface->getDefaultClientHandle();
}
return DebugSessionLinux::initialize();
}
bool DebugSessionLinuxXe::handleInternalEvent() {
auto eventMemory = getInternalEvent();
if (eventMemory == nullptr) {


@@ -26,6 +26,8 @@ struct DebugSessionLinuxXe : DebugSessionLinux {
DebugSessionLinuxXe(const zet_debug_config_t &config, Device *device, int debugFd, std::unique_ptr<NEO::EuDebugInterface> debugInterface, void *params);
static DebugSession *createLinuxSession(const zet_debug_config_t &config, Device *device, ze_result_t &result, bool isRootAttach);
ze_result_t initialize() override;
struct IoctlHandlerXe : DebugSessionLinux::IoctlHandler {
IoctlHandlerXe(const NEO::EuDebugInterface &euDebugInterface) : euDebugInterface(euDebugInterface){};
int ioctl(int fd, unsigned long request, void *arg) override {


@@ -181,6 +181,7 @@ struct MockDebugSessionLinuxXe : public L0::DebugSessionLinuxXe {
using L0::DebugSessionLinuxXe::clientHandleToConnection;
using L0::DebugSessionLinuxXe::debugArea;
using L0::DebugSessionLinuxXe::euControlInterruptSeqno;
using L0::DebugSessionLinuxXe::euDebugInterface;
using L0::DebugSessionLinuxXe::eventTypeIsAttention;
using L0::DebugSessionLinuxXe::getEuControlCmdUnlock;
using L0::DebugSessionLinuxXe::getThreadStateMutexForTileSession;


@@ -364,6 +364,23 @@ TEST_F(DebugApiLinuxTestXe, GivenDebugSessionWhenPollReturnsZeroThenNotReadyIsRe
EXPECT_EQ(ZE_RESULT_NOT_READY, result);
}
TEST_F(DebugApiLinuxTestXe, GivenDebugSessionWhenInterfaceIsUpstreamThenDefaultClientHandleConnectionCreated) {
zet_debug_config_t config = {};
config.pid = 0x1234;
auto session = std::make_unique<MockDebugSessionLinuxXe>(config, device, 10);
ASSERT_NE(nullptr, session);
auto handler = new MockIoctlHandlerXe;
session->ioctlHandler.reset(handler);
session->clientHandleToConnection.clear();
session->initialize();
EXPECT_EQ(1u, session->clientHandleToConnection.size());
EXPECT_NE(session->clientHandleToConnection.end(), session->clientHandleToConnection.find(EuDebugInterfaceUpstream::defaultClientHandle));
}
TEST_F(DebugApiLinuxTestXe, GivenDebugSessionInitializationWhenNoValidEventsAreReadThenResultNotReadyIsReturned) {
zet_debug_config_t config = {};
config.pid = 0x1234;


@@ -22,6 +22,7 @@ class EuDebugInterface {
static std::unique_ptr<EuDebugInterface> create(const std::string &sysFsPciPath);
virtual uint32_t getParamValue(EuDebugParam param) const = 0;
virtual bool isExecQueuePageFaultEnableSupported() { return false; };
virtual uint64_t getDefaultClientHandle() const { return 0; }
virtual EuDebugInterfaceType getInterfaceType() const = 0;
virtual ~EuDebugInterface() = default;


@@ -38,9 +38,9 @@ uint32_t EuDebugInterfaceUpstream::getParamValue(EuDebugParam param) const {
case EuDebugParam::eventTypeExecQueuePlacements:
return 0;
case EuDebugParam::eventTypeMetadata:
return DRM_XE_EUDEBUG_EVENT_METADATA;
return 0;
case EuDebugParam::eventTypeOpen:
return DRM_XE_EUDEBUG_EVENT_OPEN;
return 0;
case EuDebugParam::eventTypePagefault:
return DRM_XE_EUDEBUG_EVENT_PAGEFAULT;
case EuDebugParam::eventTypeRead:
@@ -50,9 +50,9 @@ uint32_t EuDebugInterfaceUpstream::getParamValue(EuDebugParam param) const {
case EuDebugParam::eventTypeVmBind:
return DRM_XE_EUDEBUG_EVENT_VM_BIND;
case EuDebugParam::eventTypeVmBindOp:
return DRM_XE_EUDEBUG_EVENT_VM_BIND_OP;
return 0;
case EuDebugParam::eventTypeVmBindOpMetadata:
return DRM_XE_EUDEBUG_EVENT_VM_BIND_OP_METADATA;
return 0;
case EuDebugParam::eventTypeVmBindUfence:
return DRM_XE_EUDEBUG_EVENT_VM_BIND_UFENCE;
case EuDebugParam::eventVmBindFlagUfence:
@@ -70,25 +70,25 @@ uint32_t EuDebugInterfaceUpstream::getParamValue(EuDebugParam param) const {
case EuDebugParam::ioctlReadEvent:
return DRM_XE_EUDEBUG_IOCTL_READ_EVENT;
case EuDebugParam::ioctlReadMetadata:
return DRM_XE_EUDEBUG_IOCTL_READ_METADATA;
return 0;
case EuDebugParam::ioctlVmOpen:
return DRM_XE_EUDEBUG_IOCTL_VM_OPEN;
case EuDebugParam::metadataCreate:
return DRM_IOCTL_XE_DEBUG_METADATA_CREATE;
return 0;
case EuDebugParam::metadataDestroy:
return DRM_IOCTL_XE_DEBUG_METADATA_DESTROY;
return 0;
case EuDebugParam::metadataElfBinary:
return DRM_XE_DEBUG_METADATA_ELF_BINARY;
return 0;
case EuDebugParam::metadataModuleArea:
return WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_MODULE_AREA;
return 0;
case EuDebugParam::metadataProgramModule:
return DRM_XE_DEBUG_METADATA_PROGRAM_MODULE;
return 0;
case EuDebugParam::metadataSbaArea:
return WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SBA_AREA;
return 0;
case EuDebugParam::metadataSipArea:
return WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA;
return 0;
case EuDebugParam::vmBindOpExtensionsAttachDebug:
return XE_VM_BIND_OP_EXTENSIONS_ATTACH_DEBUG;
return 0;
}
return 0;
}
@@ -97,6 +97,10 @@ EuDebugInterfaceType EuDebugInterfaceUpstream::getInterfaceType() const {
return EuDebugInterfaceType::upstream;
}
uint64_t EuDebugInterfaceUpstream::getDefaultClientHandle() const {
return defaultClientHandle;
}
std::unique_ptr<EuDebugEventEuAttention, void (*)(EuDebugEventEuAttention *)> EuDebugInterfaceUpstream::toEuDebugEventEuAttention(const void *drmType) {
const drm_xe_eudebug_event_eu_attention *event = static_cast<const drm_xe_eudebug_event_eu_attention *>(drmType);
EuDebugEventEuAttention *pEuAttentionEvent = static_cast<EuDebugEventEuAttention *>(malloc(sizeof(EuDebugEventEuAttention) + event->bitmask_size * sizeof(uint8_t)));
@@ -112,7 +116,7 @@ std::unique_ptr<EuDebugEventEuAttention, void (*)(EuDebugEventEuAttention *)> Eu
pEuAttentionEvent->flags = event->flags;
pEuAttentionEvent->lrcHandle = event->lrc_handle;
pEuAttentionEvent->execQueueHandle = event->exec_queue_handle;
pEuAttentionEvent->clientHandle = event->client_handle;
pEuAttentionEvent->clientHandle = defaultClientHandle;
auto deleter = [](EuDebugEventEuAttention *ptr) {
free(ptr);
@@ -122,18 +126,8 @@ std::unique_ptr<EuDebugEventEuAttention, void (*)(EuDebugEventEuAttention *)> Eu
}
EuDebugEventClient EuDebugInterfaceUpstream::toEuDebugEventClient(const void *drmType) {
const drm_xe_eudebug_event_client *event = static_cast<const drm_xe_eudebug_event_client *>(drmType);
EuDebugEventClient euClientEvent = {};
euClientEvent.base.len = event->base.len;
euClientEvent.base.type = event->base.type;
euClientEvent.base.flags = event->base.flags;
euClientEvent.base.seqno = event->base.seqno;
euClientEvent.base.reserved = event->base.reserved;
euClientEvent.clientHandle = event->client_handle;
return euClientEvent;
UNRECOVERABLE_IF(true);
return {};
}
EuDebugEventVm EuDebugInterfaceUpstream::toEuDebugEventVm(const void *drmType) {
@@ -145,9 +139,8 @@ EuDebugEventVm EuDebugInterfaceUpstream::toEuDebugEventVm(const void *drmType) {
euVmEvent.base.flags = event->base.flags;
euVmEvent.base.seqno = event->base.seqno;
euVmEvent.base.reserved = event->base.reserved;
euVmEvent.clientHandle = event->client_handle;
euVmEvent.vmHandle = event->vm_handle;
euVmEvent.clientHandle = defaultClientHandle;
return euVmEvent;
}
@@ -166,7 +159,7 @@ std::unique_ptr<EuDebugEventExecQueue, void (*)(EuDebugEventExecQueue *)> EuDebu
pExecQueueEvent->engineClass = event->engine_class;
pExecQueueEvent->width = event->width;
pExecQueueEvent->vmHandle = event->vm_handle;
pExecQueueEvent->clientHandle = event->client_handle;
pExecQueueEvent->clientHandle = defaultClientHandle;
memcpy(pExecQueueEvent->lrcHandle, event->lrc_handle, event->width * sizeof(uint64_t));
auto deleter = [](EuDebugEventExecQueue *ptr) {
@@ -183,21 +176,9 @@ std::unique_ptr<EuDebugEventExecQueuePlacements, void (*)(EuDebugEventExecQueueP
}
EuDebugEventMetadata EuDebugInterfaceUpstream::toEuDebugEventMetadata(const void *drmType) {
const drm_xe_eudebug_event_metadata *event = static_cast<const drm_xe_eudebug_event_metadata *>(drmType);
EuDebugEventMetadata metadataEvent = {};
metadataEvent.base.len = event->base.len;
metadataEvent.base.type = event->base.type;
metadataEvent.base.flags = event->base.flags;
metadataEvent.base.seqno = event->base.seqno;
metadataEvent.base.reserved = event->base.reserved;
metadataEvent.clientHandle = event->client_handle;
metadataEvent.len = event->len;
metadataEvent.metadataHandle = event->metadata_handle;
metadataEvent.type = event->type;
return metadataEvent;
UNRECOVERABLE_IF(true);
return {};
}
EuDebugEventVmBind EuDebugInterfaceUpstream::toEuDebugEventVmBind(const void *drmType) {
@@ -209,48 +190,24 @@ EuDebugEventVmBind EuDebugInterfaceUpstream::toEuDebugEventVmBind(const void *dr
vmBindEvent.base.flags = event->base.flags;
vmBindEvent.base.seqno = event->base.seqno;
vmBindEvent.base.reserved = event->base.reserved;
vmBindEvent.clientHandle = event->client_handle;
vmBindEvent.flags = event->flags;
vmBindEvent.numBinds = event->num_binds;
vmBindEvent.vmHandle = event->vm_handle;
vmBindEvent.clientHandle = defaultClientHandle;
return vmBindEvent;
}
NEO::EuDebugEventVmBindOp EuDebugInterfaceUpstream::toEuDebugEventVmBindOp(const void *drmType) {
const drm_xe_eudebug_event_vm_bind_op *event = static_cast<const drm_xe_eudebug_event_vm_bind_op *>(drmType);
EuDebugEventVmBindOp vmBindOpEvent = {};
vmBindOpEvent.base.len = event->base.len;
vmBindOpEvent.base.type = event->base.type;
vmBindOpEvent.base.flags = event->base.flags;
vmBindOpEvent.base.seqno = event->base.seqno;
vmBindOpEvent.base.reserved = event->base.reserved;
vmBindOpEvent.vmBindRefSeqno = event->vm_bind_ref_seqno;
vmBindOpEvent.numExtensions = event->num_extensions;
vmBindOpEvent.addr = event->addr;
vmBindOpEvent.range = event->range;
return vmBindOpEvent;
UNRECOVERABLE_IF(true);
return {};
}
EuDebugEventVmBindOpMetadata EuDebugInterfaceUpstream::toEuDebugEventVmBindOpMetadata(const void *drmType) {
const drm_xe_eudebug_event_vm_bind_op_metadata *event = static_cast<const drm_xe_eudebug_event_vm_bind_op_metadata *>(drmType);
EuDebugEventVmBindOpMetadata vmBindOpMetadataEvent = {};
vmBindOpMetadataEvent.base.len = event->base.len;
vmBindOpMetadataEvent.base.type = event->base.type;
vmBindOpMetadataEvent.base.flags = event->base.flags;
vmBindOpMetadataEvent.base.seqno = event->base.seqno;
vmBindOpMetadataEvent.base.reserved = event->base.reserved;
vmBindOpMetadataEvent.vmBindOpRefSeqno = event->vm_bind_op_ref_seqno;
vmBindOpMetadataEvent.metadataHandle = event->metadata_handle;
vmBindOpMetadataEvent.metadataCookie = event->metadata_cookie;
return vmBindOpMetadataEvent;
UNRECOVERABLE_IF(true);
return {};
}
EuDebugEventVmBindUfence EuDebugInterfaceUpstream::toEuDebugEventVmBindUfence(const void *drmType) {
@@ -280,11 +237,11 @@ std::unique_ptr<EuDebugEventPageFault, void (*)(EuDebugEventPageFault *)> EuDebu
memcpy(pPageFaultEvent->bitmask, event->bitmask, event->bitmask_size * sizeof(uint8_t));
pPageFaultEvent->bitmaskSize = event->bitmask_size;
pPageFaultEvent->clientHandle = event->client_handle;
pPageFaultEvent->flags = event->flags;
pPageFaultEvent->execQueueHandle = event->exec_queue_handle;
pPageFaultEvent->lrcHandle = event->lrc_handle;
pPageFaultEvent->pagefaultAddress = event->pagefault_address;
pPageFaultEvent->clientHandle = defaultClientHandle;
auto deleter = [](EuDebugEventPageFault *ptr) {
free(ptr);
@@ -299,12 +256,12 @@ EuDebugEuControl EuDebugInterfaceUpstream::toEuDebugEuControl(const void *drmTyp
control.bitmaskPtr = euControl->bitmask_ptr;
control.bitmaskSize = euControl->bitmask_size;
control.clientHandle = euControl->client_handle;
control.cmd = euControl->cmd;
control.flags = euControl->flags;
control.execQueueHandle = euControl->exec_queue_handle;
control.lrcHandle = euControl->lrc_handle;
control.seqno = euControl->seqno;
control.clientHandle = defaultClientHandle;
return control;
}
@@ -315,7 +272,6 @@ EuDebugConnect EuDebugInterfaceUpstream::toEuDebugConnect(const void *drmType) {
connectEvent.extensions = event->extensions;
connectEvent.flags = event->flags;
connectEvent.pid = event->pid;
connectEvent.version = event->version;
return connectEvent;
@@ -325,7 +281,6 @@ std::unique_ptr<void, void (*)(void *)> EuDebugInterfaceUpstream::toDrmEuDebugCo
struct drm_xe_eudebug_connect *pDrmConnect = new drm_xe_eudebug_connect();
pDrmConnect->extensions = connect.extensions;
pDrmConnect->pid = connect.pid;
pDrmConnect->flags = connect.flags;
pDrmConnect->version = connect.version;
@@ -340,7 +295,6 @@ std::unique_ptr<void, void (*)(void *)> EuDebugInterfaceUpstream::toDrmEuDebugEu
pDrmEuControl->bitmask_ptr = euControl.bitmaskPtr;
pDrmEuControl->bitmask_size = euControl.bitmaskSize;
pDrmEuControl->client_handle = euControl.clientHandle;
pDrmEuControl->cmd = euControl.cmd;
pDrmEuControl->flags = euControl.flags;
pDrmEuControl->exec_queue_handle = euControl.execQueueHandle;
@@ -356,7 +310,6 @@ std::unique_ptr<void, void (*)(void *)> EuDebugInterfaceUpstream::toDrmEuDebugEu
std::unique_ptr<void, void (*)(void *)> EuDebugInterfaceUpstream::toDrmEuDebugVmOpen(const EuDebugVmOpen &vmOpen) {
struct drm_xe_eudebug_vm_open *pDrmVmOpen = new drm_xe_eudebug_vm_open();
pDrmVmOpen->client_handle = vmOpen.clientHandle;
pDrmVmOpen->extensions = vmOpen.extensions;
pDrmVmOpen->flags = vmOpen.flags;
pDrmVmOpen->timeout_ns = vmOpen.timeoutNs;
@@ -387,35 +340,9 @@ static_assert(offsetof(EuDebugEvent, flags) == offsetof(drm_xe_eudebug_event, fl
static_assert(offsetof(EuDebugEvent, seqno) == offsetof(drm_xe_eudebug_event, seqno));
static_assert(offsetof(EuDebugEvent, reserved) == offsetof(drm_xe_eudebug_event, reserved));
static_assert(sizeof(EuDebugReadMetadata) == sizeof(drm_xe_eudebug_read_metadata));
static_assert(offsetof(EuDebugReadMetadata, clientHandle) == offsetof(drm_xe_eudebug_read_metadata, client_handle));
static_assert(offsetof(EuDebugReadMetadata, metadataHandle) == offsetof(drm_xe_eudebug_read_metadata, metadata_handle));
static_assert(offsetof(EuDebugReadMetadata, flags) == offsetof(drm_xe_eudebug_read_metadata, flags));
static_assert(offsetof(EuDebugReadMetadata, reserved) == offsetof(drm_xe_eudebug_read_metadata, reserved));
static_assert(offsetof(EuDebugReadMetadata, ptr) == offsetof(drm_xe_eudebug_read_metadata, ptr));
static_assert(offsetof(EuDebugReadMetadata, size) == offsetof(drm_xe_eudebug_read_metadata, size));
static_assert(sizeof(DebugMetadataCreate) == sizeof(drm_xe_debug_metadata_create));
static_assert(offsetof(DebugMetadataCreate, extensions) == offsetof(drm_xe_debug_metadata_create, extensions));
static_assert(offsetof(DebugMetadataCreate, type) == offsetof(drm_xe_debug_metadata_create, type));
static_assert(offsetof(DebugMetadataCreate, userAddr) == offsetof(drm_xe_debug_metadata_create, user_addr));
static_assert(offsetof(DebugMetadataCreate, len) == offsetof(drm_xe_debug_metadata_create, len));
static_assert(offsetof(DebugMetadataCreate, metadataId) == offsetof(drm_xe_debug_metadata_create, metadata_id));
static_assert(sizeof(DebugMetadataDestroy) == sizeof(drm_xe_debug_metadata_destroy));
static_assert(offsetof(DebugMetadataDestroy, extensions) == offsetof(drm_xe_debug_metadata_destroy, extensions));
static_assert(offsetof(DebugMetadataDestroy, metadataId) == offsetof(drm_xe_debug_metadata_destroy, metadata_id));
static_assert(sizeof(XeUserExtension) == sizeof(drm_xe_user_extension));
static_assert(offsetof(XeUserExtension, nextExtension) == offsetof(drm_xe_user_extension, next_extension));
static_assert(offsetof(XeUserExtension, name) == offsetof(drm_xe_user_extension, name));
static_assert(offsetof(XeUserExtension, pad) == offsetof(drm_xe_user_extension, pad));
static_assert(sizeof(VmBindOpExtAttachDebug) == sizeof(drm_xe_vm_bind_op_ext_attach_debug));
static_assert(offsetof(VmBindOpExtAttachDebug, base) == offsetof(drm_xe_vm_bind_op_ext_attach_debug, base));
static_assert(offsetof(VmBindOpExtAttachDebug, metadataId) == offsetof(drm_xe_vm_bind_op_ext_attach_debug, metadata_id));
static_assert(offsetof(VmBindOpExtAttachDebug, flags) == offsetof(drm_xe_vm_bind_op_ext_attach_debug, flags));
static_assert(offsetof(VmBindOpExtAttachDebug, cookie) == offsetof(drm_xe_vm_bind_op_ext_attach_debug, cookie));
static_assert(offsetof(VmBindOpExtAttachDebug, reserved) == offsetof(drm_xe_vm_bind_op_ext_attach_debug, reserved));
} // namespace NEO


@@ -12,9 +12,11 @@ namespace NEO {
class EuDebugInterfaceUpstream : public EuDebugInterface {
public:
static constexpr const char *sysFsXeEuDebugFile = "/device/enable_eudebug";
static constexpr uint64_t defaultClientHandle = 1;
uint32_t getParamValue(EuDebugParam param) const override;
EuDebugInterfaceType getInterfaceType() const override;
uint64_t getDefaultClientHandle() const override;
std::unique_ptr<EuDebugEventEuAttention, void (*)(EuDebugEventEuAttention *)> toEuDebugEventEuAttention(const void *drmType) override;
EuDebugEventClient toEuDebugEventClient(const void *drmType) override;


@@ -67,7 +67,7 @@ uint32_t MockEuDebugInterface::getParamValue(EuDebugParam param) const {
std::unique_ptr<EuDebugEventEuAttention, void (*)(EuDebugEventEuAttention *)> MockEuDebugInterface::toEuDebugEventEuAttention(const void *drmType) {
const drm_xe_eudebug_event_eu_attention *event = static_cast<const drm_xe_eudebug_event_eu_attention *>(drmType);
const prelim_drm_xe_eudebug_event_eu_attention *event = static_cast<const prelim_drm_xe_eudebug_event_eu_attention *>(drmType);
EuDebugEventEuAttention *pEuAttentionEvent = static_cast<EuDebugEventEuAttention *>(malloc(sizeof(EuDebugEventEuAttention) + event->bitmask_size * sizeof(uint8_t)));
pEuAttentionEvent->base.len = event->base.len;
@@ -94,7 +94,7 @@ EuDebugEventVm MockEuDebugInterface::toEuDebugEventVm(const void *drmType) {
return *static_cast<const EuDebugEventVm *>(drmType);
}
std::unique_ptr<EuDebugEventExecQueue, void (*)(EuDebugEventExecQueue *)> MockEuDebugInterface::toEuDebugEventExecQueue(const void *drmType) {
const drm_xe_eudebug_event_exec_queue *event = static_cast<const drm_xe_eudebug_event_exec_queue *>(drmType);
const prelim_drm_xe_eudebug_event_exec_queue *event = static_cast<const prelim_drm_xe_eudebug_event_exec_queue *>(drmType);
EuDebugEventExecQueue *pExecQueueEvent = static_cast<EuDebugEventExecQueue *>(malloc(sizeof(EuDebugEventExecQueue) + event->width * sizeof(uint64_t)));
pExecQueueEvent->base.len = event->base.len;
@@ -153,7 +153,7 @@ EuDebugEventVmBindUfence MockEuDebugInterface::toEuDebugEventVmBindUfence(const
return *static_cast<const EuDebugEventVmBindUfence *>(drmType);
}
std::unique_ptr<EuDebugEventPageFault, void (*)(EuDebugEventPageFault *)> MockEuDebugInterface::toEuDebugEventPageFault(const void *drmType) {
const drm_xe_eudebug_event_pagefault *event = static_cast<const drm_xe_eudebug_event_pagefault *>(drmType);
const prelim_drm_xe_eudebug_event_pagefault *event = static_cast<const prelim_drm_xe_eudebug_event_pagefault *>(drmType);
EuDebugEventPageFault *pPageFaultEvent = static_cast<EuDebugEventPageFault *>(malloc(sizeof(EuDebugEventPageFault) + event->bitmask_size * sizeof(uint8_t)));
pPageFaultEvent->base.len = event->base.len;
@@ -186,7 +186,7 @@ EuDebugConnect MockEuDebugInterface::toEuDebugConnect(const void *drmType) {
}
std::unique_ptr<void, void (*)(void *)> MockEuDebugInterface::toDrmEuDebugConnect(const EuDebugConnect &connect) {
struct drm_xe_eudebug_connect *pDrmConnect = new drm_xe_eudebug_connect();
struct prelim_drm_xe_eudebug_connect *pDrmConnect = new prelim_drm_xe_eudebug_connect();
pDrmConnect->extensions = connect.extensions;
pDrmConnect->pid = connect.pid;
@@ -194,12 +194,12 @@ std::unique_ptr<void, void (*)(void *)> MockEuDebugInterface::toDrmEuDebugConnec
pDrmConnect->version = connect.version;
auto deleter = [](void *ptr) {
delete static_cast<drm_xe_eudebug_connect *>(ptr);
delete static_cast<prelim_drm_xe_eudebug_connect *>(ptr);
};
return std::unique_ptr<void, void (*)(void *)>(pDrmConnect, deleter);
}
std::unique_ptr<void, void (*)(void *)> MockEuDebugInterface::toDrmEuDebugEuControl(const EuDebugEuControl &euControl) {
struct drm_xe_eudebug_eu_control *pDrmEuControl = new drm_xe_eudebug_eu_control();
struct prelim_drm_xe_eudebug_eu_control *pDrmEuControl = new prelim_drm_xe_eudebug_eu_control();
pDrmEuControl->bitmask_ptr = euControl.bitmaskPtr;
pDrmEuControl->bitmask_size = euControl.bitmaskSize;
@@ -211,12 +211,12 @@ std::unique_ptr<void, void (*)(void *)> MockEuDebugInterface::toDrmEuDebugEuCont
pDrmEuControl->seqno = euControl.seqno;
auto deleter = [](void *ptr) {
delete static_cast<drm_xe_eudebug_eu_control *>(ptr);
delete static_cast<prelim_drm_xe_eudebug_eu_control *>(ptr);
};
return std::unique_ptr<void, void (*)(void *)>(pDrmEuControl, deleter);
}
std::unique_ptr<void, void (*)(void *)> MockEuDebugInterface::toDrmEuDebugVmOpen(const EuDebugVmOpen &vmOpen) {
struct drm_xe_eudebug_vm_open *pDrmVmOpen = new drm_xe_eudebug_vm_open();
struct prelim_drm_xe_eudebug_vm_open *pDrmVmOpen = new prelim_drm_xe_eudebug_vm_open();
pDrmVmOpen->client_handle = vmOpen.clientHandle;
pDrmVmOpen->extensions = vmOpen.extensions;
@@ -225,19 +225,19 @@ std::unique_ptr<void, void (*)(void *)> MockEuDebugInterface::toDrmEuDebugVmOpen
pDrmVmOpen->vm_handle = vmOpen.vmHandle;
auto deleter = [](void *ptr) {
delete static_cast<drm_xe_eudebug_vm_open *>(ptr);
delete static_cast<prelim_drm_xe_eudebug_vm_open *>(ptr);
};
return std::unique_ptr<void, void (*)(void *)>(pDrmVmOpen, deleter);
}
std::unique_ptr<void, void (*)(void *)> MockEuDebugInterface::toDrmEuDebugAckEvent(const EuDebugAckEvent &ackEvent) {
struct drm_xe_eudebug_ack_event *pDrmAckEvent = new drm_xe_eudebug_ack_event();
struct prelim_drm_xe_eudebug_ack_event *pDrmAckEvent = new prelim_drm_xe_eudebug_ack_event();
pDrmAckEvent->type = ackEvent.type;
pDrmAckEvent->flags = ackEvent.flags;
pDrmAckEvent->seqno = ackEvent.seqno;
auto deleter = [](void *ptr) {
delete static_cast<drm_xe_eudebug_ack_event *>(ptr);
delete static_cast<prelim_drm_xe_eudebug_ack_event *>(ptr);
};
return std::unique_ptr<void, void (*)(void *)>(pDrmAckEvent, deleter);
}


@@ -18,6 +18,9 @@ class MockEuDebugInterface : public EuDebugInterface {
bool isExecQueuePageFaultEnableSupported() override { return pageFaultEnableSupported; };
uint32_t getParamValue(EuDebugParam param) const override;
EuDebugInterfaceType getInterfaceType() const override { return euDebugInterfaceType; };
uint64_t getDefaultClientHandle() const override {
return 1; // EuDebugInterfaceUpstream::defaultClientHandle
};
std::unique_ptr<EuDebugEventEuAttention, void (*)(EuDebugEventEuAttention *)> toEuDebugEventEuAttention(const void *drmType) override;
EuDebugEventClient toEuDebugEventClient(const void *drmType) override;


@@ -25,14 +25,14 @@ TEST(EuDebugInterfaceUpstreamTest, whenGettingParamValueThenCorrectValueIsReturn
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_EU_ATTENTION), euDebugInterface.getParamValue(EuDebugParam::eventTypeEuAttention));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE), euDebugInterface.getParamValue(EuDebugParam::eventTypeExecQueue));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::eventTypeExecQueuePlacements));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_METADATA), euDebugInterface.getParamValue(EuDebugParam::eventTypeMetadata));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_OPEN), euDebugInterface.getParamValue(EuDebugParam::eventTypeOpen));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::eventTypeMetadata));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::eventTypeOpen));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_PAGEFAULT), euDebugInterface.getParamValue(EuDebugParam::eventTypePagefault));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_READ), euDebugInterface.getParamValue(EuDebugParam::eventTypeRead));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_VM), euDebugInterface.getParamValue(EuDebugParam::eventTypeVm));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_VM_BIND), euDebugInterface.getParamValue(EuDebugParam::eventTypeVmBind));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_VM_BIND_OP), euDebugInterface.getParamValue(EuDebugParam::eventTypeVmBindOp));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_VM_BIND_OP_METADATA), euDebugInterface.getParamValue(EuDebugParam::eventTypeVmBindOpMetadata));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::eventTypeVmBindOp));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::eventTypeVmBindOpMetadata));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_VM_BIND_UFENCE), euDebugInterface.getParamValue(EuDebugParam::eventTypeVmBindUfence));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_EVENT_VM_BIND_FLAG_UFENCE), euDebugInterface.getParamValue(EuDebugParam::eventVmBindFlagUfence));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EXEC_QUEUE_SET_PROPERTY_EUDEBUG), euDebugInterface.getParamValue(EuDebugParam::execQueueSetPropertyEuDebug));
@@ -40,16 +40,16 @@ TEST(EuDebugInterfaceUpstreamTest, whenGettingParamValueThenCorrectValueIsReturn
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_IOCTL_ACK_EVENT), euDebugInterface.getParamValue(EuDebugParam::ioctlAckEvent));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_IOCTL_EU_CONTROL), euDebugInterface.getParamValue(EuDebugParam::ioctlEuControl));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_IOCTL_READ_EVENT), euDebugInterface.getParamValue(EuDebugParam::ioctlReadEvent));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_IOCTL_READ_METADATA), euDebugInterface.getParamValue(EuDebugParam::ioctlReadMetadata));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::ioctlReadMetadata));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_EUDEBUG_IOCTL_VM_OPEN), euDebugInterface.getParamValue(EuDebugParam::ioctlVmOpen));
EXPECT_EQ(static_cast<uint32_t>(DRM_IOCTL_XE_DEBUG_METADATA_CREATE), euDebugInterface.getParamValue(EuDebugParam::metadataCreate));
EXPECT_EQ(static_cast<uint32_t>(DRM_IOCTL_XE_DEBUG_METADATA_DESTROY), euDebugInterface.getParamValue(EuDebugParam::metadataDestroy));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_DEBUG_METADATA_ELF_BINARY), euDebugInterface.getParamValue(EuDebugParam::metadataElfBinary));
EXPECT_EQ(static_cast<uint32_t>(WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_MODULE_AREA), euDebugInterface.getParamValue(EuDebugParam::metadataModuleArea));
EXPECT_EQ(static_cast<uint32_t>(DRM_XE_DEBUG_METADATA_PROGRAM_MODULE), euDebugInterface.getParamValue(EuDebugParam::metadataProgramModule));
EXPECT_EQ(static_cast<uint32_t>(WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SBA_AREA), euDebugInterface.getParamValue(EuDebugParam::metadataSbaArea));
EXPECT_EQ(static_cast<uint32_t>(WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA), euDebugInterface.getParamValue(EuDebugParam::metadataSipArea));
EXPECT_EQ(static_cast<uint32_t>(XE_VM_BIND_OP_EXTENSIONS_ATTACH_DEBUG), euDebugInterface.getParamValue(EuDebugParam::vmBindOpExtensionsAttachDebug));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::metadataCreate));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::metadataDestroy));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::metadataElfBinary));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::metadataModuleArea));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::metadataProgramModule));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::metadataSbaArea));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::metadataSipArea));
EXPECT_EQ(static_cast<uint32_t>(0), euDebugInterface.getParamValue(EuDebugParam::vmBindOpExtensionsAttachDebug));
}
TEST(EuDebugInterfaceUpstreamTest, whenGettingInterfaceTypeThenUpstreamIsReturned) {
@@ -62,7 +62,6 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmEuAttentionWhenConvertingToInter
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_eu_attention *drmEuAttention = (drm_xe_eudebug_event_eu_attention *)malloc(sizeof(drm_xe_eudebug_event_eu_attention) + 4 * sizeof(uint8_t));
drmEuAttention->client_handle = 0x32;
drmEuAttention->exec_queue_handle = 0x64;
drmEuAttention->lrc_handle = 0x128;
drmEuAttention->flags = 0x0F;
@@ -73,7 +72,6 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmEuAttentionWhenConvertingToInter
drmEuAttention->bitmask[3] = 0x4;
auto event = euDebugInterface.toEuDebugEventEuAttention(drmEuAttention);
EXPECT_EQ(0x32u, event->clientHandle);
EXPECT_EQ(0x64u, event->execQueueHandle);
EXPECT_EQ(0x128u, event->lrcHandle);
EXPECT_EQ(0x0Fu, event->flags);
@@ -86,25 +84,13 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmEuAttentionWhenConvertingToInter
free(drmEuAttention);
}
TEST(EuDebugInterfaceUpstreamTest, givenValidDrmClientWhenConvertingToInterfaceTypeThenFieldsAreCorrect) {
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_client drmClient = {};
drmClient.client_handle = 0x32;
auto event = euDebugInterface.toEuDebugEventClient(&drmClient);
EXPECT_EQ(0x32u, event.clientHandle);
}
TEST(EuDebugInterfaceUpstreamTest, givenValidDrmVmWhenConvertingToInterfaceTypeThenFieldsAreCorrect) {
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_vm drmVm = {};
drmVm.client_handle = 0x32;
drmVm.vm_handle = 0x64;
auto event = euDebugInterface.toEuDebugEventVm(&drmVm);
EXPECT_EQ(0x32u, event.clientHandle);
EXPECT_EQ(0x64u, event.vmHandle);
}
@@ -112,7 +98,6 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmExecQueueWhenConvertingToInterfa
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_exec_queue *drmExecQueue = (drm_xe_eudebug_event_exec_queue *)malloc(sizeof(drm_xe_eudebug_event_exec_queue) + 3 * sizeof(uint64_t));
drmExecQueue->client_handle = 0x32;
drmExecQueue->vm_handle = 0x64;
drmExecQueue->exec_queue_handle = 0x128;
drmExecQueue->engine_class = 0x256;
@@ -122,7 +107,6 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmExecQueueWhenConvertingToInterfa
drmExecQueue->lrc_handle[2] = 0x3;
auto event = euDebugInterface.toEuDebugEventExecQueue(drmExecQueue);
EXPECT_EQ(0x32u, event->clientHandle);
EXPECT_EQ(0x64u, event->vmHandle);
EXPECT_EQ(0x128u, event->execQueueHandle);
EXPECT_EQ(0x256u, event->engineClass);
@@ -134,68 +118,20 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmExecQueueWhenConvertingToInterfa
free(drmExecQueue);
}
TEST(EuDebugInterfaceUpstreamTest, givenValidDrmMetadataWhenConvertingToInterfaceTypeThenFieldsAreCorrect) {
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_metadata drmMetadata = {};
drmMetadata.client_handle = 0x32;
drmMetadata.metadata_handle = 0x64;
drmMetadata.type = 0x128;
drmMetadata.len = 0x256;
auto event = euDebugInterface.toEuDebugEventMetadata(&drmMetadata);
EXPECT_EQ(0x32u, event.clientHandle);
EXPECT_EQ(0x64u, event.metadataHandle);
EXPECT_EQ(0x128u, event.type);
EXPECT_EQ(0x256u, event.len);
}
TEST(EuDebugInterfaceUpstreamTest, givenValidDrmVmBindWhenConvertingToInterfaceTypeThenFieldsAreCorrect) {
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_vm_bind drmVmBind = {};
drmVmBind.client_handle = 0x32;
drmVmBind.vm_handle = 0x64;
drmVmBind.flags = 0x0F;
drmVmBind.num_binds = 0x128;
auto event = euDebugInterface.toEuDebugEventVmBind(&drmVmBind);
EXPECT_EQ(0x32u, event.clientHandle);
EXPECT_EQ(0x64u, event.vmHandle);
EXPECT_EQ(0x0Fu, event.flags);
EXPECT_EQ(0x128u, event.numBinds);
}
TEST(EuDebugInterfaceUpstreamTest, givenValidDrmVmBindOpWhenConvertingToInterfaceTypeThenFieldsAreCorrect) {
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_vm_bind_op drmVmBindOp = {};
drmVmBindOp.vm_bind_ref_seqno = 0x32;
drmVmBindOp.addr = 0x64;
drmVmBindOp.range = 0x128;
drmVmBindOp.num_extensions = 0x0F;
auto event = euDebugInterface.toEuDebugEventVmBindOp(&drmVmBindOp);
EXPECT_EQ(0x32u, event.vmBindRefSeqno);
EXPECT_EQ(0x64u, event.addr);
EXPECT_EQ(0x128u, event.range);
EXPECT_EQ(0x0Fu, event.numExtensions);
}
TEST(EuDebugInterfaceUpstreamTest, givenValidDrmVmBindOpMetadataWhenConvertingToInterfaceTypeThenFieldsAreCorrect) {
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_vm_bind_op_metadata drmVmBindOpMetadata = {};
drmVmBindOpMetadata.vm_bind_op_ref_seqno = 0x32;
drmVmBindOpMetadata.metadata_handle = 0x64;
drmVmBindOpMetadata.metadata_cookie = 0x128;
auto event = euDebugInterface.toEuDebugEventVmBindOpMetadata(&drmVmBindOpMetadata);
EXPECT_EQ(0x32u, event.vmBindOpRefSeqno);
EXPECT_EQ(0x64u, event.metadataHandle);
EXPECT_EQ(0x128u, event.metadataCookie);
}
TEST(EuDebugInterfaceUpstreamTest, givenValidDrmVmBindUfenceWhenConvertingToInterfaceTypeThenFieldsAreCorrect) {
EuDebugInterfaceUpstream euDebugInterface{};
@@ -210,7 +146,6 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmPageFaultWhenConvertingToInterfa
EuDebugInterfaceUpstream euDebugInterface{};
drm_xe_eudebug_event_pagefault *drmPageFault = (drm_xe_eudebug_event_pagefault *)malloc(sizeof(drm_xe_eudebug_event_pagefault) + 4 * sizeof(uint8_t));
drmPageFault->client_handle = 0x32;
drmPageFault->exec_queue_handle = 0x64;
drmPageFault->flags = 0x0F;
drmPageFault->lrc_handle = 0x4096;
@@ -222,7 +157,6 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmPageFaultWhenConvertingToInterfa
drmPageFault->bitmask[3] = 0x4;
auto event = euDebugInterface.toEuDebugEventPageFault(drmPageFault);
EXPECT_EQ(0x32u, event->clientHandle);
EXPECT_EQ(0x64u, event->execQueueHandle);
EXPECT_EQ(0x0Fu, event->flags);
EXPECT_EQ(0x4096u, event->lrcHandle);
@@ -270,14 +204,12 @@ TEST(EuDebugInterfaceUpstreamTest, givenValidDrmConnectwhenConvertingToInterface
drm_xe_eudebug_connect drmConnect = {};
drmConnect.extensions = 1;
drmConnect.pid = 2;
drmConnect.flags = 3;
drmConnect.version = 4;
auto connect = euDebugInterface.toEuDebugConnect(&drmConnect);
EXPECT_EQ(1u, connect.extensions);
EXPECT_EQ(2u, connect.pid);
EXPECT_EQ(3u, connect.flags);
EXPECT_EQ(4u, connect.version);
}
@@ -330,7 +262,6 @@ TEST(EuDebugInterfaceUpstreamTest, givenInterfaceVmOpenWhenConvertingToDrmVmOpen
EuDebugVmOpen vmOpen = {};
vmOpen.extensions = 1;
vmOpen.clientHandle = 2;
vmOpen.vmHandle = 3;
vmOpen.flags = 4;
vmOpen.timeoutNs = 5;
@@ -339,7 +270,6 @@ TEST(EuDebugInterfaceUpstreamTest, givenInterfaceVmOpenWhenConvertingToDrmVmOpen
auto drmVmOpen = static_cast<drm_xe_eudebug_vm_open *>(wrappedDrmVmOpen.get());
EXPECT_EQ(1u, drmVmOpen->extensions);
EXPECT_EQ(2u, drmVmOpen->client_handle);
EXPECT_EQ(3u, drmVmOpen->vm_handle);
EXPECT_EQ(4u, drmVmOpen->flags);
EXPECT_EQ(5u, drmVmOpen->timeout_ns);


@@ -1,3 +1,3 @@
git_url: https://gitlab.freedesktop.org/miku/kernel/-/tree/eudebug-dev
git_revision: 446269b9ea8d6d08a35437b0b4bc85f64e070ebd
git_url: https://gitlab.freedesktop.org/miku/kernel/-/tree/eudebug-v5
git_revision: 9d410b9d29c8ca09a621ca988260bd30999b6915


@@ -591,34 +591,65 @@
int drm_dd_minor;
};
/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
/**
* struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl.
* @handle: Handle of the object to be closed.
* @pad: Padding.
*
* Releases the handle to an mm object.
*/
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
/* DRM_IOCTL_GEM_FLINK ioctl argument type */
/**
* struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl.
* @handle: Handle for the object being named.
* @name: Returned global name.
*
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
/** Returned global name */
__u32 name;
};
/* DRM_IOCTL_GEM_OPEN ioctl argument type */
/**
* struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl.
* @name: Name of object being opened.
* @handle: Returned handle for the object.
* @size: Returned size of the object
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
/** Returned handle for the object */
__u32 handle;
/** Returned size of the object */
__u64 size;
};
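A minimal user-space sketch of the flink/open naming flow described above; fd, fd2, and local_handle are placeholder values:

/* export: create a global name for a local handle */
struct drm_gem_flink flink = { .handle = local_handle };
ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);

/* import (typically in another process on the same device): name -> handle */
struct drm_gem_open open_args = { .name = flink.name };
ioctl(fd2, DRM_IOCTL_GEM_OPEN, &open_args);
/* open_args.handle and open_args.size are now valid; the handle holds a reference */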
/**
* struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl.
* @handle: The handle of a gem object.
* @new_handle: An available gem handle.
*
* This ioctl changes the handle of a GEM object to the specified one.
* The new handle must be unused. On success the old handle is closed
* and all further IOCTLs should refer to the new handle only.
* Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle.
*/
struct drm_gem_change_handle {
__u32 handle;
__u32 new_handle;
};
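A minimal sketch of the new ioctl, assuming old_handle is a live GEM handle and new_handle is an unused handle value, per the rules above:

struct drm_gem_change_handle args = {
	.handle = old_handle,     /* existing handle being moved */
	.new_handle = new_handle, /* must be an unused handle value */
};
ioctl(fd, DRM_IOCTL_GEM_CHANGE_HANDLE, &args);
/* on success, old_handle is closed and only new_handle is valid */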
/**
* DRM_CAP_DUMB_BUFFER
*
@@ -899,13 +930,17 @@
};
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
__u64 point;
};
struct drm_syncobj_transfer {
@@ -1299,6 +1334,14 @@
*/
#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name)
/**
* DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle
*
* Some applications (notably CRIU) need objects to have specific gem handles.
* This ioctl changes the object at one gem handle to use a new gem handle.
*/
#define DRM_IOCTL_GEM_CHANGE_HANDLE DRM_IOWR(0xD2, struct drm_gem_change_handle)
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@@ -1423,4 +1466,3 @@
#endif
#endif


@@ -962,6 +962,14 @@
* Request that the kernel sends back a vblank event (see
* struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the
* page-flip is done.
*
* When used with atomic uAPI, one event will be delivered per CRTC included in
* the atomic commit. A CRTC is included in an atomic commit if one of its
* properties is set, or if a property is set on a connector or plane linked
* via the CRTC_ID property to the CRTC. At least one CRTC must be included,
* and all pulled in CRTCs must be either previously or newly powered on (in
* other words, a powered off CRTC which stays off cannot be included in the
* atomic commit).
*/
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
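A hedged sketch of the per-CRTC event delivery described above, using the libdrm atomic helpers rather than raw ioctls; req, user_data, and the on_flip callback are placeholders:

/* request one DRM_EVENT_FLIP_COMPLETE per CRTC included in the commit */
drmModeAtomicCommit(fd, req, DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK, user_data);

/* page_flip_handler2 receives the crtc_id, so each CRTC's event can be told apart */
drmEventContext evctx = {
	.version = DRM_EVENT_CONTEXT_VERSION,
	.page_flip_handler2 = on_flip, /* void on_flip(int fd, unsigned seq, unsigned sec, unsigned usec, unsigned crtc_id, void *data) */
};
drmHandleEvent(fd, &evctx);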
/**
@@ -1058,7 +1066,7 @@
* struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
* @height: buffer height in pixels
* @width: buffer width in pixels
* @bpp: bits per pixel
* @bpp: color mode
* @flags: must be zero
* @handle: buffer object handle
* @pitch: number of bytes between two consecutive lines
@@ -1066,6 +1074,54 @@
*
* User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
* the kernel fills @handle, @pitch and @size.
*
* The value of @bpp is a color-mode number describing a specific format
* or a variant thereof. The value often corresponds to the number of bits
* per pixel for most modes, although there are exceptions. Each color mode
* maps to a DRM format plus a number of modes with similar pixel layout.
* Framebuffer layout is always linear.
*
* Support for all modes and formats is optional. Even if dumb-buffer
* creation with a certain color mode succeeds, it is not guaranteed that
* the DRM driver supports any of the related formats. Most drivers support
* a color mode of 32 with a format of DRM_FORMAT_XRGB8888 on their primary
* plane.
*
* +------------+------------------------+------------------------+
* | Color mode | Framebuffer format | Compatible formats |
* +============+========================+========================+
* | 32 | * DRM_FORMAT_XRGB8888 | * DRM_FORMAT_BGRX8888 |
* | | | * DRM_FORMAT_RGBX8888 |
* | | | * DRM_FORMAT_XBGR8888 |
* +------------+------------------------+------------------------+
* | 24 | * DRM_FORMAT_RGB888 | * DRM_FORMAT_BGR888 |
* +------------+------------------------+------------------------+
* | 16 | * DRM_FORMAT_RGB565 | * DRM_FORMAT_BGR565 |
* +------------+------------------------+------------------------+
* | 15 | * DRM_FORMAT_XRGB1555 | * DRM_FORMAT_BGRX1555 |
* | | | * DRM_FORMAT_RGBX1555 |
* | | | * DRM_FORMAT_XBGR1555 |
* +------------+------------------------+------------------------+
* | 8 | * DRM_FORMAT_C8 | * DRM_FORMAT_D8 |
* | | | * DRM_FORMAT_R8 |
* +------------+------------------------+------------------------+
* | 4 | * DRM_FORMAT_C4 | * DRM_FORMAT_D4 |
* | | | * DRM_FORMAT_R4 |
* +------------+------------------------+------------------------+
* | 2 | * DRM_FORMAT_C2 | * DRM_FORMAT_D2 |
* | | | * DRM_FORMAT_R2 |
* +------------+------------------------+------------------------+
* | 1 | * DRM_FORMAT_C1 | * DRM_FORMAT_D1 |
* | | | * DRM_FORMAT_R1 |
* +------------+------------------------+------------------------+
*
* Color modes of 10, 12, 15, 30 and 64 are only supported for use by
* legacy user space. Please don't use them in new code. Other modes
* are not supported.
*
* Do not attempt to allocate anything but linear framebuffer memory
* with single-plane RGB data. Allocation of other framebuffer
* layouts requires dedicated ioctls in the respective DRM driver.
*/
struct drm_mode_create_dumb {
__u32 height;
@@ -1360,4 +1416,3 @@
#endif
#endif
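A minimal sketch of the create-dumb contract documented above (userspace fills @height, @width, @bpp and @flags; the kernel fills @handle, @pitch and @size), assuming a placeholder DRM fd:

struct drm_mode_create_dumb create = {
	.width  = 1920,
	.height = 1080,
	.bpp    = 32, /* color mode 32 -> DRM_FORMAT_XRGB8888 on most drivers */
};
ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
/* the kernel filled create.handle, create.pitch and create.size */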


@@ -6,6 +6,8 @@
#ifndef _XE_DRM_H_
#define _XE_DRM_H_
#include <linux/limits.h>
#include "drm.h"
#if defined(__cplusplus)
@@ -81,6 +83,8 @@
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
* - &DRM_IOCTL_XE_MADVISE
* - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*/
/*
@@ -102,9 +106,10 @@
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
#define DRM_XE_EUDEBUG_CONNECT 0x0c
#define DRM_XE_DEBUG_METADATA_CREATE 0x0d
#define DRM_XE_DEBUG_METADATA_DESTROY 0x0e
#define DRM_XE_MADVISE 0x0c
#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
#define DRM_XE_EUDEBUG_CONNECT 0x0e
/* Must be kept compact -- no holes */
#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
@@ -119,9 +124,9 @@
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
#define DRM_IOCTL_XE_EUDEBUG_CONNECT DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EUDEBUG_CONNECT, struct drm_xe_eudebug_connect)
#define DRM_IOCTL_XE_DEBUG_METADATA_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEBUG_METADATA_CREATE, struct drm_xe_debug_metadata_create)
#define DRM_IOCTL_XE_DEBUG_METADATA_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_DEBUG_METADATA_DESTROY, struct drm_xe_debug_metadata_destroy)
/**
* DOC: Xe IOCTL Extensions
@@ -398,6 +403,10 @@
*
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
* has usable VRAM
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
* has low latency hint support
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
* device has CPU address mirroring support
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -414,6 +423,8 @@
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
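A hedged sketch of reading these config values through DRM_IOCTL_XE_DEVICE_QUERY, assuming the usual two-call size/data pattern and the upstream drm_xe_query_config layout (an info[] array indexed by the defines above), neither of which is shown in this diff:

struct drm_xe_device_query query = { .query = DRM_XE_DEVICE_QUERY_CONFIG };
ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);  /* first call fills query.size */

struct drm_xe_query_config *config = malloc(query.size);
query.data = (uintptr_t)config;
ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);  /* second call fills the data */

int has_vram = config->info[DRM_XE_QUERY_CONFIG_FLAGS] & DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;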
@@ -740,6 +751,7 @@
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7
#define DRM_XE_DEVICE_QUERY_OA_UNITS 8
#define DRM_XE_DEVICE_QUERY_PXP_STATUS 9
#define DRM_XE_DEVICE_QUERY_EU_STALL 10
/** @query: The type of data to query */
__u32 query;
@@ -758,7 +770,11 @@
* gem creation
*
* The @flags can be:
* - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
* - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object
* allocation strategy by deferring physical memory allocation
* until the object is either bound to a virtual memory region via
* VM_BIND or accessed by the CPU. As a result, no backing memory is
* reserved at the time of GEM object creation.
* - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
* - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
* possible placement, ensure that the corresponding VRAM allocation
@@ -915,13 +931,17 @@
* struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
*
* The @flags can be:
* - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
* - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
* space of the VM to scratch page. A vm_bind would overwrite the scratch
* page mapping. This flag is mutually exclusive with the
* %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception on xe2 and
* xe3 platforms.
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
* don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
* DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
* used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
* don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
* DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
* together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
* LR VMs can be created in recoverable page-fault mode using
* DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
* If that flag is omitted, the UMD can not rely on the slightly
@@ -965,21 +985,33 @@
__u64 reserved[2];
};
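A minimal sketch combining the flags described above into a long-running, recoverable page-fault VM; the drm_xe_vm_create layout beyond @flags follows the upstream header and is not part of this diff:

struct drm_xe_vm_create vm_create = {
	.flags = DRM_XE_VM_CREATE_FLAG_LR_MODE | DRM_XE_VM_CREATE_FLAG_FAULT_MODE,
};
ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_create);
/* vm_create.vm_id now identifies the new VM */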
struct drm_xe_vm_bind_op_ext_attach_debug {
struct drm_xe_vm_bind_op_ext_debug_data {
/** @base: base user extension */
struct drm_xe_user_extension base;
/** @id: Debug object id from create metadata */
__u64 metadata_id;
/** @addr: Address of the metadata mapping */
__u64 addr;
/** @flags: Flags */
/** @range: Range of the metadata mapping */
__u64 range;
#define DRM_XE_VM_BIND_DEBUG_DATA_FLAG_PSEUDO (1 << 0)
/** @flags: Debug metadata flags */
__u64 flags;
/** @cookie: Cookie */
__u64 cookie;
/** @offset: Offset into the debug data file, MBZ for DEBUG_PSEUDO */
__u32 offset;
/** @reserved: Reserved */
__u64 reserved;
__u32 reserved;
union {
#define DRM_XE_VM_BIND_DEBUG_DATA_PSEUDO_MODULE_AREA 0x1
#define DRM_XE_VM_BIND_DEBUG_DATA_PSEUDO_SBA_AREA 0x2
#define DRM_XE_VM_BIND_DEBUG_DATA_PSEUDO_SIP_AREA 0x3
__u64 pseudopath;
char pathname[PATH_MAX];
};
};
/**
@@ -991,6 +1023,8 @@
* - %DRM_XE_VM_BIND_OP_MAP_USERPTR
* - %DRM_XE_VM_BIND_OP_UNMAP_ALL
* - %DRM_XE_VM_BIND_OP_PREFETCH
* - %DRM_XE_VM_BIND_OP_ADD_DEBUG_DATA
* - %DRM_XE_VM_BIND_OP_REMOVE_DEBUG_DATA
*
* and the @flags can be:
* - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
@@ -1008,10 +1042,19 @@
* - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
* reject the binding if the encryption key is no longer valid. This
* flag has no effect on BOs that are not marked as using PXP.
* - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
* set, no mappings are created rather the range is reserved for CPU address
* mirroring which will be populated on GPU page faults or prefetches. Only
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag is only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
*
* The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
* - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
* the memory region advised by madvise.
*/
struct drm_xe_vm_bind_op {
#define XE_VM_BIND_OP_EXTENSIONS_ATTACH_DEBUG 0
#define XE_VM_BIND_OP_EXTENSIONS_DEBUG_DATA 0
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@@ -1062,7 +1105,9 @@
* on the @pat_index. For such mappings there is no actual memory being
* mapped (the address in the PTE is invalid), so the various PAT memory
* attributes likely do not apply. Simply leaving as zero is one
* option (still a valid pat_index).
* option (still a valid pat_index). Same applies to
* DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mapping
* there is no actual memory being mapped.
*/
__u16 pat_index;
@@ -1078,6 +1123,14 @@
/** @userptr: user pointer to bind on */
__u64 userptr;
/**
* @cpu_addr_mirror_offset: Offset from GPU @addr to create
* CPU address mirror mappings. MBZ with current level of
* support (e.g. 1 to 1 mapping between GPU and CPU mappings
* only supported).
*/
__s64 cpu_addr_mirror_offset;
};
/**
@@ -1093,6 +1146,8 @@
#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
#define DRM_XE_VM_BIND_OP_ADD_DEBUG_DATA 0x5
#define DRM_XE_VM_BIND_OP_REMOVE_DEBUG_DATA 0x6
/** @op: Bind operation to perform */
__u32 op;
@@ -1101,9 +1156,11 @@
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
/** @flags: Bind flags */
__u32 flags;
#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
@@ -1206,6 +1263,11 @@
* there is no need to explicitly set that. When a queue of type
* %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
* (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if it isn't already running.
* The user is expected to query the PXP status via the query ioctl (see
* %DRM_XE_DEVICE_QUERY_PXP_STATUS) and to wait for PXP to be ready before
* attempting to create a queue with this property. When a queue is created
* before PXP is ready, the ioctl will return -EBUSY if init is still in
* progress or -EIO if init failed.
* Given that going into a power-saving state kills PXP HWDRM sessions,
* runtime PM will be blocked while queues of this type are alive.
* All PXP queues will be killed if a PXP invalidation event occurs.
@@ -1228,6 +1290,21 @@
* };
* ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
*
* Allows users to provide a hint to the kernel for cases demanding a low
* latency profile. Please note it will have an impact on power consumption.
* Users can indicate the low latency hint with a flag while creating an
* exec queue as mentioned below,
*
* struct drm_xe_exec_queue_create exec_queue_create = {
* .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
* .extensions = 0,
* .vm_id = vm,
* .num_bb_per_exec = 1,
* .num_eng_per_bb = 1,
* .instances = to_user_pointer(&instance),
* };
* ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
*
*/
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
@@ -1248,7 +1325,8 @@
/** @vm_id: VM to use for this exec queue */
__u32 vm_id;
/** @flags: MBZ */
#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
/** @flags: flags to use for this exec queue */
__u32 flags;
/** @exec_queue_id: Returned exec queue ID */
@@ -1371,7 +1449,7 @@
/**
* @timeline_value: Input for the timeline sync object. Needs to be
* different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
* different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
@@ -1522,6 +1600,8 @@
enum drm_xe_observation_type {
/** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
DRM_XE_OBSERVATION_TYPE_OA,
/** @DRM_XE_OBSERVATION_TYPE_EU_STALL: EU stall sampling observation stream type */
DRM_XE_OBSERVATION_TYPE_EU_STALL,
};
/**
@@ -1592,6 +1672,9 @@
/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
DRM_XE_OA_UNIT_TYPE_OAM,
/** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
DRM_XE_OA_UNIT_TYPE_OAM_SAG,
};
/**
@@ -1613,6 +1696,7 @@
#define DRM_XE_OA_CAPS_SYNCS (1 << 1)
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
#define DRM_XE_OA_CAPS_OAM (1 << 4)
/** @oa_timestamp_freq: OA timestamp freq */
__u64 oa_timestamp_freq;
@@ -1874,6 +1958,342 @@
/* ID of the protected content session managed by Xe when PXP is active */
#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf
/**
* enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
*
* These properties are passed to the driver at open as a chain of
* @drm_xe_ext_set_property structures with @property set to these
* properties' enums and @value set to the corresponding values of these
* properties. @drm_xe_user_extension base.name should be set to
* @DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY.
*
* With the file descriptor obtained from open, user space must enable
* the EU stall stream fd with @DRM_XE_OBSERVATION_IOCTL_ENABLE before
* calling read(). EIO errno from read() indicates HW dropped data
* due to full buffer.
*/
enum drm_xe_eu_stall_property_id {
#define DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY 0
/**
* @DRM_XE_EU_STALL_PROP_GT_ID: @gt_id of the GT on which
* EU stall data will be captured.
*/
DRM_XE_EU_STALL_PROP_GT_ID = 1,
/**
* @DRM_XE_EU_STALL_PROP_SAMPLE_RATE: Sampling rate in
* GPU cycles from @sampling_rates in struct @drm_xe_query_eu_stall
*/
DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
/**
* @DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS: Minimum number of
* EU stall data reports to be present in the kernel buffer
* before unblocking a blocked poll or read.
*/
DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
};
/**
* struct drm_xe_query_eu_stall - Information about EU stall sampling.
*
* If a query is made with a struct @drm_xe_device_query where .query
* is equal to @DRM_XE_DEVICE_QUERY_EU_STALL, then the reply uses
* struct @drm_xe_query_eu_stall in .data.
*/
struct drm_xe_query_eu_stall {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @capabilities: EU stall capabilities bit-mask */
__u64 capabilities;
#define DRM_XE_EU_STALL_CAPS_BASE (1 << 0)
/** @record_size: size of each EU stall data record */
__u64 record_size;
/** @per_xecore_buf_size: internal per XeCore buffer size */
__u64 per_xecore_buf_size;
/** @reserved: Reserved */
__u64 reserved[5];
/** @num_sampling_rates: Number of sampling rates in @sampling_rates array */
__u64 num_sampling_rates;
/**
* @sampling_rates: Flexible array of sampling rates
* sorted in the fastest to slowest order.
* Sampling rates are specified in GPU clock cycles.
*/
__u64 sampling_rates[];
};
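A hedged end-to-end sketch of the open/enable/read flow described in the kernel-doc above; struct drm_xe_observation_param, DRM_XE_OBSERVATION_OP_STREAM_OPEN, and the eu_stall_query/gt_id/buf placeholders follow the upstream observation uAPI, which is not part of this diff:

/* chain the input properties: GT id -> sampling rate */
struct drm_xe_ext_set_property rate = {
	.base = { .name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY },
	.property = DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
	.value = eu_stall_query->sampling_rates[0], /* fastest rate from the query */
};
struct drm_xe_ext_set_property gt = {
	.base = { .next_extension = (uintptr_t)&rate,
		  .name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY },
	.property = DRM_XE_EU_STALL_PROP_GT_ID,
	.value = gt_id,
};
struct drm_xe_observation_param param = {
	.observation_type = DRM_XE_OBSERVATION_TYPE_EU_STALL,
	.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
	.param = (uintptr_t)&gt,
};
int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &param);

/* the stream must be enabled before read(); EIO means HW dropped data */
ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_ENABLE, 0);
read(stream_fd, buf, buf_size);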
/**
* struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
*
* This structure is used to set memory attributes for a virtual address range
* in a VM. The type of attribute is specified by @type, and the corresponding
* union member is used to provide additional parameters for @type.
*
* Supported attribute types:
* - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
* - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
* - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
*
* Example:
*
* .. code-block:: C
*
* struct drm_xe_madvise madvise = {
* .vm_id = vm_id,
* .start = 0x100000,
* .range = 0x2000,
* .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
* .atomic_val = DRM_XE_ATOMIC_DEVICE,
* };
*
* ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
*
*/
struct drm_xe_madvise {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @start: start of the virtual address range */
__u64 start;
/** @range: size of the virtual address range */
__u64 range;
/** @vm_id: vm_id of the virtual range */
__u32 vm_id;
#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
#define DRM_XE_MEM_RANGE_ATTR_PAT 2
/** @type: type of attribute */
__u32 type;
union {
/**
* @preferred_mem_loc: preferred memory location
*
* Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
*
* Supported values for @preferred_mem_loc.devmem_fd:
* - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set vram of fault tile as preferred loc
* - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set smem as preferred loc
*
* Supported values for @preferred_mem_loc.migration_policy:
* - DRM_XE_MIGRATE_ALL_PAGES
* - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
*/
struct {
#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
/** @preferred_mem_loc.devmem_fd: fd for preferred loc */
__u32 devmem_fd;
#define DRM_XE_MIGRATE_ALL_PAGES 0
#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
/** @preferred_mem_loc.migration_policy: Page migration policy */
__u16 migration_policy;
/** @preferred_mem_loc.pad : MBZ */
__u16 pad;
/** @preferred_mem_loc.reserved : Reserved */
__u64 reserved;
} preferred_mem_loc;
/**
* @atomic: Atomic access policy
*
* Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
*
* Supported values for @atomic.val:
 * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour.
 *   Supports both GPU and CPU atomic operations for the system allocator.
 *   Supports GPU atomic operations for the normal (BO) allocator.
 * - DRM_XE_ATOMIC_DEVICE: Supports GPU atomic operations.
 * - DRM_XE_ATOMIC_GLOBAL: Supports both GPU and CPU atomic operations.
 * - DRM_XE_ATOMIC_CPU: Supports CPU atomic operations only; no GPU atomics.
*/
struct {
#define DRM_XE_ATOMIC_UNDEFINED 0
#define DRM_XE_ATOMIC_DEVICE 1
#define DRM_XE_ATOMIC_GLOBAL 2
#define DRM_XE_ATOMIC_CPU 3
/** @atomic.val: value of atomic operation */
__u32 val;
/** @atomic.pad: MBZ */
__u32 pad;
/** @atomic.reserved: Reserved */
__u64 reserved;
} atomic;
/**
* @pat_index: Page attribute table index
*
* Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
*/
struct {
/** @pat_index.val: PAT index value */
__u32 val;
/** @pat_index.pad: MBZ */
__u32 pad;
/** @pat_index.reserved: Reserved */
__u64 reserved;
} pat_index;
};
/** @reserved: Reserved */
__u64 reserved[2];
};
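/*
 * A second illustrative sketch for &DRM_IOCTL_XE_MADVISE: marking the
 * device memory of the faulting tile as the preferred location of a range.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_madvise madvise = {
 *         .vm_id = vm_id,
 *         .start = 0x100000,
 *         .range = 0x2000,
 *         .type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
 *         .preferred_mem_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
 *         .preferred_mem_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
 *     };
 *
 *     ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
 */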
/**
 * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
 *
 * This structure is provided by userspace and filled by the KMD in response
 * to the DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes the
 * memory attributes of memory ranges within a user-specified address range
 * in a VM.
 *
 * The structure includes information such as the atomic access policy,
 * page attribute table (PAT) index, and preferred memory location.
 * Userspace allocates an array of these structures and passes a pointer to
 * the ioctl to retrieve attributes for each memory range.
 *
 * @extensions: Pointer to the first extension struct, if any
 * @start: Start address of the memory range
 * @end: End address of the memory range
*
*/
struct drm_xe_mem_range_attr {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @start: start of the memory range */
__u64 start;
/** @end: end of the memory range */
__u64 end;
/** @preferred_mem_loc: preferred memory location */
struct {
/** @preferred_mem_loc.devmem_fd: fd for preferred loc */
__u32 devmem_fd;
/** @preferred_mem_loc.migration_policy: Page migration policy */
__u32 migration_policy;
} preferred_mem_loc;
/** @atomic: Atomic access policy */
struct {
/** @atomic.val: atomic attribute */
__u32 val;
/** @atomic.reserved: Reserved */
__u32 reserved;
} atomic;
/** @pat_index: Page attribute table index */
struct {
/** @pat_index.val: PAT index */
__u32 val;
/** @pat_index.reserved: Reserved */
__u32 reserved;
} pat_index;
/** @reserved: Reserved */
__u64 reserved[2];
};
/**
 * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*
* This structure is used to query memory attributes of memory regions
* within a user specified address range in a VM. It provides detailed
* information about each memory range, including atomic access policy,
* page attribute table (PAT) index, and preferred memory location.
*
 * Userspace first calls the ioctl with @num_mem_ranges = 0,
 * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
 * the number of memory ranges and the size of each memory range attribute.
 * It then allocates a buffer of that size and calls the ioctl again to fill
 * the buffer with memory range attributes.
 *
 * If the second call fails with -ENOSPC, the memory ranges changed between
 * the two calls; retry the ioctl with @num_mem_ranges = 0,
 * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL, followed by
 * the second ioctl call.
*
* Example:
*
* .. code-block:: C
*
* struct drm_xe_vm_query_mem_range_attr query = {
* .vm_id = vm_id,
* .start = 0x100000,
* .range = 0x2000,
* };
*
* // First ioctl call to get num of mem regions and sizeof each attribute
* ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
*
* // Allocate buffer for the memory region attributes
 * // use a byte pointer so the per-entry stride arithmetic below is valid C
 * unsigned char *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
 * void *ptr_start = ptr;
*
* query.vector_of_mem_attr = (uintptr_t)ptr;
*
* // Second ioctl call to actually fill the memory attributes
* ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
*
* // Iterate over the returned memory region attributes
* for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
* struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
*
* // Do something with attr
*
* // Move pointer by one entry
* ptr += query.sizeof_mem_range_attr;
* }
*
* free(ptr_start);
*/
struct drm_xe_vm_query_mem_range_attr {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @vm_id: vm_id of the virtual range */
__u32 vm_id;
/** @num_mem_ranges: number of mem_ranges in range */
__u32 num_mem_ranges;
/** @start: start of the virtual address range */
__u64 start;
/** @range: size of the virtual address range */
__u64 range;
/** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
__u64 sizeof_mem_range_attr;
/** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
__u64 vector_of_mem_attr;
/** @reserved: Reserved */
__u64 reserved[2];
};
/*
* Debugger ABI (ioctl and events) Version History:
* 0 - No debugger available
@@ -1885,60 +2305,12 @@
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
__u64 pid; /* input: Target process ID */
__u64 fd; /* Target drm client fd */
__u32 flags; /* MBZ */
__u32 version; /* output: current ABI (ioctl / events) version */
};
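/*
 * Illustrative sketch (not part of the ABI): attaching the debugger to a
 * running process. The struct and request names (drm_xe_eudebug_connect,
 * DRM_IOCTL_XE_EUDEBUG_CONNECT) are assumed here, since the hunk above
 * elides them.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_eudebug_connect open = {
 *         .pid = target_pid, // process to be debugged
 *     };
 *
 *     int debug_fd = ioctl(fd, DRM_IOCTL_XE_EUDEBUG_CONNECT, &open);
 *     // on success, open.version holds the current ABI version
 */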
/*
 * struct drm_xe_debug_metadata_create - Create debug metadata
 *
 * Add a region of user memory to be marked as debug metadata.
 * When the debugger attaches, the metadata regions will be delivered
 * to the debugger, which can then map these regions to help decode
 * the program state.
 *
 * Returns a handle to the created metadata entry.
 */
struct drm_xe_debug_metadata_create {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
#define DRM_XE_DEBUG_METADATA_ELF_BINARY 0
#define DRM_XE_DEBUG_METADATA_PROGRAM_MODULE 1
#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_MODULE_AREA 2
#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SBA_AREA 3
#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA 4
#define WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_NUM (1 + \
WORK_IN_PROGRESS_DRM_XE_DEBUG_METADATA_SIP_AREA)
/** @type: Type of metadata */
__u64 type;
/** @user_addr: pointer to start of the metadata */
__u64 user_addr;
/** @len: length, in bytes, of the metadata */
__u64 len;
/** @metadata_id: created metadata handle (out) */
__u32 metadata_id;
};
/**
* struct drm_xe_debug_metadata_destroy - Destroy debug metadata
*
* Destroy debug metadata.
*/
struct drm_xe_debug_metadata_destroy {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @metadata_id: metadata handle to destroy */
__u32 metadata_id;
};
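/*
 * Illustrative sketch (not part of the ABI): registering an ELF binary as
 * debug metadata and destroying it again. The request names
 * (DRM_IOCTL_XE_DEBUG_METADATA_CREATE/_DESTROY) are assumed to be defined
 * with the other DRM_IOCTL_XE_* requests.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_debug_metadata_create create = {
 *         .type = DRM_XE_DEBUG_METADATA_ELF_BINARY,
 *         .user_addr = (__u64)(uintptr_t)elf_data,
 *         .len = elf_size,
 *     };
 *
 *     ioctl(fd, DRM_IOCTL_XE_DEBUG_METADATA_CREATE, &create);
 *
 *     struct drm_xe_debug_metadata_destroy destroy = {
 *         .metadata_id = create.metadata_id,
 *     };
 *
 *     ioctl(fd, DRM_IOCTL_XE_DEBUG_METADATA_DESTROY, &destroy);
 */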
#include "xe_drm_eudebug.h"
#if defined(__cplusplus)
@@ -1946,4 +2318,3 @@
#endif
#endif /* _XE_DRM_H_ */

View File

@@ -16,28 +16,47 @@
* This ioctl is available in debug version 1.
*/
#define DRM_XE_EUDEBUG_IOCTL_READ_EVENT _IO('j', 0x0)
#define DRM_XE_EUDEBUG_IOCTL_EU_CONTROL _IOWR('j', 0x2, struct drm_xe_eudebug_eu_control)
#define DRM_XE_EUDEBUG_IOCTL_ACK_EVENT _IOW('j', 0x4, struct drm_xe_eudebug_ack_event)
#define DRM_XE_EUDEBUG_IOCTL_VM_OPEN _IOW('j', 0x1, struct drm_xe_eudebug_vm_open)
#define DRM_XE_EUDEBUG_IOCTL_READ_METADATA _IOWR('j', 0x3, struct drm_xe_eudebug_read_metadata)
#define DRM_XE_EUDEBUG_IOCTL_ACK_EVENT _IOW('j', 0x1, struct drm_xe_eudebug_ack_event)
#define DRM_XE_EUDEBUG_IOCTL_VM_OPEN _IOW('j', 0x2, struct drm_xe_eudebug_vm_open)
#define DRM_XE_EUDEBUG_IOCTL_EU_CONTROL _IOWR('j', 0x3, struct drm_xe_eudebug_eu_control)
/* XXX: Document events to match their internal counterparts when moved to xe_drm.h */
/**
* struct drm_xe_eudebug_event - Base type of event delivered by xe_eudebug.
 * @len: Length of the event, including the base.
* @type: Event type
* @flags: Flags for the event
* @seqno: Sequence number
* @reserved: MBZ
*
 * Base event for the xe_eudebug interface. To initiate a read, userspace
 * sets @type to DRM_XE_EUDEBUG_EVENT_READ and @len to the maximum size it
 * has allocated. On successful return the delivered event length is stored
 * in @len; if the event does not fit, -EMSGSIZE is returned. @seqno can be
 * used to reconstruct a timeline, as event delivery order does not
 * guarantee event creation order.
 *
 * @flags indicates whether the resource was created, destroyed, or had its
 * state changed.
 *
 * If DRM_XE_EUDEBUG_EVENT_NEED_ACK is set, xe_eudebug will hold the
 * resource in question until userspace acks it with the ack ioctl,
 * passing the seqno of the event.
*
*/
struct drm_xe_eudebug_event {
__u32 len;
__u16 type;
#define DRM_XE_EUDEBUG_EVENT_NONE 0
#define DRM_XE_EUDEBUG_EVENT_READ 1
#define DRM_XE_EUDEBUG_EVENT_OPEN 2
#define DRM_XE_EUDEBUG_EVENT_VM 3
#define DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE 4
#define DRM_XE_EUDEBUG_EVENT_EU_ATTENTION 5
#define DRM_XE_EUDEBUG_EVENT_VM_BIND 6
#define DRM_XE_EUDEBUG_EVENT_VM_BIND_OP 7
#define DRM_XE_EUDEBUG_EVENT_VM_BIND_UFENCE 8
#define DRM_XE_EUDEBUG_EVENT_METADATA 9
#define DRM_XE_EUDEBUG_EVENT_VM_BIND_OP_METADATA 10
#define DRM_XE_EUDEBUG_EVENT_PAGEFAULT 11
#define DRM_XE_EUDEBUG_EVENT_VM 2
#define DRM_XE_EUDEBUG_EVENT_EXEC_QUEUE 3
#define DRM_XE_EUDEBUG_EVENT_VM_BIND 4
#define DRM_XE_EUDEBUG_EVENT_VM_BIND_OP_DEBUG_DATA 5
#define DRM_XE_EUDEBUG_EVENT_VM_BIND_UFENCE 6
#define DRM_XE_EUDEBUG_EVENT_EU_ATTENTION 7
#define DRM_XE_EUDEBUG_EVENT_PAGEFAULT 8
__u16 flags;
#define DRM_XE_EUDEBUG_EVENT_CREATE (1 << 0)
@@ -49,23 +68,21 @@
__u64 reserved;
};
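/*
 * Illustrative sketch (not part of the ABI): reading one event from the
 * debugger fd as described above. max_size is an assumed caller-chosen
 * upper bound for the largest expected event.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_eudebug_event *event = calloc(1, max_size);
 *
 *     event->type = DRM_XE_EUDEBUG_EVENT_READ;
 *     event->len = max_size;
 *
 *     if (ioctl(debug_fd, DRM_XE_EUDEBUG_IOCTL_READ_EVENT, event) == 0) {
 *         // event->type, event->flags and event->seqno are now valid,
 *         // and event->len holds the delivered length
 *     }
 */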
struct drm_xe_eudebug_event_client {
struct drm_xe_eudebug_event base;
__u64 client_handle; /* This is unique per debug connection */
};
/**
* struct drm_xe_eudebug_event_vm - VM resource event
* @vm_handle: Handle of a vm that was created/destroyed
*
* Resource creation/destruction event for a VM.
*/
struct drm_xe_eudebug_event_vm {
struct drm_xe_eudebug_event base;
__u64 client_handle;
__u64 vm_handle;
};
struct drm_xe_eudebug_event_exec_queue {
struct drm_xe_eudebug_event base;
__u64 client_handle;
__u64 vm_handle;
__u64 exec_queue_handle;
__u32 engine_class;
@@ -73,57 +90,29 @@
__u64 lrc_handle[];
};
struct drm_xe_eudebug_event_eu_attention {
struct drm_xe_eudebug_event base;
__u64 client_handle;
__u64 exec_queue_handle;
__u64 lrc_handle;
__u32 flags;
__u32 bitmask_size;
__u8 bitmask[];
};
struct drm_xe_eudebug_eu_control {
__u64 client_handle;
#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_INTERRUPT_ALL 0
#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_STOPPED 1
#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_RESUME 2
__u32 cmd;
__u32 flags;
__u64 seqno;
__u64 exec_queue_handle;
__u64 lrc_handle;
__u32 reserved;
__u32 bitmask_size;
__u64 bitmask_ptr;
};
/*
* When client (debuggee) does vm_bind_ioctl() following event
* When the client (debuggee) calls the vm_bind_ioctl with the
* DRM_XE_VM_BIND_OP_[ADD|REMOVE]_DEBUG_DATA operation, the following event
* sequence will be created (for the debugger):
*
 * ┌───────────────────────┐
 * │ EVENT_VM_BIND         ├────────┬─┐
 * └───────────────────────┘        │ │
 * ┌───────────────────────┐        │ │
 * │ EVENT_VM_BIND_OP #1   ├────────┘ │
 * └───────────────────────┘          │
 *             ...                    │
 * ┌───────────────────────┐          │
 * │ EVENT_VM_BIND_OP #n   ├──────────┘
 * └───────────────────────┘
 *
 * ┌───────────────────────┐
 * │ EVENT_UFENCE          ├───────
 * └───────────────────────┘
 *
 * ┌───────────────────────┐
 * │ EVENT_VM_BIND         ├──────────────┬─┬┄┄┐
 * └───────────────────────┘              │ │  ┊
 * ┌──────────────────────────────────┐   │ │  ┊
 * │ EVENT_VM_BIND_OP_DEBUG_DATA #1   ├───┘ │  ┊
 * └──────────────────────────────────┘     │  ┊
 *                  ...                     │  ┊
 * ┌──────────────────────────────────┐     │  ┊
 * │ EVENT_VM_BIND_OP_DEBUG_DATA #n   ├─────┘  ┊
 * └──────────────────────────────────┘        ┊
 *                                             ┊
 * ┌┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┐       ┊
 * ┊ EVENT_UFENCE                      ├┄┄┄┄┄┄┄┘
 * └┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┘
*
 * All the events below VM_BIND will reference the VM_BIND they
 * are associated with, via the .vm_bind_ref_seqno field.
* event_ufence will only be included if the client did
* EVENT_UFENCE will only be included if the client did
* attach sync of type UFENCE into its vm_bind_ioctl().
*
* When EVENT_UFENCE is sent by the driver, all the OPs of
@@ -146,7 +135,6 @@
struct drm_xe_eudebug_event_vm_bind {
struct drm_xe_eudebug_event base;
__u64 client_handle;
__u64 vm_handle;
__u32 flags;
@@ -155,13 +143,20 @@
__u32 num_binds;
};
struct drm_xe_eudebug_event_vm_bind_op {
struct drm_xe_eudebug_event_vm_bind_op_debug_data {
struct drm_xe_eudebug_event base;
__u64 vm_bind_ref_seqno; /* *_event_vm_bind.base.seqno */
__u64 num_extensions;
__u64 addr; /* XXX: Zero for unmap all? */
__u64 range; /* XXX: Zero for unmap all? */
__u64 addr;
__u64 range;
__u64 flags;
__u32 offset;
__u32 reserved;
union {
__u64 pseudopath;
char pathname[PATH_MAX];
};
};
struct drm_xe_eudebug_event_vm_bind_ufence {
@@ -179,51 +174,46 @@
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @client_handle: id of client */
__u64 client_handle;
/** @vm_handle: id of vm */
__u64 vm_handle;
/** @flags: flags */
__u64 flags;
#define DRM_XE_EUDEBUG_VM_SYNC_MAX_TIMEOUT_NSECS (10ULL * NSEC_PER_SEC)
/** @timeout_ns: Timeout value in nanoseconds for sync operations (fsync) */
__u64 timeout_ns;
};
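/*
 * Illustrative sketch (not part of the ABI): opening a debuggee VM and
 * reading its memory. That the returned fd supports pread()/pwrite() is
 * an assumption based on how this interface is typically consumed.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_eudebug_vm_open open = {
 *         .client_handle = client_handle, // from the client event
 *         .vm_handle = vm_handle,         // from a VM event
 *         .timeout_ns = DRM_XE_EUDEBUG_VM_SYNC_MAX_TIMEOUT_NSECS,
 *     };
 *
 *     int vm_fd = ioctl(debug_fd, DRM_XE_EUDEBUG_IOCTL_VM_OPEN, &open);
 *
 *     pread(vm_fd, buf, size, gpu_va); // read debuggee memory at gpu_va
 */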
struct drm_xe_eudebug_read_metadata {
__u64 client_handle;
__u64 metadata_handle;
struct drm_xe_eudebug_eu_control {
#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_INTERRUPT_ALL 0
#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_STOPPED 1
#define DRM_XE_EUDEBUG_EU_CONTROL_CMD_RESUME 2
__u32 cmd;
__u32 flags;
__u64 seqno;
__u64 exec_queue_handle;
__u64 lrc_handle;
__u32 reserved;
__u64 ptr;
__u64 size;
__u32 bitmask_size;
__u64 bitmask_ptr;
};
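/*
 * Illustrative sketch (not part of the ABI): interrupting all threads and
 * then collecting the stopped-thread bitmask via EU control. bitmask and
 * bitmask_size are assumed to be sized from a prior EU attention event.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_eudebug_eu_control control = {
 *         .cmd = DRM_XE_EUDEBUG_EU_CONTROL_CMD_INTERRUPT_ALL,
 *         .exec_queue_handle = exec_queue_handle,
 *         .lrc_handle = lrc_handle,
 *     };
 *
 *     ioctl(debug_fd, DRM_XE_EUDEBUG_IOCTL_EU_CONTROL, &control);
 *
 *     control.cmd = DRM_XE_EUDEBUG_EU_CONTROL_CMD_STOPPED;
 *     control.bitmask_size = bitmask_size;
 *     control.bitmask_ptr = (__u64)(uintptr_t)bitmask;
 *     ioctl(debug_fd, DRM_XE_EUDEBUG_IOCTL_EU_CONTROL, &control);
 *     // control.seqno orders this operation against attention events
 */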
struct drm_xe_eudebug_event_metadata {
struct drm_xe_eudebug_event_eu_attention {
struct drm_xe_eudebug_event base;
__u64 client_handle;
__u64 metadata_handle;
/* XXX: Refer to xe_drm.h for fields */
__u64 type;
__u64 len;
};
struct drm_xe_eudebug_event_vm_bind_op_metadata {
struct drm_xe_eudebug_event base;
__u64 vm_bind_op_ref_seqno; /* *_event_vm_bind_op.base.seqno */
__u64 metadata_handle;
__u64 metadata_cookie;
__u64 exec_queue_handle;
__u64 lrc_handle;
__u32 flags;
__u32 bitmask_size;
__u8 bitmask[];
};
struct drm_xe_eudebug_event_pagefault {
struct drm_xe_eudebug_event base;
__u64 client_handle;
__u64 exec_queue_handle;
__u64 lrc_handle;
__u32 flags;
@@ -237,4 +227,3 @@
#endif
#endif /* _XE_DRM_EUDEBUG_H_ */