diff --git a/shared/source/debug_settings/debug_variables_base.inl b/shared/source/debug_settings/debug_variables_base.inl
index 379a2fc8b1..4146934ade 100644
--- a/shared/source/debug_settings/debug_variables_base.inl
+++ b/shared/source/debug_settings/debug_variables_base.inl
@@ -265,6 +265,7 @@ DECLARE_DEBUG_VARIABLE(int32_t, EnableInOrderRelaxedOrderingForEventsChaining, -
 DECLARE_DEBUG_VARIABLE(int32_t, InOrderAtomicSignallingEnabled, -1, "-1: default, 0: disabled, 1: Use atomic GPU operations in increment the counter. Otherwise use non-atomic commands like SDI.")
 DECLARE_DEBUG_VARIABLE(int32_t, InOrderDuplicatedCounterStorageEnabled, -1, "-1: default, 0: disabled, 1: Allocate additional host storage for signalling")
 DECLARE_DEBUG_VARIABLE(int32_t, SetProcessPowerThrottlingState, -1, "-1: default, 0: Disabled, 1: ECO, 2: HIGH. If set, will override process power throttling state on os context init. Windows only.")
+DECLARE_DEBUG_VARIABLE(int32_t, OverrideCpuCaching, -1, "-1: default, 1: DRM_XE_GEM_CPU_CACHING_WB, 2: DRM_XE_GEM_CPU_CACHING_WC")
 
 /*LOGGING FLAGS*/
 DECLARE_DEBUG_VARIABLE(int32_t, PrintDriverDiagnostics, -1, "prints driver diagnostics messages to standard output, value corresponds to hint level")
diff --git a/shared/source/os_interface/linux/drm_neo.cpp b/shared/source/os_interface/linux/drm_neo.cpp
index 53c979a26f..ec8e1eaf25 100644
--- a/shared/source/os_interface/linux/drm_neo.cpp
+++ b/shared/source/os_interface/linux/drm_neo.cpp
@@ -1315,6 +1315,7 @@ int changeBufferObjectBinding(Drm *drm, OsContext *osContext, uint32_t vmHandleI
         UNRECOVERABLE_IF(bo->peekPatIndex() == CommonConstants::unsupportedPatIndex);
         ioctlHelper->fillVmBindExtSetPat(vmBindExtSetPat, bo->peekPatIndex(), castToUint64(extensions.get()));
         vmBind.extensions = castToUint64(vmBindExtSetPat);
+        vmBind.patIndex = bo->peekPatIndex();
     } else {
         vmBind.extensions = castToUint64(extensions.get());
     }
diff --git a/shared/source/os_interface/linux/ioctl_helper.h b/shared/source/os_interface/linux/ioctl_helper.h
index c278eb3351..b4ed9ff9ff 100644
--- a/shared/source/os_interface/linux/ioctl_helper.h
+++ b/shared/source/os_interface/linux/ioctl_helper.h
@@ -61,6 +61,7 @@ struct VmBindParams {
     uint64_t length;
     uint64_t flags;
     uint64_t extensions;
+    uint64_t patIndex;
 };
 
 struct UuidRegisterResult {
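
The three hunks above are the generic plumbing: VmBindParams grows a patIndex field, and changeBufferObjectBinding now stores the buffer object's PAT index on the bind params in addition to chaining the vmBindExtSetPat extension. A condensed, self-contained C++ sketch of that flow follows; the struct and class names are trimmed stand-ins for the NEO types, not the real declarations.

#include <cstdint>

// Trimmed stand-ins for the NEO types touched above (not the real classes).
struct VmBindParamsSketch {
    uint64_t extensions;
    uint64_t patIndex; // new: carried alongside the extension chain
};

struct BufferObjectSketch {
    uint64_t patIndex;
    uint64_t peekPatIndex() const { return patIndex; }
};

// Mirrors the drm_neo.cpp hunk: the PAT index is now stored directly on the
// bind params, so a backend that consumes it as a plain ioctl field (xe's
// drm_xe_vm_bind_op::pat_index) does not have to dig it out of an extension.
void fillBindParams(VmBindParamsSketch &vmBind, const BufferObjectSketch &bo,
                    uint64_t extSetPat) {
    vmBind.extensions = extSetPat;       // i915-style chained extension
    vmBind.patIndex = bo.peekPatIndex(); // xe-style plain field
}
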
diff --git a/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp b/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp
index c0ea0724f8..50acc9242f 100644
--- a/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp
+++ b/shared/source/os_interface/linux/xe/ioctl_helper_xe.cpp
@@ -533,6 +533,15 @@ void IoctlHelperXe::setDefaultEngine() {
     }
 }
 
+uint16_t IoctlHelperXe::getCpuCachingMode() {
+    uint16_t cpuCachingMode = DRM_XE_GEM_CPU_CACHING_WC;
+    if (debugManager.flags.OverrideCpuCaching.get() != -1) {
+        cpuCachingMode = debugManager.flags.OverrideCpuCaching.get();
+    }
+
+    return cpuCachingMode;
+}
+
 int IoctlHelperXe::createGemExt(const MemRegionsVec &memClassInstances, size_t allocSize, uint32_t &handle, uint64_t patIndex, std::optional<uint32_t> vmId, int32_t pairHandle, bool isChunked, uint32_t numOfChunks) {
     struct drm_xe_gem_create create = {};
     uint32_t regionsSize = static_cast<uint32_t>(memClassInstances.size());
@@ -553,6 +562,7 @@ int IoctlHelperXe::createGemExt(const MemRegionsVec &memClassInstances, size_t a
         memoryInstances.set(memoryClassInstance.memoryInstance);
     }
     create.flags = static_cast<uint32_t>(memoryInstances.to_ulong());
+    create.cpu_caching = this->getCpuCachingMode();
     auto ret = IoctlHelper::ioctl(DrmIoctl::gemCreate, &create);
 
     handle = create.handle;
@@ -586,6 +596,7 @@ uint32_t IoctlHelperXe::createGem(uint64_t size, uint32_t memoryBanks) {
         memoryInstances.set(regionClassAndInstance.memoryInstance);
     }
     create.flags = static_cast<uint32_t>(memoryInstances.to_ulong());
+    create.cpu_caching = this->getCpuCachingMode();
     [[maybe_unused]] auto ret = ioctl(DrmIoctl::gemCreate, &create);
     DEBUG_BREAK_IF(ret != 0);
     updateBindInfo(create.handle, 0u, create.size);
@@ -1254,6 +1265,7 @@ int IoctlHelperXe::xeVmBind(const VmBindParams &vmBindParams, bool isBind) {
             bind.bind.addr = gmmHelper->decanonize(vmBindParams.start);
             bind.bind.flags = DRM_XE_VM_BIND_FLAG_ASYNC;
             bind.bind.obj_offset = vmBindParams.offset;
+            bind.bind.pat_index = vmBindParams.patIndex;
 
             if (isBind) {
                 bind.bind.op = DRM_XE_VM_BIND_OP_MAP;
diff --git a/shared/source/os_interface/linux/xe/ioctl_helper_xe.h b/shared/source/os_interface/linux/xe/ioctl_helper_xe.h
index 2208fce345..84dd9079cf 100644
--- a/shared/source/os_interface/linux/xe/ioctl_helper_xe.h
+++ b/shared/source/os_interface/linux/xe/ioctl_helper_xe.h
@@ -118,6 +118,7 @@ class IoctlHelperXe : public IoctlHelper {
     void fillBindInfoForIpcHandle(uint32_t handle, size_t size) override;
     bool getFdFromVmExport(uint32_t vmId, uint32_t flags, int32_t *fd) override;
     bool isImmediateVmBindRequired() const override;
+    uint16_t getCpuCachingMode();
 
   private:
     template <typename... XeLogArgs>
diff --git a/shared/test/common/test_files/igdrcl.config b/shared/test/common/test_files/igdrcl.config
index b5bf15e02c..b9ececd369 100644
--- a/shared/test/common/test_files/igdrcl.config
+++ b/shared/test/common/test_files/igdrcl.config
@@ -569,4 +569,5 @@ EnableDeviceStateVerificationAfterFailedSubmission = -1
 InOrderAtomicSignallingEnabled = -1
 SetProcessPowerThrottlingState = -1
 InOrderDuplicatedCounterStorageEnabled = -1
+OverrideCpuCaching = -1
 # Please don't edit below this line
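
getCpuCachingMode() is the single policy point: write-combined stays the default for every xe buffer object, and the debug variable wins whenever it is set. A minimal standalone model of that precedence, with the debugManager dependency replaced by a plain parameter (hypothetical names; the constants match the uapi hunk further down):

#include <cstdint>
#include <cstdio>

constexpr uint16_t kCpuCachingWb = 1; // DRM_XE_GEM_CPU_CACHING_WB
constexpr uint16_t kCpuCachingWc = 2; // DRM_XE_GEM_CPU_CACHING_WC

uint16_t selectCpuCachingMode(int32_t overrideCpuCaching) {
    // -1 is the "not set" sentinel used by all NEO debug variables; the
    // driver default for xe BOs is write-combined.
    if (overrideCpuCaching != -1) {
        return static_cast<uint16_t>(overrideCpuCaching);
    }
    return kCpuCachingWc;
}

int main() {
    std::printf("%u %u %u\n",
                static_cast<unsigned>(selectCpuCachingMode(-1)),             // 2: WC, default
                static_cast<unsigned>(selectCpuCachingMode(kCpuCachingWb)),  // 1: WB override
                static_cast<unsigned>(selectCpuCachingMode(kCpuCachingWc))); // 2: WC override
}
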
diff --git a/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.cpp b/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.cpp
index 35ee51d2f7..e5c8049ed3 100644
--- a/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.cpp
+++ b/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.cpp
@@ -45,7 +45,7 @@ TEST(IoctlHelperXeTest, whenChangingBufferBindingThenWaitIsNeededAlways) {
 
 TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallingGemCreateExtWithRegionsThenDummyValueIsReturned) {
     auto executionEnvironment = std::make_unique<MockExecutionEnvironment>();
-    DrmMock drm{*executionEnvironment->rootDeviceEnvironments[0]};
+    DrmMockXe drm{*executionEnvironment->rootDeviceEnvironments[0]};
     auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm);
     ASSERT_NE(nullptr, xeIoctlHelper);
 
@@ -62,11 +62,12 @@ TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallingGemCreateExtWithRegionsThen
     EXPECT_TRUE(xeIoctlHelper->bindInfo.empty());
     EXPECT_NE(0, xeIoctlHelper->createGemExt(memRegions, 0u, handle, 0, {}, -1, false, numOfChunks));
     EXPECT_FALSE(xeIoctlHelper->bindInfo.empty());
+    EXPECT_EQ(DRM_XE_GEM_CPU_CACHING_WC, drm.createParamsCpuCaching);
 }
 
 TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallingGemCreateExtWithRegionsAndVmIdThenDummyValueIsReturned) {
     auto executionEnvironment = std::make_unique<MockExecutionEnvironment>();
-    DrmMock drm{*executionEnvironment->rootDeviceEnvironments[0]};
+    DrmMockXe drm{*executionEnvironment->rootDeviceEnvironments[0]};
     auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm);
     ASSERT_NE(nullptr, xeIoctlHelper);
 
@@ -84,6 +85,7 @@ TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallingGemCreateExtWithRegionsAndV
     EXPECT_TRUE(xeIoctlHelper->bindInfo.empty());
     EXPECT_NE(0, xeIoctlHelper->createGemExt(memRegions, 0u, handle, 0, test.vmId, -1, false, numOfChunks));
     EXPECT_FALSE(xeIoctlHelper->bindInfo.empty());
+    EXPECT_EQ(DRM_XE_GEM_CPU_CACHING_WC, drm.createParamsCpuCaching);
 }
 
 TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallGemCreateAndNoLocalMemoryThenProperValuesSet) {
@@ -107,6 +109,7 @@ TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallGemCreateAndNoLocalMemoryThenP
 
     EXPECT_EQ(size, drm.createParamsSize);
     EXPECT_EQ(1u, drm.createParamsFlags);
+    EXPECT_EQ(DRM_XE_GEM_CPU_CACHING_WC, drm.createParamsCpuCaching);
 
     // dummy mock handle
     EXPECT_EQ(handle, drm.createParamsHandle);
@@ -134,6 +137,7 @@ TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallGemCreateWhenMemoryBanksZeroTh
 
     EXPECT_EQ(size, drm.createParamsSize);
     EXPECT_EQ(1u, drm.createParamsFlags);
+    EXPECT_EQ(DRM_XE_GEM_CPU_CACHING_WC, drm.createParamsCpuCaching);
 
     // dummy mock handle
     EXPECT_EQ(handle, drm.createParamsHandle);
@@ -161,6 +165,7 @@ TEST(IoctlHelperXeTest, givenIoctlHelperXeWhenCallGemCreateAndLocalMemoryThenPro
 
     EXPECT_EQ(size, drm.createParamsSize);
     EXPECT_EQ(6u, drm.createParamsFlags);
+    EXPECT_EQ(DRM_XE_GEM_CPU_CACHING_WC, drm.createParamsCpuCaching);
 
     // dummy mock handle
     EXPECT_EQ(handle, drm.createParamsHandle);
@@ -587,6 +592,7 @@ TEST(IoctlHelperXeTest, whenCallingIoctlThenProperValueIsReturned) {
         test.handle = 0;
         test.flags = 1;
         test.size = 123;
+        test.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC;
         ret = mockXeIoctlHelper->ioctl(DrmIoctl::gemCreate, &test);
         EXPECT_EQ(0, ret);
     }
@@ -1653,3 +1659,51 @@ TEST(IoctlHelperXeTest, givenXeIoctlHelperWhenInitializeGetGpuTimeFunctionIsCall
     xeIoctlHelper->initializeGetGpuTimeFunction();
     EXPECT_EQ(xeIoctlHelper->getGpuTime, nullptr);
 }
+
+TEST(IoctlHelperXeTest, givenIoctlHelperXeAndDebugOverrideEnabledWhenGetCpuCachingModeCalledThenOverriddenValueIsReturned) {
+    DebugManagerStateRestore restorer;
+    auto executionEnvironment = std::make_unique<MockExecutionEnvironment>();
+    DrmMockXe drm{*executionEnvironment->rootDeviceEnvironments[0]};
+
+    auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm);
+    drm.memoryInfo.reset(xeIoctlHelper->createMemoryInfo().release());
+    ASSERT_NE(nullptr, xeIoctlHelper);
+
+    debugManager.flags.OverrideCpuCaching.set(DRM_XE_GEM_CPU_CACHING_WB);
+    EXPECT_EQ(xeIoctlHelper->getCpuCachingMode(), DRM_XE_GEM_CPU_CACHING_WB);
+
+    debugManager.flags.OverrideCpuCaching.set(DRM_XE_GEM_CPU_CACHING_WC);
+    EXPECT_EQ(xeIoctlHelper->getCpuCachingMode(), DRM_XE_GEM_CPU_CACHING_WC);
+}
+
+TEST(IoctlHelperXeTest, whenCallingVmBindThenPatIndexIsSet) {
+    DebugManagerStateRestore restorer;
+    auto executionEnvironment = std::make_unique<MockExecutionEnvironment>();
+    DrmMockXe drm{*executionEnvironment->rootDeviceEnvironments[0]};
+    auto xeIoctlHelper = std::make_unique<MockIoctlHelperXe>(drm);
+
+    uint64_t fenceAddress = 0x4321;
+    uint64_t fenceValue = 0x789;
+    uint64_t expectedPatIndex = 0xba;
+
+    BindInfo mockBindInfo{};
+    mockBindInfo.handle = 0x1234;
+    xeIoctlHelper->bindInfo.push_back(mockBindInfo);
+
+    VmBindExtUserFenceT vmBindExtUserFence{};
+
+    xeIoctlHelper->fillVmBindExtUserFence(vmBindExtUserFence, fenceAddress, fenceValue, 0u);
+
+    VmBindParams vmBindParams{};
+    vmBindParams.handle = mockBindInfo.handle;
+    vmBindParams.extensions = castToUint64(&vmBindExtUserFence);
+    vmBindParams.patIndex = expectedPatIndex;
+
+    drm.vmBindInputs.clear();
+    drm.syncInputs.clear();
+    drm.waitUserFenceInputs.clear();
+    ASSERT_EQ(0, xeIoctlHelper->vmBind(vmBindParams));
+    ASSERT_EQ(1u, drm.vmBindInputs.size());
+
+    EXPECT_EQ(drm.vmBindInputs[0].bind.pat_index, expectedPatIndex);
+}
\ No newline at end of file
diff --git a/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.h b/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.h
index 217d05317b..b33899f538 100644
--- a/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.h
+++ b/shared/test/unit_test/os_interface/linux/xe/ioctl_helper_xe_tests.h
@@ -151,7 +151,8 @@ class DrmMockXe : public DrmMockCustom {
             this->createParamsSize = createParams->size;
             this->createParamsFlags = createParams->flags;
             this->createParamsHandle = createParams->handle = testValueGemCreate;
-            if (0 == this->createParamsSize || 0 == this->createParamsFlags) {
+            this->createParamsCpuCaching = createParams->cpu_caching;
+            if (0 == this->createParamsSize || 0 == this->createParamsFlags || 0 == this->createParamsCpuCaching) {
                 return EINVAL;
             }
             ret = 0;
@@ -269,5 +270,6 @@ class DrmMockXe : public DrmMockCustom {
     StackVec syncInputs;
     int waitUserFenceReturn = 0;
     uint32_t createParamsFlags = 0u;
+    uint16_t createParamsCpuCaching = 0u;
     bool ioctlCalled = false;
 };
diff --git a/third_party/uapi/drm/xe_drm.h b/third_party/uapi/drm/xe_drm.h
index 255b360a1c..85790129d1 100644
--- a/third_party/uapi/drm/xe_drm.h
+++ b/third_party/uapi/drm/xe_drm.h
@@ -3,8 +3,8 @@
  * Copyright © 2023 Intel Corporation
  */
 
-#ifndef _XE_DRM_H_
-#define _XE_DRM_H_
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
 
 #include "drm.h"
 
@@ -19,12 +19,25 @@ extern "C" {
 /**
  * DOC: uevent generated by xe on it's pci node.
  *
- * DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
- * fails. The value supplied with the event is always "NEEDS_RESET".
- * Additional information supplied is tile id and gt id of the gt unit for
- * which reset has failed.
+ * DRM_XE_RESET_REQUIRED_UEVENT - Event is generated when device needs reset.
+ * The REASON is provided along with the event for which reset is required.
+ * On the basis of REASONS, additional information might be supplied.
 */
-#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
+#define DRM_XE_RESET_REQUIRED_UEVENT "DEVICE_STATUS=NEEDS_RESET"
+
+/**
+ * DRM_XE_RESET_REQUIRED_UEVENT_REASON_GT - Reason provided to XE_RESET_REQUIRED_UEVENT
+ * incase of gt reset failure. The additional information supplied is tile id and
+ * gt id of the gt unit for which reset has failed.
+ */
+#define DRM_XE_RESET_REQUIRED_UEVENT_REASON_GT "REASON=GT_RESET_FAILED"
+
+/**
+ * DRM_XE_RESET_REQUIRED_UEVENT_REASON_GSC - Reason provided to XE_RESET_REQUIRED_UEVENT
+ * incase of GSC HW reporting Uncorrectable errors. The GSC errors are reported only
+ * on TILE0, therefore no additional information is supplied for this reason.
+ */
+#define DRM_XE_RESET_REQUIRED_UEVENT_REASON_GSC "REASON=GSC_HW_ERROR"
 
 /**
  * struct xe_user_extension - Base class for defining a chain of extensions
@@ -39,7 +52,7 @@ extern "C" {
  * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
- * the boundary with pointers encapsulated inside u64.
+ * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
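
The uevent rework above replaces the bare DEVICE_STATUS value with a DEVICE_STATUS=NEEDS_RESET payload plus a separate REASON, so consumers can tell GT reset failures apart from GSC hardware errors. Below is a hedged sketch of a userspace listener; the libudev plumbing, the "pci" subsystem filter, and the assumption that REASON arrives as its own uevent property are illustrative, not part of this patch.

#include <libudev.h>
#include <cstdio>

int main() {
    udev *ctx = udev_new();
    udev_monitor *mon = udev_monitor_new_from_netlink(ctx, "kernel");
    // The DOC comment says the uevent is generated on the xe device's pci node.
    udev_monitor_filter_add_match_subsystem_devtype(mon, "pci", nullptr);
    udev_monitor_enable_receiving(mon);

    for (;;) {
        // A real tool would poll(udev_monitor_get_fd(mon)) first; receive is
        // non-blocking and returns nullptr when no uevent is pending.
        udev_device *dev = udev_monitor_receive_device(mon);
        if (!dev)
            continue;
        // DRM_XE_RESET_REQUIRED_UEVENT is "DEVICE_STATUS=NEEDS_RESET"; the
        // REASON=... strings defined above are assumed to come alongside it.
        const char *status = udev_device_get_property_value(dev, "DEVICE_STATUS");
        const char *reason = udev_device_get_property_value(dev, "REASON");
        if (status && reason)
            std::printf("xe reset required: %s (%s)\n", status, reason);
        udev_device_unref(dev);
    }
}
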
@@ -541,8 +554,25 @@ struct drm_xe_gem_create {
	 */
	__u32 handle;

+	/**
+	 * @cpu_caching: The CPU caching mode to select for this object. If
+	 * mmaping the object the mode selected here will also be used.
+	 *
+	 * Supported values:
+	 *
+	 * DRM_XE_GEM_CPU_CACHING_WB: Allocate the pages with write-back
+	 * caching. On iGPU this can't be used for scanout surfaces. Currently
+	 * not allowed for objects placed in VRAM.
+	 *
+	 * DRM_XE_GEM_CPU_CACHING_WC: Allocate the pages as write-combined. This
+	 * is uncached. Scanout surfaces should likely use this. All objects
+	 * that can be placed in VRAM must use this.
+	 */
+#define DRM_XE_GEM_CPU_CACHING_WB 1
+#define DRM_XE_GEM_CPU_CACHING_WC 2
+	__u16 cpu_caching;
	/** @pad: MBZ */
-	__u32 pad;
+	__u16 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
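
A hedged userspace sketch of the new field in use: creating a system-memory BO with explicit write-back CPU caching. DRM_IOCTL_XE_GEM_CREATE and the struct fields come from this header; the placement flags value (treating bit 0 as the system-memory region, mirroring how the NEO hunks pack memory instances into create.flags) is an assumption for illustration.

#include <cstdint>
#include <sys/ioctl.h>
#include "xe_drm.h" // the updated header from this patch

int createWbBo(int drmFd, uint64_t size, uint32_t &handleOut) {
    drm_xe_gem_create create{};
    create.size = size;
    create.flags = 1u << 0;                         // assumed: memory region 0 = system memory
    create.cpu_caching = DRM_XE_GEM_CPU_CACHING_WB; // WB is not allowed for VRAM placements
    int ret = ioctl(drmFd, DRM_IOCTL_XE_GEM_CREATE, &create);
    if (ret == 0)
        handleOut = create.handle;
    return ret;
}
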
@@ -619,8 +649,54 @@ struct drm_xe_vm_bind_op {
	 */
	__u32 obj;

+	/**
+	 * @pat_index: The platform defined @pat_index to use for this mapping.
+	 * The index basically maps to some predefined memory attributes,
+	 * including things like caching, coherency, compression etc. The exact
+	 * meaning of the pat_index is platform specific and defined in the
+	 * Bspec and PRMs. When the KMD sets up the binding the index here is
+	 * encoded into the ppGTT PTE.
+	 *
+	 * For coherency the @pat_index needs to be at least 1way coherent when
+	 * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
+	 * will extract the coherency mode from the @pat_index and reject if
+	 * there is a mismatch (see note below for pre-MTL platforms).
+	 *
+	 * Note: On pre-MTL platforms there is only a caching mode and no
+	 * explicit coherency mode, but on such hardware there is always a
+	 * shared-LLC (or is dgpu) so all GT memory accesses are coherent with
+	 * CPU caches even with the caching mode set as uncached. It's only the
+	 * display engine that is incoherent (on dgpu it must be in VRAM which
+	 * is always mapped as WC on the CPU). However to keep the uapi somewhat
+	 * consistent with newer platforms the KMD groups the different cache
+	 * levels into the following coherency buckets on all pre-MTL platforms:
+	 *
+	 *	ppGTT UC -> COH_NONE
+	 *	ppGTT WC -> COH_NONE
+	 *	ppGTT WT -> COH_NONE
+	 *	ppGTT WB -> COH_AT_LEAST_1WAY
+	 *
+	 * In practice UC/WC/WT should only ever used for scanout surfaces on
+	 * such platforms (or perhaps in general for dma-buf if shared with
+	 * another device) since it is only the display engine that is actually
+	 * incoherent. Everything else should typically use WB given that we
+	 * have a shared-LLC. On MTL+ this completely changes and the HW
+	 * defines the coherency mode as part of the @pat_index, where
+	 * incoherent GT access is possible.
+	 *
+	 * Note: For userptr and externally imported dma-buf the kernel expects
+	 * either 1WAY or 2WAY for the @pat_index.
+	 *
+	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
+	 * on the @pat_index. For such mappings there is no actual memory being
+	 * mapped (the address in the PTE is invalid), so the various PAT memory
+	 * attributes likely do not apply. Simply leaving as zero is one
+	 * option (still a valid pat_index).
+	 */
+	__u16 pat_index;
+
	/** @pad: MBZ */
-	__u32 pad;
+	__u16 pad;

	union {
		/**
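
The comment above pins down the one rule that is easy to get wrong from userspace: a BO created with DRM_XE_GEM_CPU_CACHING_WB must be bound with an at-least-1-way-coherent pat_index, or the KMD rejects the bind. A minimal sketch of that check; the pat_index-to-coherency mapping is platform specific (Bspec/PRM), so the lookup here is an invented stand-in, not real PAT data.

#include <cstdint>

enum class Coherency { none, atLeast1Way };

// Stand-in for the per-platform PAT table (NOT real Bspec data): pretend odd
// indices are at least 1-way coherent and even indices are non-coherent.
Coherency coherencyOfPatIndex(uint16_t patIndex) {
    return (patIndex % 2) ? Coherency::atLeast1Way : Coherency::none;
}

// Mirrors the documented KMD rule: WB-cached BOs need a coherent pat_index;
// WC placements carry no coherency restriction from this rule.
bool bindWouldPassCoherencyCheck(uint16_t cpuCaching, uint16_t patIndex) {
    constexpr uint16_t cachingWb = 1; // DRM_XE_GEM_CPU_CACHING_WB
    if (cpuCaching == cachingWb)
        return coherencyOfPatIndex(patIndex) == Coherency::atLeast1Way;
    return true;
}
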
@@ -1017,8 +1093,89 @@ struct drm_xe_wait_user_fence {
 #define DRM_XE_PMU_MEDIA_GROUP_BUSY(gt)		___DRM_XE_PMU_OTHER(gt, 2)
 #define DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(gt)	___DRM_XE_PMU_OTHER(gt, 3)
 
+/**
+ * DOC: XE GENL netlink event IDs
+ * TODO: Add more details
+ */
+#define XE_HW_ERROR(gt, id) \
+	((id) | ((__u64)(gt) << __DRM_XE_PMU_GT_SHIFT))
+
+#define XE_GENL_GT_ERROR_CORRECTABLE_L3_SNG		(0)
+#define XE_GENL_GT_ERROR_CORRECTABLE_GUC		(1)
+#define XE_GENL_GT_ERROR_CORRECTABLE_SAMPLER		(2)
+#define XE_GENL_GT_ERROR_CORRECTABLE_SLM		(3)
+#define XE_GENL_GT_ERROR_CORRECTABLE_EU_IC		(4)
+#define XE_GENL_GT_ERROR_CORRECTABLE_EU_GRF		(5)
+#define XE_GENL_GT_ERROR_FATAL_ARR_BIST			(6)
+#define XE_GENL_GT_ERROR_FATAL_L3_DOUB			(7)
+#define XE_GENL_GT_ERROR_FATAL_L3_ECC_CHK		(8)
+#define XE_GENL_GT_ERROR_FATAL_GUC			(9)
+#define XE_GENL_GT_ERROR_FATAL_IDI_PAR			(10)
+#define XE_GENL_GT_ERROR_FATAL_SQIDI			(11)
+#define XE_GENL_GT_ERROR_FATAL_SAMPLER			(12)
+#define XE_GENL_GT_ERROR_FATAL_SLM			(13)
+#define XE_GENL_GT_ERROR_FATAL_EU_IC			(14)
+#define XE_GENL_GT_ERROR_FATAL_EU_GRF			(15)
+#define XE_GENL_GT_ERROR_FATAL_FPU			(16)
+#define XE_GENL_GT_ERROR_FATAL_TLB			(17)
+#define XE_GENL_GT_ERROR_FATAL_L3_FABRIC		(18)
+#define XE_GENL_GT_ERROR_CORRECTABLE_SUBSLICE		(19)
+#define XE_GENL_GT_ERROR_CORRECTABLE_L3BANK		(20)
+#define XE_GENL_GT_ERROR_FATAL_SUBSLICE			(21)
+#define XE_GENL_GT_ERROR_FATAL_L3BANK			(22)
+#define XE_GENL_SGUNIT_ERROR_CORRECTABLE		(23)
+#define XE_GENL_SGUNIT_ERROR_NONFATAL			(24)
+#define XE_GENL_SGUNIT_ERROR_FATAL			(25)
+#define XE_GENL_SOC_ERROR_NONFATAL_CSC_PSF_CMD		(26)
+#define XE_GENL_SOC_ERROR_NONFATAL_CSC_PSF_CMP		(27)
+#define XE_GENL_SOC_ERROR_NONFATAL_CSC_PSF_REQ		(28)
+#define XE_GENL_SOC_ERROR_NONFATAL_ANR_MDFI		(29)
+#define XE_GENL_SOC_ERROR_NONFATAL_MDFI_T2T		(30)
+#define XE_GENL_SOC_ERROR_NONFATAL_MDFI_T2C		(31)
+#define XE_GENL_SOC_ERROR_FATAL_CSC_PSF_CMD		(32)
+#define XE_GENL_SOC_ERROR_FATAL_CSC_PSF_CMP		(33)
+#define XE_GENL_SOC_ERROR_FATAL_CSC_PSF_REQ		(34)
+#define XE_GENL_SOC_ERROR_FATAL_PUNIT			(35)
+#define XE_GENL_SOC_ERROR_FATAL_PCIE_PSF_CMD		(36)
+#define XE_GENL_SOC_ERROR_FATAL_PCIE_PSF_CMP		(37)
+#define XE_GENL_SOC_ERROR_FATAL_PCIE_PSF_REQ		(38)
+#define XE_GENL_SOC_ERROR_FATAL_ANR_MDFI		(39)
+#define XE_GENL_SOC_ERROR_FATAL_MDFI_T2T		(40)
+#define XE_GENL_SOC_ERROR_FATAL_MDFI_T2C		(41)
+#define XE_GENL_SOC_ERROR_FATAL_PCIE_AER		(42)
+#define XE_GENL_SOC_ERROR_FATAL_PCIE_ERR		(43)
+#define XE_GENL_SOC_ERROR_FATAL_UR_COND			(44)
+#define XE_GENL_SOC_ERROR_FATAL_SERR_SRCS		(45)
+
+#define XE_GENL_SOC_ERROR_NONFATAL_HBM(ss, n)\
+	(XE_GENL_SOC_ERROR_FATAL_SERR_SRCS + 0x1 + (ss) * 0x10 + (n))
+#define XE_GENL_SOC_ERROR_FATAL_HBM(ss, n)\
+	(XE_GENL_SOC_ERROR_NONFATAL_HBM(1, 15) + 0x1 + (ss) * 0x10 + (n))
+
+/* 109 is the last ID used by SOC errors */
+#define XE_GENL_GSC_ERROR_CORRECTABLE_SRAM_ECC		(110)
+#define XE_GENL_GSC_ERROR_NONFATAL_MIA_SHUTDOWN		(111)
+#define XE_GENL_GSC_ERROR_NONFATAL_MIA_INTERNAL		(112)
+#define XE_GENL_GSC_ERROR_NONFATAL_SRAM_ECC		(113)
+#define XE_GENL_GSC_ERROR_NONFATAL_WDG_TIMEOUT		(114)
+#define XE_GENL_GSC_ERROR_NONFATAL_ROM_PARITY		(115)
+#define XE_GENL_GSC_ERROR_NONFATAL_UCODE_PARITY		(116)
+#define XE_GENL_GSC_ERROR_NONFATAL_VLT_GLITCH		(117)
+#define XE_GENL_GSC_ERROR_NONFATAL_FUSE_PULL		(118)
+#define XE_GENL_GSC_ERROR_NONFATAL_FUSE_CRC_CHECK	(119)
+#define XE_GENL_GSC_ERROR_NONFATAL_SELF_MBIST		(120)
+#define XE_GENL_GSC_ERROR_NONFATAL_AON_RF_PARITY	(121)
+#define XE_GENL_SGGI_ERROR_NONFATAL			(122)
+#define XE_GENL_SGLI_ERROR_NONFATAL			(123)
+#define XE_GENL_SGCI_ERROR_NONFATAL			(124)
+#define XE_GENL_MERT_ERROR_NONFATAL			(125)
+#define XE_GENL_SGGI_ERROR_FATAL			(126)
+#define XE_GENL_SGLI_ERROR_FATAL			(127)
+#define XE_GENL_SGCI_ERROR_FATAL			(128)
+#define XE_GENL_MERT_ERROR_FATAL			(129)
+
 #if defined(__cplusplus)
 }
 #endif
 
-#endif /* _XE_DRM_H_ */
+#endif /* _UAPI_XE_DRM_H_ */
\ No newline at end of file
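
A worked check of the HBM ID packing above: each of the two "ss" values owns a 16-entry block, so non-fatal HBM IDs occupy 46..77 and fatal HBM IDs 78..109, which is exactly why the comment says 109 is the last ID used by SOC errors before the GSC range starts at 110. The constexpr functions below simply restate the two macros so the arithmetic can be verified at compile time.

#include <cstdint>

constexpr uint64_t socFatalSerrSrcs = 45; // XE_GENL_SOC_ERROR_FATAL_SERR_SRCS

constexpr uint64_t socNonFatalHbm(uint64_t ss, uint64_t n) {
    return socFatalSerrSrcs + 0x1 + ss * 0x10 + n; // XE_GENL_SOC_ERROR_NONFATAL_HBM
}
constexpr uint64_t socFatalHbm(uint64_t ss, uint64_t n) {
    return socNonFatalHbm(1, 15) + 0x1 + ss * 0x10 + n; // XE_GENL_SOC_ERROR_FATAL_HBM
}

static_assert(socNonFatalHbm(0, 0) == 46, "first non-fatal HBM ID");
static_assert(socNonFatalHbm(1, 15) == 77, "last non-fatal HBM ID");
static_assert(socFatalHbm(0, 0) == 78, "first fatal HBM ID");
static_assert(socFatalHbm(1, 15) == 109, "last fatal HBM ID, matching the comment");
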