fix: change denorm mode in IDD to FlushToZero

Denorm support is controlled by IGC, so we should just default to flush-to-zero.

Related-To: NEO-8059
Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
commit 2d01bdec81
parent bd15d067d5
Author: Mateusz Jablonski
Date: 2023-06-13 12:56:04 +00:00
Committed-by: Compute-Runtime-Automation
6 changed files with 3 additions and 7 deletions


@@ -132,7 +132,7 @@ HWCMDTEST_P(IGFX_GEN8_CORE, KernelSLMAndBarrierTest, GivenStaticSlmSizeWhenProgr
     ASSERT_GT(expectedSlmSize, 0u);
     EXPECT_EQ(expectedSlmSize, pSrcIDData->getSharedLocalMemorySize());
     EXPECT_EQ(kernelInfo.kernelDescriptor.kernelAttributes.usesBarriers(), pSrcIDData->getBarrierEnable());
-    EXPECT_EQ(INTERFACE_DESCRIPTOR_DATA::DENORM_MODE_SETBYKERNEL, pSrcIDData->getDenormMode());
+    EXPECT_EQ(INTERFACE_DESCRIPTOR_DATA::DENORM_MODE_FTZ, pSrcIDData->getDenormMode());
     if (EncodeSurfaceState<FamilyType>::doBindingTablePrefetch()) {
         EXPECT_EQ(4u, pSrcIDData->getBindingTableEntryCount());
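
The hunk above only shows the updated test expectation. Below is a minimal sketch of what the corresponding runtime-side change could look like; the helper name and the setDenormMode setter are assumptions for illustration and are not taken from this diff.

// Hypothetical sketch, not part of this commit: everything except
// INTERFACE_DESCRIPTOR_DATA, DENORM_MODE_FTZ and DENORM_MODE_SETBYKERNEL
// is an assumption.
template <typename FamilyType>
void programDenormMode(typename FamilyType::INTERFACE_DESCRIPTOR_DATA &idd) {
    using INTERFACE_DESCRIPTOR_DATA = typename FamilyType::INTERFACE_DESCRIPTOR_DATA;
    // Denorm handling is controlled by IGC at kernel compile time, so the
    // runtime no longer defers to the kernel (DENORM_MODE_SETBYKERNEL) and
    // instead programs flush-to-zero by default.
    idd.setDenormMode(INTERFACE_DESCRIPTOR_DATA::DENORM_MODE_FTZ);
}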