mirror of
https://github.com/intel/compute-runtime.git
synced 2025-09-15 13:01:45 +08:00
Update RENDER_SURFACE_STATE for Xe Hpg
Program Multi Gpu params in surface state only on Xe Hp Sdv Respect zero-size image scenario when programming surface state Move XeHp-only tests to dedicated subdir Related-To: NEO-6466 Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
This commit is contained in:

committed by
Compute-Runtime-Automation

parent
10f329768f
commit
8ebef3769c
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# Copyright (C) 2018-2022 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
@ -27,12 +27,6 @@ set(RUNTIME_SRCS_MEM_OBJ
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/definitions${BRANCH_DIR_SUFFIX}image_ext.inl
|
||||
)
|
||||
|
||||
if(SUPPORT_XEHP_AND_LATER)
|
||||
list(APPEND RUNTIME_SRCS_MEM_OBJ
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/image_xehp_and_later.inl
|
||||
)
|
||||
endif()
|
||||
|
||||
target_sources(${NEO_STATIC_LIB_NAME} PRIVATE ${RUNTIME_SRCS_MEM_OBJ})
|
||||
set_property(GLOBAL PROPERTY RUNTIME_SRCS_MEM_OBJ ${RUNTIME_SRCS_MEM_OBJ})
|
||||
add_subdirectories()
|
||||
|
@ -1,34 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2018-2021 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
*/
|
||||
|
||||
#include "opencl/source/context/context.h"
|
||||
#include "opencl/source/mem_obj/image.h"
|
||||
|
||||
namespace NEO {
|
||||
template <>
|
||||
void ImageHw<Family>::appendSurfaceStateParams(Family::RENDER_SURFACE_STATE *surfaceState, uint32_t rootDeviceIndex, bool useGlobalAtomics) {
|
||||
auto imageCtxType = this->context->peekContextType();
|
||||
|
||||
bool enableMultiGpuPartialWrites = (imageCtxType != ContextType::CONTEXT_TYPE_SPECIALIZED) && (context->containsMultipleSubDevices(rootDeviceIndex));
|
||||
|
||||
bool enableMultiGpuAtomics = enableMultiGpuPartialWrites;
|
||||
|
||||
if (DebugManager.flags.EnableMultiGpuAtomicsOptimization.get()) {
|
||||
enableMultiGpuAtomics &= useGlobalAtomics;
|
||||
}
|
||||
|
||||
surfaceState->setDisableSupportForMultiGpuAtomics(!enableMultiGpuAtomics);
|
||||
surfaceState->setDisableSupportForMultiGpuPartialWrites(!enableMultiGpuPartialWrites);
|
||||
|
||||
if (DebugManager.flags.ForceMultiGpuAtomics.get() != -1) {
|
||||
surfaceState->setDisableSupportForMultiGpuAtomics(!!DebugManager.flags.ForceMultiGpuAtomics.get());
|
||||
}
|
||||
if (DebugManager.flags.ForceMultiGpuPartialWrites.get() != -1) {
|
||||
surfaceState->setDisableSupportForMultiGpuPartialWrites(!!DebugManager.flags.ForceMultiGpuPartialWrites.get());
|
||||
}
|
||||
}
|
||||
} // namespace NEO
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Intel Corporation
|
||||
* Copyright (C) 2021-2022 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
@ -7,15 +7,38 @@
|
||||
|
||||
#include "shared/source/xe_hp_core/hw_cmds_base.h"
|
||||
|
||||
#include "opencl/source/context/context.h"
|
||||
#include "opencl/source/mem_obj/image.inl"
|
||||
|
||||
namespace NEO {
|
||||
|
||||
using Family = XeHpFamily;
|
||||
static auto gfxCore = IGFX_XE_HP_CORE;
|
||||
|
||||
template <>
|
||||
void ImageHw<Family>::appendSurfaceStateParams(Family::RENDER_SURFACE_STATE *surfaceState, uint32_t rootDeviceIndex, bool useGlobalAtomics) {
|
||||
auto imageCtxType = this->context->peekContextType();
|
||||
|
||||
bool enableMultiGpuPartialWrites = (imageCtxType != ContextType::CONTEXT_TYPE_SPECIALIZED) && (context->containsMultipleSubDevices(rootDeviceIndex));
|
||||
|
||||
bool enableMultiGpuAtomics = enableMultiGpuPartialWrites;
|
||||
|
||||
if (DebugManager.flags.EnableMultiGpuAtomicsOptimization.get()) {
|
||||
enableMultiGpuAtomics &= useGlobalAtomics;
|
||||
}
|
||||
|
||||
surfaceState->setDisableSupportForMultiGpuAtomics(!enableMultiGpuAtomics);
|
||||
surfaceState->setDisableSupportForMultiGpuPartialWrites(!enableMultiGpuPartialWrites);
|
||||
|
||||
if (DebugManager.flags.ForceMultiGpuAtomics.get() != -1) {
|
||||
surfaceState->setDisableSupportForMultiGpuAtomics(!!DebugManager.flags.ForceMultiGpuAtomics.get());
|
||||
}
|
||||
if (DebugManager.flags.ForceMultiGpuPartialWrites.get() != -1) {
|
||||
surfaceState->setDisableSupportForMultiGpuPartialWrites(!!DebugManager.flags.ForceMultiGpuPartialWrites.get());
|
||||
}
|
||||
}
|
||||
} // namespace NEO
|
||||
#include "opencl/source/mem_obj/image_tgllp_and_later.inl"
|
||||
#include "opencl/source/mem_obj/image_xehp_and_later.inl"
|
||||
|
||||
// factory initializer
|
||||
#include "opencl/source/mem_obj/image_factory_init.inl"
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Intel Corporation
|
||||
* Copyright (C) 2021-2022 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
@ -15,7 +15,6 @@ using Family = XE_HPC_COREFamily;
|
||||
static auto gfxCore = IGFX_XE_HPC_CORE;
|
||||
} // namespace NEO
|
||||
#include "opencl/source/mem_obj/image_tgllp_and_later.inl"
|
||||
#include "opencl/source/mem_obj/image_xehp_and_later.inl"
|
||||
|
||||
// factory initializer
|
||||
#include "opencl/source/mem_obj/image_factory_init.inl"
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Intel Corporation
|
||||
* Copyright (C) 2021-2022 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
@ -15,7 +15,6 @@ using Family = XE_HPG_COREFamily;
|
||||
static auto gfxCore = IGFX_XE_HPG_CORE;
|
||||
} // namespace NEO
|
||||
#include "opencl/source/mem_obj/image_tgllp_and_later.inl"
|
||||
#include "opencl/source/mem_obj/image_xehp_and_later.inl"
|
||||
|
||||
// factory initializer
|
||||
#include "opencl/source/mem_obj/image_factory_init.inl"
|
||||
|
@ -1,29 +1,19 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Intel Corporation
|
||||
* Copyright (C) 2021-2022 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
*/
|
||||
|
||||
#include "shared/source/command_container/implicit_scaling.h"
|
||||
#include "shared/source/command_stream/command_stream_receiver.h"
|
||||
#include "shared/source/device/sub_device.h"
|
||||
#include "shared/source/gmm_helper/client_context/gmm_client_context.h"
|
||||
#include "shared/source/gmm_helper/gmm.h"
|
||||
#include "shared/source/gmm_helper/gmm_helper.h"
|
||||
#include "shared/source/helpers/basic_math.h"
|
||||
#include "shared/test/common/helpers/debug_manager_state_restore.h"
|
||||
#include "shared/test/common/helpers/variable_backup.h"
|
||||
#include "shared/test/common/mocks/mock_device.h"
|
||||
#include "shared/test/common/mocks/mock_gmm.h"
|
||||
#include "shared/test/common/test_macros/test.h"
|
||||
|
||||
#include "opencl/source/cl_device/cl_device.h"
|
||||
#include "opencl/source/helpers/cl_memory_properties_helpers.h"
|
||||
#include "opencl/source/mem_obj/buffer.h"
|
||||
#include "opencl/test/unit_test/mocks/mock_buffer.h"
|
||||
#include "opencl/test/unit_test/mocks/mock_context.h"
|
||||
#include "opencl/test/unit_test/mocks/mock_platform.h"
|
||||
|
||||
#include <functional>
|
||||
|
||||
@ -31,40 +21,6 @@ using namespace NEO;
|
||||
|
||||
using XeHPAndLaterBufferTests = ::testing::Test;
|
||||
|
||||
using isXePlatform = IsWithinGfxCore<IGFX_XE_HP_CORE, IGFX_XE_HPC_CORE>;
|
||||
|
||||
HWTEST2_F(XeHPAndLaterBufferTests, givenContextTypeDefaultWhenBufferIsWritableAndOnlyOneTileIsAvailableThenRemainFlagsToTrue, isXePlatform) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(1);
|
||||
initPlatform();
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
auto buffer = std::unique_ptr<Buffer>(
|
||||
Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), false, false);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWCMDTEST_F(IGFX_XE_HP_CORE, XeHPAndLaterBufferTests, givenDebugFlagSetWhenProgramingSurfaceStateThenForceCompressionFormat) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
|
||||
@ -97,280 +53,6 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, XeHPAndLaterBufferTests, givenDebugFlagSetWhenProgr
|
||||
}
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterBufferTests, givenContextTypeDefaultWhenBufferIsWritableThenFlipPartialFlagsToFalse, isXePlatform) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(4);
|
||||
initPlatform();
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
auto buffer = std::unique_ptr<Buffer>(
|
||||
Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), true, true);
|
||||
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterBufferTests, givenContextTypeUnrestrictiveWhenBufferIsWritableThenFlipPartialFlagsToFalse, isXePlatform) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(4);
|
||||
initPlatform();
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_UNRESTRICTIVE;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
auto buffer = std::unique_ptr<Buffer>(
|
||||
Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), true, true);
|
||||
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterBufferTests, givenContextTypeDefaultWhenBufferIsNotWritableThenRemainPartialFlagsToTrue, isXePlatform) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
|
||||
auto buffer = std::unique_ptr<Buffer>(Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_ONLY,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), true, false);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterBufferTests, givenContextTypeSpecializedWhenBufferIsWritableThenRemainPartialFlagsToTrue, isXePlatform) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_SPECIALIZED;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
|
||||
auto buffer = std::unique_ptr<Buffer>(Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), false, false);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterBufferTests, givenDebugFlagForMultiTileSupportWhenSurfaceStateIsSetThenValuesMatch, isXePlatform) {
|
||||
DebugManagerStateRestore restore;
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_SPECIALIZED;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
|
||||
auto buffer = std::unique_ptr<Buffer>(Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
|
||||
DebugManager.flags.ForceMultiGpuAtomics.set(0);
|
||||
DebugManager.flags.ForceMultiGpuPartialWrites.set(0);
|
||||
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), false, false);
|
||||
|
||||
EXPECT_EQ(0u, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_EQ(0u, surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
DebugManager.flags.ForceMultiGpuAtomics.set(1);
|
||||
DebugManager.flags.ForceMultiGpuPartialWrites.set(1);
|
||||
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), false, false);
|
||||
|
||||
EXPECT_EQ(1u, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_EQ(1u, surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterBufferTests, givenNullContextWhenBufferAllocationIsNullThenRemainPartialFlagsToTrue, isXePlatform) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
|
||||
auto device = std::unique_ptr<MockDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(defaultHwInfo.get()));
|
||||
|
||||
auto size = MemoryConstants::pageSize;
|
||||
auto ptr = alignedMalloc(size, MemoryConstants::pageSize);
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
Buffer::setSurfaceState(device.get(), &surfaceState, false, false, size, ptr, 0, nullptr, 0, 0, false, false);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
alignedFree(ptr);
|
||||
}
|
||||
|
||||
struct MultiGpuGlobalAtomicsBufferTest : public XeHPAndLaterBufferTests,
|
||||
public ::testing::WithParamInterface<std::tuple<unsigned int, unsigned int, bool, bool, bool>> {
|
||||
};
|
||||
|
||||
HWTEST2_P(MultiGpuGlobalAtomicsBufferTest, givenSetArgStatefulCalledThenDisableSupportForMultiGpuAtomicsIsSetCorrectly, isXePlatform) {
|
||||
unsigned int numAvailableDevices, bufferFlags;
|
||||
bool useGlobalAtomics, areMultipleSubDevicesInContext, enableMultiGpuAtomicsOptimization;
|
||||
std::tie(numAvailableDevices, bufferFlags, useGlobalAtomics, areMultipleSubDevicesInContext, enableMultiGpuAtomicsOptimization) = GetParam();
|
||||
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(numAvailableDevices);
|
||||
DebugManager.flags.EnableMultiGpuAtomicsOptimization.set(enableMultiGpuAtomicsOptimization);
|
||||
initPlatform();
|
||||
|
||||
if (numAvailableDevices == 1) {
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
} else {
|
||||
EXPECT_EQ(numAvailableDevices, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
}
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
auto buffer = std::unique_ptr<Buffer>(
|
||||
Buffer::create(
|
||||
&context,
|
||||
bufferFlags,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), useGlobalAtomics, areMultipleSubDevicesInContext);
|
||||
|
||||
DeviceBitfield deviceBitfield{static_cast<uint32_t>(maxNBitValue(numAvailableDevices))};
|
||||
bool implicitScaling = ImplicitScalingHelper::isImplicitScalingEnabled(deviceBitfield, true);
|
||||
bool enabled = implicitScaling;
|
||||
|
||||
if (enableMultiGpuAtomicsOptimization) {
|
||||
enabled = useGlobalAtomics && (enabled || areMultipleSubDevicesInContext);
|
||||
}
|
||||
|
||||
EXPECT_EQ(!enabled, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
}
|
||||
|
||||
HWTEST2_P(MultiGpuGlobalAtomicsBufferTest, givenSetSurfaceStateCalledThenDisableSupportForMultiGpuAtomicsIsSetCorrectly, isXePlatform) {
|
||||
unsigned int numAvailableDevices, bufferFlags;
|
||||
bool useGlobalAtomics, areMultipleSubDevicesInContext, enableMultiGpuAtomicsOptimization;
|
||||
std::tie(numAvailableDevices, bufferFlags, useGlobalAtomics, areMultipleSubDevicesInContext, enableMultiGpuAtomicsOptimization) = GetParam();
|
||||
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(numAvailableDevices);
|
||||
DebugManager.flags.EnableMultiGpuAtomicsOptimization.set(enableMultiGpuAtomicsOptimization);
|
||||
initPlatform();
|
||||
if (numAvailableDevices == 1) {
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
} else {
|
||||
EXPECT_EQ(numAvailableDevices, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
}
|
||||
|
||||
auto size = MemoryConstants::pageSize;
|
||||
auto ptr = alignedMalloc(size, MemoryConstants::pageSize);
|
||||
MockGraphicsAllocation gfxAllocation(ptr, size);
|
||||
gfxAllocation.setMemObjectsAllocationWithWritableFlags(bufferFlags == CL_MEM_READ_WRITE);
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
Buffer::setSurfaceState(&platform()->getClDevice(0)->getDevice(), &surfaceState, false, false, 0, nullptr, 0, &gfxAllocation, bufferFlags, 0, useGlobalAtomics, areMultipleSubDevicesInContext);
|
||||
|
||||
DeviceBitfield deviceBitfield{static_cast<uint32_t>(maxNBitValue(numAvailableDevices))};
|
||||
bool implicitScaling = ImplicitScalingHelper::isImplicitScalingEnabled(deviceBitfield, true);
|
||||
bool enabled = implicitScaling;
|
||||
|
||||
if (enableMultiGpuAtomicsOptimization) {
|
||||
enabled = useGlobalAtomics && (enabled || areMultipleSubDevicesInContext);
|
||||
}
|
||||
|
||||
EXPECT_EQ(!enabled, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
|
||||
alignedFree(ptr);
|
||||
}
|
||||
|
||||
static unsigned int numAvailableDevices[] = {1, 2};
|
||||
static unsigned int bufferFlags[] = {CL_MEM_READ_ONLY, CL_MEM_READ_WRITE};
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(MultiGpuGlobalAtomicsBufferTest,
|
||||
MultiGpuGlobalAtomicsBufferTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(numAvailableDevices),
|
||||
::testing::ValuesIn(bufferFlags),
|
||||
::testing::Bool(),
|
||||
::testing::Bool(),
|
||||
::testing::Bool()));
|
||||
|
||||
HWCMDTEST_F(IGFX_XE_HP_CORE, XeHPAndLaterBufferTests, givenBufferAllocationInDeviceMemoryWhenStatelessCompressionIsEnabledThenSetSurfaceStateWithCompressionSettings) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.EnableLocalMemory.set(1);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Intel Corporation
|
||||
* Copyright (C) 2021-2022 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
@ -29,301 +29,6 @@
|
||||
using namespace NEO;
|
||||
|
||||
using XeHPAndLaterImageTests = ::testing::Test;
|
||||
using isXePlatform = IsWithinGfxCore<IGFX_XE_HP_CORE, IGFX_XE_HPC_CORE>;
|
||||
|
||||
HWTEST2_F(XeHPAndLaterImageTests, givenContextTypeDefaultWhenImageIsWritableAndOnlyOneTileIsAvailableThenRemainFlagsToTrue, isXePlatform) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(1);
|
||||
initPlatform();
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_WRITE, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterImageTests, givenContextTypeDefaultWhenImageIsWritableThenFlipPartialFlagsToFalse, isXePlatform) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(4);
|
||||
initPlatform();
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_WRITE, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterImageTests, givenDebugFlagForMultiTileSupportWhenSurfaceStateIsProgrammedThenItHasDesiredValues, isXePlatform) {
|
||||
DebugManagerStateRestore restorer;
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_ONLY, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
|
||||
DebugManager.flags.ForceMultiGpuAtomics.set(0);
|
||||
DebugManager.flags.ForceMultiGpuPartialWrites.set(0);
|
||||
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_EQ(0u, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_EQ(0u, surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
DebugManager.flags.ForceMultiGpuAtomics.set(1);
|
||||
DebugManager.flags.ForceMultiGpuPartialWrites.set(1);
|
||||
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_EQ(1u, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_EQ(1u, surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterImageTests, givenContextTypeUnrestrictiveWhenImageIsWritableThenFlipPartialFlagsToFalse, isXePlatform) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(4);
|
||||
initPlatform();
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_UNRESTRICTIVE;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_WRITE, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterImageTests, givenContextTypeDefaultWhenImageIsNotWritableThenRemainPartialFlagsToTrue, isXePlatform) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_ONLY, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
HWTEST2_F(XeHPAndLaterImageTests, givenContextTypeSpecializedWhenImageIsWritableThenRemainPartialFlagsToTrue, isXePlatform) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_SPECIALIZED;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_WRITE, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
struct MultiGpuGlobalAtomicsImageTest : public XeHPAndLaterImageTests,
|
||||
public ::testing::WithParamInterface<std::tuple<unsigned int, unsigned int, ContextType, bool, bool>> {
|
||||
};
|
||||
|
||||
HWTEST2_P(MultiGpuGlobalAtomicsImageTest, givenAppendSurfaceStateParamCalledThenDisableSupportForMultiGpuAtomicsIsSetCorrectly, isXePlatform) {
|
||||
unsigned int numAvailableDevices, memFlags;
|
||||
ContextType contextType;
|
||||
bool useGlobalAtomics, enableMultiGpuAtomicsOptimization;
|
||||
std::tie(numAvailableDevices, memFlags, contextType, useGlobalAtomics, enableMultiGpuAtomicsOptimization) = GetParam();
|
||||
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.EnableMultiGpuAtomicsOptimization.set(enableMultiGpuAtomicsOptimization);
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(numAvailableDevices);
|
||||
initPlatform();
|
||||
if (numAvailableDevices == 1) {
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
} else {
|
||||
EXPECT_EQ(numAvailableDevices, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
}
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = contextType;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
memFlags, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(memFlags, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
memFlags, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), useGlobalAtomics);
|
||||
|
||||
bool enableGlobalAtomics = (contextType != ContextType::CONTEXT_TYPE_SPECIALIZED) && (numAvailableDevices > 1);
|
||||
if (enableMultiGpuAtomicsOptimization) {
|
||||
enableGlobalAtomics &= useGlobalAtomics;
|
||||
}
|
||||
EXPECT_EQ(!enableGlobalAtomics, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
}
|
||||
|
||||
static unsigned int numAvailableDevices[] = {1, 2};
|
||||
static unsigned int memFlags[] = {CL_MEM_READ_ONLY, CL_MEM_READ_WRITE};
|
||||
static ContextType contextTypes[] = {ContextType::CONTEXT_TYPE_DEFAULT, ContextType::CONTEXT_TYPE_SPECIALIZED, ContextType::CONTEXT_TYPE_UNRESTRICTIVE};
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(MultiGpuGlobalAtomicsImageTest,
|
||||
MultiGpuGlobalAtomicsImageTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(numAvailableDevices),
|
||||
::testing::ValuesIn(memFlags),
|
||||
::testing::ValuesIn(contextTypes),
|
||||
::testing::Bool(),
|
||||
::testing::Bool()));
|
||||
|
||||
HWCMDTEST_F(IGFX_XE_HP_CORE, XeHPAndLaterImageTests, WhenAppendingSurfaceStateParamsThenDoNothing) {
|
||||
typedef typename FamilyType::RENDER_SURFACE_STATE RENDER_SURFACE_STATE;
|
||||
@ -684,27 +389,3 @@ HWTEST2_F(XeHPAndLaterImageHelperTests, givenAuxModeMcsLceWhenAppendingSurfaceSt
|
||||
EXPECT_EQ(expectedGetSurfaceStateCompressionFormatCalled, gmmClientContext->getSurfaceStateCompressionFormatCalled);
|
||||
EXPECT_EQ(expectedGetMediaSurfaceStateCompressionFormatCalled, gmmClientContext->getMediaSurfaceStateCompressionFormatCalled);
|
||||
}
|
||||
|
||||
HWTEST2_F(ImageCompressionTests, givenXeHpCoreAndRedescribableFormatWhenCreatingAllocationThenDoNotPreferCompression, IsXeHpCore) {
|
||||
MockContext context{};
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_width = 5;
|
||||
imageDesc.image_height = 5;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
flags, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
mockContext.get(), ClMemoryPropertiesHelper::createMemoryProperties(flags, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
flags, 0, surfaceFormat, &imageDesc, nullptr, retVal));
|
||||
ASSERT_NE(nullptr, image);
|
||||
EXPECT_EQ(UnitTestHelper<FamilyType>::tiledImagesSupported, myMemoryManager->capturedPreferCompressed);
|
||||
|
||||
imageFormat.image_channel_order = CL_RG;
|
||||
surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
flags, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
image = std::unique_ptr<Image>(Image::create(
|
||||
mockContext.get(), ClMemoryPropertiesHelper::createMemoryProperties(flags, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
flags, 0, surfaceFormat, &imageDesc, nullptr, retVal));
|
||||
ASSERT_NE(nullptr, image);
|
||||
EXPECT_TRUE(myMemoryManager->capturedPreferCompressed);
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (C) 2021 Intel Corporation
|
||||
# Copyright (C) 2021-2022 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
@ -14,9 +14,11 @@ if(TESTS_XE_HP_SDV)
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/memory_manager_tests_xehp.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/sampler_tests_xehp.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/source_level_debugger_csr_tests_xehp.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/test_buffer_xe_hp_sdv.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/test_command_stream_receiver_xehp.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/test_device_caps_xehp.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/test_hw_info_config_xehp.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/test_image_xe_hp_sdv.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/test_local_work_size_xehp.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/test_sub_devices_xehp.inl
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/test_preamble_xehp.cpp
|
||||
|
323
opencl/test/unit_test/xe_hp_core/xehp/test_buffer_xe_hp_sdv.inl
Normal file
323
opencl/test/unit_test/xe_hp_core/xehp/test_buffer_xe_hp_sdv.inl
Normal file
@ -0,0 +1,323 @@
|
||||
/*
|
||||
* Copyright (C) 2022 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
*/
|
||||
|
||||
#include "shared/source/command_container/implicit_scaling.h"
|
||||
#include "shared/test/common/helpers/debug_manager_state_restore.h"
|
||||
#include "shared/test/common/mocks/mock_device.h"
|
||||
#include "shared/test/common/test_macros/test.h"
|
||||
|
||||
#include "opencl/source/cl_device/cl_device.h"
|
||||
#include "opencl/source/mem_obj/buffer.h"
|
||||
#include "opencl/test/unit_test/mocks/mock_context.h"
|
||||
#include "opencl/test/unit_test/mocks/mock_platform.h"
|
||||
using XeHpSdvBufferTests = ::testing::Test;
|
||||
|
||||
XEHPTEST_F(XeHpSdvBufferTests, givenContextTypeDefaultWhenBufferIsWritableAndOnlyOneTileIsAvailableThenRemainFlagsToTrue) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(1);
|
||||
initPlatform();
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
auto buffer = std::unique_ptr<Buffer>(
|
||||
Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), false, false);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvBufferTests, givenContextTypeDefaultWhenBufferIsWritableThenFlipPartialFlagsToFalse) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(4);
|
||||
initPlatform();
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
auto buffer = std::unique_ptr<Buffer>(
|
||||
Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), true, true);
|
||||
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvBufferTests, givenContextTypeUnrestrictiveWhenBufferIsWritableThenFlipPartialFlagsToFalse) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(4);
|
||||
initPlatform();
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_UNRESTRICTIVE;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
auto buffer = std::unique_ptr<Buffer>(
|
||||
Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), true, true);
|
||||
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvBufferTests, givenContextTypeDefaultWhenBufferIsNotWritableThenRemainPartialFlagsToTrue) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
|
||||
auto buffer = std::unique_ptr<Buffer>(Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_ONLY,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), true, false);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvBufferTests, givenContextTypeSpecializedWhenBufferIsWritableThenRemainPartialFlagsToTrue) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_SPECIALIZED;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
|
||||
auto buffer = std::unique_ptr<Buffer>(Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), false, false);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvBufferTests, givenDebugFlagForMultiTileSupportWhenSurfaceStateIsSetThenValuesMatch) {
|
||||
DebugManagerStateRestore restore;
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_SPECIALIZED;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
|
||||
auto buffer = std::unique_ptr<Buffer>(Buffer::create(
|
||||
&context,
|
||||
CL_MEM_READ_WRITE,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
|
||||
DebugManager.flags.ForceMultiGpuAtomics.set(0);
|
||||
DebugManager.flags.ForceMultiGpuPartialWrites.set(0);
|
||||
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), false, false);
|
||||
|
||||
EXPECT_EQ(0u, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_EQ(0u, surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
DebugManager.flags.ForceMultiGpuAtomics.set(1);
|
||||
DebugManager.flags.ForceMultiGpuPartialWrites.set(1);
|
||||
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), false, false);
|
||||
|
||||
EXPECT_EQ(1u, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_EQ(1u, surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvBufferTests, givenNullContextWhenBufferAllocationIsNullThenRemainPartialFlagsToTrue) {
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
|
||||
auto device = std::unique_ptr<MockDevice>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(defaultHwInfo.get()));
|
||||
|
||||
auto size = MemoryConstants::pageSize;
|
||||
auto ptr = alignedMalloc(size, MemoryConstants::pageSize);
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
Buffer::setSurfaceState(device.get(), &surfaceState, false, false, size, ptr, 0, nullptr, 0, 0, false, false);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
alignedFree(ptr);
|
||||
}
|
||||
|
||||
struct MultiGpuGlobalAtomicsBufferTest : public XeHpSdvBufferTests,
|
||||
public ::testing::WithParamInterface<std::tuple<unsigned int, unsigned int, bool, bool, bool>> {
|
||||
};
|
||||
|
||||
XEHPTEST_P(MultiGpuGlobalAtomicsBufferTest, givenSetArgStatefulCalledThenDisableSupportForMultiGpuAtomicsIsSetCorrectly) {
|
||||
unsigned int numAvailableDevices, bufferFlags;
|
||||
bool useGlobalAtomics, areMultipleSubDevicesInContext, enableMultiGpuAtomicsOptimization;
|
||||
std::tie(numAvailableDevices, bufferFlags, useGlobalAtomics, areMultipleSubDevicesInContext, enableMultiGpuAtomicsOptimization) = GetParam();
|
||||
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(numAvailableDevices);
|
||||
DebugManager.flags.EnableMultiGpuAtomicsOptimization.set(enableMultiGpuAtomicsOptimization);
|
||||
initPlatform();
|
||||
|
||||
if (numAvailableDevices == 1) {
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
} else {
|
||||
EXPECT_EQ(numAvailableDevices, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
}
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
size_t size = 0x1000;
|
||||
auto retVal = CL_SUCCESS;
|
||||
auto buffer = std::unique_ptr<Buffer>(
|
||||
Buffer::create(
|
||||
&context,
|
||||
bufferFlags,
|
||||
size,
|
||||
nullptr,
|
||||
retVal));
|
||||
EXPECT_EQ(CL_SUCCESS, retVal);
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
buffer->setArgStateful(&surfaceState, false, false, false, false, context.getDevice(0)->getDevice(), useGlobalAtomics, areMultipleSubDevicesInContext);
|
||||
|
||||
DeviceBitfield deviceBitfield{static_cast<uint32_t>(maxNBitValue(numAvailableDevices))};
|
||||
bool implicitScaling = ImplicitScalingHelper::isImplicitScalingEnabled(deviceBitfield, true);
|
||||
bool enabled = implicitScaling;
|
||||
|
||||
if (enableMultiGpuAtomicsOptimization) {
|
||||
enabled = useGlobalAtomics && (enabled || areMultipleSubDevicesInContext);
|
||||
}
|
||||
|
||||
EXPECT_EQ(!enabled, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
}
|
||||
|
||||
XEHPTEST_P(MultiGpuGlobalAtomicsBufferTest, givenSetSurfaceStateCalledThenDisableSupportForMultiGpuAtomicsIsSetCorrectly) {
|
||||
unsigned int numAvailableDevices, bufferFlags;
|
||||
bool useGlobalAtomics, areMultipleSubDevicesInContext, enableMultiGpuAtomicsOptimization;
|
||||
std::tie(numAvailableDevices, bufferFlags, useGlobalAtomics, areMultipleSubDevicesInContext, enableMultiGpuAtomicsOptimization) = GetParam();
|
||||
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(numAvailableDevices);
|
||||
DebugManager.flags.EnableMultiGpuAtomicsOptimization.set(enableMultiGpuAtomicsOptimization);
|
||||
initPlatform();
|
||||
if (numAvailableDevices == 1) {
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
} else {
|
||||
EXPECT_EQ(numAvailableDevices, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
}
|
||||
|
||||
auto size = MemoryConstants::pageSize;
|
||||
auto ptr = alignedMalloc(size, MemoryConstants::pageSize);
|
||||
MockGraphicsAllocation gfxAllocation(ptr, size);
|
||||
gfxAllocation.setMemObjectsAllocationWithWritableFlags(bufferFlags == CL_MEM_READ_WRITE);
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
Buffer::setSurfaceState(&platform()->getClDevice(0)->getDevice(), &surfaceState, false, false, 0, nullptr, 0, &gfxAllocation, bufferFlags, 0, useGlobalAtomics, areMultipleSubDevicesInContext);
|
||||
|
||||
DeviceBitfield deviceBitfield{static_cast<uint32_t>(maxNBitValue(numAvailableDevices))};
|
||||
bool implicitScaling = ImplicitScalingHelper::isImplicitScalingEnabled(deviceBitfield, true);
|
||||
bool enabled = implicitScaling;
|
||||
|
||||
if (enableMultiGpuAtomicsOptimization) {
|
||||
enabled = useGlobalAtomics && (enabled || areMultipleSubDevicesInContext);
|
||||
}
|
||||
|
||||
EXPECT_EQ(!enabled, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
|
||||
alignedFree(ptr);
|
||||
}
|
||||
|
||||
static unsigned int numAvailableDevices[] = {1, 2};
|
||||
static unsigned int bufferFlags[] = {CL_MEM_READ_ONLY, CL_MEM_READ_WRITE};
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(MultiGpuGlobalAtomicsBufferTest,
|
||||
MultiGpuGlobalAtomicsBufferTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(numAvailableDevices),
|
||||
::testing::ValuesIn(bufferFlags),
|
||||
::testing::Bool(),
|
||||
::testing::Bool(),
|
||||
::testing::Bool()));
|
339
opencl/test/unit_test/xe_hp_core/xehp/test_image_xe_hp_sdv.inl
Normal file
339
opencl/test/unit_test/xe_hp_core/xehp/test_image_xe_hp_sdv.inl
Normal file
@ -0,0 +1,339 @@
|
||||
/*
|
||||
* Copyright (C) 2022 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
*/
|
||||
|
||||
#include "shared/test/common/helpers/debug_manager_state_restore.h"
|
||||
#include "shared/test/common/helpers/unit_test_helper.h"
|
||||
#include "shared/test/common/mocks/mock_device.h"
|
||||
#include "shared/test/common/test_macros/test.h"
|
||||
|
||||
#include "opencl/source/cl_device/cl_device.h"
|
||||
#include "opencl/source/helpers/cl_memory_properties_helpers.h"
|
||||
#include "opencl/source/mem_obj/image.h"
|
||||
#include "opencl/test/unit_test/mem_obj/image_compression_fixture.h"
|
||||
#include "opencl/test/unit_test/mocks/mock_context.h"
|
||||
#include "opencl/test/unit_test/mocks/mock_platform.h"
|
||||
|
||||
using XeHpSdvImageTests = ::testing::Test;
|
||||
using isXePlatform = IsWithinGfxCore<IGFX_XE_HP_CORE, IGFX_XE_HPC_CORE>;
|
||||
|
||||
XEHPTEST_F(XeHpSdvImageTests, givenContextTypeDefaultWhenImageIsWritableAndOnlyOneTileIsAvailableThenRemainFlagsToTrue) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(1);
|
||||
initPlatform();
|
||||
EXPECT_EQ(0u, platform()->getClDevice(0)->getNumGenericSubDevices());
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_WRITE, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
surfaceState.setDisableSupportForMultiGpuAtomics(false);
|
||||
surfaceState.setDisableSupportForMultiGpuPartialWrites(false);
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvImageTests, givenContextTypeDefaultWhenImageIsWritableThenFlipPartialFlagsToFalse) {
|
||||
DebugManagerStateRestore restorer;
|
||||
DebugManager.flags.CreateMultipleSubDevices.set(4);
|
||||
initPlatform();
|
||||
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context(platform()->getClDevice(0));
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_WRITE, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_TRUE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_FALSE(surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvImageTests, givenDebugFlagForMultiTileSupportWhenSurfaceStateIsProgrammedThenItHasDesiredValues) {
|
||||
DebugManagerStateRestore restorer;
|
||||
using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
|
||||
MockContext context;
|
||||
context.contextType = ContextType::CONTEXT_TYPE_DEFAULT;
|
||||
|
||||
cl_int retVal = CL_SUCCESS;
|
||||
cl_image_format imageFormat = {};
|
||||
cl_image_desc imageDesc = {};
|
||||
|
||||
imageFormat.image_channel_data_type = CL_UNORM_INT8;
|
||||
imageFormat.image_channel_order = CL_RGBA;
|
||||
|
||||
imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
|
||||
imageDesc.image_height = 128;
|
||||
imageDesc.image_width = 256;
|
||||
|
||||
auto surfaceFormat = Image::getSurfaceFormatFromTable(
|
||||
CL_MEM_READ_ONLY, &imageFormat, context.getDevice(0)->getHardwareInfo().capabilityTable.supportsOcl21Features);
|
||||
auto image = std::unique_ptr<Image>(Image::create(
|
||||
&context, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &context.getDevice(0)->getDevice()),
|
||||
CL_MEM_READ_WRITE, 0, surfaceFormat, &imageDesc, NULL, retVal));
|
||||
auto imageHw = static_cast<ImageHw<FamilyType> *>(image.get());
|
||||
|
||||
RENDER_SURFACE_STATE surfaceState = FamilyType::cmdInitRenderSurfaceState;
|
||||
|
||||
DebugManager.flags.ForceMultiGpuAtomics.set(0);
|
||||
DebugManager.flags.ForceMultiGpuPartialWrites.set(0);
|
||||
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_EQ(0u, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_EQ(0u, surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
|
||||
DebugManager.flags.ForceMultiGpuAtomics.set(1);
|
||||
DebugManager.flags.ForceMultiGpuPartialWrites.set(1);
|
||||
|
||||
imageHw->appendSurfaceStateParams(&surfaceState, context.getDevice(0)->getRootDeviceIndex(), true);
|
||||
|
||||
EXPECT_EQ(1u, surfaceState.getDisableSupportForMultiGpuAtomics());
|
||||
EXPECT_EQ(1u, surfaceState.getDisableSupportForMultiGpuPartialWrites());
|
||||
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvImageTests, givenContextTypeUnrestrictiveWhenImageIsWritableThenFlipPartialFlagsToFalse) {
    // Unrestrictive context on a multi-sub-device platform: appending surface
    // state params for a writable image must clear both multi-GPU "disable" bits.
    DebugManagerStateRestore restorer;
    DebugManager.flags.CreateMultipleSubDevices.set(4);
    initPlatform();

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    MockContext multiDeviceContext(platform()->getClDevice(0));
    multiDeviceContext.contextType = ContextType::CONTEXT_TYPE_UNRESTRICTIVE;

    cl_image_desc desc = {};
    desc.image_type = CL_MEM_OBJECT_IMAGE2D;
    desc.image_width = 256;
    desc.image_height = 128;

    cl_image_format format = {};
    format.image_channel_order = CL_RGBA;
    format.image_channel_data_type = CL_UNORM_INT8;

    auto clDevice = multiDeviceContext.getDevice(0);
    auto formatInfo = Image::getSurfaceFormatFromTable(
        CL_MEM_READ_WRITE, &format, clDevice->getHardwareInfo().capabilityTable.supportsOcl21Features);

    cl_int errorCode = CL_SUCCESS;
    auto writableImage = std::unique_ptr<Image>(Image::create(
        &multiDeviceContext, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &clDevice->getDevice()),
        CL_MEM_READ_WRITE, 0, formatInfo, &desc, nullptr, errorCode));
    auto hwImage = static_cast<ImageHw<FamilyType> *>(writableImage.get());

    // cmdInitRenderSurfaceState starts with both bits disabled (set to true).
    RENDER_SURFACE_STATE state = FamilyType::cmdInitRenderSurfaceState;
    EXPECT_TRUE(state.getDisableSupportForMultiGpuAtomics());
    EXPECT_TRUE(state.getDisableSupportForMultiGpuPartialWrites());

    hwImage->appendSurfaceStateParams(&state, clDevice->getRootDeviceIndex(), true);

    EXPECT_FALSE(state.getDisableSupportForMultiGpuAtomics());
    EXPECT_FALSE(state.getDisableSupportForMultiGpuPartialWrites());
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvImageTests, givenContextTypeDefaultWhenImageIsNotWritableThenRemainPartialFlagsToTrue) {
    // Default context with a read-only surface format: appending surface state
    // params must force both multi-GPU "disable" bits back to true even if a
    // caller cleared them beforehand.
    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    MockContext defaultContext;
    defaultContext.contextType = ContextType::CONTEXT_TYPE_DEFAULT;

    cl_image_desc desc = {};
    desc.image_type = CL_MEM_OBJECT_IMAGE2D;
    desc.image_width = 256;
    desc.image_height = 128;

    cl_image_format format = {};
    format.image_channel_order = CL_RGBA;
    format.image_channel_data_type = CL_UNORM_INT8;

    auto clDevice = defaultContext.getDevice(0);
    // The format is looked up as read-only (the "not writable" part of the
    // scenario) while the allocation itself is created with read-write flags.
    auto formatInfo = Image::getSurfaceFormatFromTable(
        CL_MEM_READ_ONLY, &format, clDevice->getHardwareInfo().capabilityTable.supportsOcl21Features);

    cl_int errorCode = CL_SUCCESS;
    auto readOnlyImage = std::unique_ptr<Image>(Image::create(
        &defaultContext, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &clDevice->getDevice()),
        CL_MEM_READ_WRITE, 0, formatInfo, &desc, nullptr, errorCode));
    auto hwImage = static_cast<ImageHw<FamilyType> *>(readOnlyImage.get());

    RENDER_SURFACE_STATE state = FamilyType::cmdInitRenderSurfaceState;
    EXPECT_TRUE(state.getDisableSupportForMultiGpuAtomics());
    EXPECT_TRUE(state.getDisableSupportForMultiGpuPartialWrites());

    // Clear both bits first so the test proves append() actively re-disables them.
    state.setDisableSupportForMultiGpuAtomics(false);
    state.setDisableSupportForMultiGpuPartialWrites(false);
    hwImage->appendSurfaceStateParams(&state, clDevice->getRootDeviceIndex(), true);

    EXPECT_TRUE(state.getDisableSupportForMultiGpuAtomics());
    EXPECT_TRUE(state.getDisableSupportForMultiGpuPartialWrites());
}
|
||||
|
||||
XEHPTEST_F(XeHpSdvImageTests, givenContextTypeSpecializedWhenImageIsWritableThenRemainPartialFlagsToTrue) {
    // Specialized context: even a writable image keeps multi-GPU atomics and
    // partial writes disabled — append() must force both bits back to true.
    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    MockContext specializedContext;
    specializedContext.contextType = ContextType::CONTEXT_TYPE_SPECIALIZED;

    cl_image_desc desc = {};
    desc.image_type = CL_MEM_OBJECT_IMAGE2D;
    desc.image_width = 256;
    desc.image_height = 128;

    cl_image_format format = {};
    format.image_channel_order = CL_RGBA;
    format.image_channel_data_type = CL_UNORM_INT8;

    auto clDevice = specializedContext.getDevice(0);
    auto formatInfo = Image::getSurfaceFormatFromTable(
        CL_MEM_READ_WRITE, &format, clDevice->getHardwareInfo().capabilityTable.supportsOcl21Features);

    cl_int errorCode = CL_SUCCESS;
    auto writableImage = std::unique_ptr<Image>(Image::create(
        &specializedContext, ClMemoryPropertiesHelper::createMemoryProperties(CL_MEM_READ_WRITE, 0, 0, &clDevice->getDevice()),
        CL_MEM_READ_WRITE, 0, formatInfo, &desc, nullptr, errorCode));
    auto hwImage = static_cast<ImageHw<FamilyType> *>(writableImage.get());

    RENDER_SURFACE_STATE state = FamilyType::cmdInitRenderSurfaceState;
    EXPECT_TRUE(state.getDisableSupportForMultiGpuAtomics());
    EXPECT_TRUE(state.getDisableSupportForMultiGpuPartialWrites());

    // Clear both bits first so the test proves append() actively re-disables them.
    state.setDisableSupportForMultiGpuAtomics(false);
    state.setDisableSupportForMultiGpuPartialWrites(false);
    hwImage->appendSurfaceStateParams(&state, clDevice->getRootDeviceIndex(), true);

    EXPECT_TRUE(state.getDisableSupportForMultiGpuAtomics());
    EXPECT_TRUE(state.getDisableSupportForMultiGpuPartialWrites());
}
|
||||
|
||||
// Parameterized fixture for multi-GPU global-atomics surface-state programming.
// Tuple parameters, in order: number of available (sub-)devices, cl_mem flags,
// context type, useGlobalAtomics, EnableMultiGpuAtomicsOptimization debug flag.
struct MultiGpuGlobalAtomicsImageTest : public XeHpSdvImageTests,
                                        public ::testing::WithParamInterface<std::tuple<unsigned int, unsigned int, ContextType, bool, bool>> {
};
|
||||
|
||||
XEHPTEST_P(MultiGpuGlobalAtomicsImageTest, givenAppendSurfaceStateParamCalledThenDisableSupportForMultiGpuAtomicsIsSetCorrectly) {
    // Exhaustive check of the DisableSupportForMultiGpuAtomics bit across the
    // parameter cross-product declared in INSTANTIATE_TEST_CASE_P below.
    auto [deviceCount, memoryFlags, ctxType, useGlobalAtomics, atomicsOptimization] = GetParam();

    DebugManagerStateRestore restorer;
    DebugManager.flags.EnableMultiGpuAtomicsOptimization.set(atomicsOptimization);
    DebugManager.flags.CreateMultipleSubDevices.set(deviceCount);
    initPlatform();
    // A single-device configuration exposes no generic sub-devices at all.
    auto expectedSubDeviceCount = (deviceCount == 1) ? 0u : deviceCount;
    EXPECT_EQ(expectedSubDeviceCount, platform()->getClDevice(0)->getNumGenericSubDevices());

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    MockContext testedContext(platform()->getClDevice(0));
    testedContext.contextType = ctxType;

    cl_image_desc desc = {};
    desc.image_type = CL_MEM_OBJECT_IMAGE2D;
    desc.image_width = 256;
    desc.image_height = 128;

    cl_image_format format = {};
    format.image_channel_order = CL_RGBA;
    format.image_channel_data_type = CL_UNORM_INT8;

    auto clDevice = testedContext.getDevice(0);
    auto formatInfo = Image::getSurfaceFormatFromTable(
        memoryFlags, &format, clDevice->getHardwareInfo().capabilityTable.supportsOcl21Features);

    cl_int errorCode = CL_SUCCESS;
    auto image = std::unique_ptr<Image>(Image::create(
        &testedContext, ClMemoryPropertiesHelper::createMemoryProperties(memoryFlags, 0, 0, &clDevice->getDevice()),
        memoryFlags, 0, formatInfo, &desc, nullptr, errorCode));
    auto hwImage = static_cast<ImageHw<FamilyType> *>(image.get());

    RENDER_SURFACE_STATE state = FamilyType::cmdInitRenderSurfaceState;
    EXPECT_TRUE(state.getDisableSupportForMultiGpuAtomics());

    // Clear the bits first so append() has to make an explicit decision.
    state.setDisableSupportForMultiGpuAtomics(false);
    state.setDisableSupportForMultiGpuPartialWrites(false);
    hwImage->appendSurfaceStateParams(&state, clDevice->getRootDeviceIndex(), useGlobalAtomics);

    // Atomics stay enabled only for non-specialized contexts with multiple
    // devices; with the optimization flag on, the kernel must additionally have
    // requested global atomics.
    bool expectAtomicsEnabled = (ctxType != ContextType::CONTEXT_TYPE_SPECIALIZED) && (deviceCount > 1);
    if (atomicsOptimization) {
        expectAtomicsEnabled &= useGlobalAtomics;
    }
    EXPECT_EQ(!expectAtomicsEnabled, state.getDisableSupportForMultiGpuAtomics());
}
|
||||
|
||||
// Parameter space for MultiGpuGlobalAtomicsImageTest: single- vs multi-device
// configurations, read-only vs read-write images, and every context type.
static unsigned int numAvailableDevicesForMultiGpuGlobalAtomicsImageTest[] = {1, 2};
static unsigned int memFlags[] = {CL_MEM_READ_ONLY, CL_MEM_READ_WRITE};
static ContextType contextTypes[] = {ContextType::CONTEXT_TYPE_DEFAULT, ContextType::CONTEXT_TYPE_SPECIALIZED, ContextType::CONTEXT_TYPE_UNRESTRICTIVE};
|
||||
|
||||
// Instantiate the full cross-product: {1,2} devices x {read-only,read-write}
// x all context types x useGlobalAtomics x EnableMultiGpuAtomicsOptimization.
// NOTE(review): INSTANTIATE_TEST_CASE_P is deprecated in newer googletest in
// favor of INSTANTIATE_TEST_SUITE_P — migrate once the bundled gtest allows it.
INSTANTIATE_TEST_CASE_P(MultiGpuGlobalAtomicsImageTest,
                        MultiGpuGlobalAtomicsImageTest,
                        ::testing::Combine(
                            ::testing::ValuesIn(numAvailableDevicesForMultiGpuGlobalAtomicsImageTest),
                            ::testing::ValuesIn(memFlags),
                            ::testing::ValuesIn(contextTypes),
                            ::testing::Bool(),
                            ::testing::Bool()));
|
||||
|
||||
XEHPTEST_F(ImageCompressionTests, givenXeHpCoreAndRedescribableFormatWhenCreatingAllocationThenDoNotPreferCompression) {
    // Verifies the compression preference captured by the fixture's memory
    // manager: for the default (RGBA) format it must follow tiled-image support,
    // while a redescribable RG format must prefer compression.
    imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D;
    imageDesc.image_width = 5;
    imageDesc.image_height = 5;

    // Fix: query format support and device through the fixture's mockContext
    // rather than an extra local MockContext, so every call in the test refers
    // to the device wired to myMemoryManager.
    auto clDevice = mockContext->getDevice(0);

    auto surfaceFormat = Image::getSurfaceFormatFromTable(
        flags, &imageFormat, clDevice->getHardwareInfo().capabilityTable.supportsOcl21Features);
    auto image = std::unique_ptr<Image>(Image::create(
        mockContext.get(), ClMemoryPropertiesHelper::createMemoryProperties(flags, 0, 0, &clDevice->getDevice()),
        flags, 0, surfaceFormat, &imageDesc, nullptr, retVal));
    ASSERT_NE(nullptr, image);
    EXPECT_EQ(UnitTestHelper<FamilyType>::tiledImagesSupported, myMemoryManager->capturedPreferCompressed);

    // Redescribable channel order: compression is preferred regardless.
    imageFormat.image_channel_order = CL_RG;
    surfaceFormat = Image::getSurfaceFormatFromTable(
        flags, &imageFormat, clDevice->getHardwareInfo().capabilityTable.supportsOcl21Features);
    image = std::unique_ptr<Image>(Image::create(
        mockContext.get(), ClMemoryPropertiesHelper::createMemoryProperties(flags, 0, 0, &clDevice->getDevice()),
        flags, 0, surfaceFormat, &imageDesc, nullptr, retVal));
    ASSERT_NE(nullptr, image);
    EXPECT_TRUE(myMemoryManager->capturedPreferCompressed);
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Intel Corporation
|
||||
* Copyright (C) 2021-2022 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
@ -11,9 +11,11 @@
|
||||
#include "hw_info_tests_xehp.inl"
|
||||
#include "memory_manager_tests_xehp.inl"
|
||||
#include "sampler_tests_xehp.inl"
|
||||
#include "test_buffer_xe_hp_sdv.inl"
|
||||
#include "test_command_stream_receiver_xehp.inl"
|
||||
#include "test_device_caps_xehp.inl"
|
||||
#include "test_hw_info_config_xehp.inl"
|
||||
#include "test_image_xe_hp_sdv.inl"
|
||||
#include "test_local_work_size_xehp.inl"
|
||||
#include "test_platform_caps_xehp.inl"
|
||||
#include "test_sub_devices_xehp.inl"
|
||||
|
Reference in New Issue
Block a user