// C++ source: 1131 lines, 41 KiB
/*
|
|
* Copyright (c) 2017 - 2018, Intel Corporation
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
* to deal in the Software without restriction, including without limitation
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice shall be included
|
|
* in all copies or substantial portions of the Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
|
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
* OTHER DEALINGS IN THE SOFTWARE.
|
|
*/
|
|
|
|
#include "runtime/mem_obj/buffer.h"
|
|
#include "runtime/memory_manager/svm_memory_manager.h"
|
|
#include "runtime/gmm_helper/gmm.h"
|
|
#include "runtime/gmm_helper/gmm_helper.h"
|
|
#include "runtime/gmm_helper/resource_info.h"
|
|
#include "unit_tests/fixtures/device_fixture.h"
|
|
#include "unit_tests/fixtures/memory_management_fixture.h"
|
|
#include "unit_tests/gen_common/matchers.h"
|
|
#include "unit_tests/helpers/debug_manager_state_restore.h"
|
|
#include "unit_tests/helpers/memory_management.h"
|
|
#include "unit_tests/mocks/mock_context.h"
|
|
#include "unit_tests/mocks/mock_command_queue.h"
|
|
#include "unit_tests/mocks/mock_gmm_resource_info.h"
|
|
#include "unit_tests/fixtures/platform_fixture.h"
|
|
#include "unit_tests/libult/ult_command_stream_receiver.h"
|
|
#include "runtime/helpers/options.h"
|
|
#include "gtest/gtest.h"
|
|
#include "gmock/gmock.h"
|
|
#include "test.h"
|
|
|
|
using namespace OCLRT;
|
|
|
|
static const unsigned int g_scTestBufferSizeInBytes = 16;
|
|
|
|
// A buffer maps linearly, so only the first (x) component of the offset
// triple contributes to the mapped-pointer offset.
TEST(Buffer, giveBufferWhenAskedForPtrOffsetForMappingThenReturnCorrectValue) {
    MockContext context;
    cl_int errCode;
    std::unique_ptr<Buffer> testBuffer(Buffer::create(&context, 0, 1, nullptr, errCode));

    MemObjOffsetArray mapOffset = {{4, 5, 6}};
    EXPECT_EQ(mapOffset[0], testBuffer->calculateOffsetForMapping(mapOffset));
}
|
|
|
|
// Mapped-pointer length for a buffer depends only on the first (x)
// component of the size triple.
TEST(Buffer, givenBufferWhenAskedForPtrLengthThenReturnCorrectValue) {
    MockContext context;
    cl_int errCode;
    std::unique_ptr<Buffer> testBuffer(Buffer::create(&context, 0, 1, nullptr, errCode));

    MemObjSizeArray mapSize = {{4, 5, 6}};
    EXPECT_EQ(mapSize[0], testBuffer->calculateMappedPtrLength(mapSize));
}
|
|
|
|
// Flag sets that deny kernel and/or host writes must be recognized as
// permitting read-only memory placement.
TEST(Buffer, givenReadOnlySetOfInputFlagsWhenPassedToisReadOnlyMemoryPermittedByFlagsThenTrueIsReturned) {
    struct FlagProbe : public Buffer {
        using Buffer::isReadOnlyMemoryPermittedByFlags;
    };

    EXPECT_TRUE(FlagProbe::isReadOnlyMemoryPermittedByFlags(CL_MEM_HOST_NO_ACCESS | CL_MEM_READ_ONLY));
    EXPECT_TRUE(FlagProbe::isReadOnlyMemoryPermittedByFlags(CL_MEM_HOST_READ_ONLY | CL_MEM_READ_ONLY));
}
|
|
|
|
// Parameterized over a single cl_mem_flags value (carried as uint64_t);
// used to exercise flag combinations that must NOT be treated as read-only.
class BufferReadOnlyTest : public testing::TestWithParam<uint64_t> {
};
|
|
|
|
// Any parameterized non-read-only flag set, combined with USE_HOST_PTR,
// must not permit read-only memory placement.
TEST_P(BufferReadOnlyTest, givenNonReadOnlySetOfInputFlagsWhenPassedToisReadOnlyMemoryPermittedByFlagsThenFalseIsReturned) {
    struct FlagProbe : public Buffer {
        using Buffer::isReadOnlyMemoryPermittedByFlags;
    };

    EXPECT_FALSE(FlagProbe::isReadOnlyMemoryPermittedByFlags(GetParam() | CL_MEM_USE_HOST_PTR));
}
|
|
// Flag combinations that must NOT qualify buffer memory as read-only.
// Order is preserved: it determines generated test-instance names.
static cl_mem_flags nonReadOnlyFlags[] = {
    CL_MEM_READ_WRITE | CL_MEM_HOST_READ_ONLY,
    CL_MEM_WRITE_ONLY,
    CL_MEM_READ_ONLY | CL_MEM_HOST_WRITE_ONLY,
    CL_MEM_HOST_READ_ONLY,
    CL_MEM_HOST_WRITE_ONLY,
    CL_MEM_HOST_NO_ACCESS,
    CL_MEM_HOST_READ_ONLY | CL_MEM_WRITE_ONLY,
    CL_MEM_HOST_WRITE_ONLY | CL_MEM_WRITE_ONLY,
    0};

INSTANTIATE_TEST_CASE_P(
    nonReadOnlyFlags,
    BufferReadOnlyTest,
    testing::ValuesIn(nonReadOnlyFlags));
|
|
|
|
// MockMemoryManager whose createGraphicsAllocationWithRequiredBitness is a
// gmock method so individual allocations can be forced to fail from a test.
// baseCreate... exposes the real implementation for use with ::testing::Invoke.
class GMockMemoryManagerFailFirstAllocation : public MockMemoryManager {
  public:
    MOCK_METHOD3(createGraphicsAllocationWithRequiredBitness, GraphicsAllocation *(size_t size, void *ptr, bool forcePin));
    GraphicsAllocation *baseCreateGraphicsAllocationWithRequiredBitness(size_t size, void *ptr, bool forcePin) {
        return MockMemoryManager::createGraphicsAllocationWithRequiredBitness(size, ptr, forcePin);
    }
};
|
|
|
|
// When mapping read-only host memory directly fails, a read-only buffer
// must fall back to fresh storage holding a copy of the host data
// (i.e. it is no longer zero-copy).
TEST(Buffer, givenReadOnlyHostPtrMemoryWhenBufferIsCreatedWithReadOnlyFlagsThenBufferHasAllocatedNewMemoryStorageAndBufferIsNotZeroCopy) {
    void *hostMemory = alignedMalloc(MemoryConstants::pageSize, MemoryConstants::pageSize);
    ASSERT_NE(nullptr, hostMemory);
    memset(hostMemory, 0xAA, MemoryConstants::pageSize);

    std::unique_ptr<MockDevice> device(DeviceHelper<>::create(nullptr));
    auto gmockManager = new ::testing::NiceMock<GMockMemoryManagerFailFirstAllocation>;
    device->injectMemoryManager(gmockManager);
    MockContext ctx(device.get());

    // First allocation (over the host pointer) fails, simulating read-only
    // memory that cannot be used directly.
    EXPECT_CALL(*gmockManager, createGraphicsAllocationWithRequiredBitness(MemoryConstants::pageSize, (void *)hostMemory, true))
        .WillOnce(::testing::Return(nullptr));
    // Subsequent allocations (no host pointer) go through the real path.
    EXPECT_CALL(*gmockManager, createGraphicsAllocationWithRequiredBitness(::testing::_, nullptr, ::testing::_))
        .WillRepeatedly(::testing::Invoke(gmockManager, &GMockMemoryManagerFailFirstAllocation::baseCreateGraphicsAllocationWithRequiredBitness));

    cl_int retVal;
    cl_mem_flags flags = CL_MEM_HOST_READ_ONLY | CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR;
    std::unique_ptr<Buffer> buffer(Buffer::create(&ctx, flags, MemoryConstants::pageSize, (void *)hostMemory, retVal));

    EXPECT_FALSE(buffer->isMemObjZeroCopy());
    void *backingStorage = buffer->getCpuAddressForMemoryTransfer();
    EXPECT_NE((void *)hostMemory, backingStorage);
    EXPECT_THAT(buffer->getCpuAddressForMemoryTransfer(), MemCompare(hostMemory, MemoryConstants::pageSize));

    alignedFree(hostMemory);
}
|
|
|
|
// If both the direct host-pointer allocation and the fallback copy
// allocation fail, buffer creation must return nullptr.
TEST(Buffer, givenReadOnlyHostPtrMemoryWhenBufferIsCreatedWithReadOnlyFlagsAndSecondAllocationFailsThenNullptrIsReturned) {
    void *hostMemory = alignedMalloc(MemoryConstants::pageSize, MemoryConstants::pageSize);
    ASSERT_NE(nullptr, hostMemory);
    memset(hostMemory, 0xAA, MemoryConstants::pageSize);

    std::unique_ptr<MockDevice> device(DeviceHelper<>::create(nullptr));
    auto gmockManager = new ::testing::NiceMock<GMockMemoryManagerFailFirstAllocation>;
    device->injectMemoryManager(gmockManager);
    MockContext ctx(device.get());

    // First failure simulates unusable read-only host memory...
    EXPECT_CALL(*gmockManager, createGraphicsAllocationWithRequiredBitness(MemoryConstants::pageSize, (void *)hostMemory, true))
        .WillOnce(::testing::Return(nullptr));
    // ...second failure kills the fallback allocation as well.
    EXPECT_CALL(*gmockManager, createGraphicsAllocationWithRequiredBitness(::testing::_, nullptr, ::testing::_))
        .WillOnce(::testing::Return(nullptr));

    cl_int retVal;
    cl_mem_flags flags = CL_MEM_HOST_READ_ONLY | CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR;
    std::unique_ptr<Buffer> buffer(Buffer::create(&ctx, flags, MemoryConstants::pageSize, (void *)hostMemory, retVal));

    EXPECT_EQ(nullptr, buffer.get());
    alignedFree(hostMemory);
}
|
|
|
|
// Read-only host memory cannot back a kernel-writable buffer; there is no
// copy fallback for WRITE_ONLY, so creation must fail outright.
TEST(Buffer, givenReadOnlyHostPtrMemoryWhenBufferIsCreatedWithKernelWriteFlagThenBufferAllocationFailsAndReturnsNullptr) {
    void *hostMemory = alignedMalloc(MemoryConstants::pageSize, MemoryConstants::pageSize);
    ASSERT_NE(nullptr, hostMemory);
    memset(hostMemory, 0xAA, MemoryConstants::pageSize);

    std::unique_ptr<MockDevice> device(DeviceHelper<>::create(nullptr));
    auto gmockManager = new ::testing::NiceMock<GMockMemoryManagerFailFirstAllocation>;
    device->injectMemoryManager(gmockManager);
    MockContext ctx(device.get());

    // Failing the host-pointer allocation simulates read-only memory.
    EXPECT_CALL(*gmockManager, createGraphicsAllocationWithRequiredBitness(MemoryConstants::pageSize, (void *)hostMemory, true))
        .WillOnce(::testing::Return(nullptr));

    cl_int retVal;
    cl_mem_flags flags = CL_MEM_HOST_READ_ONLY | CL_MEM_WRITE_ONLY | CL_MEM_USE_HOST_PTR;
    std::unique_ptr<Buffer> buffer(Buffer::create(&ctx, flags, MemoryConstants::pageSize, (void *)hostMemory, retVal));

    EXPECT_EQ(nullptr, buffer.get());
    alignedFree(hostMemory);
}
|
|
|
|
// With no host pointer, a failed graphics allocation must make
// Buffer::create return nullptr.
TEST(Buffer, givenNullPtrWhenBufferIsCreatedWithKernelReadOnlyFlagsThenBufferAllocationFailsAndReturnsNullptr) {
    std::unique_ptr<MockDevice> device(DeviceHelper<>::create(nullptr));
    auto gmockManager = new ::testing::NiceMock<GMockMemoryManagerFailFirstAllocation>;
    device->injectMemoryManager(gmockManager);
    MockContext ctx(device.get());

    EXPECT_CALL(*gmockManager, createGraphicsAllocationWithRequiredBitness(::testing::_, nullptr, ::testing::_))
        .WillOnce(::testing::Return(nullptr));

    cl_int retVal;
    cl_mem_flags flags = CL_MEM_HOST_READ_ONLY | CL_MEM_WRITE_ONLY;
    std::unique_ptr<Buffer> buffer(Buffer::create(&ctx, flags, MemoryConstants::pageSize, nullptr, retVal));

    EXPECT_EQ(nullptr, buffer.get());
}
|
|
|
|
class BufferTest : public DeviceFixture,
|
|
public testing::TestWithParam<uint64_t /*cl_mem_flags*/> {
|
|
public:
|
|
BufferTest() {
|
|
}
|
|
|
|
protected:
|
|
void SetUp() override {
|
|
flags = GetParam();
|
|
DeviceFixture::SetUp();
|
|
context.reset(new MockContext(pDevice));
|
|
}
|
|
|
|
void TearDown() override {
|
|
context.reset();
|
|
DeviceFixture::TearDown();
|
|
}
|
|
|
|
cl_int retVal = CL_SUCCESS;
|
|
std::unique_ptr<MockContext> context;
|
|
MemoryManager *contextMemoryManager;
|
|
cl_mem_flags flags = 0;
|
|
unsigned char pHostPtr[g_scTestBufferSizeInBytes];
|
|
};
|
|
|
|
// Alias for the buffer-creation tests that pass a nullptr host pointer.
typedef BufferTest NoHostPtr;
|
|
|
|
// Every valid no-host-ptr flag combination must create a buffer with a
// CPU-addressable backing store.
TEST_P(NoHostPtr, ValidFlags) {
    auto buffer = Buffer::create(context.get(), flags, g_scTestBufferSizeInBytes, nullptr, retVal);
    ASSERT_EQ(CL_SUCCESS, retVal);
    ASSERT_NE(nullptr, buffer);

    EXPECT_NE(nullptr, buffer->getCpuAddress());

    delete buffer;
}
|
|
|
|
// Stub out every per-core buffer factory to fail and verify Buffer::create
// surfaces the failure as nullptr. The factory table is restored afterwards
// so other tests are unaffected.
TEST_P(NoHostPtr, GivenNoHostPtrWhenHwBufferCreationFailsThenReturnNullptr) {
    BufferFuncs savedFuncs[IGFX_MAX_CORE];

    for (uint32_t core = 0; core < IGFX_MAX_CORE; core++) {
        savedFuncs[core] = bufferFactory[core];
        bufferFactory[core].createBufferFunction =
            [](Context *,
               cl_mem_flags,
               size_t,
               void *,
               void *,
               GraphicsAllocation *,
               bool,
               bool,
               bool) -> OCLRT::Buffer * { return nullptr; };
    }

    auto buffer = Buffer::create(context.get(), flags, g_scTestBufferSizeInBytes, nullptr, retVal);
    EXPECT_EQ(nullptr, buffer);

    for (uint32_t core = 0; core < IGFX_MAX_CORE; core++) {
        bufferFactory[core] = savedFuncs[core];
    }
}
|
|
|
|
// A fresh buffer carries a zeroed completion stamp, and setCompletionStamp
// must store all four fields verbatim.
TEST_P(NoHostPtr, completionStamp) {
    auto buffer = Buffer::create(context.get(), flags, g_scTestBufferSizeInBytes, nullptr, retVal);
    ASSERT_EQ(CL_SUCCESS, retVal);
    ASSERT_NE(nullptr, buffer);

    FlushStamp zeroFlushStamp = 0;
    EXPECT_EQ(0u, buffer->getCompletionStamp().taskCount);
    EXPECT_EQ(zeroFlushStamp, buffer->getCompletionStamp().flushStamp);
    EXPECT_EQ(0u, buffer->getCompletionStamp().deviceOrdinal);
    EXPECT_EQ(0u, buffer->getCompletionStamp().engineType);

    CompletionStamp stamp;
    stamp.taskCount = 42;
    stamp.deviceOrdinal = 43;
    stamp.engineType = EngineType::ENGINE_RCS;
    stamp.flushStamp = 5;
    buffer->setCompletionStamp(stamp, nullptr, nullptr);

    EXPECT_EQ(stamp.taskCount, buffer->getCompletionStamp().taskCount);
    EXPECT_EQ(stamp.flushStamp, buffer->getCompletionStamp().flushStamp);
    EXPECT_EQ(stamp.deviceOrdinal, buffer->getCompletionStamp().deviceOrdinal);
    EXPECT_EQ(stamp.engineType, buffer->getCompletionStamp().engineType);

    delete buffer;
}
|
|
|
|
// CL_MEM_USE_HOST_PTR with a null host pointer is invalid per the OpenCL
// spec and must yield CL_INVALID_HOST_PTR.
TEST_P(NoHostPtr, WithUseHostPtr_returnsError) {
    auto buffer = Buffer::create(context.get(),
                                 flags | CL_MEM_USE_HOST_PTR,
                                 g_scTestBufferSizeInBytes,
                                 nullptr,
                                 retVal);
    EXPECT_EQ(CL_INVALID_HOST_PTR, retVal);
    EXPECT_EQ(nullptr, buffer);

    delete buffer; // no-op on nullptr; kept for symmetry with sibling tests
}
|
|
|
|
// CL_MEM_COPY_HOST_PTR with a null host pointer is invalid per the OpenCL
// spec and must yield CL_INVALID_HOST_PTR.
TEST_P(NoHostPtr, WithCopyHostPtr_returnsError) {
    auto buffer = Buffer::create(context.get(),
                                 flags | CL_MEM_COPY_HOST_PTR,
                                 g_scTestBufferSizeInBytes,
                                 nullptr,
                                 retVal);
    EXPECT_EQ(CL_INVALID_HOST_PTR, retVal);
    EXPECT_EQ(nullptr, buffer);

    delete buffer; // no-op on nullptr; kept for symmetry with sibling tests
}
|
|
|
|
// The graphics allocation must be tagged BUFFER, and its WRITABLE tag must
// be cleared exactly when a read-only flag was requested.
TEST_P(NoHostPtr, withBufferGraphicsAllocationReportsBufferType) {
    auto buffer = Buffer::create(context.get(), flags, g_scTestBufferSizeInBytes, nullptr, retVal);
    ASSERT_EQ(CL_SUCCESS, retVal);
    ASSERT_NE(nullptr, buffer);

    auto &allocation = *buffer->getGraphicsAllocation();
    auto allocType = allocation.getAllocationType();

    EXPECT_TRUE(!!(allocType & GraphicsAllocation::AllocationType::BUFFER));

    auto writableTag = !!(allocType & GraphicsAllocation::AllocationType::WRITABLE);
    auto expectWritable = !(flags & (CL_MEM_READ_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS));
    EXPECT_EQ(expectWritable, writableTag);

    delete buffer;
}
|
|
|
|
// Parameterized test that tests buffer creation with all flags
// that should be valid with a nullptr host ptr
cl_mem_flags NoHostPtrFlags[] = {
    CL_MEM_READ_WRITE,
    CL_MEM_WRITE_ONLY,
    CL_MEM_READ_ONLY,
    CL_MEM_HOST_READ_ONLY,
    CL_MEM_HOST_WRITE_ONLY,
    CL_MEM_HOST_NO_ACCESS};

INSTANTIATE_TEST_CASE_P(
    BufferTest_Create,
    NoHostPtr,
    testing::ValuesIn(NoHostPtrFlags));
|
|
|
|
// Fixture combining BufferTest (flags param + context), the leak-checking
// MemoryManagementFixture, and PlatformFixture. SetUp order matters:
// memory-management bookkeeping first, then platform, then the base buffer
// fixture; TearDown unwinds in reverse.
struct ValidHostPtr
    : public BufferTest,
      public MemoryManagementFixture,
      public PlatformFixture {
    typedef BufferTest BaseClass;

    // Disambiguate the inherited SetUp/TearDown overload sets.
    using BufferTest::SetUp;
    using MemoryManagementFixture::SetUp;
    using PlatformFixture::SetUp;

    ValidHostPtr() {
    }

    void SetUp() override {
        MemoryManagementFixture::SetUp();
        PlatformFixture::SetUp();
        BaseClass::SetUp();

        auto pDevice = pPlatform->getDevice(0);
        ASSERT_NE(nullptr, pDevice);
    }

    void TearDown() override {
        // buffer is assigned by the test body; free it before the fixtures
        // unwind so the leak checker sees a balanced allocation count.
        delete buffer;
        BaseClass::TearDown();
        PlatformFixture::TearDown();
        MemoryManagementFixture::TearDown();
    }

    // Creates a buffer over pHostPtr using the parameterized flags;
    // result/error lands in retVal.
    Buffer *createBuffer() {
        return Buffer::create(
            context.get(),
            flags,
            g_scTestBufferSizeInBytes,
            pHostPtr,
            retVal);
    }

    cl_int retVal = CL_INVALID_VALUE;
    Buffer *buffer = nullptr;
};
|
|
|
|
// A freshly created buffer's allocation has not been made resident yet.
TEST_P(ValidHostPtr, isResident_defaultsToFalseAfterCreate) {
    buffer = createBuffer();
    ASSERT_NE(nullptr, buffer);

    EXPECT_FALSE(buffer->getGraphicsAllocation()->isResident());
}
|
|
|
|
// Residency is derived from residencyTaskCount: any task count means
// resident, ObjectNotResident means not.
TEST_P(ValidHostPtr, isResident_returnsValueFromSetResident) {
    buffer = createBuffer();
    ASSERT_NE(nullptr, buffer);

    auto allocation = buffer->getGraphicsAllocation();

    allocation->residencyTaskCount = 1;
    EXPECT_TRUE(allocation->isResident());

    allocation->residencyTaskCount = ObjectNotResident;
    EXPECT_FALSE(allocation->isResident());
}
|
|
|
|
// Verifies the CPU address contract: a zero-copy USE_HOST_PTR buffer
// aliases the host pointer, any other buffer gets distinct storage, and a
// COPY_HOST_PTR buffer contains a byte-for-byte copy of the host data.
TEST_P(ValidHostPtr, getAddress) {
    buffer = createBuffer();
    ASSERT_NE(nullptr, buffer);

    auto address = buffer->getCpuAddress();
    EXPECT_NE(nullptr, address);
    if (flags & CL_MEM_USE_HOST_PTR && buffer->isMemObjZeroCopy()) {
        // Buffer should use host ptr
        EXPECT_EQ(pHostPtr, address);
    } else {
        // Buffer should have a different ptr
        EXPECT_NE(pHostPtr, address);
    }

    if (flags & CL_MEM_COPY_HOST_PTR) {
        // Buffer should contain a copy of host memory.
        // Bug fix: the previous sizeof(g_scTestBufferSizeInBytes) compared
        // only sizeof(unsigned int) == 4 bytes; compare the full 16-byte
        // test buffer instead.
        EXPECT_EQ(0, memcmp(pHostPtr, address, g_scTestBufferSizeInBytes));
    }
}
|
|
|
|
// The buffer must report exactly the size it was created with.
TEST_P(ValidHostPtr, getSize) {
    buffer = createBuffer();
    ASSERT_NE(nullptr, buffer);

    EXPECT_EQ(g_scTestBufferSizeInBytes, buffer->getSize());
}
|
|
|
|
// A sub-buffer created with zero flags inherits the parent's flags and
// must succeed for every valid parent flag combination.
TEST_P(ValidHostPtr, givenValidHostPtrParentFlagsWhenSubBufferIsCreatedWithZeroFlagsThenItCreatesSuccesfuly) {
    auto retVal = CL_SUCCESS;
    auto clBuffer = clCreateBuffer(context.get(),
                                   flags,
                                   g_scTestBufferSizeInBytes,
                                   pHostPtr,
                                   &retVal);

    ASSERT_NE(nullptr, clBuffer);

    cl_buffer_region region = {0, g_scTestBufferSizeInBytes};

    auto subBuffer = clCreateSubBuffer(clBuffer,
                                       0,
                                       CL_BUFFER_CREATE_TYPE_REGION,
                                       &region,
                                       &retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);
    // Robustness fix: also check the returned handle (as the sibling
    // parent-flags test does) so a null sub-buffer fails here instead of
    // inside clReleaseMemObject.
    ASSERT_NE(nullptr, subBuffer);

    retVal = clReleaseMemObject(subBuffer);
    EXPECT_EQ(CL_SUCCESS, retVal);
    retVal = clReleaseMemObject(clBuffer);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
|
|
// Re-specifying the parent's own access flags on the sub-buffer is legal
// and must succeed.
TEST_P(ValidHostPtr, givenValidHostPtrParentFlagsWhenSubBufferIsCreatedWithParentFlagsThenItIsCreatedSuccesfuly) {
    auto retVal = CL_SUCCESS;
    auto clBuffer = clCreateBuffer(context.get(),
                                   flags,
                                   g_scTestBufferSizeInBytes,
                                   pHostPtr,
                                   &retVal);
    ASSERT_NE(nullptr, clBuffer);

    cl_buffer_region region = {0, g_scTestBufferSizeInBytes};

    // Keep only the access-related flags; host-pointer flags must not be
    // repeated on a sub-buffer.
    const cl_mem_flags accessFlagsMask =
        CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY |
        CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS;
    cl_mem_flags inheritedFlags = flags & accessFlagsMask;

    auto subBuffer = clCreateSubBuffer(clBuffer,
                                       inheritedFlags,
                                       CL_BUFFER_CREATE_TYPE_REGION,
                                       &region,
                                       &retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(nullptr, subBuffer);

    retVal = clReleaseMemObject(subBuffer);
    EXPECT_EQ(CL_SUCCESS, retVal);
    retVal = clReleaseMemObject(clBuffer);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
|
|
|
|
// Derive a flag set that conflicts with the parent's access flags;
// sub-buffer creation with conflicting flags must fail.
TEST_P(ValidHostPtr, givenValidHostPtrParentFlagsWhenSubBufferIsCreatedWithInvalidParentFlagsThenCreationFails) {
    auto retVal = CL_SUCCESS;

    cl_mem_flags conflictingFlags = 0;
    if (flags & CL_MEM_READ_ONLY) {
        conflictingFlags |= CL_MEM_WRITE_ONLY;
    }
    if (flags & CL_MEM_WRITE_ONLY) {
        conflictingFlags |= CL_MEM_READ_ONLY;
    }
    if (flags & CL_MEM_HOST_NO_ACCESS) {
        conflictingFlags |= CL_MEM_HOST_READ_ONLY;
    }
    if (flags & CL_MEM_HOST_READ_ONLY) {
        conflictingFlags |= CL_MEM_HOST_WRITE_ONLY;
    }
    if (flags & CL_MEM_HOST_WRITE_ONLY) {
        conflictingFlags |= CL_MEM_HOST_READ_ONLY;
    }
    if (conflictingFlags == 0) {
        // Nothing to conflict with for this parameter set.
        return;
    }

    auto clBuffer = clCreateBuffer(context.get(),
                                   flags,
                                   g_scTestBufferSizeInBytes,
                                   pHostPtr,
                                   &retVal);
    ASSERT_NE(nullptr, clBuffer);

    cl_buffer_region region = {0, g_scTestBufferSizeInBytes};
    auto subBuffer = clCreateSubBuffer(clBuffer,
                                       conflictingFlags,
                                       CL_BUFFER_CREATE_TYPE_REGION,
                                       &region,
                                       &retVal);
    EXPECT_NE(CL_SUCCESS, retVal);
    EXPECT_EQ(nullptr, subBuffer);

    retVal = clReleaseMemObject(clBuffer);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
|
|
|
|
// Drives createBuffer through the failure-injection harness: for every
// injected allocation failure, creation must cleanly return nullptr with
// CL_OUT_OF_HOST_MEMORY; the pass with no injected failure must succeed.
TEST_P(ValidHostPtr, failedAllocationInjection) {
    InjectedFunction method = [this](size_t failureIndex) {
        // Release any buffer from the previous iteration.
        delete buffer;
        buffer = nullptr;

        // System under test
        buffer = createBuffer();

        if (nonfailingAllocation == failureIndex) {
            EXPECT_EQ(CL_SUCCESS, retVal);
            EXPECT_NE(nullptr, buffer);
        } else {
            EXPECT_EQ(CL_OUT_OF_HOST_MEMORY, retVal) << "for allocation " << failureIndex;
            EXPECT_EQ(nullptr, buffer);
        }
    };
    injectFailures(method);
}
|
|
|
|
// A USE_HOST_PTR buffer over SVM memory must wrap the existing SVM
// allocation rather than create a new one. Skipped when the device
// reports no SVM capabilities.
TEST_P(ValidHostPtr, SvmHostPtr) {
    const DeviceInfo &devInfo = pPlatform->getDevice(0)->getDeviceInfo();
    if (devInfo.svmCapabilities != 0) {
        auto svmPtr = clSVMAlloc(context.get(), CL_MEM_READ_WRITE, 64, 64);

        auto bufferSvm = Buffer::create(context.get(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, 64, svmPtr, retVal);
        EXPECT_NE(nullptr, bufferSvm);
        EXPECT_TRUE(bufferSvm->isMemObjWithHostPtrSVM());
        EXPECT_EQ(context->getSVMAllocsManager()->getSVMAlloc(svmPtr), bufferSvm->getGraphicsAllocation());
        EXPECT_EQ(CL_SUCCESS, retVal);

        clSVMFree(context.get(), svmPtr);
        delete bufferSvm;
    }
}
|
|
|
|
// Parameterized test that tests buffer creation with all flags that should be
// valid with a valid host ptr
cl_mem_flags ValidHostPtrFlags[] = {
    0 | CL_MEM_USE_HOST_PTR,
    CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR,
    CL_MEM_WRITE_ONLY | CL_MEM_USE_HOST_PTR,
    CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
    CL_MEM_HOST_READ_ONLY | CL_MEM_USE_HOST_PTR,
    CL_MEM_HOST_WRITE_ONLY | CL_MEM_USE_HOST_PTR,
    CL_MEM_HOST_NO_ACCESS | CL_MEM_USE_HOST_PTR,
    0 | CL_MEM_COPY_HOST_PTR,
    CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
    CL_MEM_WRITE_ONLY | CL_MEM_COPY_HOST_PTR,
    CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
    CL_MEM_HOST_READ_ONLY | CL_MEM_COPY_HOST_PTR,
    CL_MEM_HOST_WRITE_ONLY | CL_MEM_COPY_HOST_PTR,
    CL_MEM_HOST_NO_ACCESS | CL_MEM_COPY_HOST_PTR};

INSTANTIATE_TEST_CASE_P(
    BufferTest_Create,
    ValidHostPtr,
    testing::ValuesIn(ValidHostPtrFlags));
|
|
|
|
// Parameterized over (origin[3], region[3], rowPitch, slicePitch,
// expected hostPtrSize) for Buffer::calculateHostPtrSize.
class BufferCalculateHostPtrSize : public testing::TestWithParam<std::tuple<size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t>> {
  protected:
    void SetUp() override {
        // Unpack the nine-element tuple into the named members.
        std::tie(origin[0], origin[1], origin[2], region[0], region[1], region[2], rowPitch, slicePitch, hostPtrSize) = GetParam();
    }

    size_t origin[3];
    size_t region[3];
    size_t rowPitch;
    size_t slicePitch;
    size_t hostPtrSize;
};
|
|
/* origin, region, rowPitch, slicePitch, hostPtrSize*/
// Each row: 3 origin components, 3 region components, row pitch,
// slice pitch, and the expected host-pointer size for that shape.
static std::tuple<size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t> Inputs[] = {std::make_tuple(0, 0, 0, 1, 1, 1, 10, 1, 1),
                                                                                                      std::make_tuple(0, 0, 0, 7, 1, 1, 10, 1, 7),
                                                                                                      std::make_tuple(0, 0, 0, 7, 3, 1, 10, 1, 27),
                                                                                                      std::make_tuple(0, 0, 0, 7, 1, 3, 10, 10, 27),
                                                                                                      std::make_tuple(0, 0, 0, 7, 2, 3, 10, 20, 57),
                                                                                                      std::make_tuple(0, 0, 0, 7, 1, 3, 10, 30, 67),
                                                                                                      std::make_tuple(0, 0, 0, 7, 2, 3, 10, 30, 77),
                                                                                                      std::make_tuple(9, 0, 0, 1, 1, 1, 10, 1, 10),
                                                                                                      std::make_tuple(0, 2, 0, 7, 3, 1, 10, 1, 27 + 20),
                                                                                                      std::make_tuple(0, 0, 1, 7, 1, 3, 10, 10, 27 + 10),
                                                                                                      std::make_tuple(0, 2, 1, 7, 2, 3, 10, 20, 57 + 40),
                                                                                                      std::make_tuple(1, 1, 1, 7, 1, 3, 10, 30, 67 + 41),
                                                                                                      std::make_tuple(2, 0, 2, 7, 2, 3, 10, 30, 77 + 62)};
|
|
|
|
// calculateHostPtrSize must reproduce the precomputed expectation for
// each parameter row.
TEST_P(BufferCalculateHostPtrSize, CheckReturnedSize) {
    size_t computedSize = Buffer::calculateHostPtrSize(origin, region, rowPitch, slicePitch);
    EXPECT_EQ(hostPtrSize, computedSize);
}
|
|
|
|
// Instantiate the host-ptr size calculation tests over the table above.
INSTANTIATE_TEST_CASE_P(
    BufferCalculateHostPtrSizes,
    BufferCalculateHostPtrSize,
    testing::ValuesIn(Inputs));
|
|
|
|
// Under forced 32-bit addressing, a zero-copy USE_HOST_PTR buffer must hand
// back the original (offset) host pointer for both mapping and transfers.
TEST(Buffers64on32Tests, given32BitBufferCreatedWithUseHostPtrFlagThatIsZeroCopyWhenAskedForStorageThenHostPtrIsReturned) {
    DebugManagerStateRestore dbgRestorer;
    {
        DebugManager.flags.Force32bitAddressing.set(true);
        MockContext context;

        auto bufferSize = MemoryConstants::pageSize;
        void *basePtr = (void *)0x1000;
        auto ptrOffset = MemoryConstants::cacheLineSize;
        uintptr_t offsetedPtr = (uintptr_t)basePtr + ptrOffset;
        auto retVal = CL_SUCCESS;

        auto buffer = Buffer::create(
            &context,
            CL_MEM_USE_HOST_PTR,
            bufferSize,
            (void *)offsetedPtr,
            retVal);
        EXPECT_EQ(CL_SUCCESS, retVal);

        EXPECT_TRUE(buffer->isMemObjZeroCopy());
        EXPECT_EQ((void *)offsetedPtr, buffer->getCpuAddressForMapping());
        EXPECT_EQ((void *)offsetedPtr, buffer->getCpuAddressForMemoryTransfer());

        delete buffer;
        DebugManager.flags.Force32bitAddressing.set(false);
    }
}
|
|
|
|
// Under forced 32-bit addressing, a zero-copy ALLOC_HOST_PTR buffer must
// report its own CPU storage for both mapping and transfers.
TEST(Buffers64on32Tests, given32BitBufferCreatedWithAllocHostPtrFlagThatIsZeroCopyWhenAskedForStorageThenStorageIsEqualToMemoryStorage) {
    DebugManagerStateRestore dbgRestorer;
    {
        DebugManager.flags.Force32bitAddressing.set(true);
        MockContext context;
        auto bufferSize = MemoryConstants::pageSize;
        auto retVal = CL_SUCCESS;

        auto buffer = Buffer::create(
            &context,
            CL_MEM_ALLOC_HOST_PTR,
            bufferSize,
            nullptr,
            retVal);
        EXPECT_EQ(CL_SUCCESS, retVal);

        EXPECT_TRUE(buffer->isMemObjZeroCopy());
        EXPECT_EQ(buffer->getCpuAddress(), buffer->getCpuAddressForMapping());
        EXPECT_EQ(buffer->getCpuAddress(), buffer->getCpuAddressForMemoryTransfer());

        delete buffer;
        DebugManager.flags.Force32bitAddressing.set(false);
    }
}
|
|
|
|
// A misaligned USE_HOST_PTR buffer under forced 32-bit addressing is not
// zero-copy: mapping still reports the user's pointer, but transfers use
// the buffer's own storage.
TEST(Buffers64on32Tests, given32BitBufferThatIsCreatedWithUseHostPtrButIsNotZeroCopyThenProperPointersAreReturned) {
    DebugManagerStateRestore dbgRestorer;
    {
        DebugManager.flags.Force32bitAddressing.set(true);
        MockContext context;

        auto bufferSize = MemoryConstants::pageSize;
        void *basePtr = (void *)alignedMalloc(bufferSize * 2, MemoryConstants::pageSize);
        auto ptrOffset = 1; // break cacheline alignment to defeat zero-copy
        uintptr_t offsetedPtr = (uintptr_t)basePtr + ptrOffset;
        auto retVal = CL_SUCCESS;

        auto buffer = Buffer::create(
            &context,
            CL_MEM_USE_HOST_PTR,
            bufferSize,
            (void *)offsetedPtr,
            retVal);
        EXPECT_EQ(CL_SUCCESS, retVal);

        EXPECT_FALSE(buffer->isMemObjZeroCopy());
        EXPECT_EQ((void *)offsetedPtr, buffer->getCpuAddressForMapping());
        EXPECT_EQ(buffer->getCpuAddress(), buffer->getCpuAddressForMemoryTransfer());

        delete buffer;
        DebugManager.flags.Force32bitAddressing.set(false);
        alignedFree(basePtr);
    }
}
|
|
|
|
// A buffer built via createSharedBuffer must expose the sharing handler it
// was created with; release() is expected to clean up — TODO confirm it
// frees the handler/allocation (the test does not free them explicitly).
TEST(SharedBuffersTest, whenBuffersIsCreatedWithSharingHandlerThenItIsSharedBuffer) {
    MockContext context;
    auto memoryManager = context.getDevice(0)->getMemoryManager();
    auto sharingHandler = new SharingHandler();
    auto allocation = memoryManager->allocateGraphicsMemory(4096);

    auto buffer = Buffer::createSharedBuffer(&context, CL_MEM_READ_ONLY, sharingHandler, allocation);
    ASSERT_NE(nullptr, buffer);
    EXPECT_EQ(sharingHandler, buffer->peekSharingHandler());
    buffer->release();
}
|
|
|
|
class BufferTests : public ::testing::Test {
|
|
protected:
|
|
void SetUp() override {
|
|
device.reset(MockDevice::createWithNewExecutionEnvironment<MockDevice>(*platformDevices));
|
|
}
|
|
void TearDown() override {
|
|
}
|
|
std::unique_ptr<Device> device;
|
|
};
|
|
|
|
// Alias for the Buffer::setSurfaceState / setArgStateful tests below.
typedef BufferTests BufferSetSurfaceTests;
|
|
|
|
// Cacheline-aligned pointer and size: the surface gets the regular
// OCL-buffer MOCS (L3-cacheable).
HWTEST_F(BufferSetSurfaceTests, givenBufferSetSurfaceThatMemoryPtrAndSizeIsAlignedToCachelineThenL3CacheShouldBeOn) {
    auto bufferSize = MemoryConstants::pageSize;
    auto alignedPtr = (void *)alignedMalloc(bufferSize * 2, MemoryConstants::pageSize);

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState = {};

    Buffer::setSurfaceState(device.get(), &surfaceState, bufferSize, alignedPtr);

    EXPECT_EQ(GmmHelper::getMOCS(GMM_RESOURCE_USAGE_OCL_BUFFER), surfaceState.getMemoryObjectControlState());

    alignedFree(alignedPtr);
}
|
|
|
|
// Cacheline-misaligned pointer: the surface gets the misaligned-buffer
// MOCS (L3 off).
HWTEST_F(BufferSetSurfaceTests, givenBufferSetSurfaceThatMemoryPtrIsUnalignedToCachelineThenL3CacheShouldBeOff) {
    auto bufferSize = MemoryConstants::pageSize;
    auto basePtr = alignedMalloc(bufferSize * 2, MemoryConstants::pageSize);
    auto misalignedPtr = (void *)((uintptr_t)basePtr + 1);

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState = {};

    Buffer::setSurfaceState(device.get(), &surfaceState, bufferSize, misalignedPtr);

    EXPECT_EQ(GmmHelper::getMOCS(GMM_RESOURCE_USAGE_OCL_BUFFER_CACHELINE_MISALIGNED), surfaceState.getMemoryObjectControlState());

    alignedFree(basePtr);
}
|
|
|
|
// Cacheline-misaligned size: the surface gets the misaligned-buffer MOCS
// (L3 off) even though the pointer itself is aligned.
HWTEST_F(BufferSetSurfaceTests, givenBufferSetSurfaceThatMemorySizeIsUnalignedToCachelineThenL3CacheShouldBeOff) {
    auto bufferSize = MemoryConstants::pageSize;
    auto alignedPtr = alignedMalloc(bufferSize * 2, MemoryConstants::pageSize);
    auto misalignedSize = bufferSize + 1;

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState = {};

    Buffer::setSurfaceState(device.get(), &surfaceState, misalignedSize, alignedPtr);

    EXPECT_EQ(GmmHelper::getMOCS(GMM_RESOURCE_USAGE_OCL_BUFFER_CACHELINE_MISALIGNED), surfaceState.getMemoryObjectControlState());

    alignedFree(alignedPtr);
}
|
|
|
|
// A misaligned size is tolerated for read-only buffers: the regular
// OCL-buffer MOCS (L3 on) is still used.
HWTEST_F(BufferSetSurfaceTests, givenBufferSetSurfaceThatMemoryIsUnalignedToCachelineButReadOnlyThenL3CacheShouldBeStillOn) {
    auto bufferSize = MemoryConstants::pageSize;
    auto alignedPtr = alignedMalloc(bufferSize * 2, MemoryConstants::pageSize);
    auto misalignedSize = bufferSize + 1;

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState = {};

    Buffer::setSurfaceState(
        device.get(),
        &surfaceState,
        misalignedSize,
        alignedPtr,
        nullptr,
        CL_MEM_READ_ONLY);

    EXPECT_EQ(GmmHelper::getMOCS(GMM_RESOURCE_USAGE_OCL_BUFFER), surfaceState.getMemoryObjectControlState());

    alignedFree(alignedPtr);
}
|
|
|
|
// For a size that is not a multiple of four, the programmed surface width
// must come out four-byte aligned.
HWTEST_F(BufferSetSurfaceTests, givenBufferSetSurfaceThatMemorySizeIsUnalignedThenSurfaceSizeShouldBeAlignedToFour) {
    auto bufferSize = MemoryConstants::pageSize;
    auto alignedPtr = alignedMalloc(bufferSize * 2, MemoryConstants::pageSize);
    auto misalignedSize = bufferSize + 1;

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState = {};

    Buffer::setSurfaceState(device.get(), &surfaceState, misalignedSize, alignedPtr);

    auto programmedWidth = surfaceState.getWidth();
    EXPECT_EQ(alignUp(programmedWidth, 4), programmedWidth);

    alignedFree(alignedPtr);
}
|
|
|
|
// A non-null pointer must be programmed as a BUFFER surface.
HWTEST_F(BufferSetSurfaceTests, givenBufferSetSurfaceThatMemoryPtrIsNotNullThenBufferSurfaceShouldBeUsed) {
    auto bufferSize = MemoryConstants::pageSize;
    auto alignedPtr = alignedMalloc(bufferSize * 2, MemoryConstants::pageSize);

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState = {};

    Buffer::setSurfaceState(device.get(), &surfaceState, bufferSize, alignedPtr);

    EXPECT_EQ(RENDER_SURFACE_STATE::SURFACE_TYPE_SURFTYPE_BUFFER, surfaceState.getSurfaceType());

    alignedFree(alignedPtr);
}
|
|
|
|
// A null pointer (and zero size) must be programmed as a NULL surface.
HWTEST_F(BufferSetSurfaceTests, givenBufferSetSurfaceThatMemoryPtrIsNullThenNullSurfaceShouldBeUsed) {
    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState = {};

    Buffer::setSurfaceState(device.get(), &surfaceState, 0, nullptr);

    EXPECT_EQ(RENDER_SURFACE_STATE::SURFACE_TYPE_SURFTYPE_NULL, surfaceState.getSurfaceType());
}
|
|
|
|
// With forced 32-bit addressing, setArgStateful must program the surface
// base address with the allocation's GPU address.
HWTEST_F(BufferSetSurfaceTests, givenBufferSetSurfaceThatAddressIsForcedTo32bitWhenSetArgStatefulIsCalledThenSurfaceBaseAddressIsPopulatedWithGpuAddress) {
    DebugManagerStateRestore dbgRestorer;
    {
        DebugManager.flags.Force32bitAddressing.set(true);
        MockContext context;
        auto bufferSize = MemoryConstants::pageSize;
        auto hostPtr = (void *)alignedMalloc(bufferSize * 2, MemoryConstants::pageSize);
        auto retVal = CL_SUCCESS;

        auto buffer = Buffer::create(
            &context,
            CL_MEM_USE_HOST_PTR,
            bufferSize,
            hostPtr,
            retVal);
        EXPECT_EQ(CL_SUCCESS, retVal);

        // On 64-bit builds the forced mode must produce a 32-bit allocation.
        EXPECT_TRUE(is64bit ? buffer->getGraphicsAllocation()->is32BitAllocation : true);

        using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
        RENDER_SURFACE_STATE surfaceState = {};

        buffer->setArgStateful(&surfaceState);

        EXPECT_EQ(buffer->getGraphicsAllocation()->getGpuAddress(), surfaceState.getSurfaceBaseAddress());

        delete buffer;
        alignedFree(hostPtr);
        DebugManager.flags.Force32bitAddressing.set(false);
    }
}
|
|
|
|
HWTEST_F(BufferSetSurfaceTests, givenBufferWithOffsetWhenSetArgStatefulIsCalledThenSurfaceBaseAddressIsProperlyOffseted) {
    MockContext context;
    auto size = MemoryConstants::pageSize;
    auto ptr = alignedMalloc(size * 2, MemoryConstants::pageSize);
    auto retVal = CL_SUCCESS;

    // Own the buffer with unique_ptr (matching the other tests in this file)
    // instead of a raw delete, so it is released even on a fatal expectation.
    std::unique_ptr<Buffer> buffer(Buffer::create(
        &context,
        CL_MEM_USE_HOST_PTR,
        size,
        ptr,
        retVal));
    EXPECT_EQ(CL_SUCCESS, retVal);

    // Create a sub-buffer at a non-zero origin inside the parent buffer.
    cl_buffer_region region = {4, 8};
    retVal = -1;
    auto subBuffer = buffer->createSubBuffer(CL_MEM_READ_WRITE, &region, retVal);
    ASSERT_NE(nullptr, subBuffer);
    ASSERT_EQ(CL_SUCCESS, retVal);

    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState = {};

    subBuffer->setArgStateful(&surfaceState);

    // The sub-buffer's surface base address must be the parent's GPU address
    // shifted by the region origin.
    auto surfBaseAddress = surfaceState.getSurfaceBaseAddress();
    auto bufferAddress = buffer->getGraphicsAllocation()->getGpuAddress();
    EXPECT_EQ(bufferAddress + region.origin, surfBaseAddress);

    subBuffer->release();

    // NOTE(review): removed a leftover "DebugManager.flags.Force32bitAddressing.set(false)"
    // copied from the 32-bit addressing test - this test never sets that flag.
    // Destroy the buffer before freeing the USE_HOST_PTR memory it wraps.
    buffer.reset();
    alignedFree(ptr);
}
|
|
|
|
HWTEST_F(BufferSetSurfaceTests, givenRenderCompressedGmmResourceWhenSurfaceStateIsProgrammedThenSetAuxParams) {
    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    using AUXILIARY_SURFACE_MODE = typename RENDER_SURFACE_STATE::AUXILIARY_SURFACE_MODE;

    MockContext context;
    auto retVal = CL_SUCCESS;
    RENDER_SURFACE_STATE surfaceState = {};

    std::unique_ptr<Buffer> buffer(Buffer::create(&context, CL_MEM_READ_WRITE, 1, nullptr, retVal));

    // Attach a render-compressed gmm to the allocation (presumably the
    // allocation's owner releases it - TODO confirm ownership).
    auto compressedGmm = new Gmm(nullptr, 1, false);
    compressedGmm->isRenderCompressed = true;
    buffer->getGraphicsAllocation()->gmm = compressedGmm;

    buffer->setArgStateful(&surfaceState);

    // Compressed resources must be programmed with CCS_E aux mode and
    // GPU coherency.
    EXPECT_EQ(0u, surfaceState.getAuxiliarySurfaceBaseAddress());
    EXPECT_TRUE(AUXILIARY_SURFACE_MODE::AUXILIARY_SURFACE_MODE_AUX_CCS_E == surfaceState.getAuxiliarySurfaceMode());
    EXPECT_TRUE(RENDER_SURFACE_STATE::COHERENCY_TYPE_GPU_COHERENT == surfaceState.getCoherencyType());
}
|
|
|
|
HWTEST_F(BufferSetSurfaceTests, givenNonRenderCompressedGmmResourceWhenSurfaceStateIsProgrammedThenDontSetAuxParams) {
    using RENDER_SURFACE_STATE = typename FamilyType::RENDER_SURFACE_STATE;
    using AUXILIARY_SURFACE_MODE = typename RENDER_SURFACE_STATE::AUXILIARY_SURFACE_MODE;

    MockContext context;
    auto retVal = CL_SUCCESS;
    RENDER_SURFACE_STATE surfaceState = {};

    std::unique_ptr<Buffer> buffer(Buffer::create(&context, CL_MEM_READ_WRITE, 1, nullptr, retVal));

    // Attach an uncompressed gmm to the allocation (presumably the
    // allocation's owner releases it - TODO confirm ownership).
    auto uncompressedGmm = new Gmm(nullptr, 1, false);
    uncompressedGmm->isRenderCompressed = false;
    buffer->getGraphicsAllocation()->gmm = uncompressedGmm;

    buffer->setArgStateful(&surfaceState);

    // Without compression no aux surface may be programmed and the surface
    // stays IA coherent.
    EXPECT_EQ(0u, surfaceState.getAuxiliarySurfaceBaseAddress());
    EXPECT_TRUE(AUXILIARY_SURFACE_MODE::AUXILIARY_SURFACE_MODE_AUX_NONE == surfaceState.getAuxiliarySurfaceMode());
    EXPECT_TRUE(RENDER_SURFACE_STATE::COHERENCY_TYPE_IA_COHERENT == surfaceState.getCoherencyType());
}
|
|
|
|
// Test fixture providing a device (via DeviceFixture) for buffer map/unmap
// and host-transfer tests below; it only forwards SetUp/TearDown.
struct BufferUnmapTest : public DeviceFixture, public ::testing::Test {
    void SetUp() override {
        DeviceFixture::SetUp();
    }
    void TearDown() override {
        DeviceFixture::TearDown();
    }
};
|
|
|
|
HWTEST_F(BufferUnmapTest, givenBufferWithSharingHandlerWhenUnmappingThenUseEnqueueWriteBuffer) {
    MockContext context(pDevice);
    MockCommandQueueHw<FamilyType> cmdQ(&context, pDevice, nullptr);

    auto status = CL_SUCCESS;
    std::unique_ptr<Buffer> buffer(Buffer::create(&context, CL_MEM_ALLOC_HOST_PTR, 123, nullptr, status));
    EXPECT_EQ(CL_SUCCESS, status);

    // Give the buffer a sharing handler: shared buffers must be written back
    // through enqueueWriteBuffer on unmap.
    buffer->setSharingHandler(new SharingHandler());
    EXPECT_NE(nullptr, buffer->peekSharingHandler());

    auto mapPtr = clEnqueueMapBuffer(&cmdQ, buffer.get(), CL_TRUE, CL_MAP_WRITE, 0, 1, 0, nullptr, nullptr, &status);
    EXPECT_EQ(CL_SUCCESS, status);
    EXPECT_EQ(0u, cmdQ.EnqueueWriteBufferCounter);

    status = clEnqueueUnmapMemObject(&cmdQ, buffer.get(), mapPtr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, status);

    // The unmap must have issued exactly one blocking write-buffer.
    EXPECT_EQ(1u, cmdQ.EnqueueWriteBufferCounter);
    EXPECT_TRUE(cmdQ.blockingWriteBuffer);
}
|
|
|
|
HWTEST_F(BufferUnmapTest, givenBufferWithoutSharingHandlerWhenUnmappingThenDontUseEnqueueWriteBuffer) {
    MockContext context(pDevice);
    MockCommandQueueHw<FamilyType> cmdQ(&context, pDevice, nullptr);

    auto status = CL_SUCCESS;
    std::unique_ptr<Buffer> buffer(Buffer::create(&context, CL_MEM_ALLOC_HOST_PTR, 123, nullptr, status));
    EXPECT_EQ(CL_SUCCESS, status);

    // No sharing handler attached - unmap should not need a write-back.
    EXPECT_EQ(nullptr, buffer->peekSharingHandler());

    auto mapPtr = clEnqueueMapBuffer(&cmdQ, buffer.get(), CL_TRUE, CL_MAP_READ, 0, 1, 0, nullptr, nullptr, &status);
    EXPECT_EQ(CL_SUCCESS, status);

    status = clEnqueueUnmapMemObject(&cmdQ, buffer.get(), mapPtr, 0, nullptr, nullptr);
    EXPECT_EQ(CL_SUCCESS, status);

    // No enqueueWriteBuffer may have been issued.
    EXPECT_EQ(0u, cmdQ.EnqueueWriteBufferCounter);
}
|
|
|
|
// The host-ptr transfer tests reuse the device setup from BufferUnmapTest.
using BufferTransferTests = BufferUnmapTest;
|
|
|
|
TEST_F(BufferTransferTests, givenBufferWhenTransferToHostPtrCalledThenCopyRequestedSizeAndOffsetOnly) {
    MockContext context(pDevice);
    auto retVal = CL_SUCCESS;
    const size_t bufferSize = 100;
    size_t ignoredParam = 123;
    // Only dimension 0 is meaningful for a buffer transfer.
    MemObjOffsetArray copyOffset = {{20, ignoredParam, ignoredParam}};
    MemObjSizeArray copySize = {{10, ignoredParam, ignoredParam}};

    uint8_t hostPtr[bufferSize] = {};
    uint8_t expectedHostPtr[bufferSize] = {};
    std::unique_ptr<Buffer> buffer(Buffer::create(&context, CL_MEM_USE_HOST_PTR, bufferSize, hostPtr, retVal));
    EXPECT_EQ(CL_SUCCESS, retVal);

    auto srcPtr = buffer->getCpuAddress();
    EXPECT_NE(srcPtr, hostPtr);
    // Fill the whole buffer; only [copyOffset[0], copyOffset[0]+copySize[0])
    // is expected to land in hostPtr.
    memset(srcPtr, 123, bufferSize);
    memset(ptrOffset(expectedHostPtr, copyOffset[0]), 123, copySize[0]);

    buffer->transferDataToHostPtr(copySize, copyOffset);

    // Compare the full array: the original compared only copySize[0] bytes,
    // i.e. bytes *before* the copied region, making the check vacuous.
    // Comparing bufferSize verifies both the copied range and that bytes
    // outside it were left untouched.
    EXPECT_TRUE(memcmp(hostPtr, expectedHostPtr, bufferSize) == 0);
}
|
|
|
|
TEST_F(BufferTransferTests, givenBufferWhenTransferFromHostPtrCalledThenCopyRequestedSizeAndOffsetOnly) {
    MockContext context(pDevice);
    auto retVal = CL_SUCCESS;
    const size_t bufferSize = 100;
    size_t ignoredParam = 123;
    // Only dimension 0 is meaningful for a buffer transfer.
    MemObjOffsetArray copyOffset = {{20, ignoredParam, ignoredParam}};
    MemObjSizeArray copySize = {{10, ignoredParam, ignoredParam}};

    uint8_t hostPtr[bufferSize] = {};
    uint8_t expectedBufferMemory[bufferSize] = {};
    std::unique_ptr<Buffer> buffer(Buffer::create(&context, CL_MEM_USE_HOST_PTR, bufferSize, hostPtr, retVal));
    EXPECT_EQ(CL_SUCCESS, retVal);

    EXPECT_NE(buffer->getCpuAddress(), hostPtr);
    // Fill the whole host pointer; only [copyOffset[0], copyOffset[0]+copySize[0])
    // is expected to land in the buffer's memory.
    memset(hostPtr, 123, bufferSize);
    memset(ptrOffset(expectedBufferMemory, copyOffset[0]), 123, copySize[0]);

    buffer->transferDataFromHostPtr(copySize, copyOffset);

    // Compare the full array: the original compared only copySize[0] bytes,
    // i.e. bytes *before* the copied region, making the check vacuous.
    // Comparing bufferSize verifies both the copied range and that bytes
    // outside it were left untouched.
    EXPECT_TRUE(memcmp(expectedBufferMemory, buffer->getCpuAddress(), bufferSize) == 0);
}
|