2017-12-21 07:45:38 +08:00
|
|
|
/*
|
2020-02-22 16:28:27 +08:00
|
|
|
* Copyright (C) 2017-2020 Intel Corporation
|
2017-12-21 07:45:38 +08:00
|
|
|
*
|
2018-09-18 15:11:08 +08:00
|
|
|
* SPDX-License-Identifier: MIT
|
2017-12-21 07:45:38 +08:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2020-02-24 05:44:01 +08:00
|
|
|
#include "shared/source/command_stream/linear_stream.h"
|
|
|
|
#include "shared/source/helpers/aligned_memory.h"
|
2020-10-07 21:09:42 +08:00
|
|
|
#include "shared/source/helpers/local_id_gen.h"
|
2020-11-02 22:54:01 +08:00
|
|
|
#include "shared/test/unit_test/mocks/mock_graphics_allocation.h"
|
2020-02-24 17:22:30 +08:00
|
|
|
|
2020-02-23 05:50:57 +08:00
|
|
|
#include "opencl/source/helpers/per_thread_data.h"
|
|
|
|
#include "opencl/source/program/kernel_info.h"
|
2020-05-28 20:05:12 +08:00
|
|
|
#include "opencl/test/unit_test/fixtures/cl_device_fixture.h"
|
2019-02-27 18:39:32 +08:00
|
|
|
#include "test.h"
|
|
|
|
|
2017-12-21 07:45:38 +08:00
|
|
|
#include "patch_shared.h"
|
|
|
|
|
2019-03-26 18:59:46 +08:00
|
|
|
using namespace NEO;
|
2017-12-21 07:45:38 +08:00
|
|
|
|
|
|
|
// Fixture template for PerThreadDataHelper tests.
//
// The boolean template parameters select which local-ID components (X/Y/Z)
// and the flattened local ID are advertised as present in the kernel's
// thread payload. When none of them are present, the payload instead flags
// the unused per-thread constant as present.
template <bool localIdX = true, bool localIdY = true, bool localIdZ = true, bool flattenedId = false>
struct PerThreadDataTests : public ClDeviceFixture,
                            ::testing::Test {
    void SetUp() override {
        ClDeviceFixture::SetUp();

        // Describe which per-thread components the payload carries,
        // driven by the template flags.
        threadPayload = {};
        threadPayload.LocalIDXPresent = localIdX ? 1 : 0;
        threadPayload.LocalIDYPresent = localIdY ? 1 : 0;
        threadPayload.LocalIDZPresent = localIdZ ? 1 : 0;
        threadPayload.LocalIDFlattenedPresent = flattenedId;
        // Exactly one layout is described: constant slot only when no IDs.
        threadPayload.UnusedPerThreadConstantPresent =
            !(localIdX || localIdY || localIdZ || flattenedId);

        // Execution environment reports a kernel compiled for SIMD32.
        executionEnvironment = {};
        executionEnvironment.CompiledSIMD32 = 1;
        executionEnvironment.LargestCompiledSIMDSize = 32;

        // Wire the dummy ISA and the patch tokens into the kernel info.
        kernelInfo.heapInfo.pKernelHeap = kernelIsa;
        kernelInfo.heapInfo.KernelHeapSize = sizeof(kernelIsa);
        kernelInfo.patchInfo.executionEnvironment = &executionEnvironment;
        kernelInfo.patchInfo.threadPayload = &threadPayload;

        // Convenience values derived from the structures configured above.
        simd = executionEnvironment.LargestCompiledSIMDSize;
        numChannels = threadPayload.LocalIDXPresent +
                      threadPayload.LocalIDYPresent +
                      threadPayload.LocalIDZPresent;
        grfSize = 32;

        // Backing storage for the indirect heap used by the send tests;
        // 32-byte alignment is requested and asserted below.
        indirectHeapMemorySize = 4096;
        indirectHeapMemory = reinterpret_cast<uint8_t *>(alignedMalloc(indirectHeapMemorySize, 32));
        ASSERT_TRUE(isAligned<32>(indirectHeapMemory));
    }

    void TearDown() override {
        alignedFree(indirectHeapMemory);
        ClDeviceFixture::TearDown();
    }

    // Identity walk order (X, then Y, then Z) shared by all send tests.
    const std::array<uint8_t, 3> workgroupWalkOrder = {{0, 1, 2}};
    uint32_t simd;               // SIMD width (taken from LargestCompiledSIMDSize)
    uint32_t grfSize;            // GRF size in bytes
    uint32_t numChannels;        // number of local-ID channels marked present
    uint32_t kernelIsa[32];      // dummy kernel heap contents
    uint8_t *indirectHeapMemory; // 32-byte-aligned backing store, freed in TearDown
    size_t indirectHeapMemorySize;

    SKernelBinaryHeaderCommon kernelHeader;
    SPatchThreadPayload threadPayload;
    SPatchExecutionEnvironment executionEnvironment;
    KernelInfo kernelInfo;
};
|
|
|
|
|
|
|
|
// Fixture with all three local-ID channels present (the defaults).
using PerThreadDataXYZTests = PerThreadDataTests<>;
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataXYZTests, WhenGettingLocalIdSizePerThreadThenCorrectValueIsReturned) {
    // Three channels, each spanning two GRFs at SIMD32.
    const auto expectedSize = 3 * 2 * grfSize;
    EXPECT_EQ(expectedSize, PerThreadDataHelper::getLocalIdSizePerThread(simd, grfSize, numChannels));
}
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataXYZTests, WhenGettingPerThreadDataSizeTotalThenCorrectValueIsReturned) {
    const size_t localWorkSize = 256;
    // Total = work items * channels * GRFs-per-channel * grfSize / SIMD width.
    const auto expectedTotal = 256 * 3 * 2 * grfSize / 32;
    EXPECT_EQ(expectedTotal, PerThreadDataHelper::getPerThreadDataSizeTotal(simd, grfSize, numChannels, localWorkSize));
}
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataXYZTests, Given256x1x1WhenSendingPerThreadDataThenCorrectAmountOfIndirectHeapIsConsumed) {
    MockGraphicsAllocation allocation(indirectHeapMemory, indirectHeapMemorySize);
    LinearStream heap(&allocation);

    const std::array<uint16_t, 3> localWorkSizes = {{256, 1, 1}};
    const size_t localWorkSize = localWorkSizes[0] * localWorkSizes[1] * localWorkSizes[2];

    const auto startOffset = PerThreadDataHelper::sendPerThreadData(
        heap,
        simd,
        grfSize,
        numChannels,
        localWorkSizes,
        workgroupWalkOrder,
        false);

    // The heap usage delta must match the helper's own size prediction.
    const auto expectedTotal = PerThreadDataHelper::getPerThreadDataSizeTotal(simd, grfSize, numChannels, localWorkSize);
    EXPECT_EQ(expectedTotal, heap.getUsed() - startOffset);
}
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataXYZTests, Given2x4x8WhenSendingPerThreadDataThenCorrectAmountOfIndirectHeapIsConsumed) {
    MockGraphicsAllocation allocation(indirectHeapMemory, indirectHeapMemorySize);
    LinearStream heap(&allocation);

    const std::array<uint16_t, 3> localWorkSizes = {{2, 4, 8}};

    const auto startOffset = PerThreadDataHelper::sendPerThreadData(
        heap,
        simd,
        grfSize,
        numChannels,
        localWorkSizes,
        workgroupWalkOrder,
        false);

    // 64 bytes per row of 3 channels over a 2x4x8 workgroup, per SIMD32 thread.
    const auto expectedConsumed = 64u * (3u * 2u * 4u * 8u) / 32u;
    EXPECT_EQ(expectedConsumed, heap.getUsed() - startOffset);
}
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataXYZTests, GivenDifferentSimdWhenGettingThreadPayloadSizeThenCorrectSizeIsReturned) {
    auto payloadSize = [this] {
        return PerThreadDataHelper::getThreadPayloadSize(threadPayload, simd, grfSize);
    };

    // Three channels, two GRFs each at SIMD32.
    simd = 32;
    EXPECT_EQ(grfSize * 2u * 3u, payloadSize());

    // One GRF per channel at SIMD16.
    simd = 16;
    EXPECT_EQ(grfSize * 3u, payloadSize());

    // A header adds one GRF.
    threadPayload.HeaderPresent = 1;
    EXPECT_EQ(grfSize * 4u, payloadSize());

    // The unused per-thread constant adds one more.
    threadPayload.UnusedPerThreadConstantPresent = 1;
    EXPECT_EQ(grfSize * 5u, payloadSize());
}
|
|
|
|
|
|
|
|
// Fixture with no local IDs and no flattened ID present.
using PerThreadDataNoIdsTests = PerThreadDataTests<false, false, false, false>;
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataNoIdsTests, givenZeroChannelsWhenPassedToGetLocalIdSizePerThreadThenSizeOfOneGrfIsReturned) {
    // Even with zero channels one GRF is reported per thread.
    const auto sizePerThread = PerThreadDataHelper::getLocalIdSizePerThread(simd, grfSize, numChannels);
    EXPECT_EQ(32u, sizePerThread);
}
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataNoIdsTests, givenZeroChannelsAndHighWkgSizeWhenGetPerThreadDataSizeTotalIsCalledThenReturnedSizeContainsUnusedGrfPerEachThread) {
    const size_t localWorkSize = 256u;
    // One (unused) GRF is still accounted for per hardware thread.
    const auto expectedSize = (localWorkSize / simd) * grfSize;
    EXPECT_EQ(expectedSize, PerThreadDataHelper::getPerThreadDataSizeTotal(simd, grfSize, numChannels, localWorkSize));
}
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataNoIdsTests, GivenThreadPaylodDataWithoutLocalIdsWhenSendingPerThreadDataThenIndirectHeapMemoryIsNotConsumed) {
    // Pre-fill the heap backing store with a sentinel so any write is visible.
    const uint8_t fillValue = 0xcc;
    memset(indirectHeapMemory, fillValue, indirectHeapMemorySize);

    MockGraphicsAllocation allocation(indirectHeapMemory, indirectHeapMemorySize);
    LinearStream heap(&allocation);

    const std::array<uint16_t, 3> localWorkSizes = {{256, 1, 1}};
    const auto startOffset = PerThreadDataHelper::sendPerThreadData(
        heap,
        simd,
        grfSize,
        numChannels,
        localWorkSizes,
        workgroupWalkOrder,
        false);

    // No local IDs to emit: nothing may be consumed from the stream.
    EXPECT_EQ(0u, heap.getUsed() - startOffset);

    // And the backing memory must still hold the sentinel everywhere.
    for (size_t i = 0; i < indirectHeapMemorySize; ++i) {
        ASSERT_EQ(fillValue, indirectHeapMemory[i]) << "for index " << i;
    }
}
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataNoIdsTests, GivenSimdWhenGettingThreadPayloadSizeThenCorrectValueIsReturned) {
    // With no local IDs, the payload is a single GRF regardless of SIMD width.
    simd = 32;
    EXPECT_EQ(grfSize, PerThreadDataHelper::getThreadPayloadSize(threadPayload, simd, grfSize));

    simd = 16;
    EXPECT_EQ(grfSize, PerThreadDataHelper::getThreadPayloadSize(threadPayload, simd, grfSize));

    // A header adds one more GRF.
    threadPayload.HeaderPresent = 1;
    EXPECT_EQ(grfSize * 2u, PerThreadDataHelper::getThreadPayloadSize(threadPayload, simd, grfSize));
}
|
|
|
|
|
|
|
|
// Fixture with only the flattened local ID present.
using PerThreadDataFlattenedIdsTests = PerThreadDataTests<false, false, false, true>;
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
HWTEST_F(PerThreadDataFlattenedIdsTests, GivenSimdWhenGettingThreadPayloadSizeThenCorrectValueIsReturned) {
    auto payloadSize = [this] {
        return PerThreadDataHelper::getThreadPayloadSize(threadPayload, simd, grfSize);
    };

    // Flattened IDs for 32 lanes span two GRFs.
    simd = 32;
    EXPECT_EQ(grfSize * 2u, payloadSize());

    // One GRF suffices at SIMD16.
    simd = 16;
    EXPECT_EQ(grfSize, payloadSize());

    // A header adds one GRF on top of each case above.
    threadPayload.HeaderPresent = 1;
    EXPECT_EQ(grfSize * 2u, payloadSize());

    simd = 32;
    EXPECT_EQ(grfSize * 3u, payloadSize());
}
|
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
TEST(PerThreadDataTest, WhenSettingLocalIdsInPerThreadDataThenIdsAreSetInCorrectOrder) {
    uint32_t simd = 8;
    uint32_t grfSize = 32;
    uint32_t numChannels = 3;
    uint32_t localWorkSize = 24;

    const std::array<uint16_t, 3> localWorkSizes = {{24, 1, 1}};
    const std::array<uint8_t, 3> workgroupWalkOrder = {{0, 1, 2}};

    // Fix: arguments were previously passed as
    // (simd, numChannels, localWorkSize, grfSize), swapping grfSize into the
    // last position and mis-sizing the buffer. The signature — as used by
    // every other call site in this file — is
    // (simd, grfSize, numChannels, localWorkSize).
    auto sizePerThreadDataTotal = PerThreadDataHelper::getPerThreadDataSizeTotal(simd, grfSize, numChannels, localWorkSize);

    // Over-sized, zero-filled buffer so that writes past the expected region
    // can be detected against a zero reference.
    auto sizeOverSizedBuffer = sizePerThreadDataTotal * 4;
    auto buffer = static_cast<char *>(alignedMalloc(sizeOverSizedBuffer, 16));
    memset(buffer, 0, sizeOverSizedBuffer);

    // Setup reference filled with zeros
    auto reference = static_cast<char *>(alignedMalloc(sizePerThreadDataTotal, 16));
    memset(reference, 0, sizePerThreadDataTotal);

    LinearStream stream(buffer, sizeOverSizedBuffer / 2);
    PerThreadDataHelper::sendPerThreadData(
        stream,
        simd,
        grfSize,
        numChannels,
        localWorkSizes,
        workgroupWalkOrder,
        false);

    // Check if buffer overrun happened: only the first sizePerThreadDataTotal
    // bytes may be overwritten; everything after must match the zero reference.
    for (auto i = sizePerThreadDataTotal; i < sizeOverSizedBuffer; i += sizePerThreadDataTotal) {
        int result = memcmp(buffer + i, reference, sizePerThreadDataTotal);
        EXPECT_EQ(0, result);
    }

    alignedFree(buffer);
    alignedFree(reference);
}
|
2019-10-23 15:36:37 +08:00
|
|
|
|
2020-04-14 21:41:08 +08:00
|
|
|
TEST(PerThreadDataTest, givenSimdEqualOneWhenSettingLocalIdsInPerThreadDataThenIdsAreSetInCorrectOrder) {
    uint32_t simd = 1;
    uint32_t grfSize = 32;
    uint32_t numChannels = 3;
    uint32_t localWorkSize = 24;

    const std::array<uint16_t, 3> localWorkSizes = {{3, 4, 2}};
    const std::array<uint8_t, 3> workgroupWalkOrder = {{0, 1, 2}};

    auto sizePerThreadDataTotal = PerThreadDataHelper::getPerThreadDataSizeTotal(simd, grfSize, numChannels, localWorkSize);

    // Over-sized, zero-filled buffer so writes past the expected region show up.
    auto bufferSize = sizePerThreadDataTotal * 4;
    auto buffer = static_cast<char *>(alignedMalloc(bufferSize, 16));
    memset(buffer, 0, bufferSize);

    // Zero reference used to verify the untouched tail of the buffer.
    auto zeroReference = static_cast<char *>(alignedMalloc(sizePerThreadDataTotal, 16));
    memset(zeroReference, 0, sizePerThreadDataTotal);

    LinearStream stream(buffer, bufferSize / 2);
    PerThreadDataHelper::sendPerThreadData(
        stream,
        simd,
        grfSize,
        numChannels,
        localWorkSizes,
        workgroupWalkOrder,
        false);

    // At SIMD1 each work item gets one GRF-sized slot; the (x, y, z) ids are
    // laid out X-major, then Y, then Z, as the nesting order below encodes.
    auto slot = buffer;
    for (uint16_t z = 0; z < localWorkSizes[2]; ++z) {
        for (uint16_t y = 0; y < localWorkSizes[1]; ++y) {
            for (uint16_t x = 0; x < localWorkSizes[0]; ++x) {
                const uint16_t expectedIds[] = {x, y, z};
                EXPECT_EQ(0, memcmp(slot, expectedIds, sizeof(uint16_t) * 3));
                slot += grfSize;
            }
        }
    }

    // Only the first sizePerThreadDataTotal bytes may be overwritten; the
    // remainder must still compare equal to the zero reference.
    for (auto offset = sizePerThreadDataTotal; offset < bufferSize; offset += sizePerThreadDataTotal) {
        EXPECT_EQ(0, memcmp(buffer + offset, zeroReference, sizePerThreadDataTotal));
    }

    alignedFree(buffer);
    alignedFree(zeroReference);
}
|