/*
 * Copyright (C) 2017-2020 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "command_queue/enqueue_fixture.h"
#include "fixtures/hello_world_fixture.h"

using namespace NEO;
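
// IOQ = in-order queue; these tests check that each enqueue on such a queue
// advances the queue's taskLevel and taskCount.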
typedef HelloWorldTest<HelloWorldFixtureFactory> IOQ;

TEST_F(IOQ, WhenEnqueueingKernelThenTaskLevelIsIncremented) {
    auto previousTaskLevel = pCmdQ->taskLevel;

    EnqueueKernelHelper<>::enqueueKernel(
        pCmdQ,
        pKernel);
    EXPECT_LT(previousTaskLevel, pCmdQ->taskLevel);
}

TEST_F(IOQ, WhenFillingBufferThenTaskLevelIsIncremented) {
    auto previousTaskLevel = pCmdQ->taskLevel;

    EnqueueFillBufferHelper<>::enqueue(pCmdQ);
    EXPECT_LT(previousTaskLevel, pCmdQ->taskLevel);
}
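
// The buffer-read tests force the GPU path via forceDisallowCPUCopy; per the inline
// comment below, a CPU-side copy would not increase the task level.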
TEST_F(IOQ, WhenReadingBufferThenTaskLevelIsIncremented) {
    auto previousTaskLevel = pCmdQ->taskLevel;
    auto buffer = std::unique_ptr<Buffer>(BufferHelper<>::create());

    buffer->forceDisallowCPUCopy = true; // task level is not increased if doing cpu copy
    EnqueueReadBufferHelper<>::enqueueReadBuffer(pCmdQ, buffer.get());

    EXPECT_LT(previousTaskLevel, pCmdQ->taskLevel);
}

TEST_F(IOQ, WhenEnqueueingKernelThenTaskCountIsIncremented) {
    auto &commandStreamReceiver = pCmdQ->getGpgpuCommandStreamReceiver();
    auto previousTaskCount = commandStreamReceiver.peekTaskCount();

    EnqueueKernelHelper<>::enqueueKernel(pCmdQ,
                                         pKernel);
    EXPECT_LT(previousTaskCount, commandStreamReceiver.peekTaskCount());
    EXPECT_EQ(pCmdQ->taskCount, commandStreamReceiver.peekTaskCount());
}

TEST_F(IOQ, WhenFillingBufferThenTaskCountIsIncremented) {
    auto &commandStreamReceiver = pCmdQ->getGpgpuCommandStreamReceiver();
    auto previousTaskCount = commandStreamReceiver.peekTaskCount();

    EnqueueFillBufferHelper<>::enqueue(pCmdQ);
    EXPECT_LT(previousTaskCount, commandStreamReceiver.peekTaskCount());
    EXPECT_LE(pCmdQ->taskCount, commandStreamReceiver.peekTaskCount());
}

TEST_F(IOQ, WhenReadingBufferThenTaskCountIsIncremented) {
    auto &commandStreamReceiver = pCmdQ->getGpgpuCommandStreamReceiver();
    auto previousTaskCount = commandStreamReceiver.peekTaskCount();
    auto buffer = std::unique_ptr<Buffer>(BufferHelper<>::create());

    buffer->forceDisallowCPUCopy = true; // task level is not increased if doing cpu copy
    EnqueueReadBufferHelper<>::enqueueReadBuffer(pCmdQ, buffer.get());
    EXPECT_LT(previousTaskCount, commandStreamReceiver.peekTaskCount());
    EXPECT_LE(pCmdQ->taskCount, commandStreamReceiver.peekTaskCount());
}
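
// The blocking read below waits on a user event that is already CL_COMPLETE, so the
// enqueue proceeds immediately; both taskCount and taskLevel should still advance.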
TEST_F(IOQ, GivenUserEventWhenReadingBufferThenTaskCountAndTaskLevelAreIncremented) {
    auto buffer = std::unique_ptr<Buffer>(BufferHelper<>::create());

    auto alignedReadPtr = alignedMalloc(BufferDefaults::sizeInBytes, MemoryConstants::cacheLineSize);
    ASSERT_NE(nullptr, alignedReadPtr);

    auto previousTaskCount = pCmdQ->taskCount;
    auto previousTaskLevel = pCmdQ->taskLevel;

    auto userEvent = clCreateUserEvent(pContext, &retVal);
    EXPECT_EQ(CL_SUCCESS, retVal);

    retVal = clSetUserEventStatus(userEvent, CL_COMPLETE);
    ASSERT_EQ(CL_SUCCESS, retVal);

    buffer->forceDisallowCPUCopy = true; // task level is not increased if doing cpu copy
    retVal = EnqueueReadBufferHelper<>::enqueueReadBuffer(pCmdQ,
                                                          buffer.get(),
                                                          CL_TRUE,
                                                          0,
                                                          BufferDefaults::sizeInBytes,
                                                          alignedReadPtr,
                                                          nullptr,
                                                          1,
                                                          &userEvent,
                                                          nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);

    EXPECT_LT(previousTaskCount, pCmdQ->taskCount);
    EXPECT_LT(previousTaskLevel, pCmdQ->taskLevel);

    retVal = clReleaseEvent(userEvent);
    EXPECT_EQ(CL_SUCCESS, retVal);

    alignedFree(alignedReadPtr);
}