mirror of
https://github.com/intel/compute-runtime.git
synced 2025-12-21 09:14:47 +08:00
Add new unit test to command list compute barrier suite
Signed-off-by: Zbigniew Zdanowicz <zbigniew.zdanowicz@intel.com>
This commit is contained in:
committed by
Compute-Runtime-Automation
parent
370b4b94c8
commit
504b49effa
/*
 * Copyright (C) 2020-2022 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */
@@ -219,5 +219,152 @@ HWTEST2_F(MultiTileCommandListAppendBarrier, WhenAppendingBarrierThenPipeControl
|
||||
EXPECT_EQ(expectedUseBuffer, parsedOffset);
|
||||
}
|
||||
|
||||
// Verifies that when the current command buffer has too little space left for a
// multi-tile barrier, appendBarrier() switches to a new buffer allocation and
// emits the full barrier sequence there: PIPE_CONTROL, cross-tile atomic
// synchronization, a jump over the in-stream sync control section, and the
// final-sync cleanup commands.
HWTEST2_F(MultiTileCommandListAppendBarrier,
          GivenCurrentCommandBufferExhaustedWhenAppendingMultiTileBarrierThenPipeControlAndCrossTileSyncIsGeneratedInNewBuffer, IsWithinXeGfxFamily) {
    using PIPE_CONTROL = typename FamilyType::PIPE_CONTROL;
    using MI_BATCH_BUFFER_START = typename FamilyType::MI_BATCH_BUFFER_START;
    using MI_STORE_DATA_IMM = typename FamilyType::MI_STORE_DATA_IMM;
    using MI_ATOMIC = typename FamilyType::MI_ATOMIC;
    using MI_SEMAPHORE_WAIT = typename FamilyType::MI_SEMAPHORE_WAIT;
    using MI_BATCH_BUFFER_END = typename FamilyType::MI_BATCH_BUFFER_END;

    // Test fixture is expected to set up a two-tile device / partitioned command list.
    EXPECT_EQ(2u, device->getNEODevice()->getDeviceBitfield().count());
    EXPECT_EQ(2u, commandList->partitionCount);

    LinearStream *cmdListStream = commandList->commandContainer.getCommandStream();

    // Size of the commands emitted before the control section
    // (store + pipe control + cross-tile atomic/wait + bb_start over the section).
    size_t beforeControlSectionOffset = sizeof(MI_STORE_DATA_IMM) +
                                        sizeof(PIPE_CONTROL) +
                                        sizeof(MI_ATOMIC) + sizeof(MI_SEMAPHORE_WAIT) +
                                        sizeof(MI_BATCH_BUFFER_START);

    // The control section holds two 32-bit sync fields (cross-tile and final-sync)
    // that the MI_BATCH_BUFFER_START jumps over.
    size_t bbStartOffset = beforeControlSectionOffset +
                           (2 * sizeof(uint32_t));

    // Total expected footprint of the barrier in the new command buffer.
    size_t expectedUseBuffer = bbStartOffset +
                               sizeof(MI_ATOMIC) + sizeof(MI_SEMAPHORE_WAIT) +
                               sizeof(MI_STORE_DATA_IMM) +
                               sizeof(MI_ATOMIC) + sizeof(MI_SEMAPHORE_WAIT);

    // Exhaust the current buffer, leaving just under the space a barrier needs,
    // so the append is forced into a fresh allocation.
    auto firstBatchBufferAllocation = cmdListStream->getGraphicsAllocation();
    auto useSize = cmdListStream->getAvailableSpace();
    useSize -= (sizeof(MI_BATCH_BUFFER_END) +
                sizeof(MI_STORE_DATA_IMM) +
                sizeof(PIPE_CONTROL));
    cmdListStream->getSpace(useSize);

    auto result = commandList->appendBarrier(nullptr, 0, nullptr);
    ASSERT_EQ(ZE_RESULT_SUCCESS, result);

    // The barrier must have landed in a different (new) allocation.
    auto secondBatchBufferAllocation = cmdListStream->getGraphicsAllocation();
    EXPECT_NE(firstBatchBufferAllocation, secondBatchBufferAllocation);

    // GPU addresses of the two sync fields and of the post-section resume point,
    // all relative to the new buffer's base.
    auto gpuBaseAddress = secondBatchBufferAllocation->getGpuAddress();
    auto gpuCrossTileSyncAddress = gpuBaseAddress +
                                   beforeControlSectionOffset;
    auto gpuFinalSyncAddress = gpuCrossTileSyncAddress +
                               sizeof(uint32_t);
    auto gpuStartAddress = gpuBaseAddress +
                           bbStartOffset;

    auto usedSpace = cmdListStream->getUsed();
    EXPECT_EQ(expectedUseBuffer, usedSpace);

    // Walk the emitted command buffer and validate each command in order.
    void *cmdBuffer = cmdListStream->getCpuBase();
    size_t parsedOffset = 0;

    {
        // 1. Clear the final-sync field before the barrier starts.
        auto storeDataImm = genCmdCast<MI_STORE_DATA_IMM *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, storeDataImm);
        EXPECT_EQ(gpuFinalSyncAddress, storeDataImm->getAddress());
        EXPECT_EQ(0u, storeDataImm->getDataDword0());
        parsedOffset += sizeof(MI_STORE_DATA_IMM);
    }
    {
        // 2. The barrier's PIPE_CONTROL (CS stall, no DC flush).
        auto pipeControl = genCmdCast<PIPE_CONTROL *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, pipeControl);
        EXPECT_TRUE(pipeControl->getCommandStreamerStallEnable());
        EXPECT_FALSE(pipeControl->getDcFlushEnable());
        parsedOffset += sizeof(PIPE_CONTROL);
    }
    {
        // 3. Each tile increments the cross-tile sync counter.
        auto miAtomic = genCmdCast<MI_ATOMIC *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, miAtomic);
        auto miAtomicProgrammedAddress = NEO::UnitTestHelper<FamilyType>::getAtomicMemoryAddress(*miAtomic);
        EXPECT_EQ(gpuCrossTileSyncAddress, miAtomicProgrammedAddress);
        EXPECT_FALSE(miAtomic->getReturnDataControl());
        EXPECT_EQ(MI_ATOMIC::ATOMIC_OPCODES::ATOMIC_4B_INCREMENT, miAtomic->getAtomicOpcode());
        parsedOffset += sizeof(MI_ATOMIC);
    }
    {
        // 4. Wait until both tiles have incremented (counter >= 2).
        auto miSemaphore = genCmdCast<MI_SEMAPHORE_WAIT *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, miSemaphore);
        EXPECT_EQ(gpuCrossTileSyncAddress, miSemaphore->getSemaphoreGraphicsAddress());
        EXPECT_EQ(MI_SEMAPHORE_WAIT::COMPARE_OPERATION::COMPARE_OPERATION_SAD_GREATER_THAN_OR_EQUAL_SDD, miSemaphore->getCompareOperation());
        EXPECT_EQ(2u, miSemaphore->getSemaphoreDataDword());
        parsedOffset += sizeof(MI_SEMAPHORE_WAIT);
    }
    {
        // 5. Second-level BB_START jumping over the in-stream control section.
        auto bbStart = genCmdCast<MI_BATCH_BUFFER_START *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, bbStart);
        EXPECT_EQ(gpuStartAddress, bbStart->getBatchBufferStartAddress());
        EXPECT_EQ(MI_BATCH_BUFFER_START::SECOND_LEVEL_BATCH_BUFFER::SECOND_LEVEL_BATCH_BUFFER_SECOND_LEVEL_BATCH, bbStart->getSecondLevelBatchBuffer());
        parsedOffset += sizeof(MI_BATCH_BUFFER_START);
    }
    {
        // 6. The control section itself: both 32-bit sync fields start at zero.
        auto crossField = reinterpret_cast<uint32_t *>(ptrOffset(cmdBuffer, parsedOffset));
        EXPECT_EQ(0u, *crossField);
        parsedOffset += sizeof(uint32_t);
        auto finalField = reinterpret_cast<uint32_t *>(ptrOffset(cmdBuffer, parsedOffset));
        EXPECT_EQ(0u, *finalField);
        parsedOffset += sizeof(uint32_t);
    }
    {
        // 7. First final-sync increment (counter reaches 2 when both tiles pass).
        auto miAtomic = genCmdCast<MI_ATOMIC *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, miAtomic);
        auto miAtomicProgrammedAddress = NEO::UnitTestHelper<FamilyType>::getAtomicMemoryAddress(*miAtomic);
        EXPECT_EQ(gpuFinalSyncAddress, miAtomicProgrammedAddress);
        EXPECT_FALSE(miAtomic->getReturnDataControl());
        EXPECT_EQ(MI_ATOMIC::ATOMIC_OPCODES::ATOMIC_4B_INCREMENT, miAtomic->getAtomicOpcode());
        parsedOffset += sizeof(MI_ATOMIC);
    }
    {
        // 8. Wait for final-sync counter >= 2 before cleanup.
        auto miSemaphore = genCmdCast<MI_SEMAPHORE_WAIT *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, miSemaphore);
        EXPECT_EQ(gpuFinalSyncAddress, miSemaphore->getSemaphoreGraphicsAddress());
        EXPECT_EQ(MI_SEMAPHORE_WAIT::COMPARE_OPERATION::COMPARE_OPERATION_SAD_GREATER_THAN_OR_EQUAL_SDD, miSemaphore->getCompareOperation());
        EXPECT_EQ(2u, miSemaphore->getSemaphoreDataDword());
        parsedOffset += sizeof(MI_SEMAPHORE_WAIT);
    }
    {
        // 9. Reset the cross-tile counter so the section can be reused.
        auto storeDataImm = genCmdCast<MI_STORE_DATA_IMM *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, storeDataImm);
        EXPECT_EQ(gpuCrossTileSyncAddress, storeDataImm->getAddress());
        EXPECT_EQ(0u, storeDataImm->getDataDword0());
        parsedOffset += sizeof(MI_STORE_DATA_IMM);
    }
    {
        // 10. Second final-sync increment, confirming the cleanup completed.
        auto miAtomic = genCmdCast<MI_ATOMIC *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, miAtomic);
        auto miAtomicProgrammedAddress = NEO::UnitTestHelper<FamilyType>::getAtomicMemoryAddress(*miAtomic);
        EXPECT_EQ(gpuFinalSyncAddress, miAtomicProgrammedAddress);
        EXPECT_FALSE(miAtomic->getReturnDataControl());
        EXPECT_EQ(MI_ATOMIC::ATOMIC_OPCODES::ATOMIC_4B_INCREMENT, miAtomic->getAtomicOpcode());
        parsedOffset += sizeof(MI_ATOMIC);
    }
    {
        // 11. Final wait: both tiles finished cleanup (counter >= 4).
        auto miSemaphore = genCmdCast<MI_SEMAPHORE_WAIT *>(ptrOffset(cmdBuffer, parsedOffset));
        ASSERT_NE(nullptr, miSemaphore);
        EXPECT_EQ(gpuFinalSyncAddress, miSemaphore->getSemaphoreGraphicsAddress());
        EXPECT_EQ(MI_SEMAPHORE_WAIT::COMPARE_OPERATION::COMPARE_OPERATION_SAD_GREATER_THAN_OR_EQUAL_SDD, miSemaphore->getCompareOperation());
        EXPECT_EQ(4u, miSemaphore->getSemaphoreDataDword());
        parsedOffset += sizeof(MI_SEMAPHORE_WAIT);
    }
    // Every byte of the new buffer must be accounted for by the parsed commands.
    EXPECT_EQ(expectedUseBuffer, parsedOffset);
}
|
||||
|
||||
} // namespace ult
|
||||
} // namespace L0
|
||||
|
||||
Reference in New Issue
Block a user