diff --git a/level_zero/core/test/unit_tests/sources/cmdlist/test_cmdlist_append_barrier.cpp b/level_zero/core/test/unit_tests/sources/cmdlist/test_cmdlist_append_barrier.cpp
index de5b1cccf0..2b20a17e51 100644
--- a/level_zero/core/test/unit_tests/sources/cmdlist/test_cmdlist_append_barrier.cpp
+++ b/level_zero/core/test/unit_tests/sources/cmdlist/test_cmdlist_append_barrier.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -219,5 +219,152 @@ HWTEST2_F(MultiTileCommandListAppendBarrier, WhenAppendingBarrierThenPipeControl
     EXPECT_EQ(expectedUseBuffer, parsedOffset);
 }
 
+HWTEST2_F(MultiTileCommandListAppendBarrier,
+          GivenCurrentCommandBufferExhaustedWhenAppendingMultiTileBarrierThenPipeControlAndCrossTileSyncIsGeneratedInNewBuffer, IsWithinXeGfxFamily) {
+    using PIPE_CONTROL = typename FamilyType::PIPE_CONTROL;
+    using MI_BATCH_BUFFER_START = typename FamilyType::MI_BATCH_BUFFER_START;
+    using MI_STORE_DATA_IMM = typename FamilyType::MI_STORE_DATA_IMM;
+    using MI_ATOMIC = typename FamilyType::MI_ATOMIC;
+    using MI_SEMAPHORE_WAIT = typename FamilyType::MI_SEMAPHORE_WAIT;
+    using MI_BATCH_BUFFER_END = typename FamilyType::MI_BATCH_BUFFER_END;
+
+    EXPECT_EQ(2u, device->getNEODevice()->getDeviceBitfield().count());
+    EXPECT_EQ(2u, commandList->partitionCount);
+
+    LinearStream *cmdListStream = commandList->commandContainer.getCommandStream();
+
+    size_t beforeControlSectionOffset = sizeof(MI_STORE_DATA_IMM) +
+                                        sizeof(PIPE_CONTROL) +
+                                        sizeof(MI_ATOMIC) + sizeof(MI_SEMAPHORE_WAIT) +
+                                        sizeof(MI_BATCH_BUFFER_START);
+
+    size_t bbStartOffset = beforeControlSectionOffset +
+                           (2 * sizeof(uint32_t));
+
+    size_t expectedUseBuffer = bbStartOffset +
+                               sizeof(MI_ATOMIC) + sizeof(MI_SEMAPHORE_WAIT) +
+                               sizeof(MI_STORE_DATA_IMM) +
+                               sizeof(MI_ATOMIC) + sizeof(MI_SEMAPHORE_WAIT);
+
+    auto firstBatchBufferAllocation = cmdListStream->getGraphicsAllocation();
+    auto useSize = cmdListStream->getAvailableSpace();
+    useSize -= (sizeof(MI_BATCH_BUFFER_END) +
+                sizeof(MI_STORE_DATA_IMM) +
+                sizeof(PIPE_CONTROL));
+    cmdListStream->getSpace(useSize);
+
+    auto result = commandList->appendBarrier(nullptr, 0, nullptr);
+    ASSERT_EQ(ZE_RESULT_SUCCESS, result);
+
+    auto secondBatchBufferAllocation = cmdListStream->getGraphicsAllocation();
+    EXPECT_NE(firstBatchBufferAllocation, secondBatchBufferAllocation);
+
+    auto gpuBaseAddress = secondBatchBufferAllocation->getGpuAddress();
+
+    auto gpuCrossTileSyncAddress = gpuBaseAddress +
+                                   beforeControlSectionOffset;
+
+    auto gpuFinalSyncAddress = gpuCrossTileSyncAddress +
+                               sizeof(uint32_t);
+
+    auto gpuStartAddress = gpuBaseAddress +
+                           bbStartOffset;
+
+    auto usedSpace = cmdListStream->getUsed();
+    EXPECT_EQ(expectedUseBuffer, usedSpace);
+
+    void *cmdBuffer = cmdListStream->getCpuBase();
+    size_t parsedOffset = 0;
+
+    {
+        auto storeDataImm = genCmdCast<MI_STORE_DATA_IMM *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, storeDataImm);
+        EXPECT_EQ(gpuFinalSyncAddress, storeDataImm->getAddress());
+        EXPECT_EQ(0u, storeDataImm->getDataDword0());
+        parsedOffset += sizeof(MI_STORE_DATA_IMM);
+    }
+    {
+        auto pipeControl = genCmdCast<PIPE_CONTROL *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, pipeControl);
+        EXPECT_TRUE(pipeControl->getCommandStreamerStallEnable());
+        EXPECT_FALSE(pipeControl->getDcFlushEnable());
+        parsedOffset += sizeof(PIPE_CONTROL);
+    }
+    {
+        auto miAtomic = genCmdCast<MI_ATOMIC *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, miAtomic);
+        auto miAtomicProgrammedAddress = NEO::UnitTestHelper<FamilyType>::getAtomicMemoryAddress(*miAtomic);
+        EXPECT_EQ(gpuCrossTileSyncAddress, miAtomicProgrammedAddress);
+        EXPECT_FALSE(miAtomic->getReturnDataControl());
+        EXPECT_EQ(MI_ATOMIC::ATOMIC_OPCODES::ATOMIC_4B_INCREMENT, miAtomic->getAtomicOpcode());
+        parsedOffset += sizeof(MI_ATOMIC);
+    }
+    {
+        auto miSemaphore = genCmdCast<MI_SEMAPHORE_WAIT *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, miSemaphore);
+        EXPECT_EQ(gpuCrossTileSyncAddress, miSemaphore->getSemaphoreGraphicsAddress());
+        EXPECT_EQ(MI_SEMAPHORE_WAIT::COMPARE_OPERATION::COMPARE_OPERATION_SAD_GREATER_THAN_OR_EQUAL_SDD, miSemaphore->getCompareOperation());
+        EXPECT_EQ(2u, miSemaphore->getSemaphoreDataDword());
+        parsedOffset += sizeof(MI_SEMAPHORE_WAIT);
+    }
+    {
+        auto bbStart = genCmdCast<MI_BATCH_BUFFER_START *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, bbStart);
+        EXPECT_EQ(gpuStartAddress, bbStart->getBatchBufferStartAddress());
+        EXPECT_EQ(MI_BATCH_BUFFER_START::SECOND_LEVEL_BATCH_BUFFER::SECOND_LEVEL_BATCH_BUFFER_SECOND_LEVEL_BATCH, bbStart->getSecondLevelBatchBuffer());
+        parsedOffset += sizeof(MI_BATCH_BUFFER_START);
+    }
+    {
+        auto crossField = reinterpret_cast<uint32_t *>(ptrOffset(cmdBuffer, parsedOffset));
+        EXPECT_EQ(0u, *crossField);
+        parsedOffset += sizeof(uint32_t);
+        auto finalField = reinterpret_cast<uint32_t *>(ptrOffset(cmdBuffer, parsedOffset));
+        EXPECT_EQ(0u, *finalField);
+        parsedOffset += sizeof(uint32_t);
+    }
+    {
+        auto miAtomic = genCmdCast<MI_ATOMIC *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, miAtomic);
+        auto miAtomicProgrammedAddress = NEO::UnitTestHelper<FamilyType>::getAtomicMemoryAddress(*miAtomic);
+        EXPECT_EQ(gpuFinalSyncAddress, miAtomicProgrammedAddress);
+        EXPECT_FALSE(miAtomic->getReturnDataControl());
+        EXPECT_EQ(MI_ATOMIC::ATOMIC_OPCODES::ATOMIC_4B_INCREMENT, miAtomic->getAtomicOpcode());
+        parsedOffset += sizeof(MI_ATOMIC);
+    }
+    {
+        auto miSemaphore = genCmdCast<MI_SEMAPHORE_WAIT *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, miSemaphore);
+        EXPECT_EQ(gpuFinalSyncAddress, miSemaphore->getSemaphoreGraphicsAddress());
+        EXPECT_EQ(MI_SEMAPHORE_WAIT::COMPARE_OPERATION::COMPARE_OPERATION_SAD_GREATER_THAN_OR_EQUAL_SDD, miSemaphore->getCompareOperation());
+        EXPECT_EQ(2u, miSemaphore->getSemaphoreDataDword());
+        parsedOffset += sizeof(MI_SEMAPHORE_WAIT);
+    }
+    {
+        auto storeDataImm = genCmdCast<MI_STORE_DATA_IMM *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, storeDataImm);
+        EXPECT_EQ(gpuCrossTileSyncAddress, storeDataImm->getAddress());
+        EXPECT_EQ(0u, storeDataImm->getDataDword0());
+        parsedOffset += sizeof(MI_STORE_DATA_IMM);
+    }
+    {
+        auto miAtomic = genCmdCast<MI_ATOMIC *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, miAtomic);
+        auto miAtomicProgrammedAddress = NEO::UnitTestHelper<FamilyType>::getAtomicMemoryAddress(*miAtomic);
+        EXPECT_EQ(gpuFinalSyncAddress, miAtomicProgrammedAddress);
+        EXPECT_FALSE(miAtomic->getReturnDataControl());
+        EXPECT_EQ(MI_ATOMIC::ATOMIC_OPCODES::ATOMIC_4B_INCREMENT, miAtomic->getAtomicOpcode());
+        parsedOffset += sizeof(MI_ATOMIC);
+    }
+    {
+        auto miSemaphore = genCmdCast<MI_SEMAPHORE_WAIT *>(ptrOffset(cmdBuffer, parsedOffset));
+        ASSERT_NE(nullptr, miSemaphore);
+        EXPECT_EQ(gpuFinalSyncAddress, miSemaphore->getSemaphoreGraphicsAddress());
+        EXPECT_EQ(MI_SEMAPHORE_WAIT::COMPARE_OPERATION::COMPARE_OPERATION_SAD_GREATER_THAN_OR_EQUAL_SDD, miSemaphore->getCompareOperation());
+        EXPECT_EQ(4u, miSemaphore->getSemaphoreDataDword());
+        parsedOffset += sizeof(MI_SEMAPHORE_WAIT);
+    }
+    EXPECT_EQ(expectedUseBuffer, parsedOffset);
+}
+
 } // namespace ult
 } // namespace L0