Mirror of https://github.com/intel/compute-runtime.git (synced 2025-12-19 16:24:18 +08:00)
fix: disable TBX writable mode after chunk write
Signed-off-by: Bartosz Dunajski <bartosz.dunajski@intel.com>
commit 95b7929d97
parent cb9977b8f4
committed by Compute-Runtime-Automation
@@ -440,7 +440,7 @@ void EventImp<TagSizeT>::copyDataToEventAlloc(void *dstHostAddr, uint64_t dstGpu
 
         csrs[0]->writeMemory(*alloc, true, offset, copySize);
 
-        alloc->setTbxWritable(true, allBanks);
+        alloc->setTbxWritable(false, allBanks);
     }
 }
 
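For context, here is a minimal standalone sketch of the pattern this hunk changes, using hypothetical stand-in types (MockAllocation, MockCsr) rather than the real NEO GraphicsAllocation/CommandStreamReceiver interfaces. The working assumption, made only for illustration, is that an allocation left TBX-writable gets re-uploaded in full to the simulator on a later residency pass, so clearing the flag right after the explicit chunked writeMemory avoids a redundant full-allocation transfer.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-ins for the NEO allocation/CSR types; the names and the
// exact re-upload semantics are assumptions for this sketch only.
struct MockAllocation {
    std::vector<uint8_t> hostMemory;
    uint32_t tbxWritableBanks = 0xffffffffu; // all banks writable by default

    void setTbxWritable(bool writable, uint32_t banks) {
        tbxWritableBanks = writable ? (tbxWritableBanks | banks)
                                    : (tbxWritableBanks & ~banks);
    }
    bool isTbxWritable(uint32_t banks) const {
        return (tbxWritableBanks & banks) != 0;
    }
};

struct MockCsr {
    size_t bytesTransferred = 0;

    // Explicit chunked write: transfers exactly the requested chunk.
    void writeMemory(MockAllocation &alloc, bool chunked, size_t offset, size_t size) {
        (void)alloc;
        (void)chunked;
        (void)offset;
        bytesTransferred += size;
    }

    // Later residency pass: re-uploads the whole allocation, but only while
    // it is still marked TBX-writable for the given banks.
    void processResidency(MockAllocation &alloc, uint32_t allBanks) {
        if (alloc.isTbxWritable(allBanks)) {
            bytesTransferred += alloc.hostMemory.size();
            alloc.setTbxWritable(false, allBanks);
        }
    }
};

int main() {
    constexpr uint32_t allBanks = 0xffffffffu;
    MockAllocation alloc;
    alloc.hostMemory.resize(4096);
    MockCsr csr;

    // Pattern from the fix: write one chunk, then clear the writable flag so
    // the next residency pass does not re-upload all 4096 bytes.
    csr.writeMemory(alloc, true, 0, sizeof(uint64_t));
    alloc.setTbxWritable(false, allBanks);

    csr.processResidency(alloc, allBanks);
    std::cout << "bytes transferred: " << csr.bytesTransferred << "\n"; // 8, not 8 + 4096
    return 0;
}

With the pre-fix behavior (flag left set to true) the residency pass in this sketch would transfer the whole allocation again; the unit-test hunks below check the same thing by expecting isTbxWritable() to report false after signaling.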
@@ -3575,7 +3575,7 @@ HWTEST_F(EventTests, GivenCsrTbxModeWhenEventCreatedAndSignaledThenEventAllocati
     EXPECT_TRUE(ultCsr.writeMemoryParams.latestChunkedMode);
     EXPECT_EQ(sizeof(uint64_t) * expectedCallCount, ultCsr.writeMemoryParams.latestChunkSize);
     EXPECT_EQ(0u, ultCsr.writeMemoryParams.latestGpuVaChunkOffset);
-    EXPECT_TRUE(eventAllocation->isTbxWritable(expectedBanks));
+    EXPECT_FALSE(eventAllocation->isTbxWritable(expectedBanks));
 
     auto status = event->hostSignal(false);
     EXPECT_EQ(ZE_RESULT_SUCCESS, status);
@@ -3586,14 +3586,14 @@ HWTEST_F(EventTests, GivenCsrTbxModeWhenEventCreatedAndSignaledThenEventAllocati
     EXPECT_EQ(event->getSinglePacketSize(), ultCsr.writeMemoryParams.latestChunkSize);
     EXPECT_EQ(0u, ultCsr.writeMemoryParams.latestGpuVaChunkOffset);
 
-    EXPECT_TRUE(eventAllocation->isTbxWritable(expectedBanks));
+    EXPECT_FALSE(eventAllocation->isTbxWritable(expectedBanks));
 
     std::bitset<32> singleBitMask;
     for (uint32_t i = 0; i < 32; i++) {
         singleBitMask.reset();
         singleBitMask.set(i, true);
         uint32_t bit = static_cast<uint32_t>(singleBitMask.to_ulong());
-        EXPECT_TRUE(eventAllocation->isTbxWritable(bit));
+        EXPECT_FALSE(eventAllocation->isTbxWritable(bit));
     }
 
     event->reset();
@@ -3605,7 +3605,7 @@ HWTEST_F(EventTests, GivenCsrTbxModeWhenEventCreatedAndSignaledThenEventAllocati
     EXPECT_EQ(event->getSinglePacketSize(), ultCsr.writeMemoryParams.latestChunkSize);
     EXPECT_EQ(0u, ultCsr.writeMemoryParams.latestGpuVaChunkOffset);
 
-    EXPECT_TRUE(eventAllocation->isTbxWritable(expectedBanks));
+    EXPECT_FALSE(eventAllocation->isTbxWritable(expectedBanks));
 
     size_t offset = event->getCompletionFieldOffset();
     void *completionAddress = ptrOffset(event->hostAddress, offset);