DrmGemCloseWorker - remove unused modes of operation.

Change-Id: Ie8524a8411f1022785536a523aad3e4ebea3a349

Author: Mrozek, Michal
Date: 2018-05-10 10:16:22 +02:00
Committed by: sys_ocldev
Parent: f90ced1452
Commit: 06287af541

9 changed files with 105 additions and 216 deletions

View File

@@ -220,7 +220,7 @@ class MemoryManager {
    return deferredDeleter.get();
}
- virtual void waitForDeletions();
+ void waitForDeletions();
bool isAsyncDeleterEnabled() const;
virtual bool isMemoryBudgetExhausted() const;
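Because the DrmMemoryManager override is deleted later in this commit, waitForDeletions loses its virtual qualifier here: every memory manager now goes through the single base-class implementation. A rough sketch of that remaining path, inferred from the deferredDeleter member visible in this hunk (the drain(bool) call is an assumed DeferredDeleter API, not quoted from the repository):

```cpp
// Illustrative sketch only, not the verbatim implementation.
void MemoryManager::waitForDeletions() {
    if (deferredDeleter) {
        deferredDeleter->drain(true); // block until queued deferred deletions complete
    }
}
```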

View File

@@ -78,16 +78,7 @@ FlushStamp DrmCommandStreamReceiver<GfxFamily>::flush(BatchBuffer &batchBuffer,
    batchBuffer.requiresCoherency,
    batchBuffer.low_priority);
- if (this->gemCloseWorkerOperationMode == gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers) {
-     // Consume all space in CS to force new allocation
-     batchBuffer.stream->replaceBuffer(nullptr, 0);
-     batchBuffer.stream->replaceGraphicsAllocation(nullptr);
-     // Push for asynchronous cleanup
-     getMemoryManager()->push(alloc);
- } else {
-     bb->getResidency()->clear();
- }
+ bb->getResidency()->clear();
}
return flushStamp;
@@ -105,9 +96,6 @@ void DrmCommandStreamReceiver<GfxFamily>::makeResident(GraphicsAllocation &gfxAl
template <typename GfxFamily>
void DrmCommandStreamReceiver<GfxFamily>::makeResident(BufferObject *bo) {
    if (bo) {
-       if (this->gemCloseWorkerOperationMode == gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers) {
-           bo->reference();
-       }
        residency.push_back(bo);
    }
}
@@ -138,11 +126,6 @@ void DrmCommandStreamReceiver<GfxFamily>::makeNonResident(GraphicsAllocation &gf
    // If makeNonResident is called before flush, vector will be cleared.
    if (gfxAllocation.residencyTaskCount != ObjectNotResident) {
        if (this->residency.size() != 0) {
-           if (this->gemCloseWorkerOperationMode == gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers) {
-               for (auto it : residency) {
-                   getMemoryManager()->unreference(it);
-               }
-           }
            this->residency.clear();
        }
        if (gfxAllocation.fragmentsStorage.fragmentCount) {
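Taken together, the three hunks above make residency tracking non-owning in every remaining mode: makeResident only records the BufferObject pointer, makeNonResident only clears the record, and no reference()/unreference() traffic happens in between. That is exactly why the refcount expectations in the tests below drop from 2u to 1u. A small illustrative round trip (the helper function and asserts are ours, and Family stands in for any GfxFamily):

```cpp
#include <cassert>

// Sketch: the BO's only reference is the one held by its DrmAllocation;
// residency bookkeeping neither adds nor removes references.
template <typename Family>
void exampleResidencyRoundTrip(DrmCommandStreamReceiver<Family> &csr, DrmAllocation &alloc) {
    BufferObject *bo = alloc.getBO();
    assert(bo->getRefCount() == 1u); // sole owner: the allocation
    csr.makeResident(alloc);         // appends bo to the residency vector
    assert(bo->getRefCount() == 1u); // unchanged: no bo->reference()
    csr.makeNonResident(alloc);      // clears the vector
    assert(bo->getRefCount() == 1u); // unchanged: no unreference()
}
```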

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Intel Corporation
+ * Copyright (c) 2017 - 2018, Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -35,9 +35,8 @@ class DrmMemoryManager;
class DrmAllocation;

enum gemCloseWorkerMode {
-   gemCloseWorkerConsumingCommandBuffers,
    gemCloseWorkerInactive,
-   gemCloseWorkerConsumingResources
+   gemCloseWorkerActive
};
class DrmGemCloseWorker {
@@ -53,7 +52,7 @@ class DrmGemCloseWorker {
    bool isEmpty();

- private:
+ protected:
    void close(DrmAllocation *workItem);
    void closeThread();
    void worker();
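With the enum reduced to two values, the only decision left is whether a cleanup thread exists at all. The tests added at the end of this commit pin that down: gemCloseWorkerInactive leaves gemCloseWorker null, while gemCloseWorkerActive creates it. A plausible sketch of how the DrmMemoryManager constructor gates this (shape inferred from those tests, not the verbatim constructor):

```cpp
// Sketch of mode handling; member initialization details elided.
DrmMemoryManager::DrmMemoryManager(Drm *drm, gemCloseWorkerMode mode,
                                   bool forcePinAllowed, bool validateHostPtrMemory) {
    if (mode == gemCloseWorkerMode::gemCloseWorkerActive) {
        gemCloseWorker.reset(new DrmGemCloseWorker(*this)); // spawns the worker thread
    }
    // gemCloseWorkerInactive: gemCloseWorker stays nullptr and GEM closes run inline.
}
```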

View File

@@ -77,10 +77,6 @@ DrmMemoryManager::~DrmMemoryManager() {
    }
}

- void DrmMemoryManager::push(DrmAllocation *alloc) {
-     gemCloseWorker->push(alloc);
- }

void DrmMemoryManager::eraseSharedBufferObject(OCLRT::BufferObject *bo) {
    std::lock_guard<decltype(mtx)> lock(mtx);
@@ -109,9 +105,6 @@ uint32_t DrmMemoryManager::unreference(OCLRT::BufferObject *bo, bool synchronous
    uint32_t r = bo->refCount.fetch_sub(1);
    if (r == 1) {
-       for (auto it : *bo->getResidency()) {
-           unreference(it);
-       }
        auto unmapSize = bo->peekUnmapSize();
        auto address = bo->isAllocated || unmapSize > 0 ? bo->address : nullptr;
        auto allocatorType = bo->peekAllocationType();
@@ -537,14 +530,6 @@ BufferObject *DrmMemoryManager::getPinBB() const {
    return pinBB;
}

- void DrmMemoryManager::waitForDeletions() {
-     if (gemCloseWorker.get()) {
-         while (!gemCloseWorker->isEmpty())
-             ;
-     }
-     MemoryManager::waitForDeletions();
- }

bool DrmMemoryManager::setDomainCpu(GraphicsAllocation &graphicsAllocation, bool writeEnable) {
    DEBUG_BREAK_IF(writeEnable); //unsupported path (for CPU writes call SW_FINISH ioctl in unlockResource)

View File

@@ -70,11 +70,7 @@ class DrmMemoryManager : public MemoryManager {
    // drm/i915 ioctl wrappers
    uint32_t unreference(BufferObject *bo, bool synchronousDestroy = false);

-   // CloseWorker delegate
-   void push(DrmAllocation *alloc);
    DrmAllocation *createGraphicsAllocation(OsHandleStorage &handleStorage, size_t hostPtrSize, const void *hostPtr) override;
-   void waitForDeletions() override;
    bool isValidateHostMemoryEnabled() const {
        return validateHostPtrMemory;
    }

View File

@@ -39,7 +39,7 @@ HWTEST_F(DrmCommandStreamMMTest, MMwithPinBB) {
    std::unique_ptr<DrmMockCustom> mock(new DrmMockCustom());
    ASSERT_NE(nullptr, mock);
-   DrmCommandStreamReceiver<FamilyType> csr(*platformDevices[0], mock.get(), gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers);
+   DrmCommandStreamReceiver<FamilyType> csr(*platformDevices[0], mock.get(), gemCloseWorkerMode::gemCloseWorkerInactive);
    auto mm = (DrmMemoryManager *)csr.createMemoryManager(false);
    ASSERT_NE(nullptr, mm);
@@ -58,7 +58,7 @@ HWTEST_F(DrmCommandStreamMMTest, givenForcePinDisabledWhenMemoryManagerIsCreated
    std::unique_ptr<DrmMockCustom> mock(new DrmMockCustom());
    ASSERT_NE(nullptr, mock);
-   DrmCommandStreamReceiver<FamilyType> csr(*platformDevices[0], mock.get(), gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers);
+   DrmCommandStreamReceiver<FamilyType> csr(*platformDevices[0], mock.get(), gemCloseWorkerMode::gemCloseWorkerInactive);
    auto mm = (DrmMemoryManager *)csr.createMemoryManager(false);
    csr.setMemoryManager(nullptr);

View File

@@ -62,7 +62,7 @@ class DrmCommandStreamFixture {
    this->mock = new DrmMockImpl(mockFd);

-   csr = new DrmCommandStreamReceiver<DEFAULT_TEST_FAMILY_NAME>(*platformDevices[0], mock, gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers);
+   csr = new DrmCommandStreamReceiver<DEFAULT_TEST_FAMILY_NAME>(*platformDevices[0], mock, gemCloseWorkerMode::gemCloseWorkerInactive);
    ASSERT_NE(nullptr, csr);

    // Memory manager creates pinBB with ioctl, expect one call
@@ -77,8 +77,8 @@ class DrmCommandStreamFixture {
    void TearDown() {
+       mm->waitForDeletions();
        ::testing::Mock::VerifyAndClearExpectations(mock);
        delete csr;
        ::testing::Mock::VerifyAndClearExpectations(mock);
        // Memory manager closes pinBB with ioctl, expect one call
        EXPECT_CALL(*mock, ioctl(::testing::_, ::testing::_))
            .Times(::testing::AtLeast(1));
@@ -205,19 +205,20 @@ TEST_F(DrmCommandStreamTest, Flush) {
    .Times(1)
    .RetiresOnSaturation();
-   DrmAllocation *commandBuffer = static_cast<DrmAllocation *>(mm->allocateGraphicsMemory(1024, 4096));
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());
    ASSERT_NE(nullptr, commandBuffer);
    ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
    EXPECT_EQ(boHandle, commandBuffer->getBO()->peekHandle());
-   LinearStream cs(commandBuffer);
    csr->addBatchBufferEnd(cs, nullptr);
    csr->alignToCacheLine(cs);
    BatchBuffer batchBuffer{cs.getGraphicsAllocation(), 0, 0, nullptr, false, false, QueueThrottle::MEDIUM, cs.getUsed(), &cs};
+   auto availableSpacePriorToFlush = cs.getAvailableSpace();
    auto flushStamp = csr->flush(batchBuffer, EngineType::ENGINE_RCS, nullptr);
    EXPECT_EQ(static_cast<uint64_t>(boHandle), flushStamp);
-   EXPECT_EQ(cs.getCpuBase(), nullptr);
-   EXPECT_EQ(cs.getGraphicsAllocation(), nullptr);
+   EXPECT_NE(cs.getCpuBase(), nullptr);
+   EXPECT_EQ(availableSpacePriorToFlush, cs.getAvailableSpace());
}
TEST_F(DrmCommandStreamTest, FlushWithLowPriorityContext) {
@@ -240,17 +241,17 @@ TEST_F(DrmCommandStreamTest, FlushWithLowPriorityContext) {
    .Times(1)
    .RetiresOnSaturation();
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());
    ASSERT_NE(nullptr, commandBuffer);
    ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-   LinearStream cs(commandBuffer);
    csr->addBatchBufferEnd(cs, nullptr);
    csr->alignToCacheLine(cs);
    BatchBuffer batchBuffer{cs.getGraphicsAllocation(), 0, 0, nullptr, false, true, QueueThrottle::MEDIUM, cs.getUsed(), &cs};
    csr->flush(batchBuffer, EngineType::ENGINE_RCS, nullptr);
-   EXPECT_EQ(cs.getCpuBase(), nullptr);
-   EXPECT_EQ(cs.getGraphicsAllocation(), nullptr);
+   EXPECT_NE(cs.getCpuBase(), nullptr);
}
TEST_F(DrmCommandStreamTest, FlushInvalidAddress) {
@@ -282,37 +283,6 @@ TEST_F(DrmCommandStreamTest, FlushInvalidAddress) {
    delete[] commandBuffer;
}

- TEST_F(DrmCommandStreamTest, FlushMultipleTimes) {
-     auto expectedSize = alignUp(8u, MemoryConstants::cacheLineSize); // bbEnd
-     ::testing::InSequence inSequence;
-     EXPECT_CALL(*mock, ioctl(DRM_IOCTL_I915_GEM_USERPTR, ::testing::_))
-         .Times(1)
-         .WillRepeatedly(::testing::Return(0))
-         .RetiresOnSaturation();
-     EXPECT_CALL(*mock, ioctl(DRM_IOCTL_I915_GEM_EXECBUFFER2, BoExecFlushEq(0u, expectedSize)))
-         .Times(1)
-         .WillRepeatedly(::testing::Return(0))
-         .RetiresOnSaturation();
-     EXPECT_CALL(*mock, ioctl(DRM_IOCTL_I915_GEM_WAIT, ::testing::_))
-         .Times(1)
-         .RetiresOnSaturation();
-     EXPECT_CALL(*mock, ioctl(DRM_IOCTL_GEM_CLOSE, ::testing::_))
-         .Times(1)
-         .RetiresOnSaturation();
-     auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-     ASSERT_NE(nullptr, commandBuffer);
-     ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-     LinearStream cs(commandBuffer);
-     csr->addBatchBufferEnd(cs, nullptr);
-     csr->alignToCacheLine(cs);
-     BatchBuffer batchBuffer{cs.getGraphicsAllocation(), 0, 0, nullptr, false, false, QueueThrottle::MEDIUM, cs.getUsed(), &cs};
-     csr->flush(batchBuffer, EngineType::ENGINE_RCS, nullptr);
-     EXPECT_EQ(0u, cs.getAvailableSpace());
- }
TEST_F(DrmCommandStreamTest, FlushNotEmptyBB) {
uint32_t bbUsed = 16 * sizeof(uint32_t);
auto expectedSize = alignUp(bbUsed + 8, MemoryConstants::cacheLineSize); // bbUsed + bbEnd
@@ -334,10 +304,7 @@ TEST_F(DrmCommandStreamTest, FlushNotEmptyBB) {
    .Times(1)
    .RetiresOnSaturation();
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-   ASSERT_NE(nullptr, commandBuffer);
-   ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-   LinearStream cs(commandBuffer);
+   auto &cs = csr->getCS();
    cs.getSpace(bbUsed);

    csr->addBatchBufferEnd(cs, nullptr);
@@ -366,10 +333,7 @@ TEST_F(DrmCommandStreamTest, FlushNotEmptyNotPaddedBB) {
    .Times(1)
    .RetiresOnSaturation();
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-   ASSERT_NE(nullptr, commandBuffer);
-   ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-   LinearStream cs(commandBuffer);
+   auto &cs = csr->getCS();
    cs.getSpace(bbUsed);

    csr->addBatchBufferEnd(cs, nullptr);
@@ -383,13 +347,12 @@ TEST_F(DrmCommandStreamTest, FlushNotAligned) {
    .Times(1)
    .WillRepeatedly(::testing::Return(0));
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024 + 4, 128);
-   ASSERT_NE(nullptr, commandBuffer);
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());

    //make sure command buffer with offset is not page aligned
    ASSERT_NE(0u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & (this->alignment - 1));
    ASSERT_EQ(4u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & 0x7F);
-   LinearStream cs(commandBuffer);

    auto expectedSize = alignUp(8u, MemoryConstants::cacheLineSize); // bbEnd
@@ -444,9 +407,7 @@ TEST_F(DrmCommandStreamTest, FlushCheckFlags) {
    EXPECT_CALL(*mock, ioctl(DRM_IOCTL_I915_GEM_WAIT, ::testing::_))
        .WillRepeatedly(::testing::Return(0));
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 128);
-   ASSERT_NE(nullptr, commandBuffer);
-   LinearStream cs(commandBuffer);
+   auto &cs = csr->getCS();

    EXPECT_CALL(*mock, ioctl(
        DRM_IOCTL_I915_GEM_EXECBUFFER2,
@@ -469,13 +430,12 @@ TEST_F(DrmCommandStreamTest, CheckDrmFree) {
    .Times(1)
    .WillOnce(::testing::DoAll(UserptrSetHandle(17), ::testing::Return(0)));
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024 + 4, 128);
-   ASSERT_NE(nullptr, commandBuffer);
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());

    //make sure command buffer with offset is not page aligned
    ASSERT_NE(0u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & (this->alignment - 1));
    ASSERT_EQ(4u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & 0x7F);
-   LinearStream cs(commandBuffer);

    auto expectedSize = alignUp(8u, MemoryConstants::cacheLineSize); // bbEnd
@@ -511,13 +471,12 @@ TEST_F(DrmCommandStreamTest, CheckDrmFreeCloseFailed) {
    .Times(1)
    .WillOnce(::testing::DoAll(UserptrSetHandle(17), ::testing::Return(0)));
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024 + 4, 128);
-   ASSERT_NE(nullptr, commandBuffer);
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());

    //make sure command buffer with offset is not page aligned
    ASSERT_NE(0u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & (this->alignment - 1));
    ASSERT_EQ(4u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & 0x7F);
-   LinearStream cs(commandBuffer);

    auto expectedSize = alignUp(8u, MemoryConstants::cacheLineSize); // bbEnd
@@ -547,7 +506,7 @@ struct DrmCsrVfeTests : ::testing::Test {
    using DrmCommandStreamReceiver<FamilyType>::mediaVfeStateLowPriorityDirty;
    using CommandStreamReceiver::commandStream;

-   MyCsr() : DrmCommandStreamReceiver<FamilyType>(*platformDevices[0], nullptr, gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers) {}
+   MyCsr() : DrmCommandStreamReceiver<FamilyType>(*platformDevices[0], nullptr, gemCloseWorkerMode::gemCloseWorkerInactive) {}
    FlushStamp flush(BatchBuffer &batchBuffer, EngineType engineType, ResidencyContainer *allocationsForResidency) override {
        return (FlushStamp)0;
    }
@@ -749,7 +708,7 @@ class DrmCommandStreamEnhancedFixture
    TestedDrmCommandStreamReceiver(Drm *drm, gemCloseWorkerMode mode) : DrmCommandStreamReceiver<GfxFamily>(*platformDevices[0], drm, mode) {
    }
-   TestedDrmCommandStreamReceiver(Drm *drm) : DrmCommandStreamReceiver<GfxFamily>(*platformDevices[0], drm, gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers) {
+   TestedDrmCommandStreamReceiver(Drm *drm) : DrmCommandStreamReceiver<GfxFamily>(*platformDevices[0], drm, gemCloseWorkerMode::gemCloseWorkerInactive) {
    }
void overrideGemCloseWorkerOperationMode(gemCloseWorkerMode overrideValue) {
@@ -824,8 +783,8 @@ class DrmCommandStreamEnhancedFixture
};
typedef Test<DrmCommandStreamEnhancedFixture> DrmCommandStreamGemWorkerTests;

- TEST_F(DrmCommandStreamGemWorkerTests, givenDefaultDrmCSRWhenItIsCreatedThenGemCloseWorkerModeIsConsumigCommandBuffer) {
-     EXPECT_EQ(gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers, tCsr->peekGemCloseWorkerOperationMode());
+ TEST_F(DrmCommandStreamGemWorkerTests, givenDefaultDrmCSRWhenItIsCreatedThenGemCloseWorkerModeIsInactive) {
+     EXPECT_EQ(gemCloseWorkerMode::gemCloseWorkerInactive, tCsr->peekGemCloseWorkerOperationMode());
}
TEST_F(DrmCommandStreamGemWorkerTests, givenCommandStreamWhenItIsFlushedWithGemCloseWorkerInactiveModeThenCsIsNotNulled) {
@@ -1128,7 +1087,7 @@ TEST_F(DrmCommandStreamLeaksTest, makeResident) {
    EXPECT_TRUE(isResident(buffer));
    auto bo = getResident(buffer);
    EXPECT_EQ(bo, buffer);
-   EXPECT_EQ(2u, bo->getRefCount());
+   EXPECT_EQ(1u, bo->getRefCount());

    csr->makeNonResident(*allocation);
    EXPECT_FALSE(isResident(buffer));
@@ -1156,8 +1115,8 @@ TEST_F(DrmCommandStreamLeaksTest, makeResidentOnly) {
    auto bo2 = getResident(buffer2);
    EXPECT_EQ(bo1, buffer1);
    EXPECT_EQ(bo2, buffer2);
-   EXPECT_EQ(2u, bo1->getRefCount());
-   EXPECT_EQ(2u, bo2->getRefCount());
+   EXPECT_EQ(1u, bo1->getRefCount());
+   EXPECT_EQ(1u, bo2->getRefCount());

    // dont call makeNonResident on allocation2, any other makeNonResident call will clean this
    // we want to keep all makeResident calls before flush and makeNonResident everyting after flush
@@ -1177,7 +1136,7 @@ TEST_F(DrmCommandStreamLeaksTest, makeResidentTwice) {
    EXPECT_TRUE(isResident(buffer));
    auto bo1 = getResident(buffer);
    EXPECT_EQ(buffer, bo1);
-   EXPECT_EQ(2u, bo1->getRefCount());
+   EXPECT_EQ(1u, bo1->getRefCount());

    csr->getMemoryManager()->clearResidencyAllocations();
    csr->makeResident(*allocation);
@@ -1187,7 +1146,7 @@ TEST_F(DrmCommandStreamLeaksTest, makeResidentTwice) {
    auto bo2 = getResident(buffer);
    EXPECT_EQ(buffer, bo2);
    EXPECT_EQ(bo1, bo2);
-   EXPECT_EQ(2u, bo1->getRefCount());
+   EXPECT_EQ(1u, bo1->getRefCount());

    csr->makeNonResident(*allocation);
    EXPECT_FALSE(isResident(buffer));
@@ -1218,7 +1177,7 @@ TEST_F(DrmCommandStreamLeaksTest, makeResidentTwiceWhenFragmentStorage) {
    EXPECT_TRUE(isResident(bo));
    auto bo1 = getResident(bo);
    ASSERT_EQ(bo, bo1);
-   EXPECT_EQ(2u, bo1->getRefCount()); // only 1 refCount incrementation
+   EXPECT_EQ(1u, bo1->getRefCount());
}

csr->makeNonResident(*allocation);
@@ -1322,7 +1281,7 @@ TEST_F(DrmCommandStreamLeaksTest, GivenAllocationCreatedFromThreeFragmentsWhenMa
    EXPECT_TRUE(isResident(bo));
    auto bo1 = getResident(bo);
    ASSERT_EQ(bo, bo1);
-   EXPECT_EQ(2u, bo1->getRefCount());
+   EXPECT_EQ(1u, bo1->getRefCount());
}

csr->makeNonResident(*allocation);
for (int i = 0; i < max_fragments_count; i++) {
@@ -1360,7 +1319,7 @@ TEST_F(DrmCommandStreamLeaksTest, GivenAllocationsContainingDifferentCountOfFrag
    EXPECT_TRUE(isResident(bo));
    auto bo1 = getResident(bo);
    ASSERT_EQ(bo, bo1);
-   EXPECT_EQ(2u, bo1->getRefCount());
+   EXPECT_EQ(1u, bo1->getRefCount());
}

csr->makeNonResident(*allocation);
for (unsigned int i = 0; i < reqs.requiredFragmentsCount; i++) {
@@ -1392,7 +1351,7 @@ TEST_F(DrmCommandStreamLeaksTest, GivenAllocationsContainingDifferentCountOfFrag
    EXPECT_TRUE(isResident(bo));
    auto bo1 = getResident(bo);
    ASSERT_EQ(bo, bo1);
-   EXPECT_EQ(2u, bo1->getRefCount());
+   EXPECT_EQ(1u, bo1->getRefCount());
}

csr->makeNonResident(*allocation2);
for (unsigned int i = 0; i < reqs.requiredFragmentsCount; i++) {
@@ -1421,25 +1380,19 @@ TEST_F(DrmCommandStreamLeaksTest, makeResidentSizeZero) {
}

TEST_F(DrmCommandStreamLeaksTest, Flush) {
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-   ASSERT_NE(nullptr, commandBuffer);
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());
    ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-   LinearStream cs(commandBuffer);
    csr->addBatchBufferEnd(cs, nullptr);
    csr->alignToCacheLine(cs);
    BatchBuffer batchBuffer{cs.getGraphicsAllocation(), 0, 0, nullptr, false, false, QueueThrottle::MEDIUM, cs.getUsed(), &cs};
    csr->flush(batchBuffer, EngineType::ENGINE_RCS, nullptr);
-   EXPECT_EQ(cs.getCpuBase(), nullptr);
-   EXPECT_EQ(cs.getGraphicsAllocation(), nullptr);
+   EXPECT_NE(cs.getCpuBase(), nullptr);
+   EXPECT_NE(cs.getGraphicsAllocation(), nullptr);
}

TEST_F(DrmCommandStreamLeaksTest, ClearResidencyWhenFlushNotCalled) {
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-   ASSERT_NE(nullptr, commandBuffer);
-   ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-   LinearStream cs(commandBuffer);
    auto allocation1 = mm->allocateGraphicsMemory(1024, 4096);
    auto allocation2 = mm->allocateGraphicsMemory(1024, 4096);
    ASSERT_NE(nullptr, allocation1);
@@ -1454,8 +1407,8 @@ TEST_F(DrmCommandStreamLeaksTest, ClearResidencyWhenFlushNotCalled) {
    EXPECT_TRUE(isResident(allocation2->getBO()));
    EXPECT_EQ(tCsr->getResidencyVector()->size(), 2u);
-   EXPECT_EQ(allocation1->getBO()->getRefCount(), 2u);
-   EXPECT_EQ(allocation2->getBO()->getRefCount(), 2u);
+   EXPECT_EQ(allocation1->getBO()->getRefCount(), 1u);
+   EXPECT_EQ(allocation2->getBO()->getRefCount(), 1u);

    // makeNonResident without flush
    csr->makeNonResident(*allocation1);
@@ -1467,70 +1420,19 @@ TEST_F(DrmCommandStreamLeaksTest, ClearResidencyWhenFlushNotCalled) {
    EXPECT_EQ(allocation1->getBO()->getRefCount(), 1u);
    EXPECT_EQ(allocation2->getBO()->getRefCount(), 1u);

    mm->freeGraphicsMemory(allocation1);
    mm->freeGraphicsMemory(allocation2);
-   mm->freeGraphicsMemory(commandBuffer);
}

- TEST_F(DrmCommandStreamLeaksTest, ClearResidencyWhenFlushCalled) {
-     auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-     ASSERT_NE(nullptr, commandBuffer);
-     ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-     LinearStream cs(commandBuffer);
-     auto allocation1 = mm->allocateGraphicsMemory(1024, 4096);
-     auto allocation2 = mm->allocateGraphicsMemory(1024, 4096);
-     ASSERT_NE(nullptr, allocation1);
-     ASSERT_NE(nullptr, allocation2);
-     csr->makeResident(*allocation1);
-     csr->makeResident(*allocation2);
-     csr->addBatchBufferEnd(cs, nullptr);
-     csr->alignToCacheLine(cs);
-     EXPECT_EQ(0u, tCsr->getResidencyVector()->size());
-     EXPECT_FALSE(isResident(allocation1->getBO()));
-     EXPECT_FALSE(isResident(allocation2->getBO()));
-     BatchBuffer batchBuffer{cs.getGraphicsAllocation(), 0, 0, nullptr, false, false, QueueThrottle::MEDIUM, cs.getUsed(), &cs};
-     csr->flush(batchBuffer, EngineType::ENGINE_RCS, nullptr);
-     EXPECT_EQ(cs.getCpuBase(), nullptr);
-     EXPECT_EQ(cs.getGraphicsAllocation(), nullptr);
-     EXPECT_EQ(tCsr->getResidencyVector()->size(), 0u);
-     // wait for async thread to finish
-     while (allocation1->getBO()->getRefCount() > 1 ||
-            allocation2->getBO()->getRefCount() > 1)
-         ;
-     csr->makeNonResident(*allocation1);
-     csr->makeNonResident(*allocation2);
-     EXPECT_FALSE(allocation1->isResident());
-     EXPECT_FALSE(allocation2->isResident());
-     EXPECT_EQ(allocation1->getBO()->getRefCount(), 1u);
-     EXPECT_EQ(allocation2->getBO()->getRefCount(), 1u);
-     mm->freeGraphicsMemory(allocation1);
-     mm->freeGraphicsMemory(allocation2);
- }

TEST_F(DrmCommandStreamLeaksTest, FlushMultipleTimes) {
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-   ASSERT_NE(nullptr, commandBuffer);
-   ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-   LinearStream cs(commandBuffer);
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());
    csr->addBatchBufferEnd(cs, nullptr);
    csr->alignToCacheLine(cs);
    BatchBuffer batchBuffer{cs.getGraphicsAllocation(), 0, 0, nullptr, false, false, QueueThrottle::MEDIUM, cs.getUsed(), &cs};
    csr->flush(batchBuffer, EngineType::ENGINE_RCS, nullptr);

    commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
    ASSERT_NE(nullptr, commandBuffer);
    cs.replaceBuffer(commandBuffer->getUnderlyingBuffer(), commandBuffer->getUnderlyingBufferSize());
    cs.replaceGraphicsAllocation(commandBuffer);
    csr->addBatchBufferEnd(cs, nullptr);
@@ -1546,6 +1448,8 @@ TEST_F(DrmCommandStreamLeaksTest, FlushMultipleTimes) {
    csr->makeResident(*allocation);
    csr->makeResident(*allocation2);
+   mm->storeAllocation(std::unique_ptr<GraphicsAllocation>(commandBuffer), REUSABLE_ALLOCATION);
    commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
    ASSERT_NE(nullptr, commandBuffer);
    cs.replaceBuffer(commandBuffer->getUnderlyingBuffer(), commandBuffer->getUnderlyingBufferSize());
@@ -1558,6 +1462,7 @@ TEST_F(DrmCommandStreamLeaksTest, FlushMultipleTimes) {
    mm->freeGraphicsMemory(allocation);
    mm->freeGraphicsMemory(allocation2);
+   mm->storeAllocation(std::unique_ptr<GraphicsAllocation>(commandBuffer), REUSABLE_ALLOCATION);
    commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
    ASSERT_NE(nullptr, commandBuffer);
    cs.replaceBuffer(commandBuffer->getUnderlyingBuffer(), commandBuffer->getUnderlyingBufferSize());
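The two storeAllocation additions exist because flush no longer disposes of command buffers: in the removed consuming mode the flushed buffer was pushed to the worker for cleanup, whereas now a test that swaps buffers mid-stream must recycle the old one itself. The pattern, exactly as used above:

```cpp
// Hand the finished command buffer back to the reuse pool, then install a fresh one.
mm->storeAllocation(std::unique_ptr<GraphicsAllocation>(commandBuffer), REUSABLE_ALLOCATION);
commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
cs.replaceBuffer(commandBuffer->getUnderlyingBuffer(), commandBuffer->getUnderlyingBufferSize());
cs.replaceGraphicsAllocation(commandBuffer);
```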
@@ -1571,10 +1476,8 @@ TEST_F(DrmCommandStreamLeaksTest, FlushMultipleTimes) {

TEST_F(DrmCommandStreamLeaksTest, FlushNotEmptyBB) {
    int bbUsed = 16 * sizeof(uint32_t);

-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-   ASSERT_NE(nullptr, commandBuffer);
-   ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-   LinearStream cs(commandBuffer);
+   auto &cs = csr->getCS();
    cs.getSpace(bbUsed);

    csr->addBatchBufferEnd(cs, nullptr);
@@ -1586,10 +1489,8 @@ TEST_F(DrmCommandStreamLeaksTest, FlushNotEmptyBB) {

TEST_F(DrmCommandStreamLeaksTest, FlushNotEmptyNotPaddedBB) {
    int bbUsed = 15 * sizeof(uint32_t);

-   auto *commandBuffer = mm->allocateGraphicsMemory(1024, 4096);
-   ASSERT_NE(nullptr, commandBuffer);
-   ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) & 0xFFF);
-   LinearStream cs(commandBuffer);
+   auto &cs = csr->getCS();
    cs.getSpace(bbUsed);

    csr->addBatchBufferEnd(cs, nullptr);
@@ -1599,13 +1500,12 @@ TEST_F(DrmCommandStreamLeaksTest, FlushNotEmptyNotPaddedBB) {
}

TEST_F(DrmCommandStreamLeaksTest, FlushNotAligned) {
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024 + 4, 128);
-   ASSERT_NE(nullptr, commandBuffer);
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());

    //make sure command buffer with offset is not page aligned
    ASSERT_NE(0u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & 0xFFF);
    ASSERT_EQ(4u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & 0x7F);
-   LinearStream cs(commandBuffer);

    csr->addBatchBufferEnd(cs, nullptr);
    csr->alignToCacheLine(cs);
@@ -1614,13 +1514,12 @@ TEST_F(DrmCommandStreamLeaksTest, FlushNotAligned) {
}

TEST_F(DrmCommandStreamLeaksTest, CheckDrmFree) {
-   auto *commandBuffer = mm->allocateGraphicsMemory(1024 + 4, 128);
-   ASSERT_NE(nullptr, commandBuffer);
+   auto &cs = csr->getCS();
+   auto commandBuffer = static_cast<DrmAllocation *>(cs.getGraphicsAllocation());

    //make sure command buffer with offset is not page aligned
    ASSERT_NE(0u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & 0xFFF);
    ASSERT_EQ(4u, (reinterpret_cast<uintptr_t>(commandBuffer->getUnderlyingBuffer()) + 4) & 0x7F);
-   LinearStream cs(commandBuffer);

    auto allocation = mm->allocateGraphicsMemory(1024, 128);

View File

@@ -79,7 +79,7 @@ class DrmGemCloseWorkerFixture {
    this->drmMock->gem_close_cnt = 0;
    this->drmMock->gem_close_expected = 0;

-   this->mm = new DrmMemoryManager(this->drmMock, gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers, false, false);
+   this->mm = new DrmMemoryManager(this->drmMock, gemCloseWorkerMode::gemCloseWorkerInactive, false, false);
}

void TearDown() {
void TearDown() {
@@ -175,3 +175,28 @@ TEST_F(DrmGemCloseWorkerTests, givenAllocationWhenAskedForUnreferenceWithForceFl
    delete worker;
}

+ TEST_F(DrmGemCloseWorkerTests, givenDrmGemCloseWorkerWhenCloseIsCalledWithBlockingFlagThenThreadIsClosed) {
+     struct mockDrmGemCloseWorker : DrmGemCloseWorker {
+         using DrmGemCloseWorker::DrmGemCloseWorker;
+         using DrmGemCloseWorker::thread;
+     };
+
+     std::unique_ptr<mockDrmGemCloseWorker> worker(new mockDrmGemCloseWorker(*mm));
+
+     EXPECT_NE(nullptr, worker->thread);
+     worker->close(true);
+     EXPECT_EQ(nullptr, worker->thread);
+ }
+
+ TEST_F(DrmGemCloseWorkerTests, givenDrmGemCloseWorkerWhenCloseIsCalledMultipleTimeWithBlockingFlagThenThreadIsClosed) {
+     struct mockDrmGemCloseWorker : DrmGemCloseWorker {
+         using DrmGemCloseWorker::DrmGemCloseWorker;
+         using DrmGemCloseWorker::thread;
+     };
+
+     std::unique_ptr<mockDrmGemCloseWorker> worker(new mockDrmGemCloseWorker(*mm));
+     worker->close(true);
+     worker->close(true);
+     worker->close(true);
+     EXPECT_EQ(nullptr, worker->thread);
+ }
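These two tests fix the contract of close(true): it must join and discard the worker thread (thread becomes nullptr), and repeated calls must be harmless. A minimal sketch satisfying both properties; active and condition are assumed member names, and thread is assumed to be a std::unique_ptr<std::thread>, as the nullptr comparisons in the tests suggest:

```cpp
// Sketch only: blocking, idempotent shutdown of the worker thread.
void DrmGemCloseWorker::close(bool blocking) {
    active = false;         // assumed stop flag polled by the worker loop
    condition.notify_all(); // assumed wake-up if the worker waits for new work
    if (blocking && thread) {
        thread->join();
        thread.reset();     // later close(true) calls observe nullptr and return
    }
}
```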

View File

@@ -82,7 +82,7 @@ class TestedDrmMemoryManager : public DrmMemoryManager {
    using DrmMemoryManager::allocUserptr;
    using DrmMemoryManager::setDomainCpu;

-   TestedDrmMemoryManager(Drm *drm) : DrmMemoryManager(drm, gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers, false, false) {
+   TestedDrmMemoryManager(Drm *drm) : DrmMemoryManager(drm, gemCloseWorkerMode::gemCloseWorkerInactive, false, false) {
    this->lseekFunction = &lseekMock;
    this->mmapFunction = &mmapMock;
    this->munmapFunction = &munmapMock;
@@ -92,7 +92,7 @@ class TestedDrmMemoryManager : public DrmMemoryManager {
    mmapMockCallCount = 0;
    munmapMockCallCount = 0;
    };

-   TestedDrmMemoryManager(Drm *drm, bool allowForcePin, bool validateHostPtrMemory) : DrmMemoryManager(drm, gemCloseWorkerMode::gemCloseWorkerConsumingCommandBuffers, allowForcePin, validateHostPtrMemory) {
+   TestedDrmMemoryManager(Drm *drm, bool allowForcePin, bool validateHostPtrMemory) : DrmMemoryManager(drm, gemCloseWorkerMode::gemCloseWorkerInactive, allowForcePin, validateHostPtrMemory) {
    this->lseekFunction = &lseekMock;
    this->mmapFunction = &mmapMock;
    this->munmapFunction = &munmapMock;
@@ -131,7 +131,9 @@ class DrmMemoryManagerFixture : public MemoryManagementFixture {
    memoryManager = new (std::nothrow) TestedDrmMemoryManager(this->mock);
    //assert we have memory manager
    ASSERT_NE(nullptr, memoryManager);
-   memoryManager->getgemCloseWorker()->close(true);
+   if (memoryManager->getgemCloseWorker()) {
+       memoryManager->getgemCloseWorker()->close(true);
+   }
}
void TearDown() override {
@@ -158,7 +160,9 @@ class DrmMemoryManagerFixtureWithoutQuietIoctlExpectation : public MemoryManagem
    this->mock = new DrmMockCustom;
    memoryManager = new (std::nothrow) TestedDrmMemoryManager(this->mock);
    ASSERT_NE(nullptr, memoryManager);
-   memoryManager->getgemCloseWorker()->close(true);
+   if (memoryManager->getgemCloseWorker()) {
+       memoryManager->getgemCloseWorker()->close(true);
+   }
}

void TearDown() override {
@@ -381,6 +385,17 @@ TEST_F(DrmMemoryManagerWithExplicitExpectationsTest, givenDrmMemoryManagerCreate
    EXPECT_EQ(nullptr, drmMemoryManger.getgemCloseWorker());
}

+ TEST_F(DrmMemoryManagerWithExplicitExpectationsTest, givenDrmMemoryManagerCreatedWithGemCloseWorkerActiveThenGemCloseWorkerIsCreated) {
+     class MyTestedDrmMemoryManager : public DrmMemoryManager {
+       public:
+         MyTestedDrmMemoryManager(Drm *drm, gemCloseWorkerMode mode) : DrmMemoryManager(drm, mode, false, false) {}
+         DrmGemCloseWorker *getgemCloseWorker() { return this->gemCloseWorker.get(); }
+     };
+     MyTestedDrmMemoryManager drmMemoryManger(this->mock, gemCloseWorkerMode::gemCloseWorkerActive);
+     EXPECT_NE(nullptr, drmMemoryManger.getgemCloseWorker());
+ }
TEST_F(DrmMemoryManagerTest, AllocateThenFree) {
mock->ioctl_expected.gemUserptr = 1;
mock->ioctl_expected.gemWait = 1;
@@ -2332,19 +2347,6 @@ TEST(DrmMemoryManager, givenDefaultDrmMemoryManagerWhenItIsQueriedForInternalHea
    EXPECT_EQ(heapBase, memoryManager->getInternalHeapBaseAddress());
}

- TEST(DrmMemoryManager, givenEnabledGemCloseWorkerWhenWaitForDeletionsIsCalledThenGemCloseWorkerIsEmpty) {
-     TestedDrmMemoryManager memoryManager(Drm::get(0));
-     auto gemCloseWorker = memoryManager.getgemCloseWorker();
-     EXPECT_TRUE(gemCloseWorker->isEmpty());
-
-     auto allocation = memoryManager.allocateGraphicsMemory(1024, 4096, false, false);
-     memoryManager.push(allocation);
-
-     memoryManager.waitForDeletions();
-     EXPECT_TRUE(gemCloseWorker->isEmpty());
- }
TEST(DrmMemoryManager, givenMemoryManagerWithEnabledHostMemoryValidationWhenFeatureIsQueriedThenTrueIsReturned) {
std::unique_ptr<TestedDrmMemoryManager> memoryManager(new (std::nothrow) TestedDrmMemoryManager(Drm::get(0), false, true));
ASSERT_NE(nullptr, memoryManager.get());