Mirror of https://github.com/intel/compute-runtime.git
Remove GMock from GMockDrmMemoryManager, MockOSMemoryLinux, MyCsr...
Removed Gmock from:
- GMockDrmMemoryManager
- MockOSMemoryLinux
- MyCsr
- GmockGfxPartition

Renamed:
- GMockDrmMemoryManager -> MockTestedDrmMemoryManager

Moved class body:
- GmockGfxPartition -> MockGfxPartition
- MockTestedDrmMemoryManager -> TestedDrmMemoryManager

Related-To: NEO-4914
Signed-off-by: Fabian Zwolinski <fabian.zwolinski@intel.com>
Committed by: Compute-Runtime-Automation
Parent: e51f8ceb0d
Commit: 4e31612c31
@@ -29,7 +29,24 @@ class MyCsr : public UltCommandStreamReceiver<Family> {
   public:
     MyCsr(const ExecutionEnvironment &executionEnvironment, const DeviceBitfield deviceBitfield)
         : UltCommandStreamReceiver<Family>(const_cast<ExecutionEnvironment &>(executionEnvironment), 0, deviceBitfield) {}
-    MOCK_METHOD3(waitForCompletionWithTimeout, WaitStatus(bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait));
+    WaitStatus waitForCompletionWithTimeout(bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) override {
+        waitForCompletionWithTimeoutCalled++;
+        waitForCompletionWithTimeoutParamsPassed.push_back({enableTimeout, timeoutMs, taskCountToWait});
+        *this->getTagAddress() = getTagAddressValue;
+        return waitForCompletionWithTimeoutResult;
+    }
+
+    struct WaitForCompletionWithTimeoutParams {
+        bool enableTimeout;
+        int64_t timeoutMs;
+        uint32_t taskCountToWait;
+    };
+
+    uint32_t waitForCompletionWithTimeoutCalled = 0u;
+    WaitStatus waitForCompletionWithTimeoutResult = NEO::WaitStatus::Ready;
+    StackVec<WaitForCompletionWithTimeoutParams, 2> waitForCompletionWithTimeoutParamsPassed{};
+    uint32_t getTagAddressValue{};
 };
 
 void CL_CALLBACK emptyDestructorCallback(cl_mem memObj, void *userData) {
@@ -141,46 +158,48 @@ HWTEST_P(MemObjAsyncDestructionTest, givenUsedMemObjWithAsyncDestructionsEnabled
 
     auto rootDeviceIndex = device->getRootDeviceIndex();
 
-    auto mockCsr0 = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
-    auto mockCsr1 = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr0 = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr1 = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr0, 0);
     device->resetCommandStreamReceiver(mockCsr1, 1);
     *mockCsr0->getTagAddress() = 0;
     *mockCsr1->getTagAddress() = 0;
 
-    auto waitForCompletionWithTimeoutMock0 = [&mockCsr0](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) -> NEO::WaitStatus {
-        *mockCsr0->getTagAddress() = taskCountReady;
-        return NEO::WaitStatus::Ready;
-    };
-    auto waitForCompletionWithTimeoutMock1 = [&mockCsr1](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) -> NEO::WaitStatus {
-        *mockCsr1->getTagAddress() = taskCountReady;
-        return NEO::WaitStatus::Ready;
-    };
+    mockCsr0->getTagAddressValue = taskCountReady;
+    mockCsr1->getTagAddressValue = taskCountReady;
     auto osContextId0 = mockCsr0->getOsContext().getContextId();
     auto osContextId1 = mockCsr1->getOsContext().getContextId();
 
     memObj->getGraphicsAllocation(rootDeviceIndex)->updateTaskCount(taskCountReady, osContextId0);
     memObj->getGraphicsAllocation(rootDeviceIndex)->updateTaskCount(taskCountReady, osContextId1);
 
-    ON_CALL(*mockCsr0, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock0));
-    ON_CALL(*mockCsr1, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock1));
+    uint32_t expectedTaskCount0{};
+    uint32_t expectedTaskCount1{};
 
     if (hasCallbacks) {
-        EXPECT_CALL(*mockCsr0, waitForCompletionWithTimeout(::testing::_, TimeoutControls::maxTimeout, allocation->getTaskCount(osContextId0)))
-            .Times(1);
-        EXPECT_CALL(*mockCsr1, waitForCompletionWithTimeout(::testing::_, TimeoutControls::maxTimeout, allocation->getTaskCount(osContextId1)))
-            .Times(1);
+        expectedTaskCount0 = allocation->getTaskCount(osContextId0);
+        expectedTaskCount1 = allocation->getTaskCount(osContextId1);
     } else {
         *mockCsr0->getTagAddress() = taskCountReady;
         *mockCsr1->getTagAddress() = taskCountReady;
-        EXPECT_CALL(*mockCsr0, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-            .Times(0);
-        EXPECT_CALL(*mockCsr1, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-            .Times(0);
     }
 
     delete memObj;
 
+    if (hasCallbacks) {
+        EXPECT_EQ(1u, mockCsr0->waitForCompletionWithTimeoutCalled);
+        EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr0->waitForCompletionWithTimeoutParamsPassed[0].timeoutMs);
+        EXPECT_EQ(expectedTaskCount0, mockCsr0->waitForCompletionWithTimeoutParamsPassed[0].taskCountToWait);
+
+        EXPECT_EQ(1u, mockCsr1->waitForCompletionWithTimeoutCalled);
+        EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr1->waitForCompletionWithTimeoutParamsPassed[0].timeoutMs);
+        EXPECT_EQ(expectedTaskCount1, mockCsr1->waitForCompletionWithTimeoutParamsPassed[0].taskCountToWait);
+    } else {
+        EXPECT_EQ(0u, mockCsr0->waitForCompletionWithTimeoutCalled);
+        EXPECT_EQ(0u, mockCsr1->waitForCompletionWithTimeoutCalled);
+    }
 }
 
 HWTEST_P(MemObjAsyncDestructionTest, givenUsedMemObjWithAsyncDestructionsEnabledThatHasAllocatedMappedPtrWhenItIsDestroyedThenDestructorWaitsOnTaskCount) {
@@ -193,26 +212,26 @@ HWTEST_P(MemObjAsyncDestructionTest, givenUsedMemObjWithAsyncDestructionsEnabled
         memObj->setAllocatedMapPtr(allocatedPtr);
     }
 
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 0;
     auto osContextId = mockCsr->getOsContext().getContextId();
 
-    auto desired = NEO::WaitStatus::Ready;
-
-    auto waitForCompletionWithTimeoutMock = [=](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) { return desired; };
-
-    ON_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock));
+    uint32_t expectedTaskCount{};
 
     if (hasAllocatedMappedPtr) {
-        EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, TimeoutControls::maxTimeout, allocation->getTaskCount(osContextId)))
-            .Times(1);
-    } else {
-        EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-            .Times(0);
+        expectedTaskCount = allocation->getTaskCount(osContextId);
     }
 
     delete memObj;
 
+    if (hasAllocatedMappedPtr) {
+        EXPECT_EQ(1u, mockCsr->waitForCompletionWithTimeoutCalled);
+        EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].timeoutMs);
+        EXPECT_EQ(expectedTaskCount, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].taskCountToWait);
+    } else {
+        EXPECT_EQ(0u, mockCsr->waitForCompletionWithTimeoutCalled);
+    }
 }
 
 HWTEST_P(MemObjAsyncDestructionTest, givenUsedMemObjWithAsyncDestructionsEnabledThatHasDestructableMappedPtrWhenItIsDestroyedThenDestructorWaitsOnTaskCount) {
@@ -236,30 +255,31 @@ HWTEST_P(MemObjAsyncDestructionTest, givenUsedMemObjWithAsyncDestructionsEnabled
     }
 
     makeMemObjUsed();
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 0;
 
-    auto desired = NEO::WaitStatus::Ready;
-
-    auto waitForCompletionWithTimeoutMock = [=](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) { return desired; };
     auto osContextId = mockCsr->getOsContext().getContextId();
 
-    ON_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock));
+    uint32_t expectedTaskCount{};
 
     if (hasAllocatedMappedPtr) {
-        EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, TimeoutControls::maxTimeout, allocation->getTaskCount(osContextId)))
-            .Times(1);
-    } else {
-        EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-            .Times(0);
+        expectedTaskCount = allocation->getTaskCount(osContextId);
     }
 
     delete memObj;
 
     if (!hasAllocatedMappedPtr) {
         alignedFree(storage);
     }
 
+    if (hasAllocatedMappedPtr) {
+        EXPECT_EQ(1u, mockCsr->waitForCompletionWithTimeoutCalled);
+        EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].timeoutMs);
+        EXPECT_EQ(expectedTaskCount, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].taskCountToWait);
+    } else {
+        EXPECT_EQ(0u, mockCsr->waitForCompletionWithTimeoutCalled);
+    }
 }
 
 HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithDestructableAllocationWhenAsyncDestructionsAreDisabledThenDestructorWaitsOnTaskCount) {
@@ -271,22 +291,19 @@ HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithDestructableAllocationWhenAsy
     } else {
         makeMemObjNotReady();
     }
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 0;
 
-    auto desired = NEO::WaitStatus::Ready;
-
-    auto waitForCompletionWithTimeoutMock = [=](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) { return desired; };
     auto osContextId = mockCsr->getOsContext().getContextId();
 
-    ON_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock));
-
-    EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, TimeoutControls::maxTimeout, allocation->getTaskCount(osContextId)))
-        .Times(1);
+    uint32_t expectedTaskCount = allocation->getTaskCount(osContextId);
 
     delete memObj;
 
+    EXPECT_EQ(1u, mockCsr->waitForCompletionWithTimeoutCalled);
+    EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].timeoutMs);
+    EXPECT_EQ(expectedTaskCount, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].taskCountToWait);
 }
 
 HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithDestructableAllocationWhenAsyncDestructionsAreDisabledThenAllocationIsNotDeferred) {
@@ -298,17 +315,10 @@ HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithDestructableAllocationWhenAsy
     } else {
         makeMemObjNotReady();
     }
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 0;
 
-    auto desired = NEO::WaitStatus::Ready;
-
-    auto waitForCompletionWithTimeoutMock = [=](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) { return desired; };
-
-    ON_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock));
-
     delete memObj;
     auto &allocationList = mockCsr->getTemporaryAllocations();
     EXPECT_TRUE(allocationList.peekIsEmpty());
@@ -317,7 +327,7 @@ HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithDestructableAllocationWhenAsy
 HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithMapAllocationWhenAsyncDestructionsAreDisabledThenWaitForCompletionWithTimeoutOnMapAllocation) {
     auto isMapAllocationUsed = GetParam();
 
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 0;
 
@@ -335,27 +345,29 @@ HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithMapAllocationWhenAsyncDestruc
         memObj->getMapAllocation(device->getRootDeviceIndex())->updateTaskCount(taskCountReady, contextId);
     }
 
-    auto waitForCompletionWithTimeoutMock = [=](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) { return NEO::WaitStatus::Ready; };
     auto osContextId = mockCsr->getOsContext().getContextId();
 
-    ON_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock));
+    uint32_t expectedTaskCount{};
 
     if (isMapAllocationUsed) {
-        EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, TimeoutControls::maxTimeout, mapAllocation->getTaskCount(osContextId)))
-            .Times(1);
-    } else {
-        EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-            .Times(0);
+        expectedTaskCount = mapAllocation->getTaskCount(osContextId);
     }
 
     delete memObj;
 
+    if (isMapAllocationUsed) {
+        EXPECT_EQ(1u, mockCsr->waitForCompletionWithTimeoutCalled);
+        EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].timeoutMs);
+        EXPECT_EQ(expectedTaskCount, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].taskCountToWait);
+    } else {
+        EXPECT_EQ(0u, mockCsr->waitForCompletionWithTimeoutCalled);
+    }
 }
 
 HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithMapAllocationWhenAsyncDestructionsAreDisabledThenMapAllocationIsNotDeferred) {
     auto hasMapAllocation = GetParam();
 
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 0;
 
@@ -386,7 +398,7 @@ HWTEST_P(MemObjSyncDestructionTest, givenMemObjWithMapAllocationWhenAsyncDestruc
 HWTEST_P(MemObjAsyncDestructionTest, givenMemObjWithMapAllocationWithoutMemUseHostPtrFlagWhenAsyncDestructionsAreEnabledThenMapAllocationIsDeferred) {
     auto hasMapAllocation = GetParam();
 
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 0;
 
@@ -425,7 +437,7 @@ HWTEST_P(MemObjAsyncDestructionTest, givenMemObjWithMapAllocationWithoutMemUseHo
 HWTEST_P(MemObjAsyncDestructionTest, givenMemObjWithMapAllocationWithMemUseHostPtrFlagWhenAsyncDestructionsAreEnabledThenMapAllocationIsNotDeferred) {
     auto hasMapAllocation = GetParam();
 
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*device->executionEnvironment, device->getDeviceBitfield());
+    auto mockCsr = new MyCsr<FamilyType>(*device->executionEnvironment, device->getDeviceBitfield());
     device->resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 0;
 
@@ -486,7 +498,7 @@ HWTEST_F(UsmDestructionTests, givenSharedUsmAllocationWhenBlockingFreeIsCalledTh
         GTEST_SKIP();
     }
 
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*mockDevice.executionEnvironment, 1);
+    auto mockCsr = new MyCsr<FamilyType>(*mockDevice.executionEnvironment, 1);
     mockDevice.resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 5u;
 
@@ -498,15 +510,16 @@ HWTEST_F(UsmDestructionTests, givenSharedUsmAllocationWhenBlockingFreeIsCalledTh
 
     auto svmEntry = svmAllocationsManager->getSVMAlloc(sharedMemory);
 
-    auto waitForCompletionWithTimeoutMock = [=](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) { return NEO::WaitStatus::Ready; };
-    ON_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock));
     svmEntry->gpuAllocations.getGraphicsAllocation(mockDevice.getRootDeviceIndex())->updateTaskCount(6u, 0u);
     svmEntry->cpuAllocation->updateTaskCount(6u, 0u);
-    EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, TimeoutControls::maxTimeout, 6u))
-        .Times(2);
 
     clMemBlockingFreeINTEL(&mockContext, sharedMemory);
 
+    EXPECT_EQ(2u, mockCsr->waitForCompletionWithTimeoutCalled);
+    EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].timeoutMs);
+    EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr->waitForCompletionWithTimeoutParamsPassed[1].timeoutMs);
+    EXPECT_EQ(6u, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].taskCountToWait);
+    EXPECT_EQ(6u, mockCsr->waitForCompletionWithTimeoutParamsPassed[1].taskCountToWait);
 }
 
 HWTEST_F(UsmDestructionTests, givenUsmAllocationWhenBlockingFreeIsCalledThenWaitForCompletionIsCalled) {
@@ -519,7 +532,7 @@ HWTEST_F(UsmDestructionTests, givenUsmAllocationWhenBlockingFreeIsCalledThenWait
         GTEST_SKIP();
     }
 
-    auto mockCsr = new ::testing::NiceMock<MyCsr<FamilyType>>(*mockDevice.executionEnvironment, 1);
+    auto mockCsr = new MyCsr<FamilyType>(*mockDevice.executionEnvironment, 1);
     mockDevice.resetCommandStreamReceiver(mockCsr);
     *mockCsr->getTagAddress() = 5u;
 
@@ -531,12 +544,11 @@ HWTEST_F(UsmDestructionTests, givenUsmAllocationWhenBlockingFreeIsCalledThenWait
 
     auto svmEntry = svmAllocationsManager->getSVMAlloc(hostMemory);
 
-    auto waitForCompletionWithTimeoutMock = [=](bool enableTimeout, int64_t timeoutMs, uint32_t taskCountToWait) { return NEO::WaitStatus::Ready; };
-    ON_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, ::testing::_, ::testing::_))
-        .WillByDefault(::testing::Invoke(waitForCompletionWithTimeoutMock));
     svmEntry->gpuAllocations.getGraphicsAllocation(mockDevice.getRootDeviceIndex())->updateTaskCount(6u, 0u);
-    EXPECT_CALL(*mockCsr, waitForCompletionWithTimeout(::testing::_, TimeoutControls::maxTimeout, 6u))
-        .Times(1);
 
     clMemBlockingFreeINTEL(&mockContext, hostMemory);
 
+    EXPECT_EQ(1u, mockCsr->waitForCompletionWithTimeoutCalled);
+    EXPECT_EQ(TimeoutControls::maxTimeout, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].timeoutMs);
+    EXPECT_EQ(6u, mockCsr->waitForCompletionWithTimeoutParamsPassed[0].taskCountToWait);
 }
@@ -4068,15 +4068,13 @@ TEST_F(DrmMemoryManagerTest, givenSvmCpuAllocationWhenSizeAndAlignmentProvidedBu
 TEST_F(DrmMemoryManagerTest, givenDrmMemoryManagerAndReleaseGpuRangeIsCalledThenGpuAddressIsDecanonized) {
     constexpr size_t reservedCpuAddressRangeSize = is64bit ? (6 * 4 * GB) : 0;
     auto hwInfo = defaultHwInfo.get();
-    auto mockGfxPartition = std::make_unique<GmockGfxPartition>();
+    auto mockGfxPartition = std::make_unique<MockGfxPartition>();
     mockGfxPartition->init(hwInfo->capabilityTable.gpuAddressSpace, reservedCpuAddressRangeSize, 0, 1);
     auto size = 2 * MemoryConstants::megaByte;
     auto gpuAddress = mockGfxPartition->heapAllocate(HeapIndex::HEAP_STANDARD, size);
     auto gpuAddressCanonized = GmmHelper::canonize(gpuAddress);
     EXPECT_LE(gpuAddress, gpuAddressCanonized);
 
-    EXPECT_CALL(*mockGfxPartition.get(), freeGpuAddressRange(gpuAddress, size));
-
     memoryManager->overrideGfxPartition(mockGfxPartition.release());
     memoryManager->releaseGpuRange(reinterpret_cast<void *>(gpuAddressCanonized), size, 0);
 
@@ -4084,51 +4082,26 @@ TEST_F(DrmMemoryManagerTest, givenDrmMemoryManagerAndReleaseGpuRangeIsCalledThen
     memoryManager->overrideGfxPartition(mockGfxPartitionBasic.release());
 }
 
-class GMockDrmMemoryManager : public TestedDrmMemoryManager {
-  public:
-    GMockDrmMemoryManager(ExecutionEnvironment &executionEnvironment) : TestedDrmMemoryManager(executionEnvironment) {
-        ON_CALL(*this, unreference).WillByDefault([this](BufferObject *bo, bool synchronousDestroy) {
-            return this->baseUnreference(bo, synchronousDestroy);
-        });
-
-        ON_CALL(*this, releaseGpuRange).WillByDefault([this](void *ptr, size_t size, uint32_t rootDeviceIndex) {
-            return this->baseReleaseGpuRange(ptr, size, rootDeviceIndex);
-        });
-
-        ON_CALL(*this, alignedFreeWrapper).WillByDefault([this](void *ptr) {
-            return this->baseAlignedFreeWrapper(ptr);
-        });
-    }
-
-    MOCK_METHOD2(unreference, uint32_t(BufferObject *, bool));
-    MOCK_METHOD3(releaseGpuRange, void(void *, size_t, uint32_t));
-    MOCK_METHOD1(alignedFreeWrapper, void(void *));
-
-    uint32_t baseUnreference(BufferObject *bo, bool synchronousDestroy) { return TestedDrmMemoryManager::unreference(bo, synchronousDestroy); }
-    void baseReleaseGpuRange(void *ptr, size_t size, uint32_t rootDeviceIndex) { TestedDrmMemoryManager::releaseGpuRange(ptr, size, rootDeviceIndex); }
-    void baseAlignedFreeWrapper(void *ptr) { TestedDrmMemoryManager::alignedFreeWrapper(ptr); }
-};
-
 TEST(DrmMemoryManagerFreeGraphicsMemoryCallSequenceTest, givenDrmMemoryManagerAndFreeGraphicsMemoryIsCalledThenUnreferenceBufferObjectIsCalledFirstWithSynchronousDestroySetToTrue) {
     MockExecutionEnvironment executionEnvironment(defaultHwInfo.get());
     executionEnvironment.rootDeviceEnvironments[0]->osInterface = std::make_unique<OSInterface>();
     auto drm = Drm::create(nullptr, *executionEnvironment.rootDeviceEnvironments[0]);
     executionEnvironment.rootDeviceEnvironments[0]->osInterface->setDriverModel(std::unique_ptr<DriverModel>(drm));
     executionEnvironment.rootDeviceEnvironments[0]->memoryOperationsInterface = DrmMemoryOperationsHandler::create(*drm, 0u);
-    GMockDrmMemoryManager gmockDrmMemoryManager(executionEnvironment);
+    TestedDrmMemoryManager memoryManger(executionEnvironment);
 
     AllocationProperties properties{mockRootDeviceIndex, MemoryConstants::pageSize, AllocationType::BUFFER, mockDeviceBitfield};
-    auto allocation = gmockDrmMemoryManager.allocateGraphicsMemoryWithProperties(properties);
+    auto allocation = memoryManger.allocateGraphicsMemoryWithProperties(properties);
     ASSERT_NE(allocation, nullptr);
 
-    {
-        ::testing::InSequence inSequence;
-        EXPECT_CALL(gmockDrmMemoryManager, unreference(::testing::_, true)).Times(EngineLimits::maxHandleCount);
-        EXPECT_CALL(gmockDrmMemoryManager, releaseGpuRange(::testing::_, ::testing::_, ::testing::_)).Times(1);
-        EXPECT_CALL(gmockDrmMemoryManager, alignedFreeWrapper(::testing::_)).Times(1);
-    }
-
-    gmockDrmMemoryManager.freeGraphicsMemory(allocation);
+    memoryManger.freeGraphicsMemory(allocation);
+
+    EXPECT_EQ(EngineLimits::maxHandleCount, memoryManger.unreferenceCalled);
+    for (size_t i = 0; i < EngineLimits::maxHandleCount; ++i) {
+        EXPECT_TRUE(memoryManger.unreferenceParamsPassed[i].synchronousDestroy);
+    }
+    EXPECT_EQ(1u, memoryManger.releaseGpuRangeCalled);
+    EXPECT_EQ(1u, memoryManger.alignedFreeWrapperCalled);
 }
 
 TEST(DrmMemoryManagerFreeGraphicsMemoryUnreferenceTest, givenDrmMemoryManagerAndFreeGraphicsMemoryIsCalledForSharedAllocationThenUnreferenceBufferObjectIsCalledWithSynchronousDestroySetToFalse) {
@@ -4138,17 +4111,20 @@ TEST(DrmMemoryManagerFreeGraphicsMemoryUnreferenceTest, givenDrmMemoryManagerAnd
     auto drm = Drm::create(nullptr, *executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]);
     executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]->osInterface->setDriverModel(std::unique_ptr<DriverModel>(drm));
     executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]->memoryOperationsInterface = DrmMemoryOperationsHandler::create(*drm, 0u);
-    ::testing::NiceMock<GMockDrmMemoryManager> gmockDrmMemoryManager(executionEnvironment);
+    TestedDrmMemoryManager memoryManger(executionEnvironment);
 
     osHandle handle = 1u;
     AllocationProperties properties(rootDeviceIndex, false, MemoryConstants::pageSize, AllocationType::SHARED_BUFFER, false, {});
-    auto allocation = gmockDrmMemoryManager.createGraphicsAllocationFromSharedHandle(handle, properties, false, false);
+    auto allocation = memoryManger.createGraphicsAllocationFromSharedHandle(handle, properties, false, false);
     ASSERT_NE(nullptr, allocation);
 
-    EXPECT_CALL(gmockDrmMemoryManager, unreference(::testing::_, false)).Times(1);
-    EXPECT_CALL(gmockDrmMemoryManager, unreference(::testing::_, true)).Times(EngineLimits::maxHandleCount - 1);
-
-    gmockDrmMemoryManager.freeGraphicsMemory(allocation);
+    memoryManger.freeGraphicsMemory(allocation);
+
+    EXPECT_EQ(1 + EngineLimits::maxHandleCount - 1, memoryManger.unreferenceCalled);
+    EXPECT_FALSE(memoryManger.unreferenceParamsPassed[0].synchronousDestroy);
+    for (size_t i = 1; i < EngineLimits::maxHandleCount - 1; ++i) {
+        EXPECT_TRUE(memoryManger.unreferenceParamsPassed[i].synchronousDestroy);
+    }
 }
 
 TEST(DrmMemoryMangerTest, givenMultipleRootDeviceWhenMemoryManagerGetsDrmThenDrmIsFromCorrectRootDevice) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2020 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -8,6 +8,7 @@
 #include "shared/source/helpers/constants.h"
 #include "shared/source/helpers/file_io.h"
 #include "shared/source/os_interface/linux/os_memory_linux.h"
+#include "shared/source/utilities/stackvec.h"
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
@@ -22,18 +23,39 @@ class MockOSMemoryLinux : public OSMemoryLinux {
         return std::make_unique<MockOSMemoryLinux>();
     }
 
-    MockOSMemoryLinux() {
-        ON_CALL(*this, mmapWrapper).WillByDefault([this](void *addr, size_t size, int prot, int flags, int fd, off_t off) {
-            return this->baseMmapWrapper(addr, size, prot, flags, fd, off);
-        });
-
-        ON_CALL(*this, munmapWrapper).WillByDefault([this](void *addr, size_t size) {
-            return this->baseMunmapWrapper(addr, size);
-        });
+    MockOSMemoryLinux() = default;
+
+    void *mmapWrapper(void *addr, size_t size, int prot, int flags, int fd, off_t off) override {
+        mmapWrapperCalled++;
+        mmapWrapperParamsPassed.push_back({addr, size, prot, flags, fd, off});
+        return this->baseMmapWrapper(addr, size, prot, flags, fd, off);
     }
 
-    MOCK_METHOD6(mmapWrapper, void *(void *, size_t, int, int, int, off_t));
-    MOCK_METHOD2(munmapWrapper, int(void *, size_t));
+    struct MmapWrapperParams {
+        void *addr;
+        size_t size;
+        int prot;
+        int flags;
+        int fd;
+        off_t off;
+    };
+
+    uint32_t mmapWrapperCalled = 0u;
+    StackVec<MmapWrapperParams, 1> mmapWrapperParamsPassed{};
+
+    int munmapWrapper(void *addr, size_t size) override {
+        munmapWrapperCalled++;
+        munmapWrapperParamsPassed.push_back({addr, size});
+        return this->baseMunmapWrapper(addr, size);
+    }
+
+    struct MunmapWrapperParams {
+        void *addr;
+        size_t size;
+    };
+
+    uint32_t munmapWrapperCalled = 0u;
+    StackVec<MunmapWrapperParams, 1> munmapWrapperParamsPassed{};
 
     void *baseMmapWrapper(void *addr, size_t size, int prot, int flags, int fd, off_t off) {
         return OSMemoryLinux::mmapWrapper(addr, size, prot, flags, fd, off);
@@ -47,37 +69,39 @@ class MockOSMemoryLinux : public OSMemoryLinux {
 TEST(OSMemoryLinux, givenOSMemoryLinuxWhenReserveCpuAddressRangeIsCalledThenMinusOneIsPassedToMmapAsFdParam) {
     auto mockOSMemoryLinux = MockOSMemoryLinux::create();
 
-    EXPECT_CALL(*mockOSMemoryLinux, mmapWrapper(_, _, _, _, -1, _));
-
     auto reservedCpuRange = mockOSMemoryLinux->reserveCpuAddressRange(MemoryConstants::pageSize, MemoryConstants::pageSize64k);
 
-    EXPECT_CALL(*mockOSMemoryLinux, munmapWrapper(reservedCpuRange.originalPtr, reservedCpuRange.actualReservedSize));
-
     mockOSMemoryLinux->releaseCpuAddressRange(reservedCpuRange);
+
+    EXPECT_EQ(-1, mockOSMemoryLinux->mmapWrapperParamsPassed[0].fd);
+    EXPECT_EQ(reservedCpuRange.originalPtr, mockOSMemoryLinux->munmapWrapperParamsPassed[0].addr);
+    EXPECT_EQ(reservedCpuRange.actualReservedSize, mockOSMemoryLinux->munmapWrapperParamsPassed[0].size);
 }
 
 TEST(OSMemoryLinux, givenOSMemoryLinuxWhenReserveCpuAddressRangeIsCalledAndBaseAddressIsSpecifiedThenCorrectValueIsPassedToMmapAsAddrParam) {
     auto mockOSMemoryLinux = MockOSMemoryLinux::create();
 
-    EXPECT_CALL(*mockOSMemoryLinux, mmapWrapper(reinterpret_cast<void *>(0x10000000), _, _, _, -1, _));
-
     auto reservedCpuRange = mockOSMemoryLinux->reserveCpuAddressRange(reinterpret_cast<void *>(0x10000000), MemoryConstants::pageSize, MemoryConstants::pageSize64k);
 
-    EXPECT_CALL(*mockOSMemoryLinux, munmapWrapper(reservedCpuRange.originalPtr, reservedCpuRange.actualReservedSize));
-
     mockOSMemoryLinux->releaseCpuAddressRange(reservedCpuRange);
+
+    EXPECT_EQ(reinterpret_cast<void *>(0x10000000), mockOSMemoryLinux->mmapWrapperParamsPassed[0].addr);
+    EXPECT_EQ(-1, mockOSMemoryLinux->mmapWrapperParamsPassed[0].fd);
+    EXPECT_EQ(reservedCpuRange.originalPtr, mockOSMemoryLinux->munmapWrapperParamsPassed[0].addr);
+    EXPECT_EQ(reservedCpuRange.actualReservedSize, mockOSMemoryLinux->munmapWrapperParamsPassed[0].size);
 }
 
 TEST(OSMemoryLinux, givenOSMemoryLinuxWhenReserveCpuAddressRangeIsCalledAndBaseAddressIsNotSpecifiedThenoZeroIsPassedToMmapAsAddrParam) {
     auto mockOSMemoryLinux = MockOSMemoryLinux::create();
 
-    EXPECT_CALL(*mockOSMemoryLinux, mmapWrapper(nullptr, _, _, _, -1, _));
-
     auto reservedCpuRange = mockOSMemoryLinux->reserveCpuAddressRange(MemoryConstants::pageSize, MemoryConstants::pageSize64k);
 
-    EXPECT_CALL(*mockOSMemoryLinux, munmapWrapper(reservedCpuRange.originalPtr, reservedCpuRange.actualReservedSize));
-
     mockOSMemoryLinux->releaseCpuAddressRange(reservedCpuRange);
+
+    EXPECT_EQ(nullptr, mockOSMemoryLinux->mmapWrapperParamsPassed[0].addr);
+    EXPECT_EQ(-1, mockOSMemoryLinux->mmapWrapperParamsPassed[0].fd);
+    EXPECT_EQ(reservedCpuRange.originalPtr, mockOSMemoryLinux->munmapWrapperParamsPassed[0].addr);
+    EXPECT_EQ(reservedCpuRange.actualReservedSize, mockOSMemoryLinux->munmapWrapperParamsPassed[0].size);
 }
 
 TEST(OSMemoryLinux, GivenProcSelfMapsFileExistsWhenGetMemoryMapsIsQueriedThenValidValueIsReturned) {
@@ -126,6 +126,35 @@ class TestedDrmMemoryManager : public MemoryManagerCreate<DrmMemoryManager> {
     }
     bool alignedMallocShouldFail = false;
    size_t alignedMallocSizeRequired = 0u;
+    uint32_t unreference(BufferObject *bo, bool synchronousDestroy) override {
+        std::unique_lock<std::mutex> lock(unreferenceMtx);
+        unreferenceCalled++;
+        unreferenceParamsPassed.push_back({bo, synchronousDestroy});
+        return DrmMemoryManager::unreference(bo, synchronousDestroy);
+    }
+    struct UnreferenceParams {
+        BufferObject *bo;
+        bool synchronousDestroy;
+    };
+    uint32_t unreferenceCalled = 0u;
+    StackVec<UnreferenceParams, 4> unreferenceParamsPassed{};
+    void releaseGpuRange(void *ptr, size_t size, uint32_t rootDeviceIndex) override {
+        std::unique_lock<std::mutex> lock(releaseGpuRangeMtx);
+        releaseGpuRangeCalled++;
+        DrmMemoryManager::releaseGpuRange(ptr, size, rootDeviceIndex);
+    }
+    uint32_t releaseGpuRangeCalled = 0u;
+    void alignedFreeWrapper(void *ptr) override {
+        std::unique_lock<std::mutex> lock(alignedFreeWrapperMtx);
+        alignedFreeWrapperCalled++;
+        DrmMemoryManager::alignedFreeWrapper(ptr);
+    }
+    uint32_t alignedFreeWrapperCalled = 0u;
+
+  protected:
+    std::mutex unreferenceMtx;
+    std::mutex releaseGpuRangeMtx;
+    std::mutex alignedFreeWrapperMtx;
 };
 
 struct MockDrmGemCloseWorker : DrmGemCloseWorker {
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -7,7 +7,7 @@
 
 #include "shared/source/memory_manager/gfx_partition.h"
 
-#include "gmock/gmock.h"
+#include "gtest/gtest.h"
 
 using namespace NEO;
 
@@ -48,6 +48,16 @@ class MockGfxPartition : public GfxPartition {
         }
     }
 
+    void freeGpuAddressRange(uint64_t gpuAddress, size_t size) override {
+        freeGpuAddressRangeCalled++;
+        if (callBasefreeGpuAddressRange) {
+            GfxPartition::freeGpuAddressRange(gpuAddress, size);
+        }
+    }
+
+    uint32_t freeGpuAddressRangeCalled = 0u;
+    bool callBasefreeGpuAddressRange = false;
+
     static std::array<HeapIndex, static_cast<uint32_t>(HeapIndex::TOTAL_HEAPS)> allHeapNames;
 
     OSMemory::ReservedCpuAddressRange reservedCpuAddressRange;
|
|||||||
const uint64_t mockGpuVa = std::numeric_limits<uint64_t>::max();
|
const uint64_t mockGpuVa = std::numeric_limits<uint64_t>::max();
|
||||||
};
|
};
|
||||||
|
|
||||||
struct GmockGfxPartition : MockGfxPartition {
|
|
||||||
using MockGfxPartition::MockGfxPartition;
|
|
||||||
MOCK_METHOD(void, freeGpuAddressRange, (uint64_t gpuAddress, size_t size), (override));
|
|
||||||
};
|
|
||||||
|
|
||||||
class MockGfxPartitionBasic : public GfxPartition {
|
class MockGfxPartitionBasic : public GfxPartition {
|
||||||
public:
|
public:
|
||||||
MockGfxPartitionBasic() : GfxPartition(reservedCpuAddressRange) {}
|
MockGfxPartitionBasic() : GfxPartition(reservedCpuAddressRange) {}
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (C) 2019-2021 Intel Corporation
|
* Copyright (C) 2019-2022 Intel Corporation
|
||||||
*
|
*
|
||||||
* SPDX-License-Identifier: MIT
|
* SPDX-License-Identifier: MIT
|
||||||
*
|
*
|
||||||
@@ -407,6 +407,7 @@ using GfxPartitionTestForAllHeapTypes = ::testing::TestWithParam<HeapIndex>;
 TEST_P(GfxPartitionTestForAllHeapTypes, givenHeapIndexWhenFreeGpuAddressRangeIsCalledThenFreeMemory) {
     MockGfxPartition gfxPartition;
     gfxPartition.init(maxNBitValue(48), reservedCpuAddressRangeSize, 0, 1);
+    gfxPartition.callBasefreeGpuAddressRange = true;
     const HeapIndex heapIndex = GetParam();
     const size_t allocationSize = static_cast<size_t>(gfxPartition.getHeapSize(heapIndex)) * 3 / 4;
     if (allocationSize == 0) {