Mirror of https://github.com/intel/compute-runtime.git
feature: Mark selected resources as UC when mitigating dc flush
Related-To: NEO-10556
Signed-off-by: Lukasz Jobczyk <lukasz.jobczyk@intel.com>
Committed by: Compute-Runtime-Automation
Parent: b5435f7dfe
Commit: 8a0c425495
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -488,7 +488,7 @@ TEST_F(PlatformWithFourDevicesTest, whenCreateColoredAllocationAndWddmReturnsCan
 struct CanonizeAddressMockWddm : public WddmMock {
     using WddmMock::WddmMock;

-    bool mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr) override {
+    bool mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr, AllocationType type) override {
         auto gmmHelper = rootDeviceEnvironment.getGmmHelper();
         gpuPtr = gmmHelper->canonize(preferredAddress);
         return mapGpuVaStatus;
@@ -47,6 +47,17 @@ bool CacheSettingsHelper::preferNoCpuAccess(GMM_RESOURCE_USAGE_TYPE_ENUM gmmReso
 }

 GMM_RESOURCE_USAGE_TYPE_ENUM CacheSettingsHelper::getDefaultUsageTypeWithCachingEnabled(AllocationType allocationType, const ProductHelper &productHelper) {
+    if (productHelper.isDcFlushMitigated() &&
+        (allocationType == AllocationType::externalHostPtr ||
+         allocationType == AllocationType::bufferHostMemory ||
+         allocationType == AllocationType::mapAllocation ||
+         allocationType == AllocationType::svmCpu ||
+         allocationType == AllocationType::svmZeroCopy ||
+         allocationType == AllocationType::internalHostMemory ||
+         allocationType == AllocationType::timestampPacketTagBuffer ||
+         allocationType == AllocationType::tagBuffer)) {
+        return getDefaultUsageTypeWithCachingDisabled(allocationType, productHelper);
+    }
     switch (allocationType) {
     case AllocationType::image:
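
Note: the hunk above is the core of the change. When the product helper reports that DC flush is being mitigated, the listed host-accessible allocation types are routed to the caching-disabled path and therefore get an uncached (UC) GMM usage; the apparent intent is that GPU writes to these resources no longer linger in the data cache, so the CPU can read them without the DC flush the mitigation removes. A minimal standalone sketch of that selection rule follows; the enum and function names are simplified stand-ins for illustration, not the NEO types.

    // Simplified restatement of the selection rule above (stand-in names, not NEO code).
    enum class AllocType {
        externalHostPtr, bufferHostMemory, mapAllocation, svmCpu, svmZeroCopy,
        internalHostMemory, timestampPacketTagBuffer, tagBuffer, buffer, image
    };

    // Returns true when the allocation should be treated as uncached (UC) because
    // DC flush mitigation is active and the CPU may read the memory directly.
    bool requiresUncachedMapping(bool dcFlushMitigated, AllocType type) {
        if (!dcFlushMitigated) {
            return false;
        }
        switch (type) {
        case AllocType::externalHostPtr:
        case AllocType::bufferHostMemory:
        case AllocType::mapAllocation:
        case AllocType::svmCpu:
        case AllocType::svmZeroCopy:
        case AllocType::internalHostMemory:
        case AllocType::timestampPacketTagBuffer:
        case AllocType::tagBuffer:
            return true; // host-visible: skip the cached usage, fall back to the UC path
        default:
            return false; // device-only types keep their regular cached usage
        }
    }
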
@@ -128,6 +128,7 @@ class ProductHelper {
     virtual bool isKmdMigrationSupported() const = 0;
     virtual bool isTile64With3DSurfaceOnBCSSupported(const HardwareInfo &hwInfo) const = 0;
     virtual bool isDcFlushAllowed() const = 0;
+    virtual bool isDcFlushMitigated() const = 0;
     virtual uint32_t computeMaxNeededSubSliceSpace(const HardwareInfo &hwInfo) const = 0;
     virtual bool getUuid(NEO::DriverModel *driverModel, const uint32_t subDeviceCount, const uint32_t deviceIndex, std::array<uint8_t, ProductHelper::uuidSize> &uuid) const = 0;
     virtual bool isFlushTaskAllowed() const = 0;
@@ -393,6 +393,13 @@ bool ProductHelperHw<gfxProduct>::isDcFlushAllowed() const {
     return dcFlushAllowed;
 }

+template <PRODUCT_FAMILY gfxProduct>
+bool ProductHelperHw<gfxProduct>::isDcFlushMitigated() const {
+    using GfxProduct = typename HwMapper<gfxProduct>::GfxProduct;
+    bool dcFlushAllowed = GfxProduct::isDcFlushAllowed;
+    return this->isDcFlushAllowed() != dcFlushAllowed;
+}
+
 template <PRODUCT_FAMILY gfxProduct>
 uint32_t ProductHelperHw<gfxProduct>::computeMaxNeededSubSliceSpace(const HardwareInfo &hwInfo) const {
     return hwInfo.gtSystemInfo.MaxSubSlicesSupported;
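
Note: the template implementation above defines mitigation indirectly. It compares the runtime answer of isDcFlushAllowed() with the product's compile-time default (GfxProduct::isDcFlushAllowed) and reports mitigation whenever the two disagree, i.e. when DC flush has been turned off on a product that would normally use it (for example via the AllowDcFlush debug flag exercised in the GmmTest hunk further down). A tiny self-contained restatement of that check, with the NEO types replaced by plain bools:

    // Minimal restatement of isDcFlushMitigated(): mitigation is active exactly when
    // the runtime decision diverges from the product's compile-time default.
    #include <cassert>

    bool isMitigated(bool runtimeDcFlushAllowed, bool productDefaultDcFlushAllowed) {
        return runtimeDcFlushAllowed != productDefaultDcFlushAllowed;
    }

    int main() {
        assert(isMitigated(false, true));   // product allows DC flush, runtime disabled it -> mitigated
        assert(!isMitigated(true, true));   // runtime matches the default -> not mitigated
        assert(!isMitigated(false, false)); // product never flushes anyway -> nothing to mitigate
        return 0;
    }
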
@@ -73,6 +73,7 @@ class ProductHelperHw : public ProductHelper {
     bool isKmdMigrationSupported() const override;
     bool isTile64With3DSurfaceOnBCSSupported(const HardwareInfo &hwInfo) const override;
     bool isDcFlushAllowed() const override;
+    bool isDcFlushMitigated() const override;
     uint32_t computeMaxNeededSubSliceSpace(const HardwareInfo &hwInfo) const override;
     bool getUuid(NEO::DriverModel *driverModel, uint32_t subDeviceCount, uint32_t deviceIndex, std::array<uint8_t, ProductHelper::uuidSize> &uuid) const override;
     bool isFlushTaskAllowed() const override;
@@ -547,10 +547,10 @@ bool Wddm::mapGpuVirtualAddress(AllocationStorageData *allocationStorageData) {
     return mapGpuVirtualAddress(osHandle->gmm,
                                 osHandle->handle,
                                 0u, MemoryConstants::maxSvmAddress, castToUint64(allocationStorageData->cpuPtr),
-                                osHandle->gpuPtr);
+                                osHandle->gpuPtr, AllocationType::externalHostPtr);
 }

-bool Wddm::mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr) {
+bool Wddm::mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr, AllocationType type) {
     D3DDDI_MAPGPUVIRTUALADDRESS mapGPUVA = {};
     D3DDDIGPUVIRTUALADDRESS_PROTECTION_TYPE protectionType = {};
     protectionType.Write = TRUE;
@@ -568,7 +568,7 @@ bool Wddm::mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_A
     mapGPUVA.MinimumAddress = minimumAddress;
     mapGPUVA.MaximumAddress = maximumAddress;

-    applyAdditionalMapGPUVAFields(mapGPUVA, gmm);
+    applyAdditionalMapGPUVAFields(mapGPUVA, gmm, type);

     MapGpuVirtualAddressGmm gmmMapGpuVa = {&mapGPUVA, gmm->gmmResourceInfo.get(), &gpuPtr, getGdi()};
     auto status = gmm->getGmmHelper()->getClientContext()->mapGpuVirtualAddress(&gmmMapGpuVa);
@@ -66,12 +66,12 @@ class Wddm : public DriverModel {

     MOCKABLE_VIRTUAL bool evict(const D3DKMT_HANDLE *handleList, uint32_t numOfHandles, uint64_t &sizeToTrim, bool evictNeeded);
     MOCKABLE_VIRTUAL bool makeResident(const D3DKMT_HANDLE *handles, uint32_t count, bool cantTrimFurther, uint64_t *numberOfBytesToTrim, size_t totalSize);
-    MOCKABLE_VIRTUAL bool mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr);
+    MOCKABLE_VIRTUAL bool mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr, AllocationType type);
     bool mapGpuVirtualAddress(AllocationStorageData *allocationStorageData);
     MOCKABLE_VIRTUAL NTSTATUS reserveGpuVirtualAddress(D3DGPU_VIRTUAL_ADDRESS baseAddress, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_SIZE_T size, D3DGPU_VIRTUAL_ADDRESS *reservedAddress);
     MOCKABLE_VIRTUAL bool createContext(OsContextWin &osContext);
     MOCKABLE_VIRTUAL void applyAdditionalContextFlags(CREATECONTEXT_PVTDATA &privateData, OsContextWin &osContext);
-    MOCKABLE_VIRTUAL void applyAdditionalMapGPUVAFields(D3DDDI_MAPGPUVIRTUALADDRESS &mapGPUVA, Gmm *gmm);
+    MOCKABLE_VIRTUAL void applyAdditionalMapGPUVAFields(D3DDDI_MAPGPUVIRTUALADDRESS &mapGPUVA, Gmm *gmm, AllocationType type);
     MOCKABLE_VIRTUAL uint64_t freeGmmGpuVirtualAddress(Gmm *gmm, D3DGPU_VIRTUAL_ADDRESS &gpuPtr, uint64_t size);
     MOCKABLE_VIRTUAL bool freeGpuVirtualAddress(D3DGPU_VIRTUAL_ADDRESS &gpuPtr, uint64_t size);
     MOCKABLE_VIRTUAL NTSTATUS createAllocation(const void *alignedCpuPtr, const Gmm *gmm, D3DKMT_HANDLE &outHandle, D3DKMT_HANDLE &outResourceHandle, uint64_t *outSharedHandle);
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2024 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -8,6 +8,6 @@
 #include "shared/source/os_interface/windows/wddm/wddm.h"

 namespace NEO {
-void Wddm::applyAdditionalMapGPUVAFields(D3DDDI_MAPGPUVIRTUALADDRESS &mapGPUVA, Gmm *gmm) {
+void Wddm::applyAdditionalMapGPUVAFields(D3DDDI_MAPGPUVIRTUALADDRESS &mapGPUVA, Gmm *gmm, AllocationType type) {
 }
 } // namespace NEO
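
Note: the default definition above stays a no-op; the new AllocationType parameter presumably exists so that platform- or product-specific code, which can supply its own definition of applyAdditionalMapGPUVAFields, is able to take the allocation type into account when filling in the MapGpuVirtualAddress request. The sketch below is purely hypothetical (it is not part of this commit and invents no real D3DDDI fields); it only shows the shape such an alternative definition could take.

    // Hypothetical platform-specific definition (illustration only, not from this commit).
    #include "shared/source/os_interface/windows/wddm/wddm.h"

    namespace NEO {
    void Wddm::applyAdditionalMapGPUVAFields(D3DDDI_MAPGPUVIRTUALADDRESS &mapGPUVA, Gmm *gmm, AllocationType type) {
        if (type == AllocationType::externalHostPtr || type == AllocationType::bufferHostMemory) {
            // A platform could adjust mapGPUVA here for host-visible resources that are
            // mapped uncached while DC flush mitigation is active.
        }
        // All other allocation types: leave the request untouched, matching the no-op default.
    }
    } // namespace NEO
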
@@ -1010,11 +1010,11 @@ bool WddmMemoryManager::mapGpuVaForOneHandleAllocation(WddmAllocation *allocatio

     D3DGPU_VIRTUAL_ADDRESS minimumAddress = gfxPartition->getHeapMinimalAddress(heapIndex);
     D3DGPU_VIRTUAL_ADDRESS maximumAddress = gfxPartition->getHeapLimit(heapIndex);
-    auto status = getWddm(allocation->getRootDeviceIndex()).mapGpuVirtualAddress(allocation->getDefaultGmm(), allocation->getDefaultHandle(), minimumAddress, maximumAddress, addressToMap, allocation->getGpuAddressToModify());
+    auto status = getWddm(allocation->getRootDeviceIndex()).mapGpuVirtualAddress(allocation->getDefaultGmm(), allocation->getDefaultHandle(), minimumAddress, maximumAddress, addressToMap, allocation->getGpuAddressToModify(), allocation->getAllocationType());

     if (!status && deferredDeleter) {
         deferredDeleter->drain(true);
-        status = getWddm(allocation->getRootDeviceIndex()).mapGpuVirtualAddress(allocation->getDefaultGmm(), allocation->getDefaultHandle(), minimumAddress, maximumAddress, addressToMap, allocation->getGpuAddressToModify());
+        status = getWddm(allocation->getRootDeviceIndex()).mapGpuVirtualAddress(allocation->getDefaultGmm(), allocation->getDefaultHandle(), minimumAddress, maximumAddress, addressToMap, allocation->getGpuAddressToModify(), allocation->getAllocationType());
     }
     if (!status) {
         if (allocation->getReservedGpuVirtualAddress()) {
@@ -1056,12 +1056,12 @@ bool WddmMemoryManager::mapMultiHandleAllocationWithRetry(WddmAllocation *alloca
     for (auto currentHandle = 0u; currentHandle < allocation->getNumGmms(); currentHandle++) {
         uint64_t gpuAddress = 0;
         auto status = wddm.mapGpuVirtualAddress(allocation->getGmm(currentHandle), allocation->getHandles()[currentHandle],
-                                                gfxPartition->getHeapMinimalAddress(heapIndex), gfxPartition->getHeapLimit(heapIndex), addressToMap, gpuAddress);
+                                                gfxPartition->getHeapMinimalAddress(heapIndex), gfxPartition->getHeapLimit(heapIndex), addressToMap, gpuAddress, allocation->getAllocationType());

         if (!status && deferredDeleter) {
             deferredDeleter->drain(true);
             status = wddm.mapGpuVirtualAddress(allocation->getGmm(currentHandle), allocation->getHandles()[currentHandle],
-                                               gfxPartition->getHeapMinimalAddress(heapIndex), gfxPartition->getHeapLimit(heapIndex), addressToMap, gpuAddress);
+                                               gfxPartition->getHeapMinimalAddress(heapIndex), gfxPartition->getHeapLimit(heapIndex), addressToMap, gpuAddress, allocation->getAllocationType());
         }
         if (!status) {
             if (allocation->getReservedGpuVirtualAddress()) {
@@ -271,6 +271,11 @@ bool ProductHelperHw<IGFX_UNKNOWN>::isDcFlushAllowed() const {
     return true;
 }

+template <>
+bool ProductHelperHw<IGFX_UNKNOWN>::isDcFlushMitigated() const {
+    return false;
+}
+
 template <>
 uint32_t ProductHelperHw<IGFX_UNKNOWN>::computeMaxNeededSubSliceSpace(const HardwareInfo &hwInfo) const {
     return hwInfo.gtSystemInfo.MaxSubSlicesSupported;
@@ -82,14 +82,14 @@ bool WddmMock::mapGpuVirtualAddress(WddmAllocation *allocation) {
         maximumAddress = MemoryConstants::maxSvmAddress;
     }
     return mapGpuVirtualAddress(allocation->getDefaultGmm(), allocation->getDefaultHandle(), minimumAddress, maximumAddress,
-                                reinterpret_cast<D3DGPU_VIRTUAL_ADDRESS>(allocation->getAlignedCpuPtr()), allocation->getGpuAddressToModify());
+                                reinterpret_cast<D3DGPU_VIRTUAL_ADDRESS>(allocation->getAlignedCpuPtr()), allocation->getGpuAddressToModify(), allocation->getAllocationType());
 }
-bool WddmMock::mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr) {
+bool WddmMock::mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr, AllocationType type) {
     mapGpuVirtualAddressResult.called++;
     mapGpuVirtualAddressResult.cpuPtrPassed = reinterpret_cast<void *>(preferredAddress);
     mapGpuVirtualAddressResult.uint64ParamPassed = preferredAddress;
     if (callBaseMapGpuVa) {
-        return mapGpuVirtualAddressResult.success = Wddm::mapGpuVirtualAddress(gmm, handle, minimumAddress, maximumAddress, preferredAddress, gpuPtr);
+        return mapGpuVirtualAddressResult.success = Wddm::mapGpuVirtualAddress(gmm, handle, minimumAddress, maximumAddress, preferredAddress, gpuPtr, type);
     } else {
         gpuPtr = preferredAddress;
         return mapGpuVaStatus;
@@ -66,7 +66,7 @@ class WddmMock : public Wddm {
     WddmMock(RootDeviceEnvironment &rootDeviceEnvironment);
     ~WddmMock() override;

-    bool mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr) override;
+    bool mapGpuVirtualAddress(Gmm *gmm, D3DKMT_HANDLE handle, D3DGPU_VIRTUAL_ADDRESS minimumAddress, D3DGPU_VIRTUAL_ADDRESS maximumAddress, D3DGPU_VIRTUAL_ADDRESS preferredAddress, D3DGPU_VIRTUAL_ADDRESS &gpuPtr, AllocationType type) override;
     bool mapGpuVirtualAddress(WddmAllocation *allocation);
     bool freeGpuVirtualAddress(D3DGPU_VIRTUAL_ADDRESS &gpuPtr, uint64_t size) override;
     NTSTATUS createAllocation(const void *alignedCpuPtr, const Gmm *gmm, D3DKMT_HANDLE &outHandle, D3DKMT_HANDLE &outResource, uint64_t *outSharedHandle) override;
@@ -726,6 +726,79 @@ TEST(GmmTest, givenAllocationTypeWhenGettingUsageTypeThenReturnCorrectValue) {
     }
 }

+TEST(GmmTest, givenAllocationTypeAndMitigatedDcFlushWhenGettingUsageTypeThenReturnCorrectValue) {
+    DebugManagerStateRestore restorer;
+    debugManager.flags.AllowDcFlush.set(0);
+    MockExecutionEnvironment mockExecutionEnvironment{};
+    const auto &productHelper = mockExecutionEnvironment.rootDeviceEnvironments[0]->getHelper<ProductHelper>();
+    for (uint32_t i = 0; i < static_cast<uint32_t>(AllocationType::count); i++) {
+        auto allocationType = static_cast<AllocationType>(i);
+        auto uncachedGmmUsageType = productHelper.isNewCoherencyModelSupported() ? GMM_RESOURCE_USAGE_OCL_BUFFER_CSR_UC : GMM_RESOURCE_USAGE_OCL_BUFFER_CACHELINE_MISALIGNED;
+        auto usage = CacheSettingsHelper::getGmmUsageType(allocationType, false, productHelper);
+        auto expectedUsage = GMM_RESOURCE_USAGE_UNKNOWN;
+
+        if (productHelper.isDcFlushMitigated()) {
+            switch (allocationType) {
+            case AllocationType::externalHostPtr:
+            case AllocationType::mapAllocation:
+            case AllocationType::svmCpu:
+            case AllocationType::svmZeroCopy:
+            case AllocationType::internalHostMemory:
+            case AllocationType::timestampPacketTagBuffer:
+            case AllocationType::bufferHostMemory:
+            case AllocationType::tagBuffer:
+                expectedUsage = uncachedGmmUsageType;
+                break;
+            case AllocationType::constantSurface:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_BUFFER_CONST;
+                break;
+            case AllocationType::image:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_IMAGE;
+                break;
+            case AllocationType::internalHeap:
+            case AllocationType::linearStream:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_STATE_HEAP_BUFFER;
+                break;
+            case AllocationType::fillPattern:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_SYSTEM_MEMORY_BUFFER;
+                break;
+            default:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_BUFFER;
+                break;
+            }
+        } else {
+            switch (allocationType) {
+            case AllocationType::constantSurface:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_BUFFER_CONST;
+                break;
+            case AllocationType::image:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_IMAGE;
+                break;
+            case AllocationType::internalHeap:
+            case AllocationType::linearStream:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_STATE_HEAP_BUFFER;
+                break;
+            case AllocationType::bufferHostMemory:
+            case AllocationType::internalHostMemory:
+            case AllocationType::mapAllocation:
+            case AllocationType::fillPattern:
+            case AllocationType::svmCpu:
+            case AllocationType::svmZeroCopy:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_SYSTEM_MEMORY_BUFFER;
+                break;
+            default:
+                expectedUsage = GMM_RESOURCE_USAGE_OCL_BUFFER;
+                break;
+            }
+        }
+
+        EXPECT_EQ(expectedUsage, usage);
+        if (expectedUsage != usage) {
+            std::cout << "fail";
+        }
+    }
+}
+
 TEST(GmmTest, givenForceAllResourcesUncachedFlagSetWhenGettingUsageTypeThenReturnUncached) {
     DebugManagerStateRestore restore;
     debugManager.flags.ForceAllResourcesUncached.set(true);
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -119,7 +119,7 @@ TEST_F(WddmKmDafListenerTest, givenWddmWhenMapGpuVirtualAddressIsCalledThenKmDaf
     auto gmm = std::make_unique<Gmm>(rootDeviceEnvironment->getGmmHelper(), nullptr, 1, 0, GMM_RESOURCE_USAGE_OCL_BUFFER, StorageInfo{}, gmmRequirements);

     wddmWithKmDafMock->mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, wddmWithKmDafMock->getGfxPartition().Standard.Base,
-                                            wddmWithKmDafMock->getGfxPartition().Standard.Limit, 0u, gpuPtr);
+                                            wddmWithKmDafMock->getGfxPartition().Standard.Limit, 0u, gpuPtr, AllocationType::unknown);

     EXPECT_EQ(wddmWithKmDafMock->featureTable->flags.ftrKmdDaf, wddmWithKmDafMock->getKmDafListenerMock().notifyMapGpuVAParametrization.ftrKmdDaf);
     EXPECT_EQ(wddmWithKmDafMock->getAdapter(), wddmWithKmDafMock->getKmDafListenerMock().notifyMapGpuVAParametrization.hAdapter);
@@ -3685,7 +3685,7 @@ TEST_F(MockWddmMemoryManagerTest, givenCompressedAllocationWhenMappedGpuVaAndPag

     auto hwInfoMock = hardwareInfoTable[wddm.getGfxPlatform()->eProductFamily];
     ASSERT_NE(nullptr, hwInfoMock);
-    auto result = wddm.mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, wddm.getGfxPartition().Standard.Base, wddm.getGfxPartition().Standard.Limit, 0u, gpuVa);
+    auto result = wddm.mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, wddm.getGfxPartition().Standard.Base, wddm.getGfxPartition().Standard.Limit, 0u, gpuVa, AllocationType::unknown);
     ASSERT_TRUE(result);

     auto gmmHelper = rootDeviceEnvironment->getGmmHelper();
@@ -3731,7 +3731,7 @@ TEST_F(MockWddmMemoryManagerTest, givenCompressedAllocationWhenMappedGpuVaAndPag

     auto hwInfoMock = hardwareInfoTable[wddm.getGfxPlatform()->eProductFamily];
     ASSERT_NE(nullptr, hwInfoMock);
-    auto result = wddm.mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, wddm.getGfxPartition().Standard.Base, wddm.getGfxPartition().Standard.Limit, 0u, gpuVa);
+    auto result = wddm.mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, wddm.getGfxPartition().Standard.Base, wddm.getGfxPartition().Standard.Limit, 0u, gpuVa, AllocationType::unknown);
     ASSERT_TRUE(result);
     EXPECT_EQ(gmmHelper->canonize(wddm.getGfxPartition().Standard.Base), gpuVa);
     EXPECT_TRUE(memcmp(&expectedDdiUpdateAuxTable, &mockMngr->updateAuxTableParamsPassed[0].ddiUpdateAuxTable, sizeof(GMM_DDI_UPDATEAUXTABLE)) == 0);
@@ -3826,7 +3826,7 @@ TEST_F(MockWddmMemoryManagerTest, givenNonCompressedAllocationWhenMappedGpuVaThe
         engine.commandStreamReceiver->pageTableManager.reset(mockMngr);
     }

-    auto result = wddm.mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, wddm.getGfxPartition().Standard.Base, wddm.getGfxPartition().Standard.Limit, 0u, gpuVa);
+    auto result = wddm.mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, wddm.getGfxPartition().Standard.Base, wddm.getGfxPartition().Standard.Limit, 0u, gpuVa, AllocationType::unknown);
     ASSERT_TRUE(result);
     EXPECT_EQ(0u, mockMngr->updateAuxTableCalled);
 }
@@ -3842,7 +3842,7 @@ TEST_F(MockWddmMemoryManagerTest, givenFailingAllocationWhenMappedGpuVaThenRetur
     WddmMock wddm(*rootDeviceEnvironment);
     wddm.init();

-    auto result = wddm.mapGpuVirtualAddress(gmm.get(), 0, 0, 0, 0, gpuVa);
+    auto result = wddm.mapGpuVirtualAddress(gmm.get(), 0, 0, 0, 0, gpuVa, AllocationType::unknown);
     ASSERT_FALSE(result);
 }
@@ -3875,7 +3875,7 @@ TEST_F(MockWddmMemoryManagerTest, givenCompressedFlagSetWhenInternalIsUnsetThenD
     delete wddmAlloc->getDefaultGmm();
     wddmAlloc->setDefaultGmm(myGmm);

-    auto result = wddm->mapGpuVirtualAddress(myGmm, ALLOCATION_HANDLE, wddm->getGfxPartition().Standard.Base, wddm->getGfxPartition().Standard.Limit, 0u, gpuVa);
+    auto result = wddm->mapGpuVirtualAddress(myGmm, ALLOCATION_HANDLE, wddm->getGfxPartition().Standard.Base, wddm->getGfxPartition().Standard.Limit, 0u, gpuVa, AllocationType::unknown);
     EXPECT_TRUE(result);
     memoryManager.freeGraphicsMemory(wddmAlloc);
     EXPECT_EQ(0u, mockMngr->updateAuxTableCalled);
@@ -3917,7 +3917,7 @@ HWTEST_F(MockWddmMemoryManagerTest, givenCompressedFlagSetWhenInternalIsSetThenU

     auto expectedCallCount = memoryManager.getRegisteredEngines(rootDeviceIndex).size();

-    auto result = wddm->mapGpuVirtualAddress(myGmm, ALLOCATION_HANDLE, wddm->getGfxPartition().Standard.Base, wddm->getGfxPartition().Standard.Limit, 0u, gpuVa);
+    auto result = wddm->mapGpuVirtualAddress(myGmm, ALLOCATION_HANDLE, wddm->getGfxPartition().Standard.Base, wddm->getGfxPartition().Standard.Limit, 0u, gpuVa, AllocationType::unknown);
     EXPECT_TRUE(result);

     auto ultCsr = reinterpret_cast<UltCommandStreamReceiver<FamilyType> *>(csr.get());
@@ -144,7 +144,7 @@ TEST_F(Wddm20Tests, givenGraphicsAllocationWhenItIsMappedInHeap0ThenItHasGpuAddr
     auto heapBase = wddm->getGfxPartition().Heap32[static_cast<uint32_t>(HeapIndex::heapInternalDeviceMemory)].Base;
     auto heapLimit = wddm->getGfxPartition().Heap32[static_cast<uint32_t>(HeapIndex::heapInternalDeviceMemory)].Limit;

-    bool ret = wddm->mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, heapBase, heapLimit, 0u, gpuAddress);
+    bool ret = wddm->mapGpuVirtualAddress(gmm.get(), ALLOCATION_HANDLE, heapBase, heapLimit, 0u, gpuAddress, AllocationType::unknown);
     EXPECT_TRUE(ret);

     auto gmmHelper = rootDeviceEnvironment->getGmmHelper();