diff --git a/shared/source/debug_settings/debug_variables_base.inl b/shared/source/debug_settings/debug_variables_base.inl index ed8d686c68..6820ba25fb 100644 --- a/shared/source/debug_settings/debug_variables_base.inl +++ b/shared/source/debug_settings/debug_variables_base.inl @@ -221,6 +221,7 @@ DECLARE_DEBUG_VARIABLE(int32_t, OverridePatIndexForSystemMemory, -1, "-1: defaul DECLARE_DEBUG_VARIABLE(int32_t, OverridePatIndexForDeviceMemory, -1, "-1: default, >=0: PatIndex to override. Applicable only for Device memory.") DECLARE_DEBUG_VARIABLE(int32_t, OverridePatIndexForUncachedTypes, -1, "-1: default, >=0: PatIndex to override for uncached resource types. Applicable only on Linux.") DECLARE_DEBUG_VARIABLE(int32_t, OverridePatIndexForCachedTypes, -1, "-1: default, >=0: PatIndex to override for cached resource types. Applicable only on Linux.") +DECLARE_DEBUG_VARIABLE(int32_t, UseGemCreateExtInAllocateMemoryByKMD, -1, "-1: default, 0: do not use gem create ext when allocating memory by KMD, 1: use gem create ext when allocating memory by KMD") DECLARE_DEBUG_VARIABLE(int32_t, UseTileMemoryBankInVirtualMemoryCreation, -1, "-1: default - on, 0: do not assign tile memory bank to virtual memory space, 1: assign tile memory bank to virtual memory space") DECLARE_DEBUG_VARIABLE(int32_t, OverrideTimestampEvents, -1, "-1: default (based on user settings), 0: Force disable timestamp events (no timestamps will be reported), 1: Force enable timestamp events") DECLARE_DEBUG_VARIABLE(int32_t, ForcePreParserEnabledForMiArbCheck, -1, "-1: default , 0: PreParser disabled, 1: PreParser enabled") diff --git a/shared/source/os_interface/linux/drm_buffer_object.cpp b/shared/source/os_interface/linux/drm_buffer_object.cpp index 44b1b521ef..e0f03cdcc7 100644 --- a/shared/source/os_interface/linux/drm_buffer_object.cpp +++ b/shared/source/os_interface/linux/drm_buffer_object.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -81,7 +81,7 @@ 
BufferObject::BufferObject(uint32_t rootDeviceIndex, Drm *drm, uint64_t patIndex : BufferObject(rootDeviceIndex, drm, patIndex, BufferObjectHandleWrapper{handle}, size, maxOsContextCount) {} BufferObject::BufferObject(uint32_t rootDeviceIndex, Drm *drm, uint64_t patIndex, BufferObjectHandleWrapper &&handle, size_t size, size_t maxOsContextCount) - : drm(drm), refCount(1), rootDeviceIndex(rootDeviceIndex), handle(std::move(handle)), size(size) { + : drm(drm), size(size), handle(std::move(handle)), refCount(1), rootDeviceIndex(rootDeviceIndex) { auto ioctlHelper = drm->getIoctlHelper(); this->tilingMode = ioctlHelper->getDrmParamValue(DrmParam::tilingNone); diff --git a/shared/source/os_interface/linux/drm_buffer_object.h b/shared/source/os_interface/linux/drm_buffer_object.h index 7c60c9a8a9..3cf3203da9 100644 --- a/shared/source/os_interface/linux/drm_buffer_object.h +++ b/shared/source/os_interface/linux/drm_buffer_object.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -93,6 +93,12 @@ class BufferObject { } }; + enum class BOType { + legacy, + coherent, + nonCoherent + }; + bool setTiling(uint32_t mode, uint32_t stride); int pin(BufferObject *const boToPin[], size_t numberOfBos, OsContext *osContext, uint32_t vmHandleId, uint32_t drmContextId); @@ -205,6 +211,8 @@ class BufferObject { } uint64_t peekPatIndex() const { return patIndex; } void setPatIndex(uint64_t newPatIndex) { this->patIndex = newPatIndex; } + BOType peekBOType() const { return boType; } + void setBOType(BOType newBoType) { this->boType = newBoType; } static constexpr int gpuHangDetected{-7171}; @@ -215,38 +223,34 @@ class BufferObject { protected: MOCKABLE_VIRTUAL MemoryOperationsStatus evictUnusedAllocations(bool waitForCompletion, bool isLockNeeded); - - Drm *drm = nullptr; - bool perContextVmsUsed = false; - std::atomic refCount; - - uint32_t rootDeviceIndex = std::numeric_limits::max(); - 
BufferObjectHandleWrapper handle; // i915 gem object handle - uint64_t size; - bool isReused = false; - bool boHandleShared = false; - - uint32_t tilingMode; - bool allowCapture = false; - bool requiresImmediateBinding = false; - bool requiresExplicitResidency = false; - MOCKABLE_VIRTUAL void fillExecObject(ExecObject &execObject, OsContext *osContext, uint32_t vmHandleId, uint32_t drmContextId); void printBOBindingResult(OsContext *osContext, uint32_t vmHandleId, bool bind, int retVal); - void *lockedAddress; // CPU side virtual address - + Drm *drm = nullptr; + uint64_t size; uint64_t unmapSize = 0; uint64_t patIndex = CommonConstants::unsupportedPatIndex; + void *lockedAddress; // CPU side virtual address + size_t colourChunk = 0; + BufferObjectHandleWrapper handle; // i915 gem object handle + StackVec bindExtHandles; + std::vector bindAddresses; + std::atomic refCount; + uint32_t rootDeviceIndex = std::numeric_limits::max(); + uint32_t tilingMode; + + BOType boType = BOType::legacy; CacheRegion cacheRegion = CacheRegion::defaultRegion; CachePolicy cachePolicy = CachePolicy::writeBack; - StackVec bindExtHandles; - + bool perContextVmsUsed = false; + bool isReused = false; + bool boHandleShared = false; + bool allowCapture = false; + bool requiresImmediateBinding = false; + bool requiresExplicitResidency = false; bool colourWithBind = false; - size_t colourChunk = 0; - std::vector bindAddresses; private: uint64_t gpuAddress = 0llu; diff --git a/shared/source/os_interface/linux/drm_memory_manager.cpp b/shared/source/os_interface/linux/drm_memory_manager.cpp index f29b6a5c49..ea92d114e5 100644 --- a/shared/source/os_interface/linux/drm_memory_manager.cpp +++ b/shared/source/os_interface/linux/drm_memory_manager.cpp @@ -632,7 +632,8 @@ GraphicsAllocation *DrmMemoryManager::allocateMemoryByKMD(const AllocationData & GmmRequirements gmmRequirements{}; gmmRequirements.allowLargePages = true; gmmRequirements.preferCompressed = false; - auto gmm = 
std::make_unique(executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getGmmHelper(), allocationData.hostPtr, + auto gmmHelper = executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getGmmHelper(); + auto gmm = std::make_unique(gmmHelper, allocationData.hostPtr, allocationData.size, 0u, CacheSettingsHelper::getGmmUsageType(allocationData.type, allocationData.flags.uncacheable, productHelper), systemMemoryStorageInfo, gmmRequirements); size_t bufferSize = allocationData.size; auto alignment = allocationData.alignment; @@ -645,18 +646,25 @@ GraphicsAllocation *DrmMemoryManager::allocateMemoryByKMD(const AllocationData & int ret = -1; uint32_t handle; auto patIndex = drm.getPatIndex(gmm.get(), allocationData.type, CacheRegion::defaultRegion, CachePolicy::writeBack, false, MemoryPoolHelper::isSystemMemoryPool(memoryPool)); - const bool tryToUseGemCreateExt = !debugManager.flags.DisableGemCreateExtSetPat.get(); + bool tryToUseGemCreateExt = productHelper.useGemCreateExtInAllocateMemoryByKMD(); + if (debugManager.flags.UseGemCreateExtInAllocateMemoryByKMD.get() != -1) { + tryToUseGemCreateExt = debugManager.flags.UseGemCreateExtInAllocateMemoryByKMD.get() == 1; + } + BufferObject::BOType boType{}; if (tryToUseGemCreateExt && drm.getMemoryInfo()) { ret = drm.getMemoryInfo()->createGemExtWithSingleRegion(allocationData.storageInfo.getMemoryBanks(), bufferSize, handle, patIndex, -1, allocationData.flags.isUSMHostAllocation); + boType = getBOTypeFromPatIndex(patIndex, productHelper.isVmBindPatIndexProgrammingSupported()); } if (0 != ret) { auto ioctlHelper = drm.getIoctlHelper(); handle = ioctlHelper->createGem(bufferSize, static_cast(allocationData.storageInfo.memoryBanks.to_ulong())); + boType = BufferObject::BOType::legacy; } std::unique_ptr bo(new BufferObject(allocationData.rootDeviceIndex, &drm, patIndex, handle, bufferSize, maxOsContextCount)); bo->setAddress(gpuRange); + bo->setBOType(boType); auto allocation = new 
DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), nullptr, gpuRange, bufferSize, memoryPool); if (!allocation) { @@ -1918,7 +1926,8 @@ BufferObject *DrmMemoryManager::createBufferObjectInMemoryRegion(uint32_t rootDe if (!bo) { return nullptr; } - + auto &productHelper = executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]->getHelper(); + bo->setBOType(getBOTypeFromPatIndex(patIndex, productHelper.isVmBindPatIndexProgrammingSupported())); bo->setAddress(gpuAddress); return bo; @@ -1977,6 +1986,8 @@ bool DrmMemoryManager::createDrmChunkedAllocation(Drm *drm, DrmAllocation *alloc auto bo = new (std::nothrow) BufferObject(allocation->getRootDeviceIndex(), drm, patIndex, handle, boSize, maxOsContextCount); UNRECOVERABLE_IF(bo == nullptr); + auto &productHelper = executionEnvironment.rootDeviceEnvironments[allocation->getRootDeviceIndex()]->getHelper(); + bo->setBOType(getBOTypeFromPatIndex(patIndex, productHelper.isVmBindPatIndexProgrammingSupported())); bo->setAddress(boAddress); allocation->getBufferObjectToModify(0) = bo; @@ -2087,12 +2098,44 @@ bool DrmMemoryManager::createDrmAllocation(Drm *drm, DrmAllocation *allocation, return true; } +BufferObject::BOType DrmMemoryManager::getBOTypeFromPatIndex(uint64_t patIndex, bool isPatIndexSupported) const { + if (!isPatIndexSupported) { + return BufferObject::BOType::legacy; + } + if (patIndex < 3) { + return BufferObject::BOType::nonCoherent; + } else { + return BufferObject::BOType::coherent; + } +} + bool DrmMemoryManager::retrieveMmapOffsetForBufferObject(uint32_t rootDeviceIndex, BufferObject &bo, uint64_t flags, uint64_t &offset) { constexpr uint64_t mmapOffsetFixed = 4; + constexpr uint64_t mmapOffsetCoherent = I915_MMAP_OFFSET_WB; + constexpr uint64_t mmapOffsetNonCoherent = I915_MMAP_OFFSET_WC; GemMmapOffset mmapOffset = {}; mmapOffset.handle = bo.peekHandle(); - mmapOffset.flags = isLocalMemorySupported(rootDeviceIndex) ? 
mmapOffsetFixed : flags; + if (isLocalMemorySupported(rootDeviceIndex)) { + mmapOffset.flags = mmapOffsetFixed; + } else { + auto &productHelper = executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]->getHelper(); + if (productHelper.useGemCreateExtInAllocateMemoryByKMD()) { + BufferObject::BOType boType = bo.peekBOType(); + switch (boType) { + case NEO::BufferObject::BOType::nonCoherent: + mmapOffset.flags = mmapOffsetNonCoherent; + break; + case NEO::BufferObject::BOType::legacy: + case NEO::BufferObject::BOType::coherent: + default: + mmapOffset.flags = mmapOffsetCoherent; + break; + } + } else { + mmapOffset.flags = flags; + } + } auto &drm = this->getDrm(rootDeviceIndex); auto ioctlHelper = drm.getIoctlHelper(); auto ret = ioctlHelper->ioctl(DrmIoctl::gemMmapOffset, &mmapOffset); @@ -2361,7 +2404,7 @@ GraphicsAllocation *DrmMemoryManager::createSharedUnifiedMemoryAllocation(const } const auto memoryPool = MemoryPool::localMemory; - + auto &productHelper = executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getHelper(); for (auto handleId = 0u; handleId < numHandles; handleId++) { uint32_t handle = 0; @@ -2394,6 +2437,7 @@ GraphicsAllocation *DrmMemoryManager::createSharedUnifiedMemoryAllocation(const uint64_t mmapOffsetWb = ioctlHelper->getDrmParamValue(DrmParam::mmapOffsetWb); uint64_t offset = 0; + bo->setBOType(getBOTypeFromPatIndex(patIndex, productHelper.isVmBindPatIndexProgrammingSupported())); if (!retrieveMmapOffsetForBufferObject(allocationData.rootDeviceIndex, *bo, mmapOffsetWb, offset)) { this->munmapFunction(cpuBasePointer, totalSizeToAlloc); releaseGpuRange(reinterpret_cast(preferredAddress), totalSizeToAlloc, allocationData.rootDeviceIndex); diff --git a/shared/source/os_interface/linux/drm_memory_manager.h b/shared/source/os_interface/linux/drm_memory_manager.h index 985ee6b924..130c79055d 100644 --- a/shared/source/os_interface/linux/drm_memory_manager.h +++ 
b/shared/source/os_interface/linux/drm_memory_manager.h @@ -156,6 +156,7 @@ class DrmMemoryManager : public MemoryManager { BufferObject *createRootDeviceBufferObject(uint32_t rootDeviceIndex); void releaseBufferObject(uint32_t rootDeviceIndex); bool retrieveMmapOffsetForBufferObject(uint32_t rootDeviceIndex, BufferObject &bo, uint64_t flags, uint64_t &offset); + BufferObject::BOType getBOTypeFromPatIndex(uint64_t patIndex, bool isPatIndexSupported) const; std::vector<BufferObject *> pinBBs; std::vector<void *> memoryForPinBBs; diff --git a/shared/source/os_interface/product_helper.h b/shared/source/os_interface/product_helper.h index 1e3b6bdbe9..779d64fd4a 100644 --- a/shared/source/os_interface/product_helper.h +++ b/shared/source/os_interface/product_helper.h @@ -169,6 +169,7 @@ class ProductHelper { virtual bool isMidThreadPreemptionDisallowedForRayTracingKernels() const = 0; virtual bool isBufferPoolAllocatorSupported() const = 0; virtual bool isUsmPoolAllocatorSupported() const = 0; + virtual bool useGemCreateExtInAllocateMemoryByKMD() const = 0; virtual bool isTlbFlushRequired() const = 0; virtual bool isDummyBlitWaRequired() const = 0; virtual bool isDetectIndirectAccessInKernelSupported(const KernelDescriptor &kernelDescriptor, const bool isPrecompiled, const uint32_t kernelIndirectDetectionVersion) const = 0; diff --git a/shared/source/os_interface/product_helper.inl b/shared/source/os_interface/product_helper.inl index fa3682a116..c47e8cd42e 100644 --- a/shared/source/os_interface/product_helper.inl +++ b/shared/source/os_interface/product_helper.inl @@ -609,6 +609,11 @@ bool ProductHelperHw<gfxProduct>::isUsmPoolAllocatorSupported() const { return false; } +template <PRODUCT_FAMILY gfxProduct> +bool ProductHelperHw<gfxProduct>::useGemCreateExtInAllocateMemoryByKMD() const { + return false; +} + template <PRODUCT_FAMILY gfxProduct> void ProductHelperHw<gfxProduct>::fillScmPropertiesSupportStructureBase(StateComputeModePropertiesSupport &propertiesSupport) const { propertiesSupport.coherencyRequired = getScmPropertyCoherencyRequiredSupport(); diff --git 
a/shared/source/os_interface/product_helper_hw.h b/shared/source/os_interface/product_helper_hw.h index 804fe947bc..a2c303b834 100644 --- a/shared/source/os_interface/product_helper_hw.h +++ b/shared/source/os_interface/product_helper_hw.h @@ -117,6 +117,7 @@ class ProductHelperHw : public ProductHelper { bool isMidThreadPreemptionDisallowedForRayTracingKernels() const override; bool isBufferPoolAllocatorSupported() const override; bool isUsmPoolAllocatorSupported() const override; + bool useGemCreateExtInAllocateMemoryByKMD() const override; bool isTlbFlushRequired() const override; bool isDummyBlitWaRequired() const override; bool isDetectIndirectAccessInKernelSupported(const KernelDescriptor &kernelDescriptor, const bool isPrecompiled, const uint32_t kernelIndirectDetectionVersion) const override; diff --git a/shared/source/xe_hpg_core/linux/product_helper_mtl.cpp b/shared/source/xe_hpg_core/linux/product_helper_mtl.cpp index 99e6702a23..efc1a18192 100644 --- a/shared/source/xe_hpg_core/linux/product_helper_mtl.cpp +++ b/shared/source/xe_hpg_core/linux/product_helper_mtl.cpp @@ -22,6 +22,11 @@ uint64_t ProductHelperHw<gfxProduct>::overridePatIndex(bool isUncachedType, uint { } } +template <> +bool ProductHelperHw<gfxProduct>::useGemCreateExtInAllocateMemoryByKMD() const { + return true; +} + template class NEO::ProductHelperHw<gfxProduct>; } // namespace NEO \ No newline at end of file diff --git a/shared/test/common/mocks/linux/mock_drm_memory_manager.h b/shared/test/common/mocks/linux/mock_drm_memory_manager.h index a8c341cabd..726f136e73 100644 --- a/shared/test/common/mocks/linux/mock_drm_memory_manager.h +++ b/shared/test/common/mocks/linux/mock_drm_memory_manager.h @@ -52,6 +52,7 @@ class TestedDrmMemoryManager : public MemoryManagerCreate<DrmMemoryManager> { using DrmMemoryManager::createSharedUnifiedMemoryAllocation; using DrmMemoryManager::eraseSharedBoHandleWrapper; using DrmMemoryManager::eraseSharedBufferObject; + using DrmMemoryManager::getBOTypeFromPatIndex; using 
DrmMemoryManager::getDefaultDrmContextId; using DrmMemoryManager::getDrm; using DrmMemoryManager::getRootDeviceIndex; diff --git a/shared/test/common/test_files/igdrcl.config b/shared/test/common/test_files/igdrcl.config index d2fe954874..e2efba1248 100644 --- a/shared/test/common/test_files/igdrcl.config +++ b/shared/test/common/test_files/igdrcl.config @@ -586,4 +586,5 @@ ExperimentalEnableHostAllocationCache = -1 OverridePatIndexForUncachedTypes = -1 OverridePatIndexForCachedTypes = -1 FlushTlbBeforeCopy = -1 +UseGemCreateExtInAllocateMemoryByKMD = 0 # Please don't edit below this line diff --git a/shared/test/common/test_macros/header/common_matchers.h b/shared/test/common/test_macros/header/common_matchers.h index b05bdecb42..c648c580ca 100644 --- a/shared/test/common/test_macros/header/common_matchers.h +++ b/shared/test/common/test_macros/header/common_matchers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2023 Intel Corporation + * Copyright (C) 2021-2024 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -36,6 +36,7 @@ using IsBeforeXeHpCore = IsBeforeGfxCore; using IsAtLeastXeHpgCore = IsAtLeastGfxCore; using IsAtMostXeHpgCore = IsAtMostGfxCore; +using IsBeforeXeHpgCore = IsBeforeGfxCore; using IsAtLeastXeHpcCore = IsAtLeastGfxCore; using IsAtMostXeHpcCore = IsAtMostGfxCore; diff --git a/shared/test/unit_test/os_interface/linux/drm_memory_manager_tests.cpp b/shared/test/unit_test/os_interface/linux/drm_memory_manager_tests.cpp index 804382e38c..aa43e2d057 100644 --- a/shared/test/unit_test/os_interface/linux/drm_memory_manager_tests.cpp +++ b/shared/test/unit_test/os_interface/linux/drm_memory_manager_tests.cpp @@ -1759,8 +1759,14 @@ TEST_F(DrmMemoryManagerTest, GivenShareableEnabledWhenAskedToCreateGraphicsAlloc mock->ioctlHelper.reset(new MockIoctlHelper(*mock)); mock->queryMemoryInfo(); EXPECT_NE(nullptr, mock->getMemoryInfo()); + auto &productHelper = executionEnvironment->rootDeviceEnvironments[0]->getHelper(); + if 
(debugManager.flags.UseGemCreateExtInAllocateMemoryByKMD.get() == 0 || + !productHelper.useGemCreateExtInAllocateMemoryByKMD()) { + mock->ioctlExpected.gemCreate = 1; + } else { + mock->ioctlExpected.gemCreateExt = 1; + } mock->ioctlExpected.gemWait = 1; - mock->ioctlExpected.gemCreate = 1; mock->ioctlExpected.gemClose = 1; allocationData.size = MemoryConstants::pageSize; @@ -1797,8 +1803,13 @@ TEST_F(DrmMemoryManagerTest, GivenSizeAndAlignmentWhenAskedToCreateGraphicsAlloc memoryManager->freeGraphicsMemory(allocation); ioctlCnt += 1; } while (alignment != 0); - - mock->ioctlExpected.gemCreate = ioctlCnt; + auto &productHelper = executionEnvironment->rootDeviceEnvironments[0]->getHelper(); + if (debugManager.flags.UseGemCreateExtInAllocateMemoryByKMD.get() == 0 || + !productHelper.useGemCreateExtInAllocateMemoryByKMD()) { + mock->ioctlExpected.gemCreate = ioctlCnt; + } else { + mock->ioctlExpected.gemCreateExt = ioctlCnt; + } mock->ioctlExpected.gemWait = ioctlCnt; mock->ioctlExpected.gemClose = ioctlCnt; } @@ -2400,7 +2411,13 @@ TEST_F(DrmMemoryManagerTest, givenDrmMemoryManagerWhenLockUnlockIsCalledOnAlloca // check DRM_IOCTL_I915_GEM_MMAP_OFFSET input params EXPECT_EQ((uint32_t)drmAllocation->getBO()->peekHandle(), mock->mmapOffsetHandle); EXPECT_EQ(0u, mock->mmapOffsetPad); - EXPECT_EQ(static_cast(I915_MMAP_OFFSET_WC), mock->mmapOffsetFlags); + auto expectedMmapOffset = static_cast(I915_MMAP_OFFSET_WC); + auto &productHelper = executionEnvironment->rootDeviceEnvironments[0]->getHelper(); + if (BufferObject::BOType::nonCoherent != drmAllocation->getBO()->peekBOType() && + productHelper.useGemCreateExtInAllocateMemoryByKMD()) { + expectedMmapOffset = static_cast(I915_MMAP_OFFSET_WB); + } + EXPECT_EQ(expectedMmapOffset, mock->mmapOffsetFlags); memoryManager->unlockResource(allocation); EXPECT_EQ(nullptr, drmAllocation->getBO()->peekLockedAddress()); @@ -7136,17 +7153,46 @@ TEST_F(DrmMemoryManagerWithLocalMemoryTest, givenDrmWhenRetrieveMmapOffsetForBuf } 
TEST_F(DrmMemoryManagerTest, givenDrmWhenRetrieveMmapOffsetForBufferObjectIsCalledForSystemMemoryThenApplyCorrectFlags) { - mock->ioctlExpected.gemMmapOffset = 4; + mock->ioctlExpected.gemMmapOffset = 8; BufferObject bo(rootDeviceIndex, mock, 3, 1, 1024, 0); uint64_t offset = 0; bool ret = false; - + auto &productHelper = executionEnvironment->rootDeviceEnvironments[0]->getHelper(); + bo.setBOType(BufferObject::BOType::legacy); for (uint64_t flags : {I915_MMAP_OFFSET_WC, I915_MMAP_OFFSET_WB}) { ret = memoryManager->retrieveMmapOffsetForBufferObject(rootDeviceIndex, bo, flags, offset); EXPECT_TRUE(ret); - EXPECT_EQ(flags, mock->mmapOffsetFlags); + if (productHelper.useGemCreateExtInAllocateMemoryByKMD()) { + EXPECT_EQ(static_cast(I915_MMAP_OFFSET_WB), mock->mmapOffsetFlags); + } else { + EXPECT_EQ(flags, mock->mmapOffsetFlags); + } + } + + bo.setBOType(BufferObject::BOType::coherent); + for (uint64_t flags : {I915_MMAP_OFFSET_WC, I915_MMAP_OFFSET_WB}) { + ret = memoryManager->retrieveMmapOffsetForBufferObject(rootDeviceIndex, bo, flags, offset); + + EXPECT_TRUE(ret); + if (productHelper.useGemCreateExtInAllocateMemoryByKMD()) { + EXPECT_EQ(static_cast(I915_MMAP_OFFSET_WB), mock->mmapOffsetFlags); + } else { + EXPECT_EQ(flags, mock->mmapOffsetFlags); + } + } + + bo.setBOType(BufferObject::BOType::nonCoherent); + for (uint64_t flags : {I915_MMAP_OFFSET_WC, I915_MMAP_OFFSET_WB}) { + ret = memoryManager->retrieveMmapOffsetForBufferObject(rootDeviceIndex, bo, flags, offset); + + EXPECT_TRUE(ret); + if (productHelper.useGemCreateExtInAllocateMemoryByKMD()) { + EXPECT_EQ(static_cast(I915_MMAP_OFFSET_WC), mock->mmapOffsetFlags); + } else { + EXPECT_EQ(flags, mock->mmapOffsetFlags); + } } mock->failOnMmapOffset = true; @@ -7155,7 +7201,28 @@ TEST_F(DrmMemoryManagerTest, givenDrmWhenRetrieveMmapOffsetForBufferObjectIsCall ret = memoryManager->retrieveMmapOffsetForBufferObject(rootDeviceIndex, bo, flags, offset); EXPECT_FALSE(ret); - EXPECT_EQ(flags, mock->mmapOffsetFlags); + 
if (productHelper.useGemCreateExtInAllocateMemoryByKMD()) { + EXPECT_EQ(static_cast(I915_MMAP_OFFSET_WC), mock->mmapOffsetFlags); + } else { + EXPECT_EQ(flags, mock->mmapOffsetFlags); + } + } +} + +HWTEST_F(DrmMemoryManagerTest, givenDrmWhenGetBOTypeFromPatIndexIsCalledThenReturnCorrectBOType) { + const bool isPatIndexSupported = memoryManager->getGmmHelper(mockRootDeviceIndex)->getRootDeviceEnvironment().getProductHelper().isVmBindPatIndexProgrammingSupported(); + if (!isPatIndexSupported) { + EXPECT_EQ(BufferObject::BOType::legacy, memoryManager->getBOTypeFromPatIndex(0, isPatIndexSupported)); + EXPECT_EQ(BufferObject::BOType::legacy, memoryManager->getBOTypeFromPatIndex(1, isPatIndexSupported)); + EXPECT_EQ(BufferObject::BOType::legacy, memoryManager->getBOTypeFromPatIndex(2, isPatIndexSupported)); + EXPECT_EQ(BufferObject::BOType::legacy, memoryManager->getBOTypeFromPatIndex(3, isPatIndexSupported)); + EXPECT_EQ(BufferObject::BOType::legacy, memoryManager->getBOTypeFromPatIndex(4, isPatIndexSupported)); + } else { + EXPECT_EQ(BufferObject::BOType::nonCoherent, memoryManager->getBOTypeFromPatIndex(0, isPatIndexSupported)); + EXPECT_EQ(BufferObject::BOType::nonCoherent, memoryManager->getBOTypeFromPatIndex(1, isPatIndexSupported)); + EXPECT_EQ(BufferObject::BOType::nonCoherent, memoryManager->getBOTypeFromPatIndex(2, isPatIndexSupported)); + EXPECT_EQ(BufferObject::BOType::coherent, memoryManager->getBOTypeFromPatIndex(3, isPatIndexSupported)); + EXPECT_EQ(BufferObject::BOType::coherent, memoryManager->getBOTypeFromPatIndex(4, isPatIndexSupported)); } }