Mirror of https://github.com/intel/compute-runtime.git
fix: report ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT correctly
At the moment the capability is returned based only on the value reported by the `productHelper`, which is too liberal. The capability must also take into account the support reported by the `memoryManager`; only then is the reported support aligned with the actual handling of USM allocations.

Related-To: NEO-10040
Signed-off-by: Maciej Bielski <maciej.bielski@intel.com>
Committed by: Compute-Runtime-Automation
Parent: 1abb48c3e0
Commit: a8779c2387
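For orientation before the diff, here is a minimal sketch (not actual NEO/Level Zero driver code) of the reporting path the commit message refers to: the concurrent-access capability should only be advertised when the product-specific USM capabilities and the memory manager's runtime state both allow it. The free function `deriveSingleDeviceSharedMemAccessCaps` and its wiring are hypothetical; only `getSingleDeviceSharedMemCapabilities`, `isKmdMigrationAvailable`, and the `UnifiedSharedMemoryFlags` bits actually appear in the diff below.

#include <level_zero/ze_api.h>
// NEO types (ProductHelper, MemoryManager, UnifiedSharedMemoryFlags) are assumed
// to come from the compute-runtime headers; exact include paths are omitted here.

ze_memory_access_cap_flags_t deriveSingleDeviceSharedMemAccessCaps(const NEO::ProductHelper &productHelper,
                                                                   NEO::MemoryManager &memoryManager,
                                                                   uint32_t rootDeviceIndex) {
    // Runtime availability of KMD migration is owned by the memory manager ...
    const bool kmdMigrationAvailable = memoryManager.isKmdMigrationAvailable(rootDeviceIndex);
    // ... and is now passed into the product-specific capability query.
    const uint64_t usmCaps = productHelper.getSingleDeviceSharedMemCapabilities(kmdMigrationAvailable);

    ze_memory_access_cap_flags_t caps = 0;
    if (usmCaps & NEO::UnifiedSharedMemoryFlags::access) {
        caps |= ZE_MEMORY_ACCESS_CAP_FLAG_RW;
    }
    if (usmCaps & NEO::UnifiedSharedMemoryFlags::atomicAccess) {
        caps |= ZE_MEMORY_ACCESS_CAP_FLAG_ATOMIC;
    }
    if (usmCaps & NEO::UnifiedSharedMemoryFlags::concurrentAccess) {
        caps |= ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT; // only set when migration is actually available
    }
    if (usmCaps & NEO::UnifiedSharedMemoryFlags::concurrentAtomicAccess) {
        caps |= ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT_ATOMIC;
    }
    return caps;
}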
@@ -26,6 +26,7 @@ struct KmdNotifyProperties;
struct AllocationData;
class CommandStreamReceiver;
class Device;
class Drm;
enum class LocalMemoryAccessMode;
struct FrontEndPropertiesSupport;
struct HardwareInfo;
@@ -82,7 +83,7 @@ class ProductHelper {
     virtual void adjustSamplerState(void *sampler, const HardwareInfo &hwInfo) const = 0;
     virtual uint64_t getHostMemCapabilities(const HardwareInfo *hwInfo) const = 0;
     virtual uint64_t getDeviceMemCapabilities() const = 0;
-    virtual uint64_t getSingleDeviceSharedMemCapabilities() const = 0;
+    virtual uint64_t getSingleDeviceSharedMemCapabilities(bool isKmdMigrationAvailable) const = 0;
     virtual uint64_t getCrossDeviceSharedMemCapabilities() const = 0;
     virtual uint64_t getSharedSystemMemCapabilities(const HardwareInfo *hwInfo) const = 0;
     virtual std::vector<int32_t> getKernelSupportedThreadArbitrationPolicies() const = 0;
@@ -148,10 +148,10 @@ uint64_t ProductHelperHw<gfxProduct>::getDeviceMemCapabilities() const {
 }
 
 template <PRODUCT_FAMILY gfxProduct>
-uint64_t ProductHelperHw<gfxProduct>::getSingleDeviceSharedMemCapabilities() const {
+uint64_t ProductHelperHw<gfxProduct>::getSingleDeviceSharedMemCapabilities(bool isKmdMigrationAvailable) const {
     uint64_t capabilities = UnifiedSharedMemoryFlags::access | UnifiedSharedMemoryFlags::atomicAccess;
 
-    if (isKmdMigrationSupported() || getConcurrentAccessMemCapabilitiesSupported(UsmAccessCapabilities::sharedSingleDevice)) {
+    if (isKmdMigrationAvailable || getConcurrentAccessMemCapabilitiesSupported(UsmAccessCapabilities::sharedSingleDevice)) {
         capabilities |= UnifiedSharedMemoryFlags::concurrentAccess | UnifiedSharedMemoryFlags::concurrentAtomicAccess;
     }
 
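`getConcurrentAccessMemCapabilitiesSupported` is unchanged by this commit and is not shown here; judging from the test below that gates on a `capabilityBitset` and from the `EnableUsmConcurrentAccessSupport` debug key used in the second test, it presumably acts as a per-capability override along the lines of the following sketch (an assumption for illustration, not the actual implementation):

#include <bitset>
#include <cstdint>

// Local stand-in for NEO's UsmAccessCapabilities; the enumerator order is assumed.
enum class UsmAccessCapabilities : uint32_t { host = 0, device = 1, sharedSingleDevice = 2, sharedCrossDevice = 3 };

// Assumed behaviour: the debug key is read as a bitmask with one bit per
// UsmAccessCapabilities entry; a set bit force-enables concurrent access for
// that USM kind even when KMD migration is not available.
bool concurrentAccessOverrideEnabled(int64_t enableUsmConcurrentAccessSupportFlag, UsmAccessCapabilities capability) {
    const std::bitset<4> capabilityBitset(static_cast<uint64_t>(enableUsmConcurrentAccessSupportFlag));
    return capabilityBitset.test(static_cast<uint32_t>(capability));
}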
@@ -24,7 +24,7 @@ class ProductHelperHw : public ProductHelper {
     void adjustSamplerState(void *sampler, const HardwareInfo &hwInfo) const override;
     uint64_t getHostMemCapabilities(const HardwareInfo *hwInfo) const override;
     uint64_t getDeviceMemCapabilities() const override;
-    uint64_t getSingleDeviceSharedMemCapabilities() const override;
+    uint64_t getSingleDeviceSharedMemCapabilities(bool isKmdMigrationAvailable) const override;
     uint64_t getCrossDeviceSharedMemCapabilities() const override;
     uint64_t getSharedSystemMemCapabilities(const HardwareInfo *hwInfo) const override;
     std::vector<int32_t> getKernelSupportedThreadArbitrationPolicies() const override;
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -172,6 +172,7 @@ class TestedDrmMemoryManager : public MemoryManagerCreate<DrmMemoryManager> {
         return DrmMemoryManager::acquireGpuRangeWithCustomAlignment(size, rootDeviceIndex, heapIndex, alignment);
     }
     ADDMETHOD(isLimitedRange, bool, true, false, (uint32_t rootDeviceIndex), (rootDeviceIndex));
+    ADDMETHOD(isKmdMigrationAvailable, bool, true, false, (uint32_t rootDeviceIndex), (rootDeviceIndex));
 
     DeviceBitfield computeStorageInfoMemoryBanks(const AllocationProperties &properties, DeviceBitfield preferredBank, DeviceBitfield allBanks) override {
         ++computeStorageInfoMemoryBanksCalled;
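`ADDMETHOD` is the mock-generation macro used throughout NEO's test mocks; the exact members it emits are not visible in this diff, but conceptually it gives tests a hook roughly like the self-contained sketch below (the member names and the simplified base class are assumptions for illustration only):

#include <cstdint>

// Stand-in for the real DrmMemoryManager; only the method relevant here is modelled.
struct DrmMemoryManagerStandIn {
    virtual ~DrmMemoryManagerStandIn() = default;
    virtual bool isKmdMigrationAvailable(uint32_t /*rootDeviceIndex*/) { return false; }
};

// Rough equivalent of ADDMETHOD(isKmdMigrationAvailable, bool, true, false, (uint32_t rootDeviceIndex), (rootDeviceIndex)):
// forward to the base implementation by default, or return a canned value a test has set.
struct TestedDrmMemoryManagerSketch : DrmMemoryManagerStandIn {
    bool isKmdMigrationAvailableCallBase = true; // assumed name: call the real implementation by default
    bool isKmdMigrationAvailableResult = false;  // assumed name: canned result used when not forwarding
    uint32_t isKmdMigrationAvailableCalled = 0u; // assumed name: call counter for assertions

    bool isKmdMigrationAvailable(uint32_t rootDeviceIndex) override {
        ++isKmdMigrationAvailableCalled;
        if (isKmdMigrationAvailableCallBase) {
            return DrmMemoryManagerStandIn::isKmdMigrationAvailable(rootDeviceIndex);
        }
        return isKmdMigrationAvailableResult;
    }
};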
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -43,6 +43,7 @@ struct MockRootDeviceEnvironment : public RootDeviceEnvironment {
 struct MockExecutionEnvironment : ExecutionEnvironment {
     using ExecutionEnvironment::adjustCcsCountImpl;
     using ExecutionEnvironment::directSubmissionController;
+    using ExecutionEnvironment::memoryManager;
     using ExecutionEnvironment::rootDeviceEnvironments;
 
     ~MockExecutionEnvironment() override = default;
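A `using` declaration like the new `using ExecutionEnvironment::memoryManager;` is how NEO's mocks re-expose (or simply document) the base-class members they intend to manipulate; if the member is protected in the base class, the declaration also lifts it to public visibility so the vm-bind test below can replace it directly. A minimal, generic C++ illustration of that access-lifting effect (placeholder names, not NEO code):

#include <memory>

struct ProductionEnvironment {
  protected:
    std::unique_ptr<int> memoryManager; // protected in this illustration
};

struct MockEnvironment : ProductionEnvironment {
    using ProductionEnvironment::memoryManager; // now publicly accessible to tests
};

int main() {
    MockEnvironment env;
    env.memoryManager = std::make_unique<int>(42); // direct replacement, as the test does with MockDrmMemoryManager
    return 0;
}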
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2021-2024 Intel Corporation
+ * Copyright (C) 2021-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -59,7 +59,7 @@ uint64_t ProductHelperHw<IGFX_UNKNOWN>::getDeviceMemCapabilities() const {
 }
 
 template <>
-uint64_t ProductHelperHw<IGFX_UNKNOWN>::getSingleDeviceSharedMemCapabilities() const {
+uint64_t ProductHelperHw<IGFX_UNKNOWN>::getSingleDeviceSharedMemCapabilities(bool) const {
     return 0;
 }
 
@@ -13,6 +13,7 @@
 #include "shared/test/common/helpers/engine_descriptor_helper.h"
 #include "shared/test/common/libult/linux/drm_query_mock.h"
 #include "shared/test/common/mocks/linux/mock_drm_allocation.h"
+#include "shared/test/common/mocks/linux/mock_drm_memory_manager.h"
 #include "shared/test/common/mocks/linux/mock_os_context_linux.h"
 #include "shared/test/common/mocks/mock_execution_environment.h"
@@ -179,33 +180,35 @@ TEST(DrmVmBindTest, givenUseKmdMigrationWhenCallingBindBoOnUnifiedSharedMemoryTh
 }
 
 TEST(DrmVmBindTest, givenDrmWithPageFaultSupportWhenCallingBindBoOnUnifiedSharedMemoryThenMarkAllocationShouldPageFaultWhenKmdMigrationIsSupported) {
+    constexpr auto rootDeviceIndex{0U};
     auto executionEnvironment = std::make_unique<MockExecutionEnvironment>();
-    executionEnvironment->rootDeviceEnvironments[0]->initGmm();
-    executionEnvironment->initializeMemoryManager();
+    executionEnvironment->rootDeviceEnvironments[rootDeviceIndex]->initGmm();
+    executionEnvironment->rootDeviceEnvironments[rootDeviceIndex]->osInterface.reset(new NEO::OSInterface);
 
-    DrmQueryMock drm(*executionEnvironment->rootDeviceEnvironments[0]);
-    drm.pageFaultSupported = true;
+    auto drm{new DrmQueryMock{*executionEnvironment->rootDeviceEnvironments[rootDeviceIndex]}};
+    drm->pageFaultSupported = true;
+    executionEnvironment->rootDeviceEnvironments[rootDeviceIndex]->osInterface->setDriverModel(std::unique_ptr<DriverModel>{drm});
+    executionEnvironment->memoryManager.reset(new MockDrmMemoryManager{GemCloseWorkerMode::gemCloseWorkerInactive, false, false, *executionEnvironment});
 
-    OsContextLinux osContext(drm, 0, 0u, EngineDescriptorHelper::getDefaultDescriptor());
+    OsContextLinux osContext(*drm, rootDeviceIndex, 0u, EngineDescriptorHelper::getDefaultDescriptor());
     osContext.ensureContextInitialized(false);
     uint32_t vmHandleId = 0;
 
-    MockBufferObject bo(0u, &drm, 3, 0, 0, 1);
-    MockDrmAllocation allocation(0u, AllocationType::unifiedSharedMemory, MemoryPool::localMemory);
+    MockBufferObject bo(rootDeviceIndex, drm, 3, 0, 0, 1);
+    MockDrmAllocation allocation(rootDeviceIndex, AllocationType::unifiedSharedMemory, MemoryPool::localMemory);
     allocation.bufferObjects[0] = &bo;
 
     allocation.bindBO(&bo, &osContext, vmHandleId, nullptr, true, false);
 
-    auto &productHelper = drm.getRootDeviceEnvironment().getHelper<ProductHelper>();
-    auto kmdMigrationSupported = productHelper.isKmdMigrationSupported();
+    const bool isKmdMigrationAvailable{executionEnvironment->memoryManager->isKmdMigrationAvailable(rootDeviceIndex)};
 
-    if (kmdMigrationSupported) {
-        EXPECT_TRUE(allocation.shouldAllocationPageFault(&drm));
+    if (isKmdMigrationAvailable) {
+        EXPECT_TRUE(allocation.shouldAllocationPageFault(drm));
         EXPECT_FALSE(bo.isExplicitResidencyRequired());
-        EXPECT_EQ(DrmPrelimHelper::getImmediateVmBindFlag(), drm.context.receivedVmBind->flags);
+        EXPECT_EQ(DrmPrelimHelper::getImmediateVmBindFlag(), drm->context.receivedVmBind->flags);
     } else {
-        EXPECT_FALSE(allocation.shouldAllocationPageFault(&drm));
+        EXPECT_FALSE(allocation.shouldAllocationPageFault(drm));
         EXPECT_TRUE(bo.isExplicitResidencyRequired());
-        EXPECT_EQ(DrmPrelimHelper::getImmediateVmBindFlag() | DrmPrelimHelper::getMakeResidentVmBindFlag(), drm.context.receivedVmBind->flags);
+        EXPECT_EQ(DrmPrelimHelper::getImmediateVmBindFlag() | DrmPrelimHelper::getMakeResidentVmBindFlag(), drm->context.receivedVmBind->flags);
    }
 }
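Note that the test now derives its expectation from `memoryManager->isKmdMigrationAvailable(rootDeviceIndex)` rather than from `productHelper.isKmdMigrationSupported()`. The memory manager's check is not part of this diff; presumably it folds the product capability together with runtime driver state (and any debug-key override), roughly along these lines (an assumption for illustration, not the actual `DrmMemoryManager` code):

#include <cstdint>

// Hypothetical sketch of the runtime decision the test now relies on.
// Parameter names mirror concepts from the diff; the real logic may differ.
bool isKmdMigrationAvailableSketch(bool productSupportsKmdMigration, // productHelper->isKmdMigrationSupported()
                                   bool drmHasPageFaultSupport,      // e.g. drm->pageFaultSupported in the mock above
                                   int32_t debugOverride) {          // e.g. a UseKmdMigration-style debug key, -1 = unset
    if (debugOverride != -1) {
        return debugOverride != 0; // explicit override wins
    }
    return productSupportsKmdMigration && drmHasPageFaultSupport;
}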
@@ -115,7 +115,8 @@ HWTEST_F(ProductHelperTest, givenProductHelperWhenGettingMemoryCapabilitiesThenC
         }
     }
 
-    auto singleDeviceSharedMemCapabilities = productHelper->getSingleDeviceSharedMemCapabilities();
+    constexpr bool isKmdMigrationAvailable{false};
+    auto singleDeviceSharedMemCapabilities = productHelper->getSingleDeviceSharedMemCapabilities(isKmdMigrationAvailable);
     if (singleDeviceSharedMemCapabilities > 0) {
         if (capabilityBitset.test(static_cast<uint32_t>(UsmAccessCapabilities::sharedSingleDevice))) {
             EXPECT_TRUE(UnifiedSharedMemoryFlags::concurrentAccess & singleDeviceSharedMemCapabilities);
@@ -142,11 +143,15 @@ HWTEST_F(ProductHelperTest, givenProductHelperWhenGettingMemoryCapabilitiesThenC
 }
 
 HWTEST_F(ProductHelperTest, givenProductHelperAndSingleDeviceSharedMemAccessConcurrentAtomicEnabledIfKmdMigrationEnabled) {
     DebugManagerStateRestore restore;
     debugManager.flags.EnableUsmConcurrentAccessSupport.set(0);
 
-    auto singleDeviceSharedMemCapabilities = productHelper->getSingleDeviceSharedMemCapabilities();
-    if ((singleDeviceSharedMemCapabilities > 0) && (productHelper->isKmdMigrationSupported())) {
-        EXPECT_TRUE(UnifiedSharedMemoryFlags::concurrentAccess & singleDeviceSharedMemCapabilities);
-        EXPECT_TRUE(UnifiedSharedMemoryFlags::concurrentAtomicAccess & singleDeviceSharedMemCapabilities);
+    for (const bool isKmdMigrationAvailable : std::array<bool, 2>{false, true}) {
+        auto singleDeviceSharedMemCapabilities = productHelper->getSingleDeviceSharedMemCapabilities(isKmdMigrationAvailable);
+        if (singleDeviceSharedMemCapabilities > 0) {
+            EXPECT_EQ(isKmdMigrationAvailable, !!(UnifiedSharedMemoryFlags::concurrentAccess & singleDeviceSharedMemCapabilities));
+            EXPECT_EQ(isKmdMigrationAvailable, !!(UnifiedSharedMemoryFlags::concurrentAtomicAccess & singleDeviceSharedMemCapabilities));
+        }
     }
 }
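A small point on the updated assertions: `!!(flag & caps)` collapses the masked bit to a plain `bool`, so `EXPECT_EQ` checks in both directions that the concurrent flags track `isKmdMigrationAvailable`; the old test only asserted the positive case. Minimal illustration:

#include <cassert>
#include <cstdint>

int main() {
    constexpr uint64_t concurrentAccess = 1ull << 2; // illustrative bit, not NEO's actual encoding
    const uint64_t caps = concurrentAccess;
    const bool isKmdMigrationAvailable = true;
    // Without '!!' the comparison would be between bool and uint64_t (e.g. 1 vs 4).
    assert(isKmdMigrationAvailable == !!(caps & concurrentAccess));
    return 0;
}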