Add prelim DRM page fault allocation check support

Signed-off-by: Daniel Chabrowski <daniel.chabrowski@intel.com>
Related-To: NEO-6591
Author: Daniel Chabrowski
Date: 2022-02-10 16:16:37 +00:00
Committed by: Compute-Runtime-Automation
Parent: d1f2e40e5f
Commit: 23999110ed
13 changed files with 181 additions and 32 deletions

@@ -18,7 +18,6 @@ set(NEO_CORE_OS_INTERFACE_LINUX
${CMAKE_CURRENT_SOURCE_DIR}/driver_info_linux.h
${CMAKE_CURRENT_SOURCE_DIR}/drm_allocation.cpp
${CMAKE_CURRENT_SOURCE_DIR}/drm_allocation.h
${CMAKE_CURRENT_SOURCE_DIR}${BRANCH_DIR_SUFFIX}drm_allocation_extended.cpp
${CMAKE_CURRENT_SOURCE_DIR}/drm_buffer_object.cpp
${CMAKE_CURRENT_SOURCE_DIR}/drm_buffer_object.h
${CMAKE_CURRENT_SOURCE_DIR}${BRANCH_DIR_SUFFIX}drm_buffer_object_extended.cpp

@@ -134,6 +134,34 @@ int DrmAllocation::bindBO(BufferObject *bo, OsContext *osContext, uint32_t vmHan
    return retVal;
}

int DrmAllocation::bindBOs(OsContext *osContext, uint32_t vmHandleId, std::vector<BufferObject *> *bufferObjects, bool bind) {
    int retVal = 0;
    if (this->storageInfo.getNumBanks() > 1) {
        auto &bos = this->getBOs();
        if (this->storageInfo.tileInstanced) {
            auto bo = bos[vmHandleId];
            retVal = bindBO(bo, osContext, vmHandleId, bufferObjects, bind);
            if (retVal) {
                return retVal;
            }
        } else {
            for (auto bo : bos) {
                retVal = bindBO(bo, osContext, vmHandleId, bufferObjects, bind);
                if (retVal) {
                    return retVal;
                }
            }
        }
    } else {
        auto bo = this->getBO();
        retVal = bindBO(bo, osContext, vmHandleId, bufferObjects, bind);
        if (retVal) {
            return retVal;
        }
    }
    return 0;
}

void DrmAllocation::registerBOBindExtHandle(Drm *drm) {
    if (!drm->resourceRegistrationEnabled()) {
        return;
@@ -205,6 +233,23 @@ void DrmAllocation::markForCapture() {
    }
}

bool DrmAllocation::shouldAllocationPageFault(const Drm *drm) {
    if (!drm->hasPageFaultSupport()) {
        return false;
    }

    if (DebugManager.flags.EnableImplicitMigrationOnFaultableHardware.get() != -1) {
        return DebugManager.flags.EnableImplicitMigrationOnFaultableHardware.get();
    }

    switch (this->allocationType) {
    case AllocationType::UNIFIED_SHARED_MEMORY:
        return DebugManager.flags.UseKmdMigration.get();
    default:
        return false;
    }
}

bool DrmAllocation::setMemAdvise(Drm *drm, MemAdviseFlags flags) {
    bool success = true;
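
As a reading aid, a minimal standalone sketch of the precedence shouldAllocationPageFault follows is shown below. The free function, the trimmed enum, and the plain int parameters standing in for DebugManager flag values are hypothetical stand-ins, not code from this commit.

// Hypothetical standalone model of the check above: hardware page-fault
// support gates everything, an explicit EnableImplicitMigrationOnFaultableHardware
// setting overrides the per-type policy, and otherwise only unified shared
// memory allocations fault, following the UseKmdMigration setting.
enum class AllocationType { UNIFIED_SHARED_MEMORY, BUFFER };

bool shouldPageFault(bool hwPageFaultSupport,
                     int enableImplicitMigrationFlag, // -1 means "flag not set"
                     int useKmdMigrationFlag,
                     AllocationType type) {
    if (!hwPageFaultSupport) {
        return false;
    }
    if (enableImplicitMigrationFlag != -1) {
        return enableImplicitMigrationFlag != 0; // explicit override wins
    }
    switch (type) {
    case AllocationType::UNIFIED_SHARED_MEMORY:
        return useKmdMigrationFlag != 0; // mirrors the int-to-bool conversion above
    default:
        return false;
    }
}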

@@ -1,24 +0,0 @@
/*
 * Copyright (C) 2020-2022 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "shared/source/os_interface/linux/drm_allocation.h"
#include "shared/source/os_interface/linux/drm_buffer_object.h"
#include "shared/source/os_interface/linux/drm_neo.h"
#include "shared/source/os_interface/os_context.h"

namespace NEO {

int DrmAllocation::bindBOs(OsContext *osContext, uint32_t vmHandleId, std::vector<BufferObject *> *bufferObjects, bool bind) {
    auto bo = this->getBO();
    return bindBO(bo, osContext, vmHandleId, bufferObjects, bind);
}

bool DrmAllocation::shouldAllocationPageFault(const Drm *drm) {
    return false;
}

} // namespace NEO

@@ -1180,4 +1180,21 @@ bool Drm::queryTopology(const HardwareInfo &hwInfo, QueryTopologyData &topologyD
    return retVal;
}

void Drm::queryPageFaultSupport() {
    if (const auto paramId = ioctlHelper->getHasPageFaultParamId(); paramId) {
        int support = 0;
        const auto ret = getParamIoctl(*paramId, &support);
        pageFaultSupported = (0 == ret) && (support > 0);
    }
}

bool Drm::hasPageFaultSupport() const {
    if (DebugManager.flags.EnableRecoverablePageFaults.get() != -1) {
        return DebugManager.flags.EnableRecoverablePageFaults.get();
    }

    return pageFaultSupported;
}

} // namespace NEO
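
For background, getParamIoctl presumably wraps the standard i915 GETPARAM ioctl; a hedged sketch of that round trip is shown below. The helper name queryI915Param and the raw file-descriptor parameter are illustrative only, and the i915_drm.h include path depends on how the libdrm or kernel headers are installed (prelim parameter ids additionally need the prelim uAPI headers).

#include <sys/ioctl.h>
#include <drm/i915_drm.h> // header path may differ per installation

// Illustrative helper: ask the kernel for one i915 parameter value.
// Returns the ioctl result; on success the kernel has written *value.
static int queryI915Param(int drmFd, int param, int *value) {
    drm_i915_getparam_t getParam{};
    getParam.param = param; // e.g. the id returned by getHasPageFaultParamId()
    getParam.value = value; // kernel writes the capability here (>0 means supported)
    return ioctl(drmFd, DRM_IOCTL_I915_GETPARAM, &getParam);
}

In queryPageFaultSupport above, pageFaultSupported becomes true only when that ioctl succeeds and the reported value is positive; EnableRecoverablePageFaults then acts purely as a debug-time override of the cached result.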

@@ -80,11 +80,4 @@ uint32_t Drm::createDrmContextExt(drm_i915_gem_context_create_ext &gcc, uint32_t
    return ioctl(DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &gcc);
}

void Drm::queryPageFaultSupport() {
}

bool Drm::hasPageFaultSupport() const {
    return pageFaultSupported;
}

} // namespace NEO

@@ -13,6 +13,7 @@
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>

@@ -83,6 +84,7 @@ class IoctlHelper {
    virtual int32_t getComputeEngineClass() = 0;
    virtual int execBuffer(Drm *drm, drm_i915_gem_execbuffer2 *execBuffer, uint64_t completionGpuAddress, uint32_t counterValue) = 0;
    virtual bool completionFenceExtensionSupported(Drm &drm, const HardwareInfo &hwInfo) = 0;
    virtual std::optional<int> getHasPageFaultParamId() = 0;
};

class IoctlHelperUpstream : public IoctlHelper {
@@ -109,6 +111,7 @@ class IoctlHelperUpstream : public IoctlHelper {
    int32_t getComputeEngineClass() override;
    int execBuffer(Drm *drm, drm_i915_gem_execbuffer2 *execBuffer, uint64_t completionGpuAddress, uint32_t counterValue) override;
    bool completionFenceExtensionSupported(Drm &drm, const HardwareInfo &hwInfo) override;
    std::optional<int> getHasPageFaultParamId() override;
};

template <PRODUCT_FAMILY gfxProduct>
@@ -148,6 +151,7 @@ class IoctlHelperPrelim20 : public IoctlHelper {
    int32_t getComputeEngineClass() override;
    int execBuffer(Drm *drm, drm_i915_gem_execbuffer2 *execBuffer, uint64_t completionGpuAddress, uint32_t counterValue) override;
    bool completionFenceExtensionSupported(Drm &drm, const HardwareInfo &hwInfo) override;
    std::optional<int> getHasPageFaultParamId() override;
};

} // namespace NEO
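
Returning std::optional<int> lets a helper report "no such parameter" without reserving a magic value, which is exactly what queryPageFaultSupport checks before issuing the ioctl. A hypothetical test double (not part of this commit, and assuming IoctlHelperPrelim20 is instantiable in tests and the header path matches the directory shown in the CMake hunk above) shows how the new virtual could be stubbed to force either outcome:

#include <optional>
#include "shared/source/os_interface/linux/ioctl_helper.h" // assumed path

// Hypothetical mock for unit tests: returning nullopt makes
// Drm::queryPageFaultSupport skip the GETPARAM query entirely, while
// returning a parameter id exercises the prelim query path.
class MockIoctlHelper : public NEO::IoctlHelperPrelim20 {
  public:
    std::optional<int> hasPageFaultParamId = std::nullopt;

    std::optional<int> getHasPageFaultParamId() override {
        return hasPageFaultParamId;
    }
};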

@@ -247,4 +247,8 @@ int32_t IoctlHelperPrelim20::getComputeEngineClass() {
    return PRELIM_I915_ENGINE_CLASS_COMPUTE;
}

std::optional<int> IoctlHelperPrelim20::getHasPageFaultParamId() {
    return PRELIM_I915_PARAM_HAS_PAGE_FAULT;
};

} // namespace NEO

@@ -146,4 +146,8 @@ bool IoctlHelperUpstream::completionFenceExtensionSupported(Drm &drm, const Hard
    return false;
}

std::optional<int> IoctlHelperUpstream::getHasPageFaultParamId() {
    return std::nullopt;
};

} // namespace NEO