Get rid of UNRECOVERABLE_IF in MemoryManager constructor

Related-To: NEO-5053
Change-Id: Ibf955c760e61e34c4d38cbb5071ef712bae1c518
Signed-off-by: Igor Venevtsev <igor.venevtsev@intel.com>

Commit bd9695a19a (parent 47f5867e8f), committed by sys_ocldev.
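The pattern throughout the diff below: UNRECOVERABLE_IF, which terminates the process when its condition holds, is replaced with early "return false" paths, and GfxPartition::init and initAdditionalRange change from void to bool so that MemoryManager can observe the outcome. A minimal standalone sketch of the before/after pattern (simplified names, not the NEO sources):

    #include <cstddef>

    // Before: a violated precondition terminated the process.
    //     UNRECOVERABLE_IF(cpuAddressRangeSizeToReserve == 0);
    // After: the failure is reported to the caller instead.
    bool initSketch(std::size_t cpuAddressRangeSizeToReserve) {
        if (cpuAddressRangeSizeToReserve == 0) {
            return false; // recoverable: the caller decides how to react
        }
        // ... rest of the initialization ...
        return true;
    }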
@@ -64,7 +64,7 @@ void GfxPartition::freeGpuAddressRange(uint64_t ptr, size_t size) {
     }
 }
 
-void GfxPartition::init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToReserve, uint32_t rootDeviceIndex, size_t numRootDevices, bool useFrontWindowPool) {
+bool GfxPartition::init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToReserve, uint32_t rootDeviceIndex, size_t numRootDevices, bool useFrontWindowPool) {
 
     /*
      * I. 64-bit builds:
@@ -125,10 +125,16 @@ void GfxPartition::init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToRe
         heapInit(HeapIndex::HEAP_SVM, 0ull, gfxBase);
     } else if (gpuAddressSpace == maxNBitValue(47)) {
         if (reservedCpuAddressRange.alignedPtr == nullptr) {
-            UNRECOVERABLE_IF(cpuAddressRangeSizeToReserve == 0);
+            if (cpuAddressRangeSizeToReserve == 0) {
+                return false;
+            }
             reservedCpuAddressRange = osMemory->reserveCpuAddressRange(cpuAddressRangeSizeToReserve, GfxPartition::heapGranularity);
-            UNRECOVERABLE_IF(reservedCpuAddressRange.originalPtr == nullptr);
-            UNRECOVERABLE_IF(!isAligned<GfxPartition::heapGranularity>(reservedCpuAddressRange.alignedPtr));
+            if (reservedCpuAddressRange.originalPtr == nullptr) {
+                return false;
+            }
+            if (!isAligned<GfxPartition::heapGranularity>(reservedCpuAddressRange.alignedPtr)) {
+                return false;
+            }
         }
         gfxBase = reinterpret_cast<uint64_t>(reservedCpuAddressRange.alignedPtr);
         gfxTop = gfxBase + cpuAddressRangeSizeToReserve;
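Each new guard maps one-to-one onto a former assert. The isAligned<GfxPartition::heapGranularity> check verifies the reserved CPU range starts on a heap-granularity boundary; a rough stand-in for such a helper (an assumption, not NEO's implementation):

    #include <cstdint>

    // Valid only for power-of-two alignments, which heapGranularity is
    // assumed to be.
    template <std::uint64_t alignment>
    bool isAlignedSketch(const void *ptr) {
        // An aligned address has all bits below the alignment boundary cleared.
        return (reinterpret_cast<std::uintptr_t>(ptr) & (alignment - 1)) == 0;
    }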
@@ -137,7 +143,9 @@ void GfxPartition::init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToRe
             gfxBase = 0ull;
             heapInit(HeapIndex::HEAP_SVM, 0ull, 0ull);
         } else {
-            initAdditionalRange(gpuAddressSpace, gfxBase, gfxTop, rootDeviceIndex, numRootDevices);
+            if (!initAdditionalRange(gpuAddressSpace, gfxBase, gfxTop, rootDeviceIndex, numRootDevices)) {
+                return false;
+            }
         }
     }
 
@@ -161,6 +169,8 @@ void GfxPartition::init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToRe
     // Split HEAP_STANDARD64K among root devices
     auto gfxStandard64KBSize = alignDown(gfxStandardSize / numRootDevices, GfxPartition::heapGranularity);
     heapInit(HeapIndex::HEAP_STANDARD64KB, gfxBase + rootDeviceIndex * gfxStandard64KBSize, gfxStandard64KBSize);
+
+    return true;
 }
 
 } // namespace NEO
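With "return true" at the tail, every path through init now yields a definite success or failure verdict. A hypothetical call site (illustrative only; GfxPartition is stubbed here so the snippet stands alone, and the real callers live in MemoryManager):

    #include <cstddef>
    #include <cstdint>

    // Stand-in with the same four-argument init signature the header
    // below declares.
    struct GfxPartitionStub {
        bool init(std::uint64_t, std::size_t, std::uint32_t, std::size_t) { return true; }
    };

    // Hypothetical call site: propagate the failure instead of aborting.
    bool setupPartition(GfxPartitionStub &partition, std::uint64_t gpuAddressSpace) {
        return partition.init(gpuAddressSpace, 0u, 0u, 1u);
    }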
@@ -37,10 +37,10 @@ class GfxPartition {
     GfxPartition(OSMemory::ReservedCpuAddressRange &sharedReservedCpuAddressRange);
     MOCKABLE_VIRTUAL ~GfxPartition();
 
-    void init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToReserve, uint32_t rootDeviceIndex, size_t numRootDevices) {
-        init(gpuAddressSpace, cpuAddressRangeSizeToReserve, rootDeviceIndex, numRootDevices, false);
+    bool init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToReserve, uint32_t rootDeviceIndex, size_t numRootDevices) {
+        return init(gpuAddressSpace, cpuAddressRangeSizeToReserve, rootDeviceIndex, numRootDevices, false);
     }
-    void init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToReserve, uint32_t rootDeviceIndex, size_t numRootDevices, bool useFrontWindowPool);
+    MOCKABLE_VIRTUAL bool init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToReserve, uint32_t rootDeviceIndex, size_t numRootDevices, bool useFrontWindowPool);
 
     void heapInit(HeapIndex heapIndex, uint64_t base, uint64_t size) {
         getHeap(heapIndex).init(base, size);
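Besides the bool return, the five-argument overload gains MOCKABLE_VIRTUAL, which lets unit tests substitute a failing init. A sketch of that idiom, assuming the macro expands to virtual only in test builds (the exact NEO definition may differ):

    // Assumption: test builds define TESTS_BUILD and get an overridable
    // method; release builds pay no vtable cost.
    #ifdef TESTS_BUILD
    #define MOCKABLE_VIRTUAL_SKETCH virtual
    #else
    #define MOCKABLE_VIRTUAL_SKETCH
    #endif

    struct PartitionSketch {
        // Non-virtual in release builds, overridable by mocks in test builds.
        MOCKABLE_VIRTUAL_SKETCH bool init() { return true; }
    };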
@@ -91,7 +91,7 @@ class GfxPartition {
     static const std::array<HeapIndex, 7> heapNonSvmNames;
 
   protected:
-    void initAdditionalRange(uint64_t gpuAddressSpace, uint64_t &gfxBase, uint64_t &gfxTop, uint32_t rootDeviceIndex, size_t numRootDevices);
+    bool initAdditionalRange(uint64_t gpuAddressSpace, uint64_t &gfxBase, uint64_t &gfxTop, uint32_t rootDeviceIndex, size_t numRootDevices);
 
     class Heap {
       public:
@@ -9,8 +9,8 @@
 
 namespace NEO {
 
-void GfxPartition::initAdditionalRange(uint64_t gpuAddressSpace, uint64_t &gfxBase, uint64_t &gfxTop, uint32_t rootDeviceIndex, size_t numRootDevices) {
-    UNRECOVERABLE_IF("Invalid GPU Address Range!");
+bool GfxPartition::initAdditionalRange(uint64_t gpuAddressSpace, uint64_t &gfxBase, uint64_t &gfxTop, uint32_t rootDeviceIndex, size_t numRootDevices) {
+    return false;
 }
 
 } // namespace NEO
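This translation unit is the fallback for targets without an extended GPU address range: where it previously fired UNRECOVERABLE_IF("Invalid GPU Address Range!"), it now simply reports failure, and builds that do support extra ranges are assumed to link a different definition. A sketch of the fallback, mirroring the diff:

    #include <cstdint>

    // Fallback definition: no extended range exists on this target.
    bool initAdditionalRangeSketch(std::uint64_t &gfxBase, std::uint64_t &gfxTop) {
        (void)gfxBase; // left untouched: there is no extra range to carve out
        (void)gfxTop;
        return false;
    }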
@@ -61,6 +61,7 @@ class MemoryManager {
     };
 
     MemoryManager(ExecutionEnvironment &executionEnvironment);
+    bool isInitialized() const { return initialized; }
 
     virtual ~MemoryManager();
     MOCKABLE_VIRTUAL void *allocateSystemMemory(size_t size, size_t alignment);
@@ -216,6 +217,7 @@ class MemoryManager {
     virtual void freeAssociatedResourceImpl(GraphicsAllocation &graphicsAllocation) { return unlockResourceImpl(graphicsAllocation); };
     virtual void registerAllocation(GraphicsAllocation *allocation) {}
 
+    bool initialized = false;
     bool forceNonSvmForExternalHostPtr = false;
     bool force32bitAllocations = false;
     bool virtualPaddingAvailable = false;
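The new initialized flag plus the isInitialized() accessor is what lets the MemoryManager constructor drop UNRECOVERABLE_IF: instead of aborting mid-construction, the constructor records the outcome and the creation path checks it afterwards. A simplified sketch of that two-phase pattern (the real constructor takes an ExecutionEnvironment, as the header above shows):

    class MemoryManagerSketch {
      public:
        explicit MemoryManagerSketch(bool partitionInitOk) : initialized(partitionInitOk) {
            // Record the outcome instead of asserting on failure.
        }
        bool isInitialized() const { return initialized; }

      protected:
        bool initialized = false;
    };

A factory can then treat !isInitialized() as a failed device creation rather than letting the constructor abort the process.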