mirror of
https://github.com/intel/compute-runtime.git
synced 2025-12-26 23:33:20 +08:00
feature: Support for pStart
Related-To: NEO-15156, GSD-9939 Support for start address hint in zeVirtualMemReserve. If it fails to find pStart, it defaults to the baseline allocateWithCustomAlignment(...) Signed-off-by: Chandio, Bibrak Qamar <bibrak.qamar.chandio@intel.com>
This commit is contained in:
committed by
Compute-Runtime-Automation
parent
01a4769141
commit
47caeda487
@@ -270,6 +270,12 @@ uint64_t DrmMemoryManager::acquireGpuRangeWithCustomAlignment(size_t &size, uint
|
||||
return gmmHelper->canonize(gfxPartition->heapAllocateWithCustomAlignment(heapIndex, size, alignment));
|
||||
}
|
||||
|
||||
uint64_t DrmMemoryManager::acquireGpuRangeWithCustomAlignmentWithStartAddressHint(const uint64_t requiredStartAddress, size_t &size, uint32_t rootDeviceIndex, HeapIndex heapIndex, size_t alignment) {
|
||||
auto gfxPartition = getGfxPartition(rootDeviceIndex);
|
||||
auto gmmHelper = getGmmHelper(rootDeviceIndex);
|
||||
return gmmHelper->canonize(gfxPartition->heapAllocateWithCustomAlignmentWithStartAddressHint(gmmHelper->decanonize(requiredStartAddress), heapIndex, size, alignment));
|
||||
}
|
||||
|
||||
void DrmMemoryManager::releaseGpuRange(void *address, size_t unmapSize, uint32_t rootDeviceIndex) {
|
||||
uint64_t graphicsAddress = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(address));
|
||||
auto gmmHelper = getGmmHelper(rootDeviceIndex);
|
||||
@@ -1719,46 +1725,62 @@ uint32_t DrmMemoryManager::getRootDeviceIndex(const Drm *drm) {
|
||||
}
|
||||
|
||||
size_t DrmMemoryManager::selectAlignmentAndHeap(size_t size, HeapIndex *heap) {
|
||||
AlignmentSelector::CandidateAlignment alignmentBase = alignmentSelector.selectAlignment(size);
|
||||
size_t pageSizeAlignment = alignmentBase.alignment;
|
||||
auto rootDeviceCount = this->executionEnvironment.rootDeviceEnvironments.size();
|
||||
return selectAlignmentAndHeap(0ULL, size, heap);
|
||||
}
|
||||
|
||||
// If all devices can support HEAP EXTENDED, then that heap is used, otherwise the HEAP based on the size is used.
|
||||
size_t DrmMemoryManager::selectAlignmentAndHeap(const uint64_t requiredStartAddress, size_t size, HeapIndex *heap) {
|
||||
|
||||
// Always default to HEAP STANDARD 2MB.
|
||||
*heap = HeapIndex::heapStandard2MB;
|
||||
size_t pageSizeAlignment = MemoryConstants::pageSize2M;
|
||||
|
||||
// If the user provides a start address, we try to find the heap and page size alignment based on that address.
|
||||
if (requiredStartAddress != 0ULL) {
|
||||
auto rootDeviceIndex = 0u;
|
||||
auto gfxPartition = getGfxPartition(rootDeviceIndex);
|
||||
if (gfxPartition->getHeapIndexAndPageSizeBasedOnAddress(requiredStartAddress, *heap, pageSizeAlignment)) {
|
||||
return pageSizeAlignment;
|
||||
}
|
||||
}
|
||||
|
||||
// If all devices can support HEAP EXTENDED, then that heap is used.
|
||||
bool useExtendedHeap = true;
|
||||
auto rootDeviceCount = this->executionEnvironment.rootDeviceEnvironments.size();
|
||||
for (auto rootDeviceIndex = 0u; rootDeviceIndex < rootDeviceCount; rootDeviceIndex++) {
|
||||
auto gfxPartition = getGfxPartition(rootDeviceIndex);
|
||||
if (gfxPartition->getHeapLimit(HeapIndex::heapExtended) > 0) {
|
||||
auto alignSize = size >= 8 * MemoryConstants::gigaByte && Math::isPow2(size);
|
||||
if (debugManager.flags.UseHighAlignmentForHeapExtended.get() != -1) {
|
||||
alignSize = !!debugManager.flags.UseHighAlignmentForHeapExtended.get();
|
||||
}
|
||||
|
||||
if (alignSize) {
|
||||
pageSizeAlignment = Math::prevPowerOfTwo(size);
|
||||
}
|
||||
|
||||
*heap = HeapIndex::heapExtended;
|
||||
} else {
|
||||
pageSizeAlignment = alignmentBase.alignment;
|
||||
*heap = alignmentBase.heap;
|
||||
if (!(gfxPartition->getHeapLimit(HeapIndex::heapExtended) > 0)) {
|
||||
useExtendedHeap = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (useExtendedHeap) {
|
||||
auto alignSize = size >= 8 * MemoryConstants::gigaByte && Math::isPow2(size);
|
||||
if (debugManager.flags.UseHighAlignmentForHeapExtended.get() != -1) {
|
||||
alignSize = !!debugManager.flags.UseHighAlignmentForHeapExtended.get();
|
||||
}
|
||||
|
||||
if (alignSize) {
|
||||
pageSizeAlignment = Math::prevPowerOfTwo(size);
|
||||
}
|
||||
|
||||
*heap = HeapIndex::heapExtended;
|
||||
}
|
||||
|
||||
return pageSizeAlignment;
|
||||
}
|
||||
|
||||
// Reserves a GPU VA range on the default heap. The merged diff left two consecutive
// return statements (the stale 64KB-standard line above the new one); only the
// 2MB-standard variant is kept, matching the default used by selectAlignmentAndHeap().
AddressRange DrmMemoryManager::reserveGpuAddress(const uint64_t requiredStartAddress, size_t size, const RootDeviceIndicesContainer &rootDeviceIndices, uint32_t *reservedOnRootDeviceIndex) {
    return reserveGpuAddressOnHeap(requiredStartAddress, size, rootDeviceIndices, reservedOnRootDeviceIndex, HeapIndex::heapStandard2MB, MemoryConstants::pageSize2M);
}
|
||||
|
||||
AddressRange DrmMemoryManager::reserveGpuAddressOnHeap(const uint64_t requiredStartAddress, size_t size, const RootDeviceIndicesContainer &rootDeviceIndices, uint32_t *reservedOnRootDeviceIndex, HeapIndex heap, size_t alignment) {
|
||||
uint64_t gpuVa = 0u;
|
||||
*reservedOnRootDeviceIndex = 0;
|
||||
|
||||
for (auto rootDeviceIndex : rootDeviceIndices) {
|
||||
if (heap == HeapIndex::heapExtended) {
|
||||
gpuVa = acquireGpuRangeWithCustomAlignment(size, rootDeviceIndex, heap, alignment);
|
||||
} else {
|
||||
gpuVa = acquireGpuRange(size, rootDeviceIndex, heap);
|
||||
}
|
||||
|
||||
gpuVa = requiredStartAddress == 0 ? acquireGpuRangeWithCustomAlignment(size, rootDeviceIndex, heap, alignment) : acquireGpuRangeWithCustomAlignmentWithStartAddressHint(requiredStartAddress, size, rootDeviceIndex, heap, alignment);
|
||||
if (gpuVa != 0u) {
|
||||
*reservedOnRootDeviceIndex = rootDeviceIndex;
|
||||
break;
|
||||
|
||||
Reference in New Issue
Block a user