mirror of
https://github.com/intel/compute-runtime.git
synced 2026-01-03 06:49:52 +08:00
feature: Support for pStart
Related-To: NEO-15156, GSD-9939. Adds support for a start-address hint in zeVirtualMemReserve. If the allocator fails to reserve at pStart, it falls back to the baseline allocateWithCustomAlignment(...). Signed-off-by: Chandio, Bibrak Qamar <bibrak.qamar.chandio@intel.com>
This commit is contained in:
committed by
Compute-Runtime-Automation
parent
841267ecbd
commit
a50c0dbecf
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2019-2024 Intel Corporation
|
||||
* Copyright (C) 2019-2025 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
@@ -18,6 +18,62 @@ bool operator<(const HeapChunk &hc1, const HeapChunk &hc2) {
|
||||
return hc1.ptr < hc2.ptr;
|
||||
}
|
||||
|
||||
uint64_t HeapAllocator::allocateWithCustomAlignmentWithStartAddressHint(const uint64_t requiredStartAddress, size_t &sizeToAllocate, size_t alignment) {
|
||||
|
||||
if (alignment < this->allocationAlignment) {
|
||||
alignment = this->allocationAlignment;
|
||||
}
|
||||
|
||||
UNRECOVERABLE_IF(alignment % allocationAlignment != 0); // custom alignment have to be a multiple of allocator alignment
|
||||
sizeToAllocate = alignUp(sizeToAllocate, allocationAlignment);
|
||||
|
||||
uint64_t ptrReturn = 0llu;
|
||||
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(mtx);
|
||||
DBG_LOG(LogAllocationMemoryPool, __FUNCTION__, "Allocator usage == ", this->getUsage());
|
||||
if (availableSize < sizeToAllocate) {
|
||||
return 0llu;
|
||||
}
|
||||
|
||||
if (requiredStartAddress >= pLeftBound && requiredStartAddress <= pRightBound) {
|
||||
|
||||
const uint64_t misalignment = requiredStartAddress - pLeftBound;
|
||||
if (pLeftBound + misalignment + sizeToAllocate <= pRightBound) {
|
||||
if (misalignment) {
|
||||
storeInFreedChunks(pLeftBound, static_cast<size_t>(misalignment), freedChunksBig);
|
||||
pLeftBound += misalignment;
|
||||
}
|
||||
ptrReturn = pLeftBound;
|
||||
pLeftBound += sizeToAllocate;
|
||||
availableSize -= sizeToAllocate;
|
||||
}
|
||||
} else { // Try to find in freed chunks
|
||||
|
||||
defragment();
|
||||
|
||||
if (requiredStartAddress < this->pLeftBound) {
|
||||
// If between baseAddress and pLeftBound, get from freedChunksBig
|
||||
ptrReturn = getFromFreedChunksWithStartAddressHint(requiredStartAddress, sizeToAllocate, freedChunksBig);
|
||||
} else {
|
||||
// If between pRightBound and heapLimit, get from freedChunksSmall
|
||||
ptrReturn = getFromFreedChunksWithStartAddressHint(requiredStartAddress, sizeToAllocate, freedChunksSmall);
|
||||
}
|
||||
|
||||
if (ptrReturn != 0llu) {
|
||||
availableSize -= sizeToAllocate;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ptrReturn == 0llu) {
|
||||
return allocateWithCustomAlignment(sizeToAllocate, alignment);
|
||||
}
|
||||
|
||||
UNRECOVERABLE_IF(!isAligned(ptrReturn, alignment));
|
||||
return ptrReturn;
|
||||
}
|
||||
|
||||
uint64_t HeapAllocator::allocateWithCustomAlignment(size_t &sizeToAllocate, size_t alignment) {
|
||||
if (alignment < this->allocationAlignment) {
|
||||
alignment = this->allocationAlignment;
|
||||
@@ -73,7 +129,7 @@ uint64_t HeapAllocator::allocateWithCustomAlignment(size_t &sizeToAllocate, size
|
||||
} else {
|
||||
availableSize -= sizeToAllocate;
|
||||
}
|
||||
DEBUG_BREAK_IF(!isAligned(ptrReturn, alignment));
|
||||
UNRECOVERABLE_IF(!isAligned(ptrReturn, alignment));
|
||||
return ptrReturn;
|
||||
}
|
||||
|
||||
@@ -115,6 +171,43 @@ double HeapAllocator::getUsage() const {
|
||||
return static_cast<double>(size - availableSize) / size;
|
||||
}
|
||||
|
||||
uint64_t HeapAllocator::getFromFreedChunksWithStartAddressHint(const uint64_t requiredStartAddress, size_t size, std::vector<HeapChunk> &freedChunks) {
|
||||
|
||||
for (size_t i = 0; i < freedChunks.size(); i++) {
|
||||
uint64_t chunkStart = freedChunks[i].ptr;
|
||||
uint64_t chunkEnd = chunkStart + freedChunks[i].size;
|
||||
|
||||
if (requiredStartAddress >= chunkStart && requiredStartAddress + size <= chunkEnd) {
|
||||
size_t leadingSize = static_cast<size_t>(requiredStartAddress - chunkStart);
|
||||
size_t trailingSize = static_cast<size_t>(chunkEnd - (requiredStartAddress + size));
|
||||
|
||||
// Chunk splitting
|
||||
if (leadingSize > 0) {
|
||||
|
||||
freedChunks[i].size = leadingSize;
|
||||
|
||||
if (trailingSize > 0) {
|
||||
freedChunks.emplace_back(requiredStartAddress + size, trailingSize);
|
||||
}
|
||||
} else {
|
||||
|
||||
if (trailingSize > 0) {
|
||||
|
||||
freedChunks[i].ptr = requiredStartAddress + size;
|
||||
freedChunks[i].size = trailingSize;
|
||||
} else {
|
||||
|
||||
freedChunks.erase(freedChunks.begin() + i);
|
||||
}
|
||||
}
|
||||
|
||||
return requiredStartAddress;
|
||||
}
|
||||
}
|
||||
|
||||
return 0llu;
|
||||
}
|
||||
|
||||
uint64_t HeapAllocator::getFromFreedChunks(size_t size, std::vector<HeapChunk> &freedChunks, size_t &sizeOfFreedChunk, size_t requiredAlignment) {
|
||||
size_t elements = freedChunks.size();
|
||||
size_t bestFitIndex = -1;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2018-2024 Intel Corporation
|
||||
* Copyright (C) 2018-2025 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
@@ -44,6 +44,11 @@ class HeapAllocator {
|
||||
return allocateWithCustomAlignment(sizeToAllocate, 0u);
|
||||
}
|
||||
|
||||
uint64_t allocateWithStartAddressHint(const uint64_t requiredStartAddress, size_t &sizeToAllocate) {
|
||||
return allocateWithCustomAlignmentWithStartAddressHint(requiredStartAddress, sizeToAllocate, 0u);
|
||||
}
|
||||
|
||||
uint64_t allocateWithCustomAlignmentWithStartAddressHint(const uint64_t requiredStartAddress, size_t &sizeToAllocate, size_t alignment);
|
||||
uint64_t allocateWithCustomAlignment(size_t &sizeToAllocate, size_t alignment);
|
||||
|
||||
MOCKABLE_VIRTUAL void free(uint64_t ptr, size_t size);
|
||||
@@ -62,6 +67,10 @@ class HeapAllocator {
|
||||
return this->baseAddress;
|
||||
}
|
||||
|
||||
size_t getAllocationAlignment() const {
|
||||
return this->allocationAlignment;
|
||||
}
|
||||
|
||||
protected:
|
||||
const uint64_t baseAddress;
|
||||
const uint64_t size;
|
||||
@@ -76,6 +85,7 @@ class HeapAllocator {
|
||||
std::mutex mtx;
|
||||
|
||||
uint64_t getFromFreedChunks(size_t size, std::vector<HeapChunk> &freedChunks, size_t &sizeOfFreedChunk, size_t requiredAlignment);
|
||||
MOCKABLE_VIRTUAL uint64_t getFromFreedChunksWithStartAddressHint(const uint64_t requiredStartAddress, size_t size, std::vector<HeapChunk> &freedChunks);
|
||||
|
||||
void storeInFreedChunks(uint64_t ptr, size_t size, std::vector<HeapChunk> &freedChunks) {
|
||||
for (auto &freedChunk : freedChunks) {
|
||||
|
||||
Reference in New Issue
Block a user