Revert "feature: in-order host counter allocation pooling"

This reverts commit 2f03c48c7a.

Signed-off-by: Compute-Runtime-Validation <compute-runtime-validation@intel.com>
Authored by Compute-Runtime-Validation on 2024-03-09 19:18:46 +01:00, committed by Compute-Runtime-Automation
commit 4082e9f028, parent 0d00c8bab8
12 changed files with 70 additions and 139 deletions

@@ -21,34 +21,26 @@ NEO::DebuggerL0 *Device::getL0Debugger() {
     return getNEODevice()->getL0Debugger();
 }
 
-template <typename NodeT>
-NEO::TagAllocatorBase *getInOrderCounterAllocator(std::unique_ptr<NEO::TagAllocatorBase> &allocator, std::mutex &inOrderAllocatorMutex, NEO::Device &neoDevice) {
-    if (!allocator.get()) {
+NEO::TagAllocatorBase *Device::getDeviceInOrderCounterAllocator() {
+    if (!deviceInOrderCounterAllocator.get()) {
         std::unique_lock<std::mutex> lock(inOrderAllocatorMutex);
 
-        if (!allocator.get()) {
-            RootDeviceIndicesContainer rootDeviceIndices = {neoDevice.getRootDeviceIndex()};
+        if (!deviceInOrderCounterAllocator.get()) {
+            using NodeT = typename NEO::DeviceAllocNodeType<true>;
+            RootDeviceIndicesContainer rootDeviceIndices = {getRootDeviceIndex()};
 
-            const size_t maxPartitionCount = neoDevice.getDeviceBitfield().count();
+            const size_t maxPartitionCount = getNEODevice()->getDeviceBitfield().count();
             const size_t nodeSize = sizeof(uint64_t) * maxPartitionCount * 2; // Multiplied by 2 to handle 32b overflow
 
             DEBUG_BREAK_IF(alignUp(nodeSize, MemoryConstants::cacheLineSize) * NodeT::defaultAllocatorTagCount > MemoryConstants::pageSize64k);
 
-            allocator = std::make_unique<NEO::TagAllocator<NodeT>>(rootDeviceIndices, neoDevice.getMemoryManager(), NodeT::defaultAllocatorTagCount,
-                                                                   MemoryConstants::cacheLineSize, nodeSize, false, neoDevice.getDeviceBitfield());
+            deviceInOrderCounterAllocator = std::make_unique<NEO::TagAllocator<NodeT>>(rootDeviceIndices, neoDevice->getMemoryManager(), NodeT::defaultAllocatorTagCount,
+                                                                                       MemoryConstants::cacheLineSize, nodeSize, false, neoDevice->getDeviceBitfield());
         }
     }
 
-    return allocator.get();
-}
-
-NEO::TagAllocatorBase *Device::getDeviceInOrderCounterAllocator() {
-    return getInOrderCounterAllocator<NEO::DeviceAllocNodeType<true>>(deviceInOrderCounterAllocator, inOrderAllocatorMutex, *getNEODevice());
-}
-
-NEO::TagAllocatorBase *Device::getHostInOrderCounterAllocator() {
-    return getInOrderCounterAllocator<NEO::DeviceAllocNodeType<false>>(hostInOrderCounterAllocator, inOrderAllocatorMutex, *getNEODevice());
+    return deviceInOrderCounterAllocator.get();
 }
 
 } // namespace L0
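
For context on the sizing rule restored above, here is a minimal standalone C++ sketch (not driver code; the cache-line size, tag count, and partition count below are illustrative assumptions, not the driver's definitions): each in-order counter node carries one 64-bit value per partition, doubled to absorb 32-bit counter overflow, and the DEBUG_BREAK_IF guard checks that a full pool of cache-line-aligned nodes still fits in one 64 KB page.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for MemoryConstants::cacheLineSize / pageSize64k and
// NodeT::defaultAllocatorTagCount (assumed values, not the driver's definitions).
constexpr std::size_t cacheLineSize = 64;
constexpr std::size_t pageSize64k = 64 * 1024;
constexpr std::size_t defaultAllocatorTagCount = 128;

// Round value up to the next multiple of a power-of-two alignment.
constexpr std::size_t alignUp(std::size_t value, std::size_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

// One 64-bit counter per partition, times two to handle 32-bit overflow.
constexpr std::size_t inOrderNodeSize(std::size_t maxPartitionCount) {
    return sizeof(std::uint64_t) * maxPartitionCount * 2;
}

int main() {
    const std::size_t maxPartitionCount = 4; // e.g. a 4-tile device bitfield
    const std::size_t nodeSize = inOrderNodeSize(maxPartitionCount);
    const std::size_t poolSize = alignUp(nodeSize, cacheLineSize) * defaultAllocatorTagCount;

    // Mirrors the DEBUG_BREAK_IF condition: the tag pool must not exceed one 64 KB page.
    assert(poolSize <= pageSize64k);
    std::printf("nodeSize=%zu bytes, pooled=%zu bytes\n", nodeSize, poolSize);
    return 0;
}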

@@ -150,12 +150,10 @@ struct Device : _ze_device_handle_t {
     virtual uint32_t getEventMaxPacketCount() const = 0;
     virtual uint32_t getEventMaxKernelCount() const = 0;
     NEO::TagAllocatorBase *getDeviceInOrderCounterAllocator();
-    NEO::TagAllocatorBase *getHostInOrderCounterAllocator();
 
   protected:
     NEO::Device *neoDevice = nullptr;
     std::unique_ptr<NEO::TagAllocatorBase> deviceInOrderCounterAllocator;
-    std::unique_ptr<NEO::TagAllocatorBase> hostInOrderCounterAllocator;
     std::mutex inOrderAllocatorMutex;
     bool implicitScalingCapable = false;
 };

@@ -1467,7 +1467,6 @@ void DeviceImp::releaseResources() {
     builtins.reset();
     cacheReservation.reset();
     deviceInOrderCounterAllocator.reset();
-    hostInOrderCounterAllocator.reset();
 
     if (allocationsForReuse.get()) {
         allocationsForReuse->freeAllGraphicsAllocations(neoDevice);