compute-runtime/runtime/memory_manager/deferrable_allocation_deletion.cpp
Mateusz Jablonski b5d9ed77a6 Correct destruction logic of shared allocations
Wait for all OS contexts that used the allocation.
When an OS context is not yet ready, flush the related command stream receiver.

Change-Id: I5fb2c16c1d398c59fbd02e32ebbbb9254583244e
Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
2019-01-08 14:32:21 +01:00


/*
 * Copyright (C) 2018-2019 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */
#include "runtime/command_stream/command_stream_receiver.h"
#include "runtime/memory_manager/deferrable_allocation_deletion.h"
#include "runtime/memory_manager/memory_manager.h"
#include "runtime/os_interface/os_context.h"
namespace OCLRT {
DeferrableAllocationDeletion::DeferrableAllocationDeletion(MemoryManager &memoryManager, GraphicsAllocation &graphicsAllocation)
    : memoryManager(memoryManager), graphicsAllocation(graphicsAllocation) {}

bool DeferrableAllocationDeletion::apply() {
    if (graphicsAllocation.isUsed()) {
        // Walk every command stream receiver of every device and release the
        // allocation's usage in each OS context whose GPU work has completed.
        for (auto &deviceCsrs : memoryManager.getCommandStreamReceivers()) {
            for (auto &csr : deviceCsrs) {
                auto contextId = csr->getOsContext().getContextId();
                if (graphicsAllocation.isUsedByOsContext(contextId)) {
                    // The tag address holds the task count most recently
                    // completed by the GPU for this context.
                    auto currentContextTaskCount = *csr->getTagAddress();
                    if (graphicsAllocation.getTaskCount(contextId) <= currentContextTaskCount) {
                        // All work that used the allocation in this context
                        // has finished; drop the context's reference.
                        graphicsAllocation.releaseUsageInOsContext(contextId);
                    } else {
                        // The context is not ready yet; flush batched
                        // submissions so its task count can advance.
                        csr->flushBatchedSubmissions();
                    }
                }
            }
        }
        // Still referenced by at least one context; retry on a later pass.
        if (graphicsAllocation.isUsed()) {
            return false;
        }
    }
    // No OS context uses the allocation anymore; it is safe to free.
    memoryManager.freeGraphicsMemory(&graphicsAllocation);
    return true;
}
} // namespace OCLRT
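
Below is a minimal sketch of how this deletion object might be driven. In the codebase, the memory manager hands it to a background deferred deleter; the drainDeletion helper and its inline retry loop here are illustrative assumptions, not the actual NEO API.

#include <thread>

#include "runtime/memory_manager/deferrable_allocation_deletion.h"
#include "runtime/memory_manager/memory_manager.h"

// Hypothetical helper: retry apply() until every OS context has released
// the allocation and it has been freed. A real deferred deleter would run
// this loop on a worker thread rather than spinning inline.
void drainDeletion(OCLRT::MemoryManager &memoryManager,
                   OCLRT::GraphicsAllocation &allocation) {
    OCLRT::DeferrableAllocationDeletion deletion(memoryManager, allocation);
    while (!deletion.apply()) {
        // apply() returned false: some context still uses the allocation.
        // flushBatchedSubmissions() was already issued for the lagging
        // CSRs, so yield briefly and try again.
        std::this_thread::yield();
    }
}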