2017-12-21 07:45:38 +08:00
|
|
|
/*
 * Copyright (C) 2017-2018 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */
|
|
|
|
|
2018-01-22 23:43:26 +08:00
|
|
|
#include "runtime/memory_manager/memory_manager.h"
|
2018-07-23 01:27:33 +08:00
|
|
|
#include "runtime/command_stream/command_stream_receiver.h"
|
2017-12-21 07:45:38 +08:00
|
|
|
#include "runtime/event/event.h"
|
2018-07-23 01:27:33 +08:00
|
|
|
#include "runtime/event/hw_timestamps.h"
|
|
|
|
#include "runtime/event/perf_counter.h"
|
|
|
|
#include "runtime/gmm_helper/gmm.h"
|
|
|
|
#include "runtime/gmm_helper/resource_info.h"
|
2017-12-21 07:45:38 +08:00
|
|
|
#include "runtime/helpers/aligned_memory.h"
|
|
|
|
#include "runtime/helpers/basic_math.h"
|
2018-09-26 06:44:43 +08:00
|
|
|
#include "runtime/helpers/kernel_commands.h"
|
2017-12-21 07:45:38 +08:00
|
|
|
#include "runtime/helpers/options.h"
|
2018-08-24 14:48:59 +08:00
|
|
|
#include "runtime/helpers/timestamp_packet.h"
|
2018-07-23 01:27:33 +08:00
|
|
|
#include "runtime/memory_manager/deferred_deleter.h"
|
2018-09-06 15:01:13 +08:00
|
|
|
#include "runtime/os_interface/os_context.h"
|
2017-12-21 07:45:38 +08:00
|
|
|
#include "runtime/utilities/stackvec.h"
|
|
|
|
#include "runtime/utilities/tag_allocator.h"
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
|
|
|
|
namespace OCLRT {
|
2018-08-24 14:48:59 +08:00
|
|
|
// Number of tags preallocated by each lazily-created TagAllocator below
// (HW timestamps, perf counters, timestamp packets).
constexpr size_t TagCount = 512;
|
2017-12-21 07:45:38 +08:00
|
|
|
|
|
|
|
// Criteria used by AllocationsList::detachAllocation when scanning the reuse
// list for a matching allocation.
struct ReusableAllocationRequirements {
    size_t requiredMinimalSize;       // candidate's underlying buffer must be at least this big
    volatile uint32_t *csrTagAddress; // current HW tag; nullptr disables the task-count check
    bool internalAllocationRequired;  // must equal the candidate's is32BitAllocation flag
};
|
|
|
|
|
2018-04-17 00:01:38 +08:00
|
|
|
std::unique_ptr<GraphicsAllocation> AllocationsList::detachAllocation(size_t requiredMinimalSize, volatile uint32_t *csrTagAddress, bool internalAllocationRequired) {
|
2017-12-21 07:45:38 +08:00
|
|
|
ReusableAllocationRequirements req;
|
|
|
|
req.requiredMinimalSize = requiredMinimalSize;
|
|
|
|
req.csrTagAddress = csrTagAddress;
|
2018-04-17 00:01:38 +08:00
|
|
|
req.internalAllocationRequired = internalAllocationRequired;
|
2017-12-21 07:45:38 +08:00
|
|
|
GraphicsAllocation *a = nullptr;
|
|
|
|
GraphicsAllocation *retAlloc = processLocked<AllocationsList, &AllocationsList::detachAllocationImpl>(a, static_cast<void *>(&req));
|
|
|
|
return std::unique_ptr<GraphicsAllocation>(retAlloc);
|
|
|
|
}
|
|
|
|
|
|
|
|
// List walker invoked under the list lock by detachAllocation(). |data| is a
// ReusableAllocationRequirements. Returns the first matching node detached
// from the list, or nullptr when none qualifies.
GraphicsAllocation *AllocationsList::detachAllocationImpl(GraphicsAllocation *, void *data) {
    ReusableAllocationRequirements *req = static_cast<ReusableAllocationRequirements *>(data);
    auto *curr = head;
    while (curr != nullptr) {
        // With no tag address to read, -1 converts to the maximum unsigned
        // value, which makes the task-count condition below always pass.
        auto currentTagValue = req->csrTagAddress ? *req->csrTagAddress : -1;
        // A node is reusable when its 32-bit-ness matches the request, its
        // buffer is large enough, and the tag has moved past its taskCount
        // (GPU done with it) or it was never submitted (taskCount == 0).
        if ((req->internalAllocationRequired == curr->is32BitAllocation) &&
            (curr->getUnderlyingBufferSize() >= req->requiredMinimalSize) &&
            ((currentTagValue > curr->taskCount) || (curr->taskCount == 0))) {
            return removeOneImpl(curr, nullptr);
        }
        curr = curr->next;
    }
    return nullptr;
}
|
2018-09-06 16:53:35 +08:00
|
|
|
|
2018-10-01 22:10:54 +08:00
|
|
|
// Stores configuration only; allocators and tag pools are created lazily by
// the respective getters. (Also drops the superfluous ';' that followed the
// original function definition.)
MemoryManager::MemoryManager(bool enable64kbpages, bool enableLocalMemory,
                             ExecutionEnvironment &executionEnvironment) : allocator32Bit(nullptr), enable64kbpages(enable64kbpages),
                                                                           localMemorySupported(enableLocalMemory),
                                                                           executionEnvironment(executionEnvironment) {}
|
2018-09-06 16:53:35 +08:00
|
|
|
|
2017-12-21 07:45:38 +08:00
|
|
|
// Frees everything still parked on the temporary and reuse lists
// (waitTaskCount of -1 wraps to the maximum, so every node qualifies) and
// drops the internal references held on registered OS contexts.
MemoryManager::~MemoryManager() {
    freeAllocationsList(-1, graphicsAllocations);
    freeAllocationsList(-1, allocationsForReuse);
    for (auto *context : registeredOsContexts) {
        context->decRefInternal();
    }
}
|
|
|
|
|
|
|
|
void *MemoryManager::allocateSystemMemory(size_t size, size_t alignment) {
|
|
|
|
// Establish a minimum alignment of 16bytes.
|
2018-01-22 23:43:26 +08:00
|
|
|
constexpr size_t minAlignment = 16;
|
|
|
|
alignment = std::max(alignment, minAlignment);
|
|
|
|
auto restrictions = getAlignedMallocRestrictions();
|
|
|
|
void *ptr = nullptr;
|
|
|
|
|
|
|
|
ptr = alignedMallocWrapper(size, alignment);
|
|
|
|
if (restrictions == nullptr) {
|
|
|
|
return ptr;
|
|
|
|
} else if (restrictions->minAddress == 0) {
|
|
|
|
return ptr;
|
|
|
|
} else {
|
|
|
|
if (restrictions->minAddress > reinterpret_cast<uintptr_t>(ptr) && ptr != nullptr) {
|
|
|
|
StackVec<void *, 100> invalidMemVector;
|
|
|
|
invalidMemVector.push_back(ptr);
|
|
|
|
do {
|
|
|
|
ptr = alignedMallocWrapper(size, alignment);
|
|
|
|
if (restrictions->minAddress > reinterpret_cast<uintptr_t>(ptr) && ptr != nullptr) {
|
|
|
|
invalidMemVector.push_back(ptr);
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} while (1);
|
|
|
|
for (auto &it : invalidMemVector) {
|
|
|
|
alignedFreeWrapper(it);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ptr;
|
2017-12-21 07:45:38 +08:00
|
|
|
}
|
|
|
|
|
2017-12-28 18:25:43 +08:00
|
|
|
// Allocates the backing store for an SVM buffer: 64KB-paged when the platform
// has 64KB pages enabled, a regular allocation otherwise. The coherency hint
// is applied to the result when allocation succeeds; may return nullptr.
GraphicsAllocation *MemoryManager::allocateGraphicsMemoryForSVM(size_t size, bool coherent) {
    GraphicsAllocation *allocation = peek64kbPagesEnabled()
                                         ? allocateGraphicsMemory64kb(size, MemoryConstants::pageSize64k, false, false)
                                         : allocateGraphicsMemory(size);
    if (allocation != nullptr) {
        allocation->setCoherent(coherent);
    }
    return allocation;
}
|
|
|
|
|
|
|
|
// Destroys the GMM object attached to the allocation. The pointer is cleared
// after deletion so the allocation never carries a dangling gmm pointer (and
// a repeated call becomes a harmless no-op instead of a double delete).
void MemoryManager::freeGmm(GraphicsAllocation *gfxAllocation) {
    delete gfxAllocation->gmm;
    gfxAllocation->gmm = nullptr;
}
|
|
|
|
|
|
|
|
// Creates a graphics allocation backed by an existing host pointer. The host
// range is split into page-aligned fragments; fragments already known to the
// host-pointer manager are reused, the remainder get fresh OS handles.
// Returns nullptr on failure.
// NOTE(review): |forcePin| is accepted but never read on this path — confirm
// whether pinning is intentionally handled elsewhere.
GraphicsAllocation *MemoryManager::allocateGraphicsMemory(size_t size, const void *ptr, bool forcePin) {
    std::lock_guard<decltype(mtx)> lock(mtx);
    auto requirements = HostPtrManager::getAllocationRequirements(ptr, size);
    GraphicsAllocation *graphicsAllocation = nullptr;

    // Flush pending deferred deletions first so stale fragments cannot
    // collide with the new requirements.
    if (deferredDeleter) {
        deferredDeleter->drain(true);
    }

    //check for overlaping
    CheckedFragments checkedFragments;
    if (checkAllocationsForOverlapping(&requirements, &checkedFragments) == RequirementsStatus::FATAL) {
        //abort whole application instead of silently passing.
        abortExecution();
    }

    auto osStorage = hostPtrManager.populateAlreadyAllocatedFragments(requirements, &checkedFragments);
    if (osStorage.fragmentCount == 0) {
        return nullptr;
    }

    // Create OS handles for fragments that were not already resident; undo on
    // failure so partially created handles do not leak.
    auto result = populateOsHandles(osStorage);
    if (result != AllocationStatus::Success) {
        cleanOsHandles(osStorage);
        return nullptr;
    }

    graphicsAllocation = createGraphicsAllocation(osStorage, size, ptr);
    return graphicsAllocation;
}
|
|
|
|
|
|
|
|
// Tears down an allocation created from a user host pointer: unregisters its
// fragments from the host-pointer manager, then releases their OS handles.
void MemoryManager::cleanGraphicsMemoryCreatedFromHostPtr(GraphicsAllocation *graphicsAllocation) {
    auto &fragments = graphicsAllocation->fragmentsStorage;
    hostPtrManager.releaseHandleStorage(fragments);
    cleanOsHandles(fragments);
}
|
|
|
|
|
|
|
|
// Produces a padded version of |inputGraphicsAllocation| sized
// |sizeWithPadding|. The shared padding buffer is allocated lazily on the
// first call and reused afterwards.
GraphicsAllocation *MemoryManager::createGraphicsAllocationWithPadding(GraphicsAllocation *inputGraphicsAllocation, size_t sizeWithPadding) {
    if (paddingAllocation == nullptr) {
        paddingAllocation = allocateGraphicsMemory(paddingBufferSize);
    }
    return createPaddedAllocation(inputGraphicsAllocation, sizeWithPadding);
}
|
|
|
|
|
|
|
|
// Base implementation: allocates a fresh buffer of the padded size.
// NOTE(review): the input allocation is ignored here — presumably a derived
// memory manager overrides this to copy its contents; confirm.
GraphicsAllocation *MemoryManager::createPaddedAllocation(GraphicsAllocation *inputGraphicsAllocation, size_t sizeWithPadding) {
    return allocateGraphicsMemory(sizeWithPadding);
}
|
|
|
|
|
|
|
|
// Releases host memory previously obtained from allocateSystemMemory().
void MemoryManager::freeSystemMemory(void *ptr) {
    ::alignedFree(ptr);
}
|
|
|
|
|
2018-07-05 22:31:57 +08:00
|
|
|
void MemoryManager::storeAllocation(std::unique_ptr<GraphicsAllocation> gfxAllocation, uint32_t allocationUsage) {
|
2017-12-21 07:45:38 +08:00
|
|
|
std::lock_guard<decltype(mtx)> lock(mtx);
|
|
|
|
|
|
|
|
uint32_t taskCount = gfxAllocation->taskCount;
|
|
|
|
|
2018-07-05 22:31:57 +08:00
|
|
|
if (allocationUsage == REUSABLE_ALLOCATION) {
|
2018-10-01 22:10:54 +08:00
|
|
|
taskCount = getCommandStreamReceiver(0)->peekTaskCount();
|
2017-12-21 07:45:38 +08:00
|
|
|
}
|
|
|
|
|
2018-07-05 22:31:57 +08:00
|
|
|
storeAllocation(std::move(gfxAllocation), allocationUsage, taskCount);
|
2017-12-21 07:45:38 +08:00
|
|
|
}
|
|
|
|
|
2018-07-05 22:31:57 +08:00
|
|
|
void MemoryManager::storeAllocation(std::unique_ptr<GraphicsAllocation> gfxAllocation, uint32_t allocationUsage, uint32_t taskCount) {
|
2017-12-21 07:45:38 +08:00
|
|
|
std::lock_guard<decltype(mtx)> lock(mtx);
|
|
|
|
|
|
|
|
if (DebugManager.flags.DisableResourceRecycling.get()) {
|
2018-07-05 22:31:57 +08:00
|
|
|
if (allocationUsage == REUSABLE_ALLOCATION) {
|
2017-12-21 07:45:38 +08:00
|
|
|
freeGraphicsMemory(gfxAllocation.release());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-05 22:31:57 +08:00
|
|
|
auto &allocationsList = (allocationUsage == TEMPORARY_ALLOCATION) ? graphicsAllocations : allocationsForReuse;
|
2017-12-21 07:45:38 +08:00
|
|
|
gfxAllocation->taskCount = taskCount;
|
|
|
|
allocationsList.pushTailOne(*gfxAllocation.release());
|
|
|
|
}
|
|
|
|
|
2018-04-17 00:01:38 +08:00
|
|
|
std::unique_ptr<GraphicsAllocation> MemoryManager::obtainReusableAllocation(size_t requiredSize, bool internalAllocation) {
|
2017-12-21 07:45:38 +08:00
|
|
|
std::lock_guard<decltype(mtx)> lock(mtx);
|
2018-10-01 22:10:54 +08:00
|
|
|
auto allocation = allocationsForReuse.detachAllocation(requiredSize, getCommandStreamReceiver(0)->getTagAddress(), internalAllocation);
|
2017-12-21 07:45:38 +08:00
|
|
|
return allocation;
|
|
|
|
}
|
|
|
|
|
2018-02-28 16:27:38 +08:00
|
|
|
void MemoryManager::setForce32BitAllocations(bool newValue) {
|
|
|
|
if (newValue && !this->allocator32Bit) {
|
|
|
|
this->allocator32Bit.reset(new Allocator32bit);
|
|
|
|
}
|
|
|
|
force32bitAllocations = newValue;
|
|
|
|
}
|
|
|
|
|
2017-12-21 07:45:38 +08:00
|
|
|
void MemoryManager::applyCommonCleanup() {
|
|
|
|
if (this->paddingAllocation) {
|
|
|
|
this->freeGraphicsMemory(this->paddingAllocation);
|
|
|
|
}
|
2018-08-24 14:48:59 +08:00
|
|
|
if (profilingTimeStampAllocator) {
|
2017-12-21 07:45:38 +08:00
|
|
|
profilingTimeStampAllocator->cleanUpResources();
|
2018-08-24 14:48:59 +08:00
|
|
|
}
|
|
|
|
if (perfCounterAllocator) {
|
2017-12-21 07:45:38 +08:00
|
|
|
perfCounterAllocator->cleanUpResources();
|
2018-08-24 14:48:59 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (timestampPacketAllocator) {
|
|
|
|
timestampPacketAllocator->cleanUpResources();
|
|
|
|
}
|
2017-12-21 07:45:38 +08:00
|
|
|
|
|
|
|
cleanAllocationList(-1, TEMPORARY_ALLOCATION);
|
|
|
|
cleanAllocationList(-1, REUSABLE_ALLOCATION);
|
|
|
|
}
|
|
|
|
|
2018-07-05 22:31:57 +08:00
|
|
|
bool MemoryManager::cleanAllocationList(uint32_t waitTaskCount, uint32_t allocationUsage) {
|
2017-12-21 07:45:38 +08:00
|
|
|
std::lock_guard<decltype(mtx)> lock(mtx);
|
2018-07-05 22:31:57 +08:00
|
|
|
freeAllocationsList(waitTaskCount, (allocationUsage == TEMPORARY_ALLOCATION) ? graphicsAllocations : allocationsForReuse);
|
2017-12-21 07:45:38 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void MemoryManager::freeAllocationsList(uint32_t waitTaskCount, AllocationsList &allocationsList) {
|
|
|
|
GraphicsAllocation *curr = allocationsList.detachNodes();
|
|
|
|
|
|
|
|
IDList<GraphicsAllocation, false, true> allocationsLeft;
|
|
|
|
while (curr != nullptr) {
|
|
|
|
auto *next = curr->next;
|
|
|
|
if (curr->taskCount <= waitTaskCount) {
|
|
|
|
freeGraphicsMemory(curr);
|
|
|
|
} else {
|
|
|
|
allocationsLeft.pushTailOne(*curr);
|
|
|
|
}
|
|
|
|
curr = next;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (allocationsLeft.peekIsEmpty() == false) {
|
|
|
|
allocationsList.splice(*allocationsLeft.detachNodes());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lazily builds and returns the tag allocator used for HW timestamp events.
TagAllocator<HwTimeStamps> *MemoryManager::getEventTsAllocator() {
    if (!profilingTimeStampAllocator) {
        profilingTimeStampAllocator = std::make_unique<TagAllocator<HwTimeStamps>>(this, TagCount, MemoryConstants::cacheLineSize);
    }
    return profilingTimeStampAllocator.get();
}
|
|
|
|
|
|
|
|
// Lazily builds and returns the tag allocator used for perf-counter events.
TagAllocator<HwPerfCounter> *MemoryManager::getEventPerfCountAllocator() {
    if (!perfCounterAllocator) {
        perfCounterAllocator = std::make_unique<TagAllocator<HwPerfCounter>>(this, TagCount, MemoryConstants::cacheLineSize);
    }
    return perfCounterAllocator.get();
}
|
|
|
|
|
2018-08-24 14:48:59 +08:00
|
|
|
// Lazily builds and returns the tag allocator used for timestamp packets.
TagAllocator<TimestampPacket> *MemoryManager::getTimestampPacketAllocator() {
    if (!timestampPacketAllocator) {
        timestampPacketAllocator = std::make_unique<TagAllocator<TimestampPacket>>(this, TagCount, MemoryConstants::cacheLineSize);
    }
    return timestampPacketAllocator.get();
}
|
|
|
|
|
2017-12-21 07:45:38 +08:00
|
|
|
// Public release entry point; defers to the platform-specific implementation.
void MemoryManager::freeGraphicsMemory(GraphicsAllocation *gfxAllocation) {
    freeGraphicsMemoryImpl(gfxAllocation);
}
|
2018-01-19 17:55:36 +08:00
|
|
|
//if not in use destroy in place
|
|
|
|
//if in use pass to temporary allocation list that is cleaned on blocking calls
|
|
|
|
void MemoryManager::checkGpuUsageAndDestroyGraphicsAllocations(GraphicsAllocation *gfxAllocation) {
|
2018-10-01 22:10:54 +08:00
|
|
|
if (gfxAllocation->taskCount == ObjectNotUsed || gfxAllocation->taskCount <= *getCommandStreamReceiver(0)->getTagAddress()) {
|
2018-01-19 17:55:36 +08:00
|
|
|
freeGraphicsMemory(gfxAllocation);
|
|
|
|
} else {
|
|
|
|
storeAllocation(std::unique_ptr<GraphicsAllocation>(gfxAllocation), TEMPORARY_ALLOCATION);
|
|
|
|
}
|
|
|
|
}
|
2017-12-21 07:45:38 +08:00
|
|
|
|
|
|
|
// Drains any outstanding deferred deletions, then drops the deleter so all
// subsequent frees happen synchronously.
void MemoryManager::waitForDeletions() {
    if (deferredDeleter) {
        deferredDeleter->drain(false);
    }
    deferredDeleter.reset();
}
|
|
|
|
// Whether graphics memory is released via the asynchronous deferred deleter.
bool MemoryManager::isAsyncDeleterEnabled() const {
    return asyncDeleterEnabled;
}
|
|
|
|
|
|
|
|
// Base implementation never reports budget pressure.
// NOTE(review): presumably overridden by managers that track a real memory
// budget — confirm against derived classes.
bool MemoryManager::isMemoryBudgetExhausted() const {
    return false;
}
|
|
|
|
|
|
|
|
// Checks every required host-pointer fragment for overlap with fragments
// already registered in the host-pointer manager. An overlap with a bigger
// stored fragment may belong to a temporary allocation the GPU has since
// finished with, so the check is retried up to two more times: first after
// cleaning the temporary list against the current tag, then after
// busy-waiting for the CSR to reach its latest sent task count and cleaning
// again. Returns FATAL when the overlap persists; the caller aborts on FATAL.
RequirementsStatus MemoryManager::checkAllocationsForOverlapping(AllocationRequirements *requirements, CheckedFragments *checkedFragments) {
    DEBUG_BREAK_IF(requirements == nullptr);
    DEBUG_BREAK_IF(checkedFragments == nullptr);

    RequirementsStatus status = RequirementsStatus::SUCCESS;
    checkedFragments->count = 0;

    // Reset every slot so callers can rely on unchecked entries being marked.
    for (unsigned int i = 0; i < max_fragments_count; i++) {
        checkedFragments->status[i] = OverlapStatus::FRAGMENT_NOT_CHECKED;
        checkedFragments->fragments[i] = nullptr;
    }

    for (unsigned int i = 0; i < requirements->requiredFragmentsCount; i++) {
        checkedFragments->count++;
        checkedFragments->fragments[i] = hostPtrManager.getFragmentAndCheckForOverlaps(requirements->AllocationFragments[i].allocationPtr, requirements->AllocationFragments[i].allocationSize, checkedFragments->status[i]);
        if (checkedFragments->status[i] == OverlapStatus::FRAGMENT_OVERLAPING_AND_BIGGER_THEN_STORED_FRAGMENT) {
            // clean temporary allocations

            uint32_t taskCount = *getCommandStreamReceiver(0)->getTagAddress();
            cleanAllocationList(taskCount, TEMPORARY_ALLOCATION);

            // check overlapping again
            checkedFragments->fragments[i] = hostPtrManager.getFragmentAndCheckForOverlaps(requirements->AllocationFragments[i].allocationPtr, requirements->AllocationFragments[i].allocationSize, checkedFragments->status[i]);
            if (checkedFragments->status[i] == OverlapStatus::FRAGMENT_OVERLAPING_AND_BIGGER_THEN_STORED_FRAGMENT) {

                // Wait for completion
                // NOTE(review): busy-wait spin on the tag with no yield/backoff.
                while (*getCommandStreamReceiver(0)->getTagAddress() < getCommandStreamReceiver(0)->peekLatestSentTaskCount()) {
                }

                taskCount = *getCommandStreamReceiver(0)->getTagAddress();
                cleanAllocationList(taskCount, TEMPORARY_ALLOCATION);

                // check overlapping last time
                checkedFragments->fragments[i] = hostPtrManager.getFragmentAndCheckForOverlaps(requirements->AllocationFragments[i].allocationPtr, requirements->AllocationFragments[i].allocationSize, checkedFragments->status[i]);
                if (checkedFragments->status[i] == OverlapStatus::FRAGMENT_OVERLAPING_AND_BIGGER_THEN_STORED_FRAGMENT) {
                    status = RequirementsStatus::FATAL;
                    break;
                }
            }
        }
    }
    return status;
}
|
|
|
|
|
2018-09-06 15:01:13 +08:00
|
|
|
// Registers an OS context under its context id, growing the registry when the
// id exceeds its current size, and takes an internal reference that the
// destructor releases.
void MemoryManager::registerOsContext(OsContext *contextToRegister) {
    auto contextId = contextToRegister->getContextId();
    if (registeredOsContexts.size() < contextId + 1) {
        registeredOsContexts.resize(contextId + 1);
    }
    contextToRegister->incRefInternal();
    registeredOsContexts[contextId] = contextToRegister;
}
|
|
|
|
|
2018-09-23 21:47:27 +08:00
|
|
|
// Translates a high-level allocation request into the AllocationData consumed
// by allocateGraphicsMemoryInPreferredPool. The single switch below merges
// the three per-property switches of the original: each case lists the full
// flag set for that allocation type (verified case-by-case identical).
// Always returns true.
bool MemoryManager::getAllocationData(AllocationData &allocationData, const AllocationFlags &flags, const DevicesBitfield devicesBitfield,
                                      const void *hostPtr, size_t size, GraphicsAllocation::AllocationType type) {
    // A request must either carry a host pointer or ask for new memory.
    UNRECOVERABLE_IF(hostPtr == nullptr && !flags.flags.allocateMemory);

    bool allow64KbPages = false;
    bool allow32Bit = false;
    bool forcePin = false;
    bool mustBeZeroCopy = false;
    const bool uncacheable = false; // no current type requests uncacheable memory

    switch (type) {
    case GraphicsAllocation::AllocationType::BUFFER:
    case GraphicsAllocation::AllocationType::BUFFER_COMPRESSED:
        allow64KbPages = true;
        allow32Bit = true;
        forcePin = true;
        break;
    case GraphicsAllocation::AllocationType::BUFFER_HOST_MEMORY:
        allow64KbPages = true;
        allow32Bit = true;
        forcePin = true;
        mustBeZeroCopy = true;
        break;
    case GraphicsAllocation::AllocationType::PIPE:
    case GraphicsAllocation::AllocationType::PRINTF_SURFACE:
    case GraphicsAllocation::AllocationType::CONSTANT_SURFACE:
    case GraphicsAllocation::AllocationType::GLOBAL_SURFACE:
        allow64KbPages = true;
        allow32Bit = true;
        mustBeZeroCopy = true;
        break;
    case GraphicsAllocation::AllocationType::SCRATCH_SURFACE:
    case GraphicsAllocation::AllocationType::PRIVATE_SURFACE:
        allow64KbPages = true;
        allow32Bit = true;
        break;
    default:
        break;
    }

    allocationData.flags.mustBeZeroCopy = mustBeZeroCopy;
    allocationData.flags.allocateMemory = flags.flags.allocateMemory;
    allocationData.flags.allow32Bit = allow32Bit;
    allocationData.flags.allow64kbPages = allow64KbPages;
    allocationData.flags.forcePin = forcePin;
    allocationData.flags.uncacheable = uncacheable;
    allocationData.flags.flushL3 = flags.flags.flushL3RequiredForRead | flags.flags.flushL3RequiredForWrite;

    // Zero-copy implies the allocation must be reachable via system memory.
    if (allocationData.flags.mustBeZeroCopy) {
        allocationData.flags.useSystemMemory = true;
    }

    allocationData.hostPtr = hostPtr;
    allocationData.size = size;
    allocationData.type = type;
    allocationData.devicesBitfield = devicesBitfield;

    // An explicit "allocate fresh memory" request overrides any host pointer.
    if (allocationData.flags.allocateMemory) {
        allocationData.hostPtr = nullptr;
    }
    return true;
}
|
|
|
|
|
2018-09-23 21:47:27 +08:00
|
|
|
// Resolves the allocation properties for |type|, then tries the device pool
// first and falls back to a regular (non-device) allocation when the device
// pool signals RetryInNonDevicePool. Images and shared resources must not
// take this path.
GraphicsAllocation *MemoryManager::allocateGraphicsMemoryInPreferredPool(AllocationFlags flags, DevicesBitfield devicesBitfield, const void *hostPtr, size_t size, GraphicsAllocation::AllocationType type) {
    AllocationData allocationData;
    getAllocationData(allocationData, flags, devicesBitfield, hostPtr, size, type);
    UNRECOVERABLE_IF(allocationData.type == GraphicsAllocation::AllocationType::IMAGE || allocationData.type == GraphicsAllocation::AllocationType::SHARED_RESOURCE);

    AllocationStatus status = AllocationStatus::Error;
    auto *allocation = allocateGraphicsMemoryInDevicePool(allocationData, status);
    if (allocation == nullptr && status == AllocationStatus::RetryInNonDevicePool) {
        allocation = allocateGraphicsMemory(allocationData);
    }
    return allocation;
}
|
|
|
|
|
|
|
|
// Dispatches a prepared AllocationData to the matching allocation path, in
// priority order: 32-bit heap (when forced and allowed on 64-bit builds),
// host-pointer backed, 64KB pages, then the default page-sized path.
GraphicsAllocation *MemoryManager::allocateGraphicsMemory(const AllocationData &allocationData) {
    if (force32bitAllocations && allocationData.flags.allow32Bit && is64bit) {
        return allocate32BitGraphicsMemory(allocationData.size, allocationData.hostPtr, AllocationOrigin::EXTERNAL_ALLOCATION);
    }
    if (allocationData.hostPtr != nullptr) {
        return allocateGraphicsMemory(allocationData.size, allocationData.hostPtr, allocationData.flags.forcePin);
    }
    if (peek64kbPagesEnabled() && allocationData.flags.allow64kbPages) {
        const bool preferRenderCompressed = (allocationData.type == GraphicsAllocation::AllocationType::BUFFER_COMPRESSED);
        return allocateGraphicsMemory64kb(allocationData.size, MemoryConstants::pageSize64k, allocationData.flags.forcePin, preferRenderCompressed);
    }
    return allocateGraphicsMemory(allocationData.size, MemoryConstants::pageSize, allocationData.flags.forcePin, allocationData.flags.uncacheable);
}
|
2018-10-01 22:10:54 +08:00
|
|
|
// Returns the command-stream receiver registered for |contextId|. The guard
// validates the requested index against the container size — the original
// only checked that the container was non-empty, which still permitted an
// out-of-bounds access for any contextId > 0.
CommandStreamReceiver *MemoryManager::getCommandStreamReceiver(uint32_t contextId) {
    UNRECOVERABLE_IF(contextId >= executionEnvironment.commandStreamReceivers.size());
    return executionEnvironment.commandStreamReceivers[contextId].get();
}
|
2018-07-09 20:12:32 +08:00
|
|
|
|
2017-12-21 07:45:38 +08:00
|
|
|
} // namespace OCLRT
|