Don't pass redundant flags param to allocUserptr
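Every call site in this patch forwarded a literal 0 for the flags argument, so the parameter is removed from allocUserptr's declaration, definition, and callers; because the definition builds its request with GemUserPtr userptr = {}, the flags field already starts out zero and behaviour is unchanged. A minimal before/after sketch of a representative caller (hostPtr, size and rootDeviceIndex here are placeholders, not lines taken from the diff):

    // before: a redundant 0 forwarded on every call
    auto bo = allocUserptr(reinterpret_cast<uintptr_t>(hostPtr), size, 0, rootDeviceIndex);

    // after: the flags parameter is gone; the userptr request stays zero-initialized
    auto bo = allocUserptr(reinterpret_cast<uintptr_t>(hostPtr), size, rootDeviceIndex);
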
Signed-off-by: Bartosz Dunajski <bartosz.dunajski@intel.com>
commit 1a7c11e4f1
parent 0268c4dfcd
committed by Compute-Runtime-Automation

@@ -102,7 +102,7 @@ void DrmMemoryManager::initialize(gemCloseWorkerMode mode) {
 BufferObject *DrmMemoryManager::createRootDeviceBufferObject(uint32_t rootDeviceIndex) {
     BufferObject *bo = nullptr;
     if (forcePinEnabled || validateHostPtrMemory) {
-        bo = allocUserptr(reinterpret_cast<uintptr_t>(memoryForPinBBs[rootDeviceIndex]), MemoryConstants::pageSize, 0, rootDeviceIndex);
+        bo = allocUserptr(reinterpret_cast<uintptr_t>(memoryForPinBBs[rootDeviceIndex]), MemoryConstants::pageSize, rootDeviceIndex);
         if (bo) {
             if (isLimitedRange(rootDeviceIndex)) {
                 auto boSize = bo->peekSize();
@@ -239,11 +239,10 @@ bool DrmMemoryManager::setMemPrefetch(GraphicsAllocation *gfxAllocation, uint32_
     return drmAllocation->setMemPrefetch(&this->getDrm(rootDeviceIndex), subDeviceId);
 }

-NEO::BufferObject *DrmMemoryManager::allocUserptr(uintptr_t address, size_t size, uint64_t flags, uint32_t rootDeviceIndex) {
+NEO::BufferObject *DrmMemoryManager::allocUserptr(uintptr_t address, size_t size, uint32_t rootDeviceIndex) {
     GemUserPtr userptr = {};
     userptr.userPtr = address;
     userptr.userSize = size;
-    userptr.flags = static_cast<uint32_t>(flags);

     auto &drm = this->getDrm(rootDeviceIndex);

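Dropping userptr.flags = static_cast<uint32_t>(flags) is safe because GemUserPtr userptr = {} value-initializes the aggregate, so flags is already zero, which is exactly what every caller passed. A small self-contained sketch of that C++ rule, using a hypothetical stand-in struct rather than the driver's real GemUserPtr definition:

    #include <cassert>
    #include <cstdint>

    // Stand-in with the fields the diff touches; not the actual GemUserPtr layout.
    struct UserPtrParams {
        uint64_t userPtr;
        uint64_t userSize;
        uint32_t flags;
    };

    int main() {
        UserPtrParams userptr = {}; // value-initialization zeroes every member
        userptr.userPtr = 0x1000;
        userptr.userSize = 4096;
        assert(userptr.flags == 0); // holds without any explicit assignment
        return 0;
    }
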
@@ -339,7 +338,7 @@ DrmAllocation *DrmMemoryManager::createAllocWithAlignmentFromUserptr(const Alloc
         return nullptr;
     }

-    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(res), size, 0, allocationData.rootDeviceIndex));
+    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(res), size, allocationData.rootDeviceIndex));
     if (!bo) {
         alignedFreeWrapper(res);
         return nullptr;
@@ -382,7 +381,6 @@ DrmAllocation *DrmMemoryManager::allocateUSMHostGraphicsMemory(const AllocationD

     std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(bufferPtr),
                                                                          cSize,
-                                                                         0,
                                                                          allocationData.rootDeviceIndex));
     if (!bo) {
         return nullptr;
@@ -439,7 +437,7 @@ GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryWithGpuVa(const Allo
     if (!res)
         return nullptr;

-    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(res), alignedSize, 0, allocationData.rootDeviceIndex));
+    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(res), alignedSize, allocationData.rootDeviceIndex));

     if (!bo) {
         alignedFreeWrapper(res);
@@ -476,7 +474,7 @@ DrmAllocation *DrmMemoryManager::allocateGraphicsMemoryForNonSvmHostPtr(const Al
         return nullptr;
     }

-    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(alignedPtr), realAllocationSize, 0, rootDeviceIndex));
+    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(alignedPtr), realAllocationSize, rootDeviceIndex));
     if (!bo) {
         releaseGpuRange(reinterpret_cast<void *>(gpuVirtualAddress), alignedSize, rootDeviceIndex);
         return nullptr;
@@ -593,7 +591,7 @@ DrmAllocation *DrmMemoryManager::allocate32BitGraphicsMemoryImpl(const Allocatio
         auto alignedUserPointer = reinterpret_cast<uintptr_t>(alignDown(allocationData.hostPtr, MemoryConstants::pageSize));
         auto inputPointerOffset = inputPtr - alignedUserPointer;

-        std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(alignedUserPointer, allocationSize, 0, allocationData.rootDeviceIndex));
+        std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(alignedUserPointer, allocationSize, allocationData.rootDeviceIndex));
         if (!bo) {
             gfxPartition->heapFree(allocatorToUse, gpuVirtualAddress, realAllocationSize);
             return nullptr;
@@ -628,7 +626,7 @@ DrmAllocation *DrmMemoryManager::allocate32BitGraphicsMemoryImpl(const Allocatio
         return nullptr;
     }

-    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(ptrAlloc), alignedAllocationSize, 0, allocationData.rootDeviceIndex));
+    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(ptrAlloc), alignedAllocationSize, allocationData.rootDeviceIndex));

     if (!bo) {
         alignedFreeWrapper(ptrAlloc);
@@ -875,7 +873,7 @@ GraphicsAllocation *DrmMemoryManager::createPaddedAllocation(GraphicsAllocation
     auto alignedPtr = reinterpret_cast<uintptr_t>(alignDown(srcPtr, MemoryConstants::pageSize));
     auto offset = ptrDiff(srcPtr, alignedPtr);

-    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(alignedPtr, alignedSrcSize, 0, rootDeviceIndex));
+    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(alignedPtr, alignedSrcSize, rootDeviceIndex));
     if (!bo) {
         return nullptr;
     }
@@ -1024,8 +1022,7 @@ MemoryManager::AllocationStatus DrmMemoryManager::populateOsHandles(OsHandleStor
             handleStorage.fragmentStorageData[i].residency = new ResidencyData(maxOsContextCount);

             osHandle->bo = allocUserptr((uintptr_t)handleStorage.fragmentStorageData[i].cpuPtr,
-                                        handleStorage.fragmentStorageData[i].fragmentSize,
-                                        0, rootDeviceIndex);
+                                        handleStorage.fragmentStorageData[i].fragmentSize, rootDeviceIndex);
             if (!osHandle->bo) {
                 handleStorage.fragmentStorageData[i].freeTheFragment = true;
                 return AllocationStatus::Error;
@@ -92,7 +92,7 @@ class DrmMemoryManager : public MemoryManager {
     MOCKABLE_VIRTUAL BufferObject *findAndReferenceSharedBufferObject(int boHandle, uint32_t rootDeviceIndex);
     void eraseSharedBufferObject(BufferObject *bo);
     void pushSharedBufferObject(BufferObject *bo);
-    BufferObject *allocUserptr(uintptr_t address, size_t size, uint64_t flags, uint32_t rootDeviceIndex);
+    BufferObject *allocUserptr(uintptr_t address, size_t size, uint32_t rootDeviceIndex);
     bool setDomainCpu(GraphicsAllocation &graphicsAllocation, bool writeEnable);
     uint64_t acquireGpuRange(size_t &size, uint32_t rootDeviceIndex, HeapIndex heapIndex);
     MOCKABLE_VIRTUAL void releaseGpuRange(void *address, size_t size, uint32_t rootDeviceIndex);
@@ -56,7 +56,7 @@ DrmAllocation *DrmMemoryManager::createMultiHostAllocation(const AllocationData
         }

         auto boHostPtr = static_cast<uint8_t *>(cpuBasePointer) + tile * sizePerTile;
-        auto bo = allocUserptr(reinterpret_cast<uintptr_t>(boHostPtr), sizePerTile, 0, allocationData.rootDeviceIndex);
+        auto bo = allocUserptr(reinterpret_cast<uintptr_t>(boHostPtr), sizePerTile, allocationData.rootDeviceIndex);
         if (!bo) {
             freeGraphicsMemoryImpl(allocation);
             return nullptr;