mirror of
https://github.com/intel/compute-runtime.git
synced 2025-12-26 15:03:02 +08:00
Remove redundant/recursive checks in unmap operations
- Some of the paths were made only for ULTs
- Params like mappedPtr were ignored
- Improve confusing method names
- Fix for memory leak in map shared buffer path (not tested code)

Change-Id: I8a69035f1d1c340f2d131a6f8d7e13116e3ddabc
This commit is contained in:
@@ -491,4 +491,37 @@ bool CommandQueue::sendPerfCountersConfig() {
|
||||
return getPerfCounters()->sendPmRegsCfgCommands(perfConfigurationData, &perfCountersRegsCfgHandle, &perfCountersRegsCfgPending);
|
||||
}
|
||||
|
||||
// Writes the host-side mapped data of a previously mapped mem object back to
// the device as part of clEnqueueUnmapMemObject handling.
//
// @param memObj              mapped object; must cast to either Image or Buffer
// @param mappedPtr           host pointer returned by the matching map call
// @param numEventsInWaitList / eventWaitList / event  standard OpenCL event plumbing
// @return CL_SUCCESS (or the enqueue error code); CL_INVALID_MEM_OBJECT when
//         memObj is neither an Image nor a Buffer
cl_int CommandQueue::enqueueWriteMemObjForUnmap(MemObj *memObj, void *mappedPtr, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *event) {
    auto image = castToObject<Image>(memObj);
    if (image) {
        auto mappedRegion = image->getMappedRegion();
        // A zero extent in any dimension would make the write a no-op; clamp each to 1.
        size_t region[] = {mappedRegion[0] ? mappedRegion[0] : 1,
                           mappedRegion[1] ? mappedRegion[1] : 1,
                           mappedRegion[2] ? mappedRegion[2] : 1};

        auto retVal = enqueueWriteImage(image, CL_FALSE, image->getMappedOrigin(), region, image->getHostPtrRowPitch(), image->getHostPtrSlicePitch(),
                                        mappedPtr, numEventsInWaitList, eventWaitList, event);

        // Non-USE_HOST_PTR images always block until the write completes.
        // For USE_HOST_PTR we only block when the wait list is already resolvable
        // (no event still in the eventNotReady state).
        // NOTE(review): original code assigned `true` twice (dead store in the
        // non-USE_HOST_PTR branch); collapsed into a single boolean expression
        // with identical behavior.
        bool mustCallFinish =
            !(image->getFlags() & CL_MEM_USE_HOST_PTR) ||
            (CommandQueue::getTaskLevelFromWaitList(this->taskLevel, numEventsInWaitList, eventWaitList) != Event::eventNotReady);
        if (mustCallFinish) {
            finish(true);
        }
        return retVal;
    }

    auto buffer = castToObject<Buffer>(memObj);
    if (buffer) {
        // Write back only the mapped sub-range, starting at the same offset
        // inside the mapped host pointer.
        auto writePtr = ptrOffset(mappedPtr, buffer->getMappedOffset());

        return enqueueWriteBuffer(buffer, CL_TRUE, buffer->getMappedOffset(), buffer->getMappedSize(), writePtr,
                                  numEventsInWaitList, eventWaitList, event);
    }

    return CL_INVALID_MEM_OBJECT;
}
|
||||
|
||||
} // namespace OCLRT
|
||||
|
||||
@@ -400,6 +400,8 @@ class CommandQueue : public BaseObject<_cl_command_queue> {
|
||||
Event *virtualEvent;
|
||||
|
||||
protected:
|
||||
cl_int enqueueWriteMemObjForUnmap(MemObj *memObj, void *mappedPtr, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *event);
|
||||
|
||||
Context *context;
|
||||
Device *device;
|
||||
|
||||
|
||||
@@ -246,7 +246,7 @@ class CommandQueueHw : public CommandQueue {
|
||||
cl_event *event) override {
|
||||
cl_int retVal;
|
||||
if (memObj->allowTiling() || memObj->peekSharingHandler()) {
|
||||
retVal = memObj->unmapObj(this, mappedPtr, numEventsInWaitList, eventWaitList, event);
|
||||
retVal = enqueueWriteMemObjForUnmap(memObj, mappedPtr, numEventsInWaitList, eventWaitList, event);
|
||||
} else {
|
||||
cpuDataTransferHandler(memObj,
|
||||
CL_COMMAND_UNMAP_MEM_OBJECT,
|
||||
|
||||
@@ -45,7 +45,7 @@ void *CommandQueueHw<GfxFamily>::enqueueMapSharedBuffer(Buffer *buffer, cl_bool
|
||||
auto memoryManager = device->getMemoryManager();
|
||||
if (!buffer->getMappedPtr()) {
|
||||
auto memory = memoryManager->allocateSystemMemory(buffer->getGraphicsAllocation()->getUnderlyingBufferSize(), 0);
|
||||
buffer->setMappedPtr(memory);
|
||||
buffer->setAllocatedMappedPtr(memory);
|
||||
}
|
||||
|
||||
auto returnPtr = ptrOffset(buffer->getMappedPtr(), offset);
|
||||
@@ -59,4 +59,4 @@ void *CommandQueueHw<GfxFamily>::enqueueMapSharedBuffer(Buffer *buffer, cl_bool
|
||||
buffer->setMappedOffset(offset);
|
||||
return returnPtr;
|
||||
}
|
||||
}
|
||||
} // namespace OCLRT
|
||||
|
||||
Reference in New Issue
Block a user