/*
 * Copyright (C) 2017-2021 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#pragma once

#include "shared/source/built_ins/built_ins.h"
|
2020-02-24 05:44:01 +08:00
|
|
|
#include "shared/source/command_stream/command_stream_receiver.h"
|
|
|
|
#include "shared/source/helpers/cache_policy.h"
|
2020-07-10 22:04:01 +08:00
|
|
|
#include "shared/source/helpers/engine_node_helper.h"
|
2020-02-24 05:44:01 +08:00
|
|
|
#include "shared/source/memory_manager/unified_memory_manager.h"
|
2020-07-10 22:04:01 +08:00
|
|
|
#include "shared/source/os_interface/os_context.h"
|
2020-02-24 17:22:30 +08:00
|
|
|
|
2020-02-23 05:50:57 +08:00
|
|
|
#include "opencl/source/command_queue/command_queue_hw.h"
|
|
|
|
#include "opencl/source/command_queue/enqueue_common.h"
|
|
|
|
#include "opencl/source/helpers/hardware_commands_helper.h"
|
|
|
|
#include "opencl/source/mem_obj/buffer.h"
|
|
|
|
#include "opencl/source/memory_manager/mem_obj_surface.h"
|
2019-02-27 18:39:32 +08:00
|
|
|
|
2017-12-21 07:45:38 +08:00
|
|
|
#include <new>
|
|
|
|
|
namespace NEO {

template <typename GfxFamily>
cl_int CommandQueueHw<GfxFamily>::enqueueReadBuffer(
    Buffer *buffer,
    cl_bool blockingRead,
    size_t offset,
    size_t size,
    void *ptr,
    GraphicsAllocation *mapAllocation,
    cl_uint numEventsInWaitList,
    const cl_event *eventWaitList,
    cl_event *event) {

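    // Decide whether this read may be serviced by the blitter (BCS) engine;
    // the command stream receiver used below (GPGPU or BCS) follows from that.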
    const cl_command_type cmdType = CL_COMMAND_READ_BUFFER;
    auto blitAllowed = blitEnqueueAllowed(cmdType);
    auto &csr = getCommandStreamReceiver(blitAllowed);

    if (nullptr == mapAllocation) {
        notifyEnqueueReadBuffer(buffer, !!blockingRead, EngineHelpers::isBcs(csr.getOsContext().getEngineType()));
    }

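    // A zero-copy buffer may not need an actual memory transfer at all, and
    // some cases may qualify for a CPU-side copy instead of a GPU submission.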
    auto rootDeviceIndex = getDevice().getRootDeviceIndex();
    bool isMemTransferNeeded = buffer->isMemObjZeroCopy() ? buffer->checkIfMemoryTransferIsRequired(offset, 0, ptr, cmdType) : true;
    bool isCpuCopyAllowed = bufferCpuCopyAllowed(buffer, cmdType, blockingRead, size, ptr,
                                                 numEventsInWaitList, eventWaitList);

    InternalMemoryType memoryType = InternalMemoryType::NOT_SPECIFIED;
    // Check whether the destination is an SVM/USM pointer for which an allocation already exists.
    if (!mapAllocation && this->getContext().getSVMAllocsManager()) {
        auto svmEntry = this->getContext().getSVMAllocsManager()->getSVMAlloc(ptr);
        if (svmEntry) {
            memoryType = svmEntry->memoryType;
            // Reject reads that would run past the end of the SVM allocation.
            if ((svmEntry->gpuAllocations.getGraphicsAllocation(rootDeviceIndex)->getGpuAddress() + svmEntry->size) < (castToUint64(ptr) + size)) {
                return CL_INVALID_OPERATION;
            }
            mapAllocation = svmEntry->cpuAllocation ? svmEntry->cpuAllocation : svmEntry->gpuAllocations.getGraphicsAllocation(rootDeviceIndex);
            // Device USM is not host-accessible, so the CPU copy path cannot be used for it.
            if (isCpuCopyAllowed) {
                if (svmEntry->memoryType == DEVICE_UNIFIED_MEMORY) {
                    isCpuCopyAllowed = false;
                }
            }
        }
    }

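    // Fast paths: perform the copy on the CPU, or, when no transfer is needed
    // (zero-copy), emit only a marker so event and profiling semantics still hold.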
    if (isCpuCopyAllowed) {
        if (isMemTransferNeeded) {
            return enqueueReadWriteBufferOnCpuWithMemoryTransfer(cmdType, buffer, offset, size, ptr,
                                                                 numEventsInWaitList, eventWaitList, event);
        } else {
            return enqueueReadWriteBufferOnCpuWithoutMemoryTransfer(cmdType, buffer, offset, size, ptr,
                                                                    numEventsInWaitList, eventWaitList, event);
        }
    } else if (!isMemTransferNeeded) {
        return enqueueMarkerForReadWriteOperation(buffer, ptr, cmdType, blockingRead,
                                                  numEventsInWaitList, eventWaitList, event);
    }

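    // Select the built-in copy kernel; the stateless variant is used when
    // forceStateless deems the buffer too large for stateful access.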
    auto eBuiltInOps = EBuiltInOps::CopyBufferToBuffer;
    if (forceStateless(buffer->getSize())) {
        eBuiltInOps = EBuiltInOps::CopyBufferToBufferStateless;
    }

    void *dstPtr = ptr;

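    // Describe each allocation the transfer touches as a surface so it can be
    // made resident for this submission.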
    MemObjSurface bufferSurf(buffer);
    HostPtrSurface hostPtrSurf(dstPtr, size);
    GeneralSurface mapSurface;
    Surface *surfaces[] = {&bufferSurf, nullptr};

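    // Either reuse an allocation that already covers the destination pointer
    // (map/SVM case) or create a temporary allocation for the host pointer.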
    if (mapAllocation) {
        surfaces[1] = &mapSurface;
        mapSurface.setGraphicsAllocation(mapAllocation);
        // Rebase dstPtr from the map allocation's base CPU pointer onto its GPU address.
        if ((memoryType != DEVICE_UNIFIED_MEMORY) && (memoryType != SHARED_UNIFIED_MEMORY)) {
            size_t dstOffset = ptrDiff(dstPtr, mapAllocation->getUnderlyingBuffer());
            dstPtr = reinterpret_cast<void *>(mapAllocation->getGpuAddress() + dstOffset);
        }
    } else {
        surfaces[1] = &hostPtrSurf;
        if (size != 0) {
            bool status = csr.createAllocationForHostSurface(hostPtrSurf, true);
            if (!status) {
                return CL_OUT_OF_RESOURCES;
            }
            dstPtr = reinterpret_cast<void *>(hostPtrSurf.getAllocation()->getGpuAddress());
        }
    }
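    // The copy built-in operates on dword-aligned addresses; align down and
    // carry the remainder as a destination offset.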
    void *alignedDstPtr = alignDown(dstPtr, 4);
    size_t dstPtrOffset = ptrDiff(dstPtr, alignedDstPtr);

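    // Parameters for the CopyBufferToBuffer built-in: read 'size' bytes from
    // the buffer at 'offset' into the aligned destination pointer.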
    BuiltinOpParams dc;
    dc.dstPtr = alignedDstPtr;
    dc.dstOffset = {dstPtrOffset, 0, 0};
    dc.srcMemObj = buffer;
    dc.srcOffset = {offset, 0, 0};
    dc.size = {size, 0, 0};
    dc.transferAllocation = mapAllocation ? mapAllocation : hostPtrSurf.getAllocation();

    MultiDispatchInfo dispatchInfo(dc);

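    // Emit driver diagnostics when requested, e.g. when the host pointer's
    // alignment or size prevents L3-cacheable access.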
    if (context->isProvidingPerformanceHints()) {
        context->providePerformanceHintForMemoryTransfer(CL_COMMAND_READ_BUFFER, true, static_cast<cl_mem>(buffer), ptr);
        if (!isL3Capable(ptr, size)) {
            context->providePerformanceHint(CL_CONTEXT_DIAGNOSTICS_LEVEL_BAD_INTEL, CL_ENQUEUE_READ_BUFFER_DOESNT_MEET_ALIGNMENT_RESTRICTIONS, ptr, size, MemoryConstants::pageSize, MemoryConstants::pageSize);
        }
    }
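    // Submit on the blitter (BCS) when allowed, otherwise fall back to the
    // GPGPU kernel-based copy.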
    dispatchBcsOrGpgpuEnqueue<CL_COMMAND_READ_BUFFER>(dispatchInfo, surfaces, eBuiltInOps, numEventsInWaitList, eventWaitList, event, blockingRead, blitAllowed);

    return CL_SUCCESS;
}

} // namespace NEO
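
/*
 * Illustrative host-side usage (a minimal sketch, not part of this header):
 * this enqueue path backs clEnqueueReadBuffer. The names 'queue', 'clBuffer'
 * and 'bytes' are placeholders for objects created elsewhere.
 *
 *   std::vector<char> host(bytes);
 *   cl_int err = clEnqueueReadBuffer(queue, clBuffer, CL_TRUE, 0, bytes,
 *                                    host.data(), 0, nullptr, nullptr);
 *   // CL_TRUE requests a blocking read; err is CL_SUCCESS once the copy has
 *   // completed and 'host' holds the buffer contents.
 */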