/*
 * Copyright (C) 2018-2020 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */
2020-02-23 05:50:57 +08:00
# include "opencl/source/command_queue/command_queue.h"
2019-02-27 18:39:32 +08:00
2020-02-24 05:44:01 +08:00
# include "shared/source/command_stream/command_stream_receiver.h"
# include "shared/source/helpers/aligned_memory.h"
# include "shared/source/helpers/array_count.h"
# include "shared/source/helpers/engine_node_helper.h"
# include "shared/source/helpers/get_info.h"
# include "shared/source/helpers/ptr_math.h"
# include "shared/source/helpers/string.h"
# include "shared/source/helpers/timestamp_packet.h"
# include "shared/source/memory_manager/internal_allocation_storage.h"
# include "shared/source/os_interface/os_context.h"
# include "shared/source/utilities/api_intercept.h"
# include "shared/source/utilities/tag_allocator.h"
2020-02-24 17:22:30 +08:00
2020-02-23 05:50:57 +08:00
# include "opencl/source/built_ins/builtins_dispatch_builder.h"
2020-03-20 18:15:25 +08:00
# include "opencl/source/cl_device/cl_device.h"
2020-02-23 05:50:57 +08:00
# include "opencl/source/context/context.h"
# include "opencl/source/device_queue/device_queue.h"
# include "opencl/source/event/event_builder.h"
# include "opencl/source/event/user_event.h"
# include "opencl/source/gtpin/gtpin_notify.h"
# include "opencl/source/helpers/convert_color.h"
# include "opencl/source/helpers/hardware_commands_helper.h"
# include "opencl/source/helpers/mipmap.h"
# include "opencl/source/helpers/queue_helpers.h"
# include "opencl/source/mem_obj/buffer.h"
# include "opencl/source/mem_obj/image.h"
2019-02-27 18:39:32 +08:00
# include "CL/cl_ext.h"
2017-12-21 07:45:38 +08:00
# include <map>
2019-03-26 18:59:46 +08:00
namespace NEO {
2017-12-21 07:45:38 +08:00
// Global table of create functions
CommandQueueCreateFunc commandQueueFactory [ IGFX_MAX_CORE ] = { } ;
// Instantiates the core-family-specific CommandQueue via the factory table.
// retVal is set to CL_SUCCESS; the selected create function must be registered.
CommandQueue *CommandQueue::create(Context *context,
                                   ClDevice *device,
                                   const cl_queue_properties *properties,
                                   bool internalUsage,
                                   cl_int &retVal) {
    retVal = CL_SUCCESS;

    auto createFunc = commandQueueFactory[device->getRenderCoreFamily()];
    DEBUG_BREAK_IF(nullptr == createFunc);

    return createFunc(context, device, properties, internalUsage);
}
2020-01-14 21:32:11 +08:00
CommandQueue : : CommandQueue ( Context * context , ClDevice * device , const cl_queue_properties * properties )
: context ( context ) , device ( device ) {
2017-12-21 07:45:38 +08:00
if ( context ) {
context - > incRefInternal ( ) ;
}
2018-04-26 16:01:01 +08:00
2017-12-21 07:45:38 +08:00
commandQueueProperties = getCmdQueueProperties < cl_command_queue_properties > ( properties ) ;
flushStamp . reset ( new FlushStampTracker ( true ) ) ;
2018-10-03 05:37:30 +08:00
2018-11-22 20:57:10 +08:00
if ( device ) {
2020-05-21 22:35:20 +08:00
auto hwInfo = device - > getHardwareInfo ( ) ;
2019-07-15 20:28:09 +08:00
gpgpuEngine = & device - > getDefaultEngine ( ) ;
2020-05-21 22:35:20 +08:00
if ( hwInfo . capabilityTable . blitterOperationsSupported | | gpgpuEngine - > commandStreamReceiver - > peekTimestampPacketWriteEnabled ( ) ) {
2018-11-27 20:07:41 +08:00
timestampPacketContainer = std : : make_unique < TimestampPacketContainer > ( ) ;
2018-11-22 20:57:10 +08:00
}
2020-02-12 18:27:28 +08:00
if ( hwInfo . capabilityTable . blitterOperationsSupported ) {
2020-02-25 01:25:42 +08:00
auto & selectorCopyEngine = device - > getDeviceById ( 0 ) - > getSelectorCopyEngine ( ) ;
bcsEngine = & device - > getDeviceById ( 0 ) - > getEngine ( EngineHelpers : : getBcsEngineType ( hwInfo , selectorCopyEngine ) , false ) ;
2019-07-16 15:23:02 +08:00
}
2018-10-03 05:37:30 +08:00
}
2018-11-29 18:39:10 +08:00
2020-05-27 23:12:32 +08:00
storeProperties ( properties ) ;
2018-12-06 16:21:28 +08:00
processProperties ( properties ) ;
2017-12-21 07:45:38 +08:00
}
// Destructor: releases the virtual event reference, recycles the command
// stream allocation into the CSR's reuse pool, shuts down perf counters if
// they were enabled, and drops the context reference for normal queues.
CommandQueue::~CommandQueue() {
    if (virtualEvent) {
        UNRECOVERABLE_IF(this->virtualEvent->getCommandQueue() != this && this->virtualEvent->getCommandQueue() != nullptr);
        virtualEvent->decRefInternal();
    }

    if (device) {
        auto allocationStorage = gpgpuEngine->commandStreamReceiver->getInternalAllocationStorage();

        if (commandStream) {
            // Return the backing allocation to the CSR for reuse by future streams.
            allocationStorage->storeAllocation(std::unique_ptr<GraphicsAllocation>(commandStream->getGraphicsAllocation()), REUSABLE_ALLOCATION);
        }
        delete commandStream;

        if (this->perfCountersEnabled) {
            device->getPerformanceCounters()->shutdown();
        }
    }

    timestampPacketContainer.reset();

    // For a normal queue, decrement the ref count on the context.
    // The special queue is owned by the context, so its ref is not decremented.
    if (context && !isSpecialCommandQueue) {
        context->decRefInternal();
    }
}
2019-07-15 20:28:09 +08:00
CommandStreamReceiver & CommandQueue : : getGpgpuCommandStreamReceiver ( ) const {
return * gpgpuEngine - > commandStreamReceiver ;
2018-11-22 20:57:10 +08:00
}
2019-07-16 15:23:02 +08:00
CommandStreamReceiver * CommandQueue : : getBcsCommandStreamReceiver ( ) const {
if ( bcsEngine ) {
return bcsEngine - > commandStreamReceiver ;
}
return nullptr ;
}
2020-04-21 18:04:47 +08:00
CommandStreamReceiver & CommandQueue : : getCommandStreamReceiverByCommandType ( cl_command_type cmdType ) const {
if ( blitEnqueueAllowed ( cmdType ) ) {
auto csr = getBcsCommandStreamReceiver ( ) ;
UNRECOVERABLE_IF ( ! csr ) ;
return * csr ;
}
return getGpgpuCommandStreamReceiver ( ) ;
}
2020-01-14 21:32:11 +08:00
Device & CommandQueue : : getDevice ( ) const noexcept {
return device - > getDevice ( ) ;
}
2017-12-21 07:45:38 +08:00
uint32_t CommandQueue : : getHwTag ( ) const {
uint32_t tag = * getHwTagAddress ( ) ;
return tag ;
}
volatile uint32_t * CommandQueue : : getHwTagAddress ( ) const {
2019-07-15 20:28:09 +08:00
return getGpgpuCommandStreamReceiver ( ) . getTagAddress ( ) ;
2017-12-21 07:45:38 +08:00
}
bool CommandQueue : : isCompleted ( uint32_t taskCount ) const {
uint32_t tag = getHwTag ( ) ;
2020-06-16 19:19:11 +08:00
DEBUG_BREAK_IF ( tag = = CompletionStamp : : notReady ) ;
2017-12-21 07:45:38 +08:00
return tag > = taskCount ;
}
2018-03-21 17:00:49 +08:00
void CommandQueue : : waitUntilComplete ( uint32_t taskCountToWait , FlushStamp flushStampToWait , bool useQuickKmdSleep ) {
2017-12-21 07:45:38 +08:00
WAIT_ENTER ( )
DBG_LOG ( LogTaskCounts , __FUNCTION__ , " Waiting for taskCount: " , taskCountToWait ) ;
DBG_LOG ( LogTaskCounts , __FUNCTION__ , " Line: " , __LINE__ , " Current taskCount: " , getHwTag ( ) ) ;
2018-11-16 19:46:49 +08:00
bool forcePowerSavingMode = this - > throttle = = QueueThrottle : : LOW ;
2019-07-15 20:28:09 +08:00
getGpgpuCommandStreamReceiver ( ) . waitForTaskCountWithKmdNotifyFallback ( taskCountToWait , flushStampToWait ,
useQuickKmdSleep , forcePowerSavingMode ) ;
2017-12-21 07:45:38 +08:00
DEBUG_BREAK_IF ( getHwTag ( ) < taskCountToWait ) ;
2019-07-15 17:01:32 +08:00
2020-02-19 23:32:40 +08:00
if ( gtpinIsGTPinInitialized ( ) ) {
gtpinNotifyTaskCompletion ( taskCountToWait ) ;
}
2019-07-12 20:48:52 +08:00
if ( auto bcsCsr = getBcsCommandStreamReceiver ( ) ) {
2019-10-22 17:25:14 +08:00
bcsCsr - > waitForTaskCountWithKmdNotifyFallback ( bcsTaskCount , 0 , false , false ) ;
2019-12-17 17:35:25 +08:00
bcsCsr - > waitForTaskCountAndCleanTemporaryAllocationList ( bcsTaskCount ) ;
2019-07-12 20:48:52 +08:00
}
2019-12-17 17:35:25 +08:00
getGpgpuCommandStreamReceiver ( ) . waitForTaskCountAndCleanTemporaryAllocationList ( taskCountToWait ) ;
2019-11-05 19:55:13 +08:00
2017-12-21 07:45:38 +08:00
WAIT_LEAVE ( )
}
bool CommandQueue : : isQueueBlocked ( ) {
TakeOwnershipWrapper < CommandQueue > takeOwnershipWrapper ( * this ) ;
//check if we have user event and if so, if it is in blocked state.
if ( this - > virtualEvent ) {
2019-07-09 00:07:46 +08:00
auto executionStatus = this - > virtualEvent - > peekExecutionStatus ( ) ;
if ( executionStatus < = CL_SUBMITTED ) {
2017-12-21 07:45:38 +08:00
UNRECOVERABLE_IF ( this - > virtualEvent = = nullptr ) ;
2019-07-09 00:07:46 +08:00
if ( this - > virtualEvent - > isStatusCompletedByTermination ( executionStatus ) = = false ) {
2017-12-21 07:45:38 +08:00
taskCount = this - > virtualEvent - > peekTaskCount ( ) ;
flushStamp - > setStamp ( this - > virtualEvent - > flushStamp - > peekStamp ( ) ) ;
taskLevel = this - > virtualEvent - > taskLevel ;
// If this isn't an OOQ, update the taskLevel for the queue
if ( ! isOOQEnabled ( ) ) {
taskLevel + + ;
}
} else {
//at this point we may reset queue TaskCount, since all command previous to this were aborted
taskCount = 0 ;
flushStamp - > setStamp ( 0 ) ;
2019-07-15 20:28:09 +08:00
taskLevel = getGpgpuCommandStreamReceiver ( ) . peekTaskLevel ( ) ;
2017-12-21 07:45:38 +08:00
}
2019-12-10 23:26:35 +08:00
FileLoggerInstance ( ) . log ( DebugManager . flags . EventsDebugEnable . get ( ) , " isQueueBlocked taskLevel change from " , taskLevel , " to new from virtualEvent " , this - > virtualEvent , " new tasklevel " , this - > virtualEvent - > taskLevel . load ( ) ) ;
2017-12-21 07:45:38 +08:00
//close the access to virtual event, driver added only 1 ref count.
this - > virtualEvent - > decRefInternal ( ) ;
this - > virtualEvent = nullptr ;
return false ;
}
return true ;
}
return false ;
}
// clGetCommandQueueInfo implementation; delegates to the shared helper.
cl_int CommandQueue::getCommandQueueInfo(cl_command_queue_info paramName,
                                         size_t paramValueSize,
                                         void *paramValue,
                                         size_t *paramValueSizeRet) {
    return getQueueInfo<CommandQueue>(this, paramName, paramValueSize, paramValue, paramValueSizeRet);
}
uint32_t CommandQueue : : getTaskLevelFromWaitList ( uint32_t taskLevel ,
cl_uint numEventsInWaitList ,
const cl_event * eventWaitList ) {
for ( auto iEvent = 0u ; iEvent < numEventsInWaitList ; + + iEvent ) {
auto pEvent = ( Event * ) ( eventWaitList [ iEvent ] ) ;
uint32_t eventTaskLevel = pEvent - > taskLevel ;
taskLevel = std : : max ( taskLevel , eventTaskLevel ) ;
}
return taskLevel ;
}
// Returns the queue's command stream, lazily creating it and growing its
// backing allocation so at least minRequiredSize (plus queue overhead) fits.
LinearStream &CommandQueue::getCS(size_t minRequiredSize) {
    DEBUG_BREAK_IF(nullptr == device);

    if (!commandStream) {
        commandStream = new LinearStream(nullptr);
    }

    minRequiredSize += CSRequirements::minCommandQueueCommandStreamSize;
    constexpr static auto additionalAllocationSize = CSRequirements::minCommandQueueCommandStreamSize + CSRequirements::csOverfetchSize;
    getGpgpuCommandStreamReceiver().ensureCommandBufferAllocation(*commandStream, minRequiredSize, additionalAllocationSize);
    return *commandStream;
}
// Acquires sharing-handler-backed mem objects for use by OpenCL, then
// enqueues a marker over the wait list. Fails fast on the first object that
// is not shareable or whose acquire fails.
cl_int CommandQueue::enqueueAcquireSharedObjects(cl_uint numObjects, const cl_mem *memObjects, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *oclEvent, cl_uint cmdType) {
    // Pointer/count must agree: both empty or both non-empty.
    if ((memObjects == nullptr && numObjects != 0) || (memObjects != nullptr && numObjects == 0)) {
        return CL_INVALID_VALUE;
    }

    for (unsigned int i = 0; i < numObjects; i++) {
        auto memObject = castToObject<MemObj>(memObjects[i]);
        if (memObject == nullptr || memObject->peekSharingHandler() == nullptr) {
            return CL_INVALID_MEM_OBJECT;
        }

        int acquireResult = memObject->peekSharingHandler()->acquire(memObject, getDevice().getRootDeviceIndex());
        if (acquireResult != CL_SUCCESS) {
            return acquireResult;
        }
        memObject->acquireCount++;
    }

    auto status = enqueueMarkerWithWaitList(numEventsInWaitList, eventWaitList, oclEvent);
    if (oclEvent) {
        castToObjectOrAbort<Event>(*oclEvent)->setCmdType(cmdType);
    }
    return status;
}
// Releases previously acquired sharing-handler-backed mem objects back to
// their owning API, then enqueues a marker over the wait list.
cl_int CommandQueue::enqueueReleaseSharedObjects(cl_uint numObjects, const cl_mem *memObjects, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *oclEvent, cl_uint cmdType) {
    // Pointer/count must agree: both empty or both non-empty.
    if ((memObjects == nullptr && numObjects != 0) || (memObjects != nullptr && numObjects == 0)) {
        return CL_INVALID_VALUE;
    }

    for (unsigned int i = 0; i < numObjects; i++) {
        auto memObject = castToObject<MemObj>(memObjects[i]);
        if (memObject == nullptr || memObject->peekSharingHandler() == nullptr) {
            return CL_INVALID_MEM_OBJECT;
        }

        memObject->peekSharingHandler()->release(memObject, getDevice().getRootDeviceIndex());
        DEBUG_BREAK_IF(memObject->acquireCount <= 0);
        memObject->acquireCount--;
    }

    auto status = enqueueMarkerWithWaitList(numEventsInWaitList, eventWaitList, oclEvent);
    if (oclEvent) {
        castToObjectOrAbort<Event>(*oclEvent)->setCmdType(cmdType);
    }
    return status;
}
void CommandQueue : : updateFromCompletionStamp ( const CompletionStamp & completionStamp ) {
2017-12-22 23:05:10 +08:00
DEBUG_BREAK_IF ( this - > taskLevel > completionStamp . taskLevel ) ;
DEBUG_BREAK_IF ( this - > taskCount > completionStamp . taskCount ) ;
2020-06-16 19:19:11 +08:00
if ( completionStamp . taskCount ! = CompletionStamp : : notReady ) {
2018-01-05 19:07:47 +08:00
taskCount = completionStamp . taskCount ;
}
2017-12-21 07:45:38 +08:00
flushStamp - > setStamp ( completionStamp . flushStamp ) ;
this - > taskLevel = completionStamp . taskLevel ;
}
2019-12-19 19:58:02 +08:00
bool CommandQueue : : setPerfCountersEnabled ( ) {
2017-12-21 07:45:38 +08:00
DEBUG_BREAK_IF ( device = = nullptr ) ;
2019-12-19 19:58:02 +08:00
2017-12-21 07:45:38 +08:00
auto perfCounters = device - > getPerformanceCounters ( ) ;
2020-01-13 20:15:03 +08:00
bool isCcsEngine = EngineHelpers : : isCcs ( getGpgpuEngine ( ) . osContext - > getEngineType ( ) ) ;
2019-05-20 17:19:27 +08:00
2019-12-19 19:58:02 +08:00
perfCountersEnabled = perfCounters - > enable ( isCcsEngine ) ;
if ( ! perfCountersEnabled ) {
2019-05-20 17:19:27 +08:00
perfCounters - > shutdown ( ) ;
2017-12-21 07:45:38 +08:00
}
2019-05-20 17:19:27 +08:00
2019-12-19 19:58:02 +08:00
return perfCountersEnabled ;
}
2017-12-21 07:45:38 +08:00
PerformanceCounters * CommandQueue : : getPerfCounters ( ) {
return device - > getPerformanceCounters ( ) ;
}
2018-02-09 05:59:03 +08:00
cl_int CommandQueue : : enqueueWriteMemObjForUnmap ( MemObj * memObj , void * mappedPtr , EventsRequest & eventsRequest ) {
2018-02-18 05:26:28 +08:00
cl_int retVal = CL_SUCCESS ;
MapInfo unmapInfo ;
if ( ! memObj - > findMappedPtr ( mappedPtr , unmapInfo ) ) {
return CL_INVALID_VALUE ;
}
if ( ! unmapInfo . readOnly ) {
2019-11-28 14:58:10 +08:00
memObj - > getMapAllocation ( ) - > setAubWritable ( true , GraphicsAllocation : : defaultBank ) ;
memObj - > getMapAllocation ( ) - > setTbxWritable ( true , GraphicsAllocation : : defaultBank ) ;
2018-02-18 05:26:28 +08:00
if ( memObj - > peekClMemObjType ( ) = = CL_MEM_OBJECT_BUFFER ) {
auto buffer = castToObject < Buffer > ( memObj ) ;
2019-11-28 14:58:10 +08:00
2020-03-10 15:13:30 +08:00
retVal = enqueueWriteBuffer ( buffer , CL_FALSE , unmapInfo . offset [ 0 ] , unmapInfo . size [ 0 ] , mappedPtr , memObj - > getMapAllocation ( ) ,
2018-02-18 05:26:28 +08:00
eventsRequest . numEventsInWaitList , eventsRequest . eventWaitList , eventsRequest . outEvent ) ;
2018-02-01 20:40:30 +08:00
} else {
2018-04-04 15:29:48 +08:00
auto image = castToObjectOrAbort < Image > ( memObj ) ;
size_t writeOrigin [ 4 ] = { unmapInfo . offset [ 0 ] , unmapInfo . offset [ 1 ] , unmapInfo . offset [ 2 ] , 0 } ;
auto mipIdx = getMipLevelOriginIdx ( image - > peekClMemObjType ( ) ) ;
UNRECOVERABLE_IF ( mipIdx > = 4 ) ;
writeOrigin [ mipIdx ] = unmapInfo . mipLevel ;
retVal = enqueueWriteImage ( image , CL_FALSE , writeOrigin , & unmapInfo . size [ 0 ] ,
2019-04-16 00:17:28 +08:00
image - > getHostPtrRowPitch ( ) , image - > getHostPtrSlicePitch ( ) , mappedPtr , memObj - > getMapAllocation ( ) ,
2018-02-18 05:26:28 +08:00
eventsRequest . numEventsInWaitList , eventsRequest . eventWaitList , eventsRequest . outEvent ) ;
2018-02-01 20:40:30 +08:00
}
2018-02-18 05:26:28 +08:00
} else {
retVal = enqueueMarkerWithWaitList ( eventsRequest . numEventsInWaitList , eventsRequest . eventWaitList , eventsRequest . outEvent ) ;
2018-02-01 20:40:30 +08:00
}
2018-02-18 05:26:28 +08:00
if ( retVal = = CL_SUCCESS ) {
memObj - > removeMappedPtr ( mappedPtr ) ;
if ( eventsRequest . outEvent ) {
auto event = castToObject < Event > ( * eventsRequest . outEvent ) ;
event - > setCmdType ( CL_COMMAND_UNMAP_MEM_OBJECT ) ;
}
}
2018-02-21 22:25:46 +08:00
return retVal ;
2018-02-01 20:40:30 +08:00
}
2018-02-09 05:59:03 +08:00
void * CommandQueue : : enqueueReadMemObjForMap ( TransferProperties & transferProperties , EventsRequest & eventsRequest , cl_int & errcodeRet ) {
2019-11-07 21:15:04 +08:00
void * basePtr = transferProperties . memObj - > getBasePtrForMap ( getDevice ( ) . getRootDeviceIndex ( ) ) ;
2019-04-08 20:49:35 +08:00
size_t mapPtrOffset = transferProperties . memObj - > calculateOffsetForMapping ( transferProperties . offset ) + transferProperties . mipPtrOffset ;
if ( transferProperties . memObj - > peekClMemObjType ( ) = = CL_MEM_OBJECT_BUFFER ) {
mapPtrOffset + = transferProperties . memObj - > getOffset ( ) ;
}
void * returnPtr = ptrOffset ( basePtr , mapPtrOffset ) ;
2018-02-18 05:26:28 +08:00
if ( ! transferProperties . memObj - > addMappedPtr ( returnPtr , transferProperties . memObj - > calculateMappedPtrLength ( transferProperties . size ) ,
2018-04-04 15:29:48 +08:00
transferProperties . mapFlags , transferProperties . size , transferProperties . offset , transferProperties . mipLevel ) ) {
2018-02-18 05:26:28 +08:00
errcodeRet = CL_INVALID_OPERATION ;
return nullptr ;
}
2018-02-09 05:59:03 +08:00
2018-02-21 22:25:46 +08:00
if ( transferProperties . memObj - > peekClMemObjType ( ) = = CL_MEM_OBJECT_BUFFER ) {
auto buffer = castToObject < Buffer > ( transferProperties . memObj ) ;
2019-04-08 20:49:35 +08:00
errcodeRet = enqueueReadBuffer ( buffer , transferProperties . blocking , transferProperties . offset [ 0 ] , transferProperties . size [ 0 ] ,
returnPtr , transferProperties . memObj - > getMapAllocation ( ) , eventsRequest . numEventsInWaitList ,
eventsRequest . eventWaitList , eventsRequest . outEvent ) ;
2018-02-09 05:59:03 +08:00
} else {
2018-04-04 15:29:48 +08:00
auto image = castToObjectOrAbort < Image > ( transferProperties . memObj ) ;
size_t readOrigin [ 4 ] = { transferProperties . offset [ 0 ] , transferProperties . offset [ 1 ] , transferProperties . offset [ 2 ] , 0 } ;
auto mipIdx = getMipLevelOriginIdx ( image - > peekClMemObjType ( ) ) ;
UNRECOVERABLE_IF ( mipIdx > = 4 ) ;
readOrigin [ mipIdx ] = transferProperties . mipLevel ;
errcodeRet = enqueueReadImage ( image , transferProperties . blocking , readOrigin , & transferProperties . size [ 0 ] ,
2019-04-16 00:17:28 +08:00
image - > getHostPtrRowPitch ( ) , image - > getHostPtrSlicePitch ( ) ,
returnPtr , transferProperties . memObj - > getMapAllocation ( ) , eventsRequest . numEventsInWaitList ,
2018-02-13 20:20:34 +08:00
eventsRequest . eventWaitList , eventsRequest . outEvent ) ;
2018-02-09 05:59:03 +08:00
}
2018-02-18 05:26:28 +08:00
if ( errcodeRet ! = CL_SUCCESS ) {
transferProperties . memObj - > removeMappedPtr ( returnPtr ) ;
return nullptr ;
}
if ( eventsRequest . outEvent ) {
auto event = castToObject < Event > ( * eventsRequest . outEvent ) ;
event - > setCmdType ( transferProperties . cmdType ) ;
2018-02-09 05:59:03 +08:00
}
return returnPtr ;
}
void * CommandQueue : : enqueueMapMemObject ( TransferProperties & transferProperties , EventsRequest & eventsRequest , cl_int & errcodeRet ) {
if ( transferProperties . memObj - > mappingOnCpuAllowed ( ) ) {
return cpuDataTransferHandler ( transferProperties , eventsRequest , errcodeRet ) ;
} else {
return enqueueReadMemObjForMap ( transferProperties , eventsRequest , errcodeRet ) ;
}
}
// Dispatches an unmap request to the CPU fast path when allowed, otherwise
// to the GPU write-back path.
cl_int CommandQueue::enqueueUnmapMemObject(TransferProperties &transferProperties, EventsRequest &eventsRequest) {
    cl_int retVal = CL_SUCCESS;
    if (transferProperties.memObj->mappingOnCpuAllowed()) {
        cpuDataTransferHandler(transferProperties, eventsRequest, retVal);
    } else {
        retVal = enqueueWriteMemObjForUnmap(transferProperties.memObj, transferProperties.ptr, eventsRequest);
    }
    return retVal;
}
void * CommandQueue : : enqueueMapBuffer ( Buffer * buffer , cl_bool blockingMap ,
cl_map_flags mapFlags , size_t offset ,
size_t size , cl_uint numEventsInWaitList ,
const cl_event * eventWaitList , cl_event * event ,
cl_int & errcodeRet ) {
2020-05-26 00:27:17 +08:00
TransferProperties transferProperties ( buffer , CL_COMMAND_MAP_BUFFER , mapFlags , blockingMap ! = CL_FALSE , & offset , & size , nullptr , false , getDevice ( ) . getRootDeviceIndex ( ) ) ;
2018-02-09 05:59:03 +08:00
EventsRequest eventsRequest ( numEventsInWaitList , eventWaitList , event ) ;
return enqueueMapMemObject ( transferProperties , eventsRequest , errcodeRet ) ;
}
void * CommandQueue : : enqueueMapImage ( Image * image , cl_bool blockingMap ,
cl_map_flags mapFlags , const size_t * origin ,
const size_t * region , size_t * imageRowPitch ,
size_t * imageSlicePitch ,
cl_uint numEventsInWaitList ,
const cl_event * eventWaitList , cl_event * event ,
cl_int & errcodeRet ) {
2018-02-18 05:26:28 +08:00
TransferProperties transferProperties ( image , CL_COMMAND_MAP_IMAGE , mapFlags , blockingMap ! = CL_FALSE ,
2020-05-26 00:27:17 +08:00
const_cast < size_t * > ( origin ) , const_cast < size_t * > ( region ) , nullptr , false , getDevice ( ) . getRootDeviceIndex ( ) ) ;
2018-02-09 05:59:03 +08:00
EventsRequest eventsRequest ( numEventsInWaitList , eventWaitList , event ) ;
2018-02-18 20:07:53 +08:00
if ( image - > isMemObjZeroCopy ( ) & & image - > mappingOnCpuAllowed ( ) ) {
GetInfoHelper : : set ( imageSlicePitch , image - > getImageDesc ( ) . image_slice_pitch ) ;
2018-04-11 23:40:42 +08:00
if ( image - > getImageDesc ( ) . image_type = = CL_MEM_OBJECT_IMAGE1D_ARRAY ) {
// There are differences in qPitch programming between Gen8 vs Gen9+ devices.
// For Gen8 qPitch is distance in rows while Gen9+ it is in pixels.
// Minimum value of qPitch is 4 and this causes slicePitch = 4*rowPitch on Gen8.
// To allow zero-copy we have to tell what is correct value rowPitch which should equal to slicePitch.
GetInfoHelper : : set ( imageRowPitch , image - > getImageDesc ( ) . image_slice_pitch ) ;
} else {
GetInfoHelper : : set ( imageRowPitch , image - > getImageDesc ( ) . image_row_pitch ) ;
}
2018-02-18 20:07:53 +08:00
} else {
2018-09-04 21:25:29 +08:00
GetInfoHelper : : set ( imageSlicePitch , image - > getHostPtrSlicePitch ( ) ) ;
GetInfoHelper : : set ( imageRowPitch , image - > getHostPtrRowPitch ( ) ) ;
2018-02-18 20:07:53 +08:00
}
2018-04-04 15:29:48 +08:00
if ( Image : : hasSlices ( image - > peekClMemObjType ( ) ) = = false ) {
GetInfoHelper : : set ( imageSlicePitch , static_cast < size_t > ( 0 ) ) ;
}
2018-09-04 21:25:29 +08:00
return enqueueMapMemObject ( transferProperties , eventsRequest , errcodeRet ) ;
2018-02-09 05:59:03 +08:00
}
// clEnqueueUnmapMemObject implementation: packages the request and forwards
// to the common unmap dispatcher.
cl_int CommandQueue::enqueueUnmapMemObject(MemObj *memObj, void *mappedPtr, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *event) {
    TransferProperties transferProperties(memObj, CL_COMMAND_UNMAP_MEM_OBJECT, 0, false, nullptr, nullptr, mappedPtr, false, getDevice().getRootDeviceIndex());
    EventsRequest eventsRequest(numEventsInWaitList, eventWaitList, event);

    return enqueueUnmapMemObject(transferProperties, eventsRequest);
}
void CommandQueue : : enqueueBlockedMapUnmapOperation ( const cl_event * eventWaitList ,
size_t numEventsInWaitlist ,
MapOperationType opType ,
MemObj * memObj ,
2018-02-18 05:26:28 +08:00
MemObjSizeArray & copySize ,
MemObjOffsetArray & copyOffset ,
bool readOnly ,
2018-02-09 05:59:03 +08:00
EventBuilder & externalEventBuilder ) {
EventBuilder internalEventBuilder ;
EventBuilder * eventBuilder ;
// check if event will be exposed externally
if ( externalEventBuilder . getEvent ( ) ) {
externalEventBuilder . getEvent ( ) - > incRefInternal ( ) ;
eventBuilder = & externalEventBuilder ;
} else {
// it will be an internal event
internalEventBuilder . create < VirtualEvent > ( this , context ) ;
eventBuilder = & internalEventBuilder ;
}
//store task data in event
2019-07-23 03:28:59 +08:00
auto cmd = std : : unique_ptr < Command > ( new CommandMapUnmap ( opType , * memObj , copySize , copyOffset , readOnly , * this ) ) ;
2018-02-09 05:59:03 +08:00
eventBuilder - > getEvent ( ) - > setCommand ( std : : move ( cmd ) ) ;
//bind output event with input events
eventBuilder - > addParentEvents ( ArrayRef < const cl_event > ( eventWaitList , numEventsInWaitlist ) ) ;
eventBuilder - > addParentEvent ( this - > virtualEvent ) ;
eventBuilder - > finalize ( ) ;
if ( this - > virtualEvent ) {
this - > virtualEvent - > decRefInternal ( ) ;
}
this - > virtualEvent = eventBuilder - > getEvent ( ) ;
}
2018-03-21 19:58:30 +08:00
bool CommandQueue : : setupDebugSurface ( Kernel * kernel ) {
2019-07-15 20:28:09 +08:00
auto debugSurface = getGpgpuCommandStreamReceiver ( ) . getDebugSurfaceAllocation ( ) ;
2018-03-21 19:58:30 +08:00
if ( ! debugSurface ) {
2019-07-15 20:28:09 +08:00
debugSurface = getGpgpuCommandStreamReceiver ( ) . allocateDebugSurface ( SipKernel : : maxDbgSurfaceSize ) ;
2018-03-21 19:58:30 +08:00
}
DEBUG_BREAK_IF ( ! kernel - > requiresSshForBuffers ( ) ) ;
auto surfaceState = ptrOffset ( reinterpret_cast < uintptr_t * > ( kernel - > getSurfaceStateHeap ( ) ) ,
kernel - > getKernelInfo ( ) . patchInfo . pAllocateSystemThreadSurface - > Offset ) ;
void * addressToPatch = reinterpret_cast < void * > ( debugSurface - > getGpuAddress ( ) ) ;
size_t sizeToPatch = debugSurface - > getUnderlyingBufferSize ( ) ;
2020-02-21 06:54:48 +08:00
Buffer : : setSurfaceState ( & device - > getDevice ( ) , surfaceState , sizeToPatch , addressToPatch , 0 , debugSurface , 0 , 0 ) ;
2018-03-21 19:58:30 +08:00
return true ;
}
2018-04-26 16:01:01 +08:00
IndirectHeap & CommandQueue : : getIndirectHeap ( IndirectHeap : : Type heapType , size_t minRequiredSize ) {
2019-07-15 20:28:09 +08:00
return getGpgpuCommandStreamReceiver ( ) . getIndirectHeap ( heapType , minRequiredSize ) ;
2018-04-26 16:01:01 +08:00
}
2018-04-05 21:12:28 +08:00
2018-04-26 16:01:01 +08:00
void CommandQueue : : allocateHeapMemory ( IndirectHeap : : Type heapType , size_t minRequiredSize , IndirectHeap * & indirectHeap ) {
2019-07-15 20:28:09 +08:00
getGpgpuCommandStreamReceiver ( ) . allocateHeapMemory ( heapType , minRequiredSize , indirectHeap ) ;
2018-04-26 16:01:01 +08:00
}
2018-04-05 21:12:28 +08:00
2018-04-26 16:01:01 +08:00
void CommandQueue : : releaseIndirectHeap ( IndirectHeap : : Type heapType ) {
2019-07-15 20:28:09 +08:00
getGpgpuCommandStreamReceiver ( ) . releaseIndirectHeap ( heapType ) ;
2018-04-05 21:12:28 +08:00
}
2018-04-26 16:01:01 +08:00
2019-05-23 19:51:32 +08:00
void CommandQueue : : obtainNewTimestampPacketNodes ( size_t numberOfNodes , TimestampPacketContainer & previousNodes , bool clearAllDependencies ) {
2019-07-15 20:28:09 +08:00
auto allocator = getGpgpuCommandStreamReceiver ( ) . getTimestampPacketAllocator ( ) ;
2018-08-28 20:11:25 +08:00
2018-10-03 05:37:30 +08:00
previousNodes . swapNodes ( * timestampPacketContainer ) ;
2019-05-23 19:51:32 +08:00
previousNodes . resolveDependencies ( clearAllDependencies ) ;
2018-10-03 05:37:30 +08:00
DEBUG_BREAK_IF ( timestampPacketContainer - > peekNodes ( ) . size ( ) > 0 ) ;
for ( size_t i = 0 ; i < numberOfNodes ; i + + ) {
timestampPacketContainer - > add ( allocator - > getTag ( ) ) ;
2018-09-10 20:24:11 +08:00
}
2018-08-28 20:11:25 +08:00
}
2019-02-21 23:59:10 +08:00
size_t CommandQueue : : estimateTimestampPacketNodesCount ( const MultiDispatchInfo & dispatchInfo ) const {
size_t nodesCount = dispatchInfo . size ( ) ;
auto mainKernel = dispatchInfo . peekMainKernel ( ) ;
2020-03-06 21:56:23 +08:00
if ( obtainTimestampPacketForCacheFlush ( mainKernel - > requiresCacheFlushCommand ( * this ) ) ) {
2019-02-21 23:59:10 +08:00
nodesCount + + ;
}
return nodesCount ;
}
2019-05-30 20:36:12 +08:00
bool CommandQueue : : bufferCpuCopyAllowed ( Buffer * buffer , cl_command_type commandType , cl_bool blocking , size_t size , void * ptr ,
cl_uint numEventsInWaitList , const cl_event * eventWaitList ) {
2020-02-25 20:39:23 +08:00
auto debugVariableSet = false ;
2019-05-30 20:36:12 +08:00
// Requested by debug variable or allowed by Buffer
2020-02-25 20:39:23 +08:00
if ( CL_COMMAND_READ_BUFFER = = commandType & & DebugManager . flags . DoCpuCopyOnReadBuffer . get ( ) ! = - 1 ) {
if ( DebugManager . flags . DoCpuCopyOnReadBuffer . get ( ) = = 0 ) {
return false ;
}
debugVariableSet = true ;
}
if ( CL_COMMAND_WRITE_BUFFER = = commandType & & DebugManager . flags . DoCpuCopyOnWriteBuffer . get ( ) ! = - 1 ) {
if ( DebugManager . flags . DoCpuCopyOnWriteBuffer . get ( ) = = 0 ) {
return false ;
}
debugVariableSet = true ;
}
2019-05-30 20:36:12 +08:00
2020-02-21 15:25:43 +08:00
//if we are blocked by user events, we can't service the call on CPU
if ( Event : : checkUserEventDependencies ( numEventsInWaitList , eventWaitList ) ) {
return false ;
}
2020-02-21 18:11:36 +08:00
//check if buffer is compatible
2020-06-24 00:08:30 +08:00
if ( ! buffer - > isReadWriteOnCpuAllowed ( device - > getRootDeviceIndex ( ) ) ) {
2020-02-21 16:48:51 +08:00
return false ;
}
2020-03-12 14:23:27 +08:00
if ( buffer - > getMemoryManager ( ) & & buffer - > getMemoryManager ( ) - > isCpuCopyRequired ( ptr ) ) {
return true ;
}
2020-02-21 16:48:51 +08:00
if ( debugVariableSet ) {
2020-02-21 15:25:43 +08:00
return true ;
}
2020-02-21 16:48:51 +08:00
//non blocking transfers are not expected to be serviced by CPU
//we do not want to artifically stall the pipeline to allow CPU access
if ( blocking = = CL_FALSE ) {
return false ;
}
2020-02-21 18:11:36 +08:00
//check if it is beneficial to do transfer on CPU
if ( ! buffer - > isReadWriteOnCpuPreffered ( ptr , size ) ) {
2020-02-21 15:25:43 +08:00
return false ;
}
//make sure that event wait list is empty
if ( numEventsInWaitList = = 0 ) {
return true ;
}
return false ;
2019-05-30 20:36:12 +08:00
}
2019-06-05 15:35:58 +08:00
2019-06-13 15:56:06 +08:00
bool CommandQueue : : queueDependenciesClearRequired ( ) const {
return isOOQEnabled ( ) | | DebugManager . flags . OmitTimestampPacketDependencies . get ( ) ;
}
2019-06-18 17:02:47 +08:00
2019-09-02 16:16:44 +08:00
bool CommandQueue : : blitEnqueueAllowed ( cl_command_type cmdType ) const {
2020-02-12 18:27:28 +08:00
bool blitAllowed = device - > getHardwareInfo ( ) . capabilityTable . blitterOperationsSupported ;
2019-07-17 20:14:58 +08:00
if ( DebugManager . flags . EnableBlitterOperationsForReadWriteBuffers . get ( ) ! = - 1 ) {
2019-10-25 16:04:36 +08:00
blitAllowed & = ! ! DebugManager . flags . EnableBlitterOperationsForReadWriteBuffers . get ( ) ;
2019-07-17 20:14:58 +08:00
}
2019-06-18 17:02:47 +08:00
2019-11-19 18:54:11 +08:00
bool commandAllowed = ( CL_COMMAND_READ_BUFFER = = cmdType ) | | ( CL_COMMAND_WRITE_BUFFER = = cmdType ) | |
2020-03-30 21:20:55 +08:00
( CL_COMMAND_COPY_BUFFER = = cmdType ) | | ( CL_COMMAND_READ_BUFFER_RECT = = cmdType ) | |
2020-04-20 20:21:48 +08:00
( CL_COMMAND_WRITE_BUFFER_RECT = = cmdType ) | | ( CL_COMMAND_COPY_BUFFER_RECT = = cmdType ) | |
( CL_COMMAND_SVM_MEMCPY = = cmdType ) ;
2019-06-18 17:02:47 +08:00
2019-09-02 16:16:44 +08:00
return commandAllowed & & blitAllowed ;
2019-06-18 17:02:47 +08:00
}
2019-07-23 02:55:09 +08:00
bool CommandQueue : : isBlockedCommandStreamRequired ( uint32_t commandType , const EventsRequest & eventsRequest , bool blockedQueue ) const {
if ( ! blockedQueue ) {
return false ;
}
if ( isCacheFlushCommand ( commandType ) | | ! isCommandWithoutKernel ( commandType ) ) {
return true ;
}
if ( ( CL_COMMAND_BARRIER = = commandType | | CL_COMMAND_MARKER = = commandType ) & &
getGpgpuCommandStreamReceiver ( ) . peekTimestampPacketWriteEnabled ( ) ) {
for ( size_t i = 0 ; i < eventsRequest . numEventsInWaitList ; i + + ) {
auto waitlistEvent = castToObjectOrAbort < Event > ( eventsRequest . eventWaitList [ i ] ) ;
if ( waitlistEvent - > getTimestampPacketNodes ( ) ) {
return true ;
}
}
}
return false ;
}
2019-08-02 21:56:28 +08:00
2020-05-27 23:12:32 +08:00
void CommandQueue : : storeProperties ( const cl_queue_properties * properties ) {
if ( properties ) {
for ( size_t i = 0 ; properties [ i ] ! = 0 ; i + = 2 ) {
propertiesVector . push_back ( properties [ i ] ) ;
propertiesVector . push_back ( properties [ i + 1 ] ) ;
}
propertiesVector . push_back ( 0 ) ;
}
}
2019-08-02 21:56:28 +08:00
void CommandQueue : : aubCaptureHook ( bool & blocking , bool & clearAllDependencies , const MultiDispatchInfo & multiDispatchInfo ) {
if ( DebugManager . flags . AUBDumpSubCaptureMode . get ( ) ) {
auto status = getGpgpuCommandStreamReceiver ( ) . checkAndActivateAubSubCapture ( multiDispatchInfo ) ;
if ( ! status . isActive ) {
// make each enqueue blocking when subcapture is not active to split batch buffer
blocking = true ;
} else if ( ! status . wasActiveInPreviousEnqueue ) {
// omit timestamp packet dependencies dependencies upon subcapture activation
clearAllDependencies = true ;
}
}
if ( getGpgpuCommandStreamReceiver ( ) . getType ( ) > CommandStreamReceiverType : : CSR_HW ) {
for ( auto & dispatchInfo : multiDispatchInfo ) {
auto kernelName = dispatchInfo . getKernel ( ) - > getKernelInfo ( ) . name ;
getGpgpuCommandStreamReceiver ( ) . addAubComment ( kernelName . c_str ( ) ) ;
}
}
}
2019-03-26 18:59:46 +08:00
} // namespace NEO