/*
 * Copyright (C) 2018-2021 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */
2020-02-22 22:50:57 +01:00
# include "opencl/source/command_queue/command_queue.h"
2019-02-27 11:39:32 +01:00
2020-02-23 22:44:01 +01:00
# include "shared/source/command_stream/command_stream_receiver.h"
# include "shared/source/helpers/aligned_memory.h"
# include "shared/source/helpers/array_count.h"
# include "shared/source/helpers/engine_node_helper.h"
# include "shared/source/helpers/get_info.h"
# include "shared/source/helpers/ptr_math.h"
# include "shared/source/helpers/string.h"
# include "shared/source/helpers/timestamp_packet.h"
# include "shared/source/memory_manager/internal_allocation_storage.h"
2021-10-13 13:30:45 +00:00
# include "shared/source/os_interface/hw_info_config.h"
2020-02-23 22:44:01 +01:00
# include "shared/source/os_interface/os_context.h"
# include "shared/source/utilities/api_intercept.h"
# include "shared/source/utilities/tag_allocator.h"
2020-02-24 10:22:30 +01:00
2020-02-22 22:50:57 +01:00
# include "opencl/source/built_ins/builtins_dispatch_builder.h"
2020-03-20 11:15:25 +01:00
# include "opencl/source/cl_device/cl_device.h"
2020-02-22 22:50:57 +01:00
# include "opencl/source/context/context.h"
# include "opencl/source/device_queue/device_queue.h"
# include "opencl/source/event/event_builder.h"
# include "opencl/source/event/user_event.h"
# include "opencl/source/gtpin/gtpin_notify.h"
2021-02-12 12:25:51 +00:00
# include "opencl/source/helpers/cl_hw_helper.h"
2020-02-22 22:50:57 +01:00
# include "opencl/source/helpers/convert_color.h"
# include "opencl/source/helpers/hardware_commands_helper.h"
# include "opencl/source/helpers/mipmap.h"
# include "opencl/source/helpers/queue_helpers.h"
# include "opencl/source/mem_obj/buffer.h"
# include "opencl/source/mem_obj/image.h"
2020-08-26 15:44:12 +02:00
# include "opencl/source/program/printf_handler.h"
2019-02-27 11:39:32 +01:00
# include "CL/cl_ext.h"
2020-11-16 11:43:03 +00:00
# include <limits>
2017-12-21 00:45:38 +01:00
# include <map>
2019-03-26 11:59:46 +01:00
namespace NEO {
2017-12-21 00:45:38 +01:00
// Global table of create functions, indexed by render core family.
// Each supported HW core registers its CommandQueue factory at its GFXCORE index.
CommandQueueCreateFunc commandQueueFactory [ IGFX_MAX_CORE ] = { } ;
// Factory entry point: looks up the create function registered for the
// device's render core family and delegates construction to it.
CommandQueue *CommandQueue::create(Context *context,
                                   ClDevice *device,
                                   const cl_queue_properties *properties,
                                   bool internalUsage,
                                   cl_int &retVal) {
    retVal = CL_SUCCESS;

    auto createFunc = commandQueueFactory[device->getRenderCoreFamily()];
    DEBUG_BREAK_IF(nullptr == createFunc);
    return createFunc(context, device, properties, internalUsage);
}
2021-07-02 11:34:07 +00:00
// Constructs a queue bound to the given context/device. Selects the default
// GPGPU engine and, when the platform allows it, caches a BCS (copy) engine.
CommandQueue::CommandQueue(Context *context, ClDevice *device, const cl_queue_properties *properties, bool internalUsage)
    : context(context), device(device) {
    if (context) {
        context->incRefInternal();
    }

    commandQueueProperties = getCmdQueueProperties<cl_command_queue_properties>(properties);
    flushStamp.reset(new FlushStampTracker(true));

    if (device) {
        auto &hwInfo = device->getHardwareInfo();
        auto &hwHelper = HwHelper::get(hwInfo.platform.eRenderCoreFamily);

        gpgpuEngine = &device->getDefaultEngine();

        UNRECOVERABLE_IF(gpgpuEngine->getEngineType() >= aub_stream::EngineType::NUM_ENGINES);

        // Blitter usable only when HW supports it AND this (sub)device exposes ENGINE_BCS.
        bool bcsAllowed = hwInfo.capabilityTable.blitterOperationsSupported &&
                          hwHelper.isSubDeviceEngineSupported(hwInfo, device->getDeviceBitfield(), aub_stream::EngineType::ENGINE_BCS);

        // Timestamp packet containers are needed either for BCS usage or when the
        // GPGPU CSR has timestamp-packet writes enabled.
        if (bcsAllowed || gpgpuEngine->commandStreamReceiver->peekTimestampPacketWriteEnabled()) {
            timestampPacketContainer = std::make_unique<TimestampPacketContainer>();
            deferredTimestampPackets = std::make_unique<TimestampPacketContainer>();
        }
        if (bcsAllowed) {
            // Ask the device's copy-engine selector for a concrete BCS engine type
            // and cache the engine (may be null if tryGetEngine fails) at its index.
            auto &neoDevice = device->getNearestGenericSubDevice(0)->getDevice();
            auto &selectorCopyEngine = neoDevice.getSelectorCopyEngine();
            auto bcsEngineType = EngineHelpers::getBcsEngineType(hwInfo, device->getDeviceBitfield(), selectorCopyEngine, internalUsage);
            bcsEngines[EngineHelpers::getBcsIndex(bcsEngineType)] = neoDevice.tryGetEngine(bcsEngineType, EngineUsage::Regular);
        }
    }

    storeProperties(properties);
    processProperties(properties);
}
// Tears down the queue: releases the virtual event, recycles the command
// stream buffer into the CSR's reuse pool, releases the main copy engine
// back to the selector, and drops the context reference for normal queues.
CommandQueue::~CommandQueue() {
    if (virtualEvent) {
        UNRECOVERABLE_IF(this->virtualEvent->getCommandQueue() != this && this->virtualEvent->getCommandQueue() != nullptr);
        virtualEvent->decRefInternal();
    }

    if (device) {
        auto storageForAllocation = gpgpuEngine->commandStreamReceiver->getInternalAllocationStorage();

        if (commandStream) {
            // Hand the graphics allocation back for reuse instead of freeing it.
            storageForAllocation->storeAllocation(std::unique_ptr<GraphicsAllocation>(commandStream->getGraphicsAllocation()), REUSABLE_ALLOCATION);
        }
        delete commandStream;

        if (this->perfCountersEnabled) {
            device->getPerformanceCounters()->shutdown();
        }

        // Release the primary BCS engine type back to the copy-engine selector.
        if (auto mainBcs = bcsEngines[0]; mainBcs != nullptr) {
            auto &selectorCopyEngine = device->getNearestGenericSubDevice(0)->getSelectorCopyEngine();
            EngineHelpers::releaseBcsEngineType(mainBcs->getEngineType(), selectorCopyEngine);
        }
    }

    timestampPacketContainer.reset();

    //for normal queue, decrement ref count on context
    //special queue is owned by context so ref count doesn't have to be decremented
    if (context && !isSpecialCommandQueue) {
        context->decRefInternal();
    }
    gtpinRemoveCommandQueue(this);
}
2019-07-15 14:28:09 +02:00
// Returns the compute (GPGPU) command stream receiver of the default engine.
CommandStreamReceiver &CommandQueue::getGpgpuCommandStreamReceiver() const {
    auto &defaultEngine = *gpgpuEngine;
    return *defaultEngine.commandStreamReceiver;
}
2021-09-21 13:22:36 +00:00
// Returns the CSR of the requested copy engine, or nullptr if that engine
// was never assigned to this queue.
CommandStreamReceiver *CommandQueue::getBcsCommandStreamReceiver(aub_stream::EngineType bcsEngineType) const {
    const auto engineIndex = EngineHelpers::getBcsIndex(bcsEngineType);
    const EngineControl *engineControl = this->bcsEngines[engineIndex];
    return (engineControl != nullptr) ? engineControl->commandStreamReceiver : nullptr;
}
2021-09-24 16:32:20 +00:00
// Returns the CSR of the first assigned copy engine, or nullptr when the
// queue has no copy engines at all.
CommandStreamReceiver *CommandQueue::getAnyBcs() const {
    for (const auto *engineControl : this->bcsEngines) {
        if (engineControl == nullptr) {
            continue;
        }
        return engineControl->commandStreamReceiver;
    }
    return nullptr;
}
2021-09-24 16:32:20 +00:00
// Aux-translation blits have no specific engine preference; any BCS works.
CommandStreamReceiver *CommandQueue::getBcsForAuxTranslation() const {
    return this->getAnyBcs();
}
2021-09-06 17:04:14 +00:00
// Chooses between the blitter and the GPGPU CSR for a builtin operation:
// the blitter is used when allowed AND (preferred OR the queue is copy-only).
CommandStreamReceiver &CommandQueue::selectCsrForBuiltinOperation(const CsrSelectionArgs &args) const {
    const bool allowed = blitEnqueueAllowed(args);
    const bool preferred = blitEnqueuePreferred(args);
    const bool required = isCopyOnly;

    if (allowed && (preferred || required)) {
        return *getAnyBcs();
    }
    return getGpgpuCommandStreamReceiver();
}
2020-01-14 14:32:11 +01:00
// Unwraps the underlying NEO device from the OpenCL device wrapper.
Device &CommandQueue::getDevice() const noexcept {
    auto &neoDevice = device->getDevice();
    return neoDevice;
}
2017-12-21 00:45:38 +01:00
uint32_t CommandQueue : : getHwTag ( ) const {
uint32_t tag = * getHwTagAddress ( ) ;
return tag ;
}
volatile uint32_t * CommandQueue : : getHwTagAddress ( ) const {
2019-07-15 14:28:09 +02:00
return getGpgpuCommandStreamReceiver ( ) . getTagAddress ( ) ;
2017-12-21 00:45:38 +01:00
}
2021-08-26 16:38:39 +00:00
bool CommandQueue : : isCompleted ( uint32_t gpgpuTaskCount , CopyEngineState bcsState ) const {
2020-06-26 11:21:07 +02:00
uint32_t gpgpuHwTag = getHwTag ( ) ;
DEBUG_BREAK_IF ( gpgpuHwTag = = CompletionStamp : : notReady ) ;
if ( gpgpuHwTag > = gpgpuTaskCount ) {
2021-08-26 16:38:39 +00:00
if ( bcsState . isValid ( ) ) {
2021-09-17 17:09:06 +00:00
return * getBcsCommandStreamReceiver ( bcsState . engineType ) - > getTagAddress ( ) > = peekBcsTaskCount ( bcsState . engineType ) ;
2020-06-26 11:21:07 +02:00
}
return true ;
}
return false ;
2017-12-21 00:45:38 +01:00
}
2021-09-09 16:57:09 +00:00
// Blocks until the GPGPU engine reaches gpgpuTaskCountToWait and every listed
// copy engine reaches its recorded task count, then releases the temporary
// allocations tied to those task counts.
void CommandQueue::waitUntilComplete(uint32_t gpgpuTaskCountToWait, Range<CopyEngineState> copyEnginesToWait, FlushStamp flushStampToWait, bool useQuickKmdSleep) {
    WAIT_ENTER()

    DBG_LOG(LogTaskCounts, __FUNCTION__, "Waiting for taskCount:", gpgpuTaskCountToWait);
    DBG_LOG(LogTaskCounts, __FUNCTION__, "Line:", __LINE__, "Current taskCount:", getHwTag());

    // LOW throttle requests power-saving behavior from the KMD-notify wait.
    bool forcePowerSavingMode = this->throttle == QueueThrottle::LOW;

    getGpgpuCommandStreamReceiver().waitForTaskCountWithKmdNotifyFallback(gpgpuTaskCountToWait,
                                                                         flushStampToWait,
                                                                         useQuickKmdSleep,
                                                                         forcePowerSavingMode);
    DEBUG_BREAK_IF(getHwTag() < gpgpuTaskCountToWait);

    if (gtpinIsGTPinInitialized()) {
        gtpinNotifyTaskCompletion(gpgpuTaskCountToWait);
    }

    // Wait on each copy engine involved, then free its temporary allocations.
    for (const CopyEngineState &copyEngine : copyEnginesToWait) {
        auto bcsCsr = getBcsCommandStreamReceiver(copyEngine.engineType);
        bcsCsr->waitForTaskCountWithKmdNotifyFallback(copyEngine.taskCount, 0, false, false);
        bcsCsr->waitForTaskCountAndCleanTemporaryAllocationList(copyEngine.taskCount);
    }

    getGpgpuCommandStreamReceiver().waitForTaskCountAndCleanTemporaryAllocationList(gpgpuTaskCountToWait);

    WAIT_LEAVE()
}
// Returns true while the queue is gated behind an unfinished virtual event.
// Once the virtual event has reached submitted/completed state, queue state
// (taskCount/taskLevel/flushStamp) is synced from it and the event released.
bool CommandQueue::isQueueBlocked() {
    TakeOwnershipWrapper<CommandQueue> takeOwnershipWrapper(*this);
    //check if we have user event and if so, if it is in blocked state.
    if (this->virtualEvent) {
        auto executionStatus = this->virtualEvent->peekExecutionStatus();
        if (executionStatus <= CL_SUBMITTED) {
            UNRECOVERABLE_IF(this->virtualEvent == nullptr);

            if (this->virtualEvent->isStatusCompletedByTermination(executionStatus) == false) {
                // Normal completion path: adopt the event's bookkeeping.
                taskCount = this->virtualEvent->peekTaskCount();
                flushStamp->setStamp(this->virtualEvent->flushStamp->peekStamp());
                taskLevel = this->virtualEvent->taskLevel;
                // If this isn't an OOQ, update the taskLevel for the queue
                if (!isOOQEnabled()) {
                    taskLevel++;
                }
            } else {
                //at this point we may reset queue TaskCount, since all command previous to this were aborted
                taskCount = 0;
                flushStamp->setStamp(0);
                taskLevel = getGpgpuCommandStreamReceiver().peekTaskLevel();
            }

            FileLoggerInstance().log(DebugManager.flags.EventsDebugEnable.get(), "isQueueBlocked taskLevel change from", taskLevel, "to new from virtualEvent", this->virtualEvent, "new tasklevel", this->virtualEvent->taskLevel.load());

            //close the access to virtual event, driver added only 1 ref count.
            this->virtualEvent->decRefInternal();
            this->virtualEvent = nullptr;
            return false;
        }
        return true;
    }
    return false;
}
// clGetCommandQueueInfo backend; delegates to the shared query helper.
cl_int CommandQueue::getCommandQueueInfo(cl_command_queue_info paramName,
                                         size_t paramValueSize,
                                         void *paramValue,
                                         size_t *paramValueSizeRet) {
    return getQueueInfo<CommandQueue>(this, paramName, paramValueSize,
                                      paramValue, paramValueSizeRet);
}
uint32_t CommandQueue : : getTaskLevelFromWaitList ( uint32_t taskLevel ,
cl_uint numEventsInWaitList ,
const cl_event * eventWaitList ) {
for ( auto iEvent = 0u ; iEvent < numEventsInWaitList ; + + iEvent ) {
auto pEvent = ( Event * ) ( eventWaitList [ iEvent ] ) ;
uint32_t eventTaskLevel = pEvent - > taskLevel ;
taskLevel = std : : max ( taskLevel , eventTaskLevel ) ;
}
return taskLevel ;
}
// Returns the queue's command stream, growing it (via the CSR's buffer pool)
// so at least minRequiredSize bytes plus epilogue headroom are available.
LinearStream &CommandQueue::getCS(size_t minRequiredSize) {
    DEBUG_BREAK_IF(nullptr == device);

    if (commandStream == nullptr) {
        commandStream = new LinearStream(nullptr);
    }

    // Extra headroom so queue epilogue commands always fit, plus overfetch pad.
    constexpr static auto additionalAllocationSize = CSRequirements::minCommandQueueCommandStreamSize + CSRequirements::csOverfetchSize;
    minRequiredSize += CSRequirements::minCommandQueueCommandStreamSize;
    getGpgpuCommandStreamReceiver().ensureCommandBufferAllocation(*commandStream, minRequiredSize, additionalAllocationSize);
    return *commandStream;
}
// Acquires sharing handlers for the given objects, then enqueues a marker that
// carries the wait list; the output event is retyped to the caller's cmdType.
cl_int CommandQueue::enqueueAcquireSharedObjects(cl_uint numObjects, const cl_mem *memObjects, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *oclEvent, cl_uint cmdType) {
    // Array pointer and count must agree: both empty or both populated.
    if ((memObjects == nullptr) != (numObjects == 0)) {
        return CL_INVALID_VALUE;
    }

    for (cl_uint idx = 0u; idx < numObjects; ++idx) {
        auto memObject = castToObject<MemObj>(memObjects[idx]);
        if ((memObject == nullptr) || (memObject->peekSharingHandler() == nullptr)) {
            return CL_INVALID_MEM_OBJECT;
        }

        const int acquireResult = memObject->peekSharingHandler()->acquire(memObject, getDevice().getRootDeviceIndex());
        if (acquireResult != CL_SUCCESS) {
            return acquireResult;
        }
        memObject->acquireCount++;
    }

    const auto status = enqueueMarkerWithWaitList(numEventsInWaitList, eventWaitList, oclEvent);
    if (oclEvent) {
        castToObjectOrAbort<Event>(*oclEvent)->setCmdType(cmdType);
    }
    return status;
}
// Releases sharing handlers for the given objects, then enqueues a marker that
// carries the wait list; the output event is retyped to the caller's cmdType.
cl_int CommandQueue::enqueueReleaseSharedObjects(cl_uint numObjects, const cl_mem *memObjects, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *oclEvent, cl_uint cmdType) {
    // Array pointer and count must agree: both empty or both populated.
    if ((memObjects == nullptr) != (numObjects == 0)) {
        return CL_INVALID_VALUE;
    }

    for (cl_uint idx = 0u; idx < numObjects; ++idx) {
        auto memObject = castToObject<MemObj>(memObjects[idx]);
        if ((memObject == nullptr) || (memObject->peekSharingHandler() == nullptr)) {
            return CL_INVALID_MEM_OBJECT;
        }

        memObject->peekSharingHandler()->release(memObject, getDevice().getRootDeviceIndex());
        DEBUG_BREAK_IF(memObject->acquireCount <= 0);
        memObject->acquireCount--;
    }

    const auto status = enqueueMarkerWithWaitList(numEventsInWaitList, eventWaitList, oclEvent);
    if (oclEvent) {
        castToObjectOrAbort<Event>(*oclEvent)->setCmdType(cmdType);
    }
    return status;
}
2020-08-26 11:26:44 +02:00
// Propagates a CSR completion stamp into the queue's tracked state and, when
// an output event was requested, into that event as well.
void CommandQueue::updateFromCompletionStamp(const CompletionStamp &completionStamp, Event *outEvent) {
    DEBUG_BREAK_IF(this->taskLevel > completionStamp.taskLevel);
    DEBUG_BREAK_IF(this->taskCount > completionStamp.taskCount);

    // notReady marks a deferred/blocked submission; keep the previous taskCount.
    if (completionStamp.taskCount != CompletionStamp::notReady) {
        taskCount = completionStamp.taskCount;
    }

    flushStamp->setStamp(completionStamp.flushStamp);
    this->taskLevel = completionStamp.taskLevel;

    if (outEvent) {
        outEvent->updateCompletionStamp(completionStamp.taskCount, outEvent->peekBcsTaskCountFromCommandQueue(), completionStamp.taskLevel, completionStamp.flushStamp);
        FileLoggerInstance().log(DebugManager.flags.EventsDebugEnable.get(), "updateCompletionStamp Event", outEvent, "taskLevel", outEvent->taskLevel.load());
    }
}
2019-12-19 12:58:02 +01:00
bool CommandQueue : : setPerfCountersEnabled ( ) {
2017-12-21 00:45:38 +01:00
DEBUG_BREAK_IF ( device = = nullptr ) ;
2019-12-19 12:58:02 +01:00
2017-12-21 00:45:38 +01:00
auto perfCounters = device - > getPerformanceCounters ( ) ;
2020-01-13 13:15:03 +01:00
bool isCcsEngine = EngineHelpers : : isCcs ( getGpgpuEngine ( ) . osContext - > getEngineType ( ) ) ;
2019-05-20 11:19:27 +02:00
2019-12-19 12:58:02 +01:00
perfCountersEnabled = perfCounters - > enable ( isCcsEngine ) ;
if ( ! perfCountersEnabled ) {
2019-05-20 11:19:27 +02:00
perfCounters - > shutdown ( ) ;
2017-12-21 00:45:38 +01:00
}
2019-05-20 11:19:27 +02:00
2019-12-19 12:58:02 +01:00
return perfCountersEnabled ;
}
2017-12-21 00:45:38 +01:00
// Performance counters live on the device, not the queue.
PerformanceCounters *CommandQueue::getPerfCounters() {
    return device->getPerformanceCounters();
}
2018-02-08 22:59:03 +01:00
// Writes a previously mapped (non-read-only) region back to the device on
// unmap, via enqueueWriteBuffer/enqueueWriteImage; read-only maps only get a
// marker. On success the mapped pointer is unregistered and the out event is
// retyped to CL_COMMAND_UNMAP_MEM_OBJECT.
cl_int CommandQueue::enqueueWriteMemObjForUnmap(MemObj *memObj, void *mappedPtr, EventsRequest &eventsRequest) {
    cl_int retVal = CL_SUCCESS;

    MapInfo unmapInfo;
    if (!memObj->findMappedPtr(mappedPtr, unmapInfo)) {
        return CL_INVALID_VALUE;
    }

    if (!unmapInfo.readOnly) {
        // Hoisted: the original recomputed getMapAllocation(rootDeviceIndex) four times.
        auto mapAllocation = memObj->getMapAllocation(getDevice().getRootDeviceIndex());
        // Host copy may have been modified; mark the allocation writable again.
        mapAllocation->setAubWritable(true, GraphicsAllocation::defaultBank);
        mapAllocation->setTbxWritable(true, GraphicsAllocation::defaultBank);

        if (memObj->peekClMemObjType() == CL_MEM_OBJECT_BUFFER) {
            auto buffer = castToObject<Buffer>(memObj);
            retVal = enqueueWriteBuffer(buffer, CL_FALSE, unmapInfo.offset[0], unmapInfo.size[0], mappedPtr, mapAllocation,
                                        eventsRequest.numEventsInWaitList, eventsRequest.eventWaitList, eventsRequest.outEvent);
        } else {
            auto image = castToObjectOrAbort<Image>(memObj);
            size_t writeOrigin[4] = {unmapInfo.offset[0], unmapInfo.offset[1], unmapInfo.offset[2], 0};
            // Mip level is encoded in the origin component selected by the image type.
            auto mipIdx = getMipLevelOriginIdx(image->peekClMemObjType());
            UNRECOVERABLE_IF(mipIdx >= 4);
            writeOrigin[mipIdx] = unmapInfo.mipLevel;
            retVal = enqueueWriteImage(image, CL_FALSE, writeOrigin, &unmapInfo.size[0],
                                       image->getHostPtrRowPitch(), image->getHostPtrSlicePitch(), mappedPtr, mapAllocation,
                                       eventsRequest.numEventsInWaitList, eventsRequest.eventWaitList, eventsRequest.outEvent);
        }
    } else {
        // Nothing to write back; the marker just carries the wait-list dependency.
        retVal = enqueueMarkerWithWaitList(eventsRequest.numEventsInWaitList, eventsRequest.eventWaitList, eventsRequest.outEvent);
    }

    if (retVal == CL_SUCCESS) {
        memObj->removeMappedPtr(mappedPtr);
        if (eventsRequest.outEvent) {
            auto event = castToObject<Event>(*eventsRequest.outEvent);
            event->setCmdType(CL_COMMAND_UNMAP_MEM_OBJECT);
        }
    }
    return retVal;
}
2018-02-08 22:59:03 +01:00
void * CommandQueue : : enqueueReadMemObjForMap ( TransferProperties & transferProperties , EventsRequest & eventsRequest , cl_int & errcodeRet ) {
2019-11-07 14:15:04 +01:00
void * basePtr = transferProperties . memObj - > getBasePtrForMap ( getDevice ( ) . getRootDeviceIndex ( ) ) ;
2019-04-08 14:49:35 +02:00
size_t mapPtrOffset = transferProperties . memObj - > calculateOffsetForMapping ( transferProperties . offset ) + transferProperties . mipPtrOffset ;
if ( transferProperties . memObj - > peekClMemObjType ( ) = = CL_MEM_OBJECT_BUFFER ) {
mapPtrOffset + = transferProperties . memObj - > getOffset ( ) ;
}
void * returnPtr = ptrOffset ( basePtr , mapPtrOffset ) ;
2018-02-17 22:26:28 +01:00
if ( ! transferProperties . memObj - > addMappedPtr ( returnPtr , transferProperties . memObj - > calculateMappedPtrLength ( transferProperties . size ) ,
2018-04-04 09:29:48 +02:00
transferProperties . mapFlags , transferProperties . size , transferProperties . offset , transferProperties . mipLevel ) ) {
2018-02-17 22:26:28 +01:00
errcodeRet = CL_INVALID_OPERATION ;
return nullptr ;
}
2018-02-08 22:59:03 +01:00
2021-10-13 14:29:02 +00:00
if ( transferProperties . mapFlags = = CL_MAP_WRITE_INVALIDATE_REGION ) {
errcodeRet = enqueueMarkerWithWaitList ( eventsRequest . numEventsInWaitList , eventsRequest . eventWaitList , eventsRequest . outEvent ) ;
2018-02-08 22:59:03 +01:00
} else {
2021-10-13 14:29:02 +00:00
if ( transferProperties . memObj - > peekClMemObjType ( ) = = CL_MEM_OBJECT_BUFFER ) {
auto buffer = castToObject < Buffer > ( transferProperties . memObj ) ;
errcodeRet = enqueueReadBuffer ( buffer , transferProperties . blocking , transferProperties . offset [ 0 ] , transferProperties . size [ 0 ] ,
returnPtr , transferProperties . memObj - > getMapAllocation ( getDevice ( ) . getRootDeviceIndex ( ) ) , eventsRequest . numEventsInWaitList ,
eventsRequest . eventWaitList , eventsRequest . outEvent ) ;
} else {
auto image = castToObjectOrAbort < Image > ( transferProperties . memObj ) ;
size_t readOrigin [ 4 ] = { transferProperties . offset [ 0 ] , transferProperties . offset [ 1 ] , transferProperties . offset [ 2 ] , 0 } ;
auto mipIdx = getMipLevelOriginIdx ( image - > peekClMemObjType ( ) ) ;
UNRECOVERABLE_IF ( mipIdx > = 4 ) ;
readOrigin [ mipIdx ] = transferProperties . mipLevel ;
errcodeRet = enqueueReadImage ( image , transferProperties . blocking , readOrigin , & transferProperties . size [ 0 ] ,
image - > getHostPtrRowPitch ( ) , image - > getHostPtrSlicePitch ( ) ,
returnPtr , transferProperties . memObj - > getMapAllocation ( getDevice ( ) . getRootDeviceIndex ( ) ) , eventsRequest . numEventsInWaitList ,
eventsRequest . eventWaitList , eventsRequest . outEvent ) ;
}
2018-02-08 22:59:03 +01:00
}
2018-02-17 22:26:28 +01:00
if ( errcodeRet ! = CL_SUCCESS ) {
transferProperties . memObj - > removeMappedPtr ( returnPtr ) ;
return nullptr ;
}
if ( eventsRequest . outEvent ) {
auto event = castToObject < Event > ( * eventsRequest . outEvent ) ;
event - > setCmdType ( transferProperties . cmdType ) ;
2018-02-08 22:59:03 +01:00
}
return returnPtr ;
}
void * CommandQueue : : enqueueMapMemObject ( TransferProperties & transferProperties , EventsRequest & eventsRequest , cl_int & errcodeRet ) {
if ( transferProperties . memObj - > mappingOnCpuAllowed ( ) ) {
return cpuDataTransferHandler ( transferProperties , eventsRequest , errcodeRet ) ;
} else {
return enqueueReadMemObjForMap ( transferProperties , eventsRequest , errcodeRet ) ;
}
}
// Routes an unmap request to the CPU fast path when allowed, otherwise writes
// the mapped region back through the GPU path.
cl_int CommandQueue::enqueueUnmapMemObject(TransferProperties &transferProperties, EventsRequest &eventsRequest) {
    cl_int retVal = CL_SUCCESS;
    if (!transferProperties.memObj->mappingOnCpuAllowed()) {
        retVal = enqueueWriteMemObjForUnmap(transferProperties.memObj, transferProperties.ptr, eventsRequest);
    } else {
        cpuDataTransferHandler(transferProperties, eventsRequest, retVal);
    }
    return retVal;
}
void * CommandQueue : : enqueueMapBuffer ( Buffer * buffer , cl_bool blockingMap ,
cl_map_flags mapFlags , size_t offset ,
size_t size , cl_uint numEventsInWaitList ,
const cl_event * eventWaitList , cl_event * event ,
cl_int & errcodeRet ) {
2020-05-25 18:27:17 +02:00
TransferProperties transferProperties ( buffer , CL_COMMAND_MAP_BUFFER , mapFlags , blockingMap ! = CL_FALSE , & offset , & size , nullptr , false , getDevice ( ) . getRootDeviceIndex ( ) ) ;
2018-02-08 22:59:03 +01:00
EventsRequest eventsRequest ( numEventsInWaitList , eventWaitList , event ) ;
return enqueueMapMemObject ( transferProperties , eventsRequest , errcodeRet ) ;
}
void * CommandQueue : : enqueueMapImage ( Image * image , cl_bool blockingMap ,
cl_map_flags mapFlags , const size_t * origin ,
const size_t * region , size_t * imageRowPitch ,
size_t * imageSlicePitch ,
cl_uint numEventsInWaitList ,
const cl_event * eventWaitList , cl_event * event ,
cl_int & errcodeRet ) {
2018-02-17 22:26:28 +01:00
TransferProperties transferProperties ( image , CL_COMMAND_MAP_IMAGE , mapFlags , blockingMap ! = CL_FALSE ,
2020-05-25 18:27:17 +02:00
const_cast < size_t * > ( origin ) , const_cast < size_t * > ( region ) , nullptr , false , getDevice ( ) . getRootDeviceIndex ( ) ) ;
2018-02-08 22:59:03 +01:00
EventsRequest eventsRequest ( numEventsInWaitList , eventWaitList , event ) ;
2018-02-18 13:07:53 +01:00
if ( image - > isMemObjZeroCopy ( ) & & image - > mappingOnCpuAllowed ( ) ) {
GetInfoHelper : : set ( imageSlicePitch , image - > getImageDesc ( ) . image_slice_pitch ) ;
2018-04-11 17:40:42 +02:00
if ( image - > getImageDesc ( ) . image_type = = CL_MEM_OBJECT_IMAGE1D_ARRAY ) {
// There are differences in qPitch programming between Gen8 vs Gen9+ devices.
// For Gen8 qPitch is distance in rows while Gen9+ it is in pixels.
// Minimum value of qPitch is 4 and this causes slicePitch = 4*rowPitch on Gen8.
// To allow zero-copy we have to tell what is correct value rowPitch which should equal to slicePitch.
GetInfoHelper : : set ( imageRowPitch , image - > getImageDesc ( ) . image_slice_pitch ) ;
} else {
GetInfoHelper : : set ( imageRowPitch , image - > getImageDesc ( ) . image_row_pitch ) ;
}
2018-02-18 13:07:53 +01:00
} else {
2018-09-04 06:25:29 -07:00
GetInfoHelper : : set ( imageSlicePitch , image - > getHostPtrSlicePitch ( ) ) ;
GetInfoHelper : : set ( imageRowPitch , image - > getHostPtrRowPitch ( ) ) ;
2018-02-18 13:07:53 +01:00
}
2018-04-04 09:29:48 +02:00
if ( Image : : hasSlices ( image - > peekClMemObjType ( ) ) = = false ) {
GetInfoHelper : : set ( imageSlicePitch , static_cast < size_t > ( 0 ) ) ;
}
2018-09-04 06:25:29 -07:00
return enqueueMapMemObject ( transferProperties , eventsRequest , errcodeRet ) ;
2018-02-08 22:59:03 +01:00
}
// clEnqueueUnmapMemObject backend: wraps the raw arguments and forwards to
// the TransferProperties-based overload.
cl_int CommandQueue::enqueueUnmapMemObject(MemObj *memObj, void *mappedPtr, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *event) {
    TransferProperties transferProperties(memObj, CL_COMMAND_UNMAP_MEM_OBJECT, 0, false,
                                          nullptr, nullptr, mappedPtr, false, getDevice().getRootDeviceIndex());
    EventsRequest eventsRequest(numEventsInWaitList, eventWaitList, event);
    return enqueueUnmapMemObject(transferProperties, eventsRequest);
}
// Records a map/unmap operation to be replayed once its blocking events
// complete. The operation is stored on an event that becomes the queue's new
// virtual event, chained after any previous virtual event.
void CommandQueue::enqueueBlockedMapUnmapOperation(const cl_event *eventWaitList,
                                                   size_t numEventsInWaitlist,
                                                   MapOperationType opType,
                                                   MemObj *memObj,
                                                   MemObjSizeArray &copySize,
                                                   MemObjOffsetArray &copyOffset,
                                                   bool readOnly,
                                                   EventBuilder &externalEventBuilder) {
    EventBuilder internalEventBuilder;
    EventBuilder *eventBuilder;
    // check if event will be exposed externally
    if (externalEventBuilder.getEvent()) {
        externalEventBuilder.getEvent()->incRefInternal();
        eventBuilder = &externalEventBuilder;
    } else {
        // it will be an internal event
        internalEventBuilder.create<VirtualEvent>(this, context);
        eventBuilder = &internalEventBuilder;
    }

    //store task data in event
    auto cmd = std::unique_ptr<Command>(new CommandMapUnmap(opType, *memObj, copySize, copyOffset, readOnly, *this));
    eventBuilder->getEvent()->setCommand(std::move(cmd));

    //bind output event with input events
    eventBuilder->addParentEvents(ArrayRef<const cl_event>(eventWaitList, numEventsInWaitlist));
    eventBuilder->addParentEvent(this->virtualEvent);
    eventBuilder->finalize();

    // Replace the queue's virtual event with the new tail of the chain,
    // releasing the reference held on the previous one.
    if (this->virtualEvent) {
        this->virtualEvent->decRefInternal();
    }
    this->virtualEvent = eventBuilder->getEvent();
}
2018-03-21 12:58:30 +01:00
// Patches the kernel's system-thread surface state to point at the CSR's
// debug surface allocation. Returns true (no failure path in this code).
bool CommandQueue::setupDebugSurface(Kernel *kernel) {
    auto debugSurface = getGpgpuCommandStreamReceiver().getDebugSurfaceAllocation();

    // Bindful surface-state addressing is required to patch the SSH directly.
    DEBUG_BREAK_IF(!kernel->usesBindfulAddressingForBuffers());
    // Locate the surface-state slot for the system-thread surface in the SSH.
    auto surfaceState = ptrOffset(reinterpret_cast<uintptr_t *>(kernel->getSurfaceStateHeap()),
                                  kernel->getKernelInfo().kernelDescriptor.payloadMappings.implicitArgs.systemThreadSurfaceAddress.bindful);
    void *addressToPatch = reinterpret_cast<void *>(debugSurface->getGpuAddress());
    size_t sizeToPatch = debugSurface->getUnderlyingBufferSize();
    Buffer::setSurfaceState(&device->getDevice(), surfaceState, false, false, sizeToPatch,
                            addressToPatch, 0, debugSurface, 0, 0,
                            kernel->getKernelInfo().kernelDescriptor.kernelAttributes.flags.useGlobalAtomics,
                            kernel->areMultipleSubDevicesInContext());
    return true;
}
2020-11-24 13:29:07 +00:00
bool CommandQueue : : validateCapability ( cl_command_queue_capabilities_intel capability ) const {
2020-12-08 12:40:04 +00:00
return this - > queueCapabilities = = CL_QUEUE_DEFAULT_CAPABILITIES_INTEL | | isValueSet ( this - > queueCapabilities , capability ) ;
2020-11-24 13:29:07 +00:00
}
2020-12-14 18:07:09 +00:00
bool CommandQueue : : validateCapabilitiesForEventWaitList ( cl_uint numEventsInWaitList , const cl_event * waitList ) const {
for ( cl_uint eventIndex = 0u ; eventIndex < numEventsInWaitList ; eventIndex + + ) {
const Event * event = castToObject < Event > ( waitList [ eventIndex ] ) ;
if ( event - > isUserEvent ( ) ) {
continue ;
}
const CommandQueue * eventCommandQueue = event - > getCommandQueue ( ) ;
const bool crossQueue = this ! = eventCommandQueue ;
const cl_command_queue_capabilities_intel createCap = crossQueue ? CL_QUEUE_CAPABILITY_CREATE_CROSS_QUEUE_EVENTS_INTEL
: CL_QUEUE_CAPABILITY_CREATE_SINGLE_QUEUE_EVENTS_INTEL ;
const cl_command_queue_capabilities_intel waitCap = crossQueue ? CL_QUEUE_CAPABILITY_CROSS_QUEUE_EVENT_WAIT_LIST_INTEL
: CL_QUEUE_CAPABILITY_SINGLE_QUEUE_EVENT_WAIT_LIST_INTEL ;
if ( ! validateCapability ( waitCap ) | | ! eventCommandQueue - > validateCapability ( createCap ) ) {
return false ;
}
}
return true ;
}
bool CommandQueue : : validateCapabilityForOperation ( cl_command_queue_capabilities_intel capability ,
cl_uint numEventsInWaitList ,
const cl_event * waitList ,
const cl_event * outEvent ) const {
2020-11-24 13:29:07 +00:00
const bool operationValid = validateCapability ( capability ) ;
2020-12-14 18:07:09 +00:00
const bool waitListValid = validateCapabilitiesForEventWaitList ( numEventsInWaitList , waitList ) ;
2020-12-08 12:40:04 +00:00
const bool outEventValid = outEvent = = nullptr | |
2020-12-14 18:07:09 +00:00
validateCapability ( CL_QUEUE_CAPABILITY_CREATE_SINGLE_QUEUE_EVENTS_INTEL ) | |
validateCapability ( CL_QUEUE_CAPABILITY_CREATE_CROSS_QUEUE_EVENTS_INTEL ) ;
2020-11-24 13:29:07 +00:00
return operationValid & & waitListValid & & outEventValid ;
}
2021-02-11 13:26:05 +00:00
cl_uint CommandQueue::getQueueFamilyIndex() const {
    // If the user explicitly selected a family via CL_QUEUE_FAMILY_INTEL,
    // report that selection directly.
    if (isQueueFamilySelected()) {
        return queueFamilyIndex;
    }
    // Otherwise derive the family index from the engine this queue runs on.
    const auto &hwInfo = device->getHardwareInfo();
    const auto &hwHelper = HwHelper::get(hwInfo.platform.eRenderCoreFamily);
    const auto groupType = hwHelper.getEngineGroupType(gpgpuEngine->getEngineType(), gpgpuEngine->getEngineUsage(), hwInfo);
    return static_cast<cl_uint>(device->getDevice().getIndexOfNonEmptyEngineGroup(groupType));
}
void CommandQueue : : updateBcsTaskCount ( aub_stream : : EngineType bcsEngineType , uint32_t newBcsTaskCount ) {
2021-10-06 15:19:50 +00:00
CopyEngineState & state = bcsStates [ EngineHelpers : : getBcsIndex ( bcsEngineType ) ] ;
state . engineType = bcsEngineType ;
state . taskCount = newBcsTaskCount ;
2021-09-17 17:09:06 +00:00
}
2021-08-26 16:38:39 +00:00
uint32_t CommandQueue : : peekBcsTaskCount ( aub_stream : : EngineType bcsEngineType ) const {
2021-10-06 15:19:50 +00:00
const CopyEngineState & state = bcsStates [ EngineHelpers : : getBcsIndex ( bcsEngineType ) ] ;
DEBUG_BREAK_IF ( ! state . isValid ( ) ) ;
return state . taskCount ;
2021-08-26 16:38:39 +00:00
}
2018-04-26 10:01:01 +02:00
IndirectHeap &CommandQueue::getIndirectHeap(IndirectHeap::Type heapType, size_t minRequiredSize) {
    // Indirect heaps are owned by the GPGPU command stream receiver; delegate.
    auto &csr = getGpgpuCommandStreamReceiver();
    return csr.getIndirectHeap(heapType, minRequiredSize);
}
void CommandQueue : : allocateHeapMemory ( IndirectHeap : : Type heapType , size_t minRequiredSize , IndirectHeap * & indirectHeap ) {
2019-07-15 14:28:09 +02:00
getGpgpuCommandStreamReceiver ( ) . allocateHeapMemory ( heapType , minRequiredSize , indirectHeap ) ;
2018-04-26 10:01:01 +02:00
}
2018-04-05 15:12:28 +02:00
2018-04-26 10:01:01 +02:00
void CommandQueue : : releaseIndirectHeap ( IndirectHeap : : Type heapType ) {
2019-07-15 14:28:09 +02:00
getGpgpuCommandStreamReceiver ( ) . releaseIndirectHeap ( heapType ) ;
2018-04-05 15:12:28 +02:00
}
2018-04-26 10:01:01 +02:00
2021-08-25 16:03:15 +00:00
void CommandQueue : : obtainNewTimestampPacketNodes ( size_t numberOfNodes , TimestampPacketContainer & previousNodes , bool clearAllDependencies , CommandStreamReceiver & csr ) {
TagAllocatorBase * allocator = csr . getTimestampPacketAllocator ( ) ;
2018-08-28 14:11:25 +02:00
2018-10-02 14:37:30 -07:00
previousNodes . swapNodes ( * timestampPacketContainer ) ;
2020-07-07 14:40:29 +02:00
2021-10-06 16:21:19 +00:00
if ( ( previousNodes . peekNodes ( ) . size ( ) > 0 ) & & ( previousNodes . peekNodes ( ) [ 0 ] - > getAllocator ( ) ! = allocator ) ) {
clearAllDependencies = false ;
}
2021-06-23 11:04:10 +00:00
if ( clearAllDependencies ) {
previousNodes . moveNodesToNewContainer ( * deferredTimestampPackets ) ;
}
2018-10-02 14:37:30 -07:00
DEBUG_BREAK_IF ( timestampPacketContainer - > peekNodes ( ) . size ( ) > 0 ) ;
for ( size_t i = 0 ; i < numberOfNodes ; i + + ) {
timestampPacketContainer - > add ( allocator - > getTag ( ) ) ;
2018-09-10 14:24:11 +02:00
}
2018-08-28 14:11:25 +02:00
}
2019-02-21 16:59:10 +01:00
size_t CommandQueue : : estimateTimestampPacketNodesCount ( const MultiDispatchInfo & dispatchInfo ) const {
size_t nodesCount = dispatchInfo . size ( ) ;
auto mainKernel = dispatchInfo . peekMainKernel ( ) ;
2020-03-06 14:56:23 +01:00
if ( obtainTimestampPacketForCacheFlush ( mainKernel - > requiresCacheFlushCommand ( * this ) ) ) {
2019-02-21 16:59:10 +01:00
nodesCount + + ;
}
return nodesCount ;
}
2019-05-30 14:36:12 +02:00
bool CommandQueue : : bufferCpuCopyAllowed ( Buffer * buffer , cl_command_type commandType , cl_bool blocking , size_t size , void * ptr ,
cl_uint numEventsInWaitList , const cl_event * eventWaitList ) {
2020-02-25 13:39:23 +01:00
auto debugVariableSet = false ;
2019-05-30 14:36:12 +02:00
// Requested by debug variable or allowed by Buffer
2020-02-25 13:39:23 +01:00
if ( CL_COMMAND_READ_BUFFER = = commandType & & DebugManager . flags . DoCpuCopyOnReadBuffer . get ( ) ! = - 1 ) {
if ( DebugManager . flags . DoCpuCopyOnReadBuffer . get ( ) = = 0 ) {
return false ;
}
debugVariableSet = true ;
}
if ( CL_COMMAND_WRITE_BUFFER = = commandType & & DebugManager . flags . DoCpuCopyOnWriteBuffer . get ( ) ! = - 1 ) {
if ( DebugManager . flags . DoCpuCopyOnWriteBuffer . get ( ) = = 0 ) {
return false ;
}
debugVariableSet = true ;
}
2019-05-30 14:36:12 +02:00
2020-02-21 08:25:43 +01:00
//if we are blocked by user events, we can't service the call on CPU
if ( Event : : checkUserEventDependencies ( numEventsInWaitList , eventWaitList ) ) {
return false ;
}
2020-02-21 11:11:36 +01:00
//check if buffer is compatible
2021-03-01 19:07:28 +00:00
if ( ! buffer - > isReadWriteOnCpuAllowed ( device - > getDevice ( ) ) ) {
2020-02-21 09:48:51 +01:00
return false ;
}
2020-03-12 07:23:27 +01:00
if ( buffer - > getMemoryManager ( ) & & buffer - > getMemoryManager ( ) - > isCpuCopyRequired ( ptr ) ) {
return true ;
}
2020-02-21 09:48:51 +01:00
if ( debugVariableSet ) {
2020-02-21 08:25:43 +01:00
return true ;
}
2020-02-21 09:48:51 +01:00
//non blocking transfers are not expected to be serviced by CPU
//we do not want to artifically stall the pipeline to allow CPU access
if ( blocking = = CL_FALSE ) {
return false ;
}
2020-02-21 11:11:36 +01:00
//check if it is beneficial to do transfer on CPU
2020-06-25 10:49:29 +02:00
if ( ! buffer - > isReadWriteOnCpuPreferred ( ptr , size , getDevice ( ) ) ) {
2020-02-21 08:25:43 +01:00
return false ;
}
//make sure that event wait list is empty
if ( numEventsInWaitList = = 0 ) {
return true ;
}
return false ;
2019-05-30 14:36:12 +02:00
}
2019-06-05 09:35:58 +02:00
2019-06-13 09:56:06 +02:00
bool CommandQueue : : queueDependenciesClearRequired ( ) const {
return isOOQEnabled ( ) | | DebugManager . flags . OmitTimestampPacketDependencies . get ( ) ;
}
2019-06-18 11:02:47 +02:00
2021-09-06 17:04:14 +00:00
bool CommandQueue : : blitEnqueueAllowed ( const CsrSelectionArgs & args ) const {
2021-09-24 16:32:20 +00:00
if ( getAnyBcs ( ) = = nullptr ) {
2021-09-06 17:04:14 +00:00
return false ;
}
2019-07-17 14:14:58 +02:00
2021-09-02 10:14:12 +00:00
bool blitEnqueueAllowed = getGpgpuCommandStreamReceiver ( ) . peekTimestampPacketWriteEnabled ( ) | | this - > isCopyOnly ;
2020-10-27 14:06:44 +01:00
if ( DebugManager . flags . EnableBlitterForEnqueueOperations . get ( ) ! = - 1 ) {
2021-09-02 10:14:12 +00:00
blitEnqueueAllowed = DebugManager . flags . EnableBlitterForEnqueueOperations . get ( ) ;
2020-10-20 15:27:49 +02:00
}
2021-09-06 17:04:14 +00:00
if ( ! blitEnqueueAllowed ) {
return false ;
}
2020-10-20 15:27:49 +02:00
2021-09-06 17:04:14 +00:00
switch ( args . cmdType ) {
2020-10-20 15:27:49 +02:00
case CL_COMMAND_READ_BUFFER :
case CL_COMMAND_WRITE_BUFFER :
case CL_COMMAND_COPY_BUFFER :
case CL_COMMAND_READ_BUFFER_RECT :
case CL_COMMAND_WRITE_BUFFER_RECT :
case CL_COMMAND_COPY_BUFFER_RECT :
case CL_COMMAND_SVM_MEMCPY :
2021-09-06 17:04:14 +00:00
case CL_COMMAND_SVM_MAP :
case CL_COMMAND_SVM_UNMAP :
return true ;
2020-10-20 15:27:49 +02:00
case CL_COMMAND_READ_IMAGE :
2021-09-06 17:04:14 +00:00
return blitEnqueueImageAllowed ( args . srcResource . imageOrigin , args . size , * args . srcResource . image ) ;
2020-10-20 15:27:49 +02:00
case CL_COMMAND_WRITE_IMAGE :
2021-09-06 17:04:14 +00:00
return blitEnqueueImageAllowed ( args . dstResource . imageOrigin , args . size , * args . dstResource . image ) ;
2021-07-26 00:49:41 +00:00
case CL_COMMAND_COPY_IMAGE :
2021-09-06 17:04:14 +00:00
return blitEnqueueImageAllowed ( args . srcResource . imageOrigin , args . size , * args . srcResource . image ) & &
blitEnqueueImageAllowed ( args . dstResource . imageOrigin , args . size , * args . dstResource . image ) ;
2020-10-20 15:27:49 +02:00
default :
return false ;
}
2019-06-18 11:02:47 +02:00
}
2019-07-22 20:55:09 +02:00
2021-09-06 17:04:14 +00:00
bool CommandQueue : : blitEnqueuePreferred ( const CsrSelectionArgs & args ) const {
if ( args . direction = = TransferDirection : : LocalToLocal ) {
2021-02-12 12:25:51 +00:00
if ( DebugManager . flags . PreferCopyEngineForCopyBufferToBuffer . get ( ) ! = - 1 ) {
return static_cast < bool > ( DebugManager . flags . PreferCopyEngineForCopyBufferToBuffer . get ( ) ) ;
}
const auto & clHwHelper = ClHwHelper : : get ( device - > getHardwareInfo ( ) . platform . eRenderCoreFamily ) ;
return clHwHelper . preferBlitterForLocalToLocalTransfers ( ) ;
}
2021-02-10 17:41:08 +00:00
return true ;
}
2021-09-06 17:04:14 +00:00
bool CommandQueue : : blitEnqueueImageAllowed ( const size_t * origin , const size_t * region , const Image & image ) const {
2021-04-22 13:11:33 +00:00
const auto & hwInfo = device - > getHardwareInfo ( ) ;
2021-10-13 13:30:45 +00:00
const auto & hwInfoConfig = HwInfoConfig : : get ( hwInfo . platform . eProductFamily ) ;
auto blitEnqueueImageAllowed = hwInfoConfig - > isBlitterForImagesSupported ( ) ;
2020-10-27 17:17:26 +01:00
2021-07-30 00:33:41 +00:00
if ( DebugManager . flags . EnableBlitterForEnqueueImageOperations . get ( ) ! = - 1 ) {
blitEnqueueImageAllowed = DebugManager . flags . EnableBlitterForEnqueueImageOperations . get ( ) ;
2020-11-06 11:38:49 +00:00
}
2021-07-26 00:08:39 +00:00
blitEnqueueImageAllowed & = ( origin [ 0 ] + region [ 0 ] < = BlitterConstants : : maxBlitWidth ) & & ( origin [ 1 ] + region [ 1 ] < = BlitterConstants : : maxBlitHeight ) ;
blitEnqueueImageAllowed & = ! isMipMapped ( image . getImageDesc ( ) ) ;
2021-06-10 12:53:07 +00:00
2021-07-26 00:08:39 +00:00
return blitEnqueueImageAllowed ;
2020-10-27 17:17:26 +01:00
}
2021-06-22 13:16:27 +00:00
bool CommandQueue : : isBlockedCommandStreamRequired ( uint32_t commandType , const EventsRequest & eventsRequest , bool blockedQueue , bool isMarkerWithProfiling ) const {
2019-07-22 20:55:09 +02:00
if ( ! blockedQueue ) {
return false ;
}
2021-06-22 13:16:27 +00:00
if ( isCacheFlushCommand ( commandType ) | | ! isCommandWithoutKernel ( commandType ) | | isMarkerWithProfiling ) {
2019-07-22 20:55:09 +02:00
return true ;
}
2021-06-14 15:33:53 +00:00
if ( CL_COMMAND_BARRIER = = commandType | | CL_COMMAND_MARKER = = commandType ) {
auto timestampPacketWriteEnabled = getGpgpuCommandStreamReceiver ( ) . peekTimestampPacketWriteEnabled ( ) ;
if ( timestampPacketWriteEnabled | | context - > getRootDeviceIndices ( ) . size ( ) > 1 ) {
2019-07-22 20:55:09 +02:00
2021-06-14 15:33:53 +00:00
for ( size_t i = 0 ; i < eventsRequest . numEventsInWaitList ; i + + ) {
auto waitlistEvent = castToObjectOrAbort < Event > ( eventsRequest . eventWaitList [ i ] ) ;
if ( timestampPacketWriteEnabled & & waitlistEvent - > getTimestampPacketNodes ( ) ) {
return true ;
}
if ( waitlistEvent - > getCommandQueue ( ) & & waitlistEvent - > getCommandQueue ( ) - > getDevice ( ) . getRootDeviceIndex ( ) ! = this - > getDevice ( ) . getRootDeviceIndex ( ) ) {
return true ;
}
2019-07-22 20:55:09 +02:00
}
}
}
return false ;
}
2019-08-02 15:56:28 +02:00
2020-05-27 17:12:32 +02:00
void CommandQueue : : storeProperties ( const cl_queue_properties * properties ) {
if ( properties ) {
for ( size_t i = 0 ; properties [ i ] ! = 0 ; i + = 2 ) {
propertiesVector . push_back ( properties [ i ] ) ;
propertiesVector . push_back ( properties [ i + 1 ] ) ;
}
propertiesVector . push_back ( 0 ) ;
}
}
2020-11-13 14:28:52 +00:00
// Parses the cl_queue_properties list passed at queue creation and applies the
// Intel queue-family extension properties (CL_QUEUE_FAMILY_INTEL /
// CL_QUEUE_INDEX_INTEL). When a specific engine is selected, the queue is
// retargeted to that engine and its capabilities and indices are recorded.
void CommandQueue : : processProperties ( const cl_queue_properties * properties ) {
2020-11-16 11:43:03 +00:00
if ( properties ! = nullptr ) {
bool specificEngineSelected = false ;
cl_uint selectedQueueFamilyIndex = std : : numeric_limits < uint32_t > : : max ( ) ;
cl_uint selectedQueueIndex = std : : numeric_limits < uint32_t > : : max ( ) ;
// Properties arrive as zero-terminated (name, value) pairs.
for ( auto currentProperties = properties ; * currentProperties ! = 0 ; currentProperties + = 2 ) {
switch ( * currentProperties ) {
case CL_QUEUE_FAMILY_INTEL :
selectedQueueFamilyIndex = static_cast < cl_uint > ( * ( currentProperties + 1 ) ) ;
specificEngineSelected = true ;
break ;
case CL_QUEUE_INDEX_INTEL :
selectedQueueIndex = static_cast < cl_uint > ( * ( currentProperties + 1 ) ) ;
specificEngineSelected = true ;
break ;
}
}
if ( specificEngineSelected ) {
2020-11-25 18:10:05 +00:00
this - > queueFamilySelected = true ;
2021-08-31 16:49:46 +00:00
// NOTE(review): the engine override is skipped on devices with a root CSR;
// presumably such devices keep their default engine - confirm intent.
if ( ! getDevice ( ) . hasRootCsr ( ) ) {
2020-11-16 11:43:03 +00:00
auto queueFamily = getDevice ( ) . getNonEmptyEngineGroup ( selectedQueueFamilyIndex ) ;
2021-07-25 23:50:05 +00:00
const auto & engine = queueFamily - > at ( selectedQueueIndex ) ;
2020-11-16 11:43:03 +00:00
auto engineType = engine . getEngineType ( ) ;
2021-08-16 18:24:13 +00:00
auto engineUsage = engine . getEngineUsage ( ) ;
// Switch the queue over to the selected engine and record its family data.
this - > overrideEngine ( engineType , engineUsage ) ;
2020-11-24 13:29:07 +00:00
this - > queueCapabilities = getClDevice ( ) . getDeviceInfo ( ) . queueFamilyProperties [ selectedQueueFamilyIndex ] . capabilities ;
2020-11-25 18:10:05 +00:00
this - > queueFamilyIndex = selectedQueueFamilyIndex ;
this - > queueIndexWithinFamily = selectedQueueIndex ;
2020-11-16 11:43:03 +00:00
}
}
}
2021-07-01 21:07:56 +00:00
// Sub-devices (devices that have a parent device) require a cache flush
// after each walker command.
requiresCacheFlushAfterWalker = device & & ( device - > getDeviceInfo ( ) . parentDevice ! = nullptr ) ;
2020-11-13 14:28:52 +00:00
}
2021-08-16 18:24:13 +00:00
// Retargets this queue to the engine selected via the queue-family properties.
// If the selected engine group is copy-only (BCS), the queue becomes a
// copy-only queue with its own timestamp packet containers; otherwise only
// the GPGPU engine is replaced.
void CommandQueue : : overrideEngine ( aub_stream : : EngineType engineType , EngineUsage engineUsage ) {
2021-01-19 17:32:22 +00:00
const HardwareInfo & hwInfo = getDevice ( ) . getHardwareInfo ( ) ;
const HwHelper & hwHelper = HwHelper : : get ( hwInfo . platform . eRenderCoreFamily ) ;
2021-08-16 18:24:13 +00:00
const EngineGroupType engineGroupType = hwHelper . getEngineGroupType ( engineType , engineUsage , hwInfo ) ;
2021-01-19 17:32:22 +00:00
const bool isEngineCopyOnly = hwHelper . isCopyOnlyEngineType ( engineGroupType ) ;
if ( isEngineCopyOnly ) {
2021-09-24 16:32:20 +00:00
// Drop all previously assigned copy engines; keep only the selected one.
std : : fill ( bcsEngines . begin ( ) , bcsEngines . end ( ) , nullptr ) ;
// NOTE(review): the engine is fetched with EngineUsage::Regular even though
// an engineUsage parameter was supplied - confirm this is intentional.
bcsEngines [ EngineHelpers : : getBcsIndex ( engineType ) ] = & device - > getEngine ( engineType , EngineUsage : : Regular ) ;
2020-11-16 11:43:03 +00:00
timestampPacketContainer = std : : make_unique < TimestampPacketContainer > ( ) ;
2021-06-14 16:57:09 +00:00
deferredTimestampPackets = std : : make_unique < TimestampPacketContainer > ( ) ;
2020-11-16 11:43:03 +00:00
isCopyOnly = true ;
} else {
2021-03-16 12:34:27 +00:00
gpgpuEngine = & device - > getEngine ( engineType , EngineUsage : : Regular ) ;
2020-11-16 11:43:03 +00:00
}
}
2019-08-02 15:56:28 +02:00
// AUB-capture hook invoked on each enqueue. While AUB sub-capture mode is
// enabled it adjusts the enqueue's blocking/dependency behavior based on the
// sub-capture state, and for non-HW (AUB/TBX-backed) command stream receivers
// it emits each dispatched kernel's name as an AUB comment.
void CommandQueue : : aubCaptureHook ( bool & blocking , bool & clearAllDependencies , const MultiDispatchInfo & multiDispatchInfo ) {
if ( DebugManager . flags . AUBDumpSubCaptureMode . get ( ) ) {
2021-10-04 12:18:04 +00:00
// Activation is keyed off the main kernel's name (empty when no kernel).
auto status = getGpgpuCommandStreamReceiver ( ) . checkAndActivateAubSubCapture ( multiDispatchInfo . empty ( ) ? " " : multiDispatchInfo . peekMainKernel ( ) - > getDescriptor ( ) . kernelMetadata . kernelName ) ;
2019-08-02 15:56:28 +02:00
if ( ! status . isActive ) {
// make each enqueue blocking when subcapture is not active to split batch buffer
blocking = true ;
} else if ( ! status . wasActiveInPreviousEnqueue ) {
// omit timestamp packet dependencies upon subcapture activation
clearAllDependencies = true ;
}
}
// CSR types above CSR_HW are AUB/TBX-backed and accept AUB comments.
if ( getGpgpuCommandStreamReceiver ( ) . getType ( ) > CommandStreamReceiverType : : CSR_HW ) {
for ( auto & dispatchInfo : multiDispatchInfo ) {
2021-07-19 16:12:54 +00:00
auto & kernelName = dispatchInfo . getKernel ( ) - > getKernelInfo ( ) . kernelDescriptor . kernelMetadata . kernelName ;
2019-08-02 15:56:28 +02:00
getGpgpuCommandStreamReceiver ( ) . addAubComment ( kernelName . c_str ( ) ) ;
}
}
}
2020-06-24 13:32:09 +02:00
2021-09-22 15:07:20 +00:00
void CommandQueue : : waitForAllEngines ( bool blockedQueue , PrintfHandler * printfHandler ) {
2020-08-26 15:44:12 +02:00
if ( blockedQueue ) {
while ( isQueueBlocked ( ) ) {
}
}
2021-09-22 15:07:20 +00:00
TimestampPacketContainer nodesToRelease ;
if ( deferredTimestampPackets ) {
deferredTimestampPackets - > swapNodes ( nodesToRelease ) ;
}
2021-10-06 15:19:50 +00:00
StackVec < CopyEngineState , bcsInfoMaskSize > activeBcsStates { } ;
for ( CopyEngineState & state : this - > bcsStates ) {
if ( state . isValid ( ) ) {
activeBcsStates . push_back ( state ) ;
}
}
waitUntilComplete ( taskCount , activeBcsStates , flushStamp - > peekStamp ( ) , false ) ;
2020-08-26 15:44:12 +02:00
if ( printfHandler ) {
printfHandler - > printEnqueueOutput ( ) ;
}
}
2019-03-26 11:59:46 +01:00
} // namespace NEO