/*
 * Copyright (C) 2018-2019 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#pragma once
#include "core/helpers/vec.h"
#include "runtime/built_ins/built_ins.h"
#include "runtime/command_queue/command_queue.h"
#include "runtime/command_stream/linear_stream.h"
#include "runtime/command_stream/preemption.h"
#include "runtime/context/context.h"
#include "runtime/device_queue/device_queue_hw.h"
#include "runtime/event/hw_timestamps.h"
#include "runtime/event/perf_counter.h"
#include "runtime/helpers/dispatch_info.h"
#include "runtime/helpers/hardware_commands_helper.h"
#include "runtime/helpers/task_information.h"
#include "runtime/helpers/timestamp_packet.h"
#include "runtime/indirect_heap/indirect_heap.h"
#include "runtime/kernel/kernel.h"
#include "runtime/program/kernel_info.h"
#include "runtime/utilities/tag_allocator.h"

namespace NEO {

template <typename GfxFamily>
using WALKER_TYPE = typename GfxFamily::WALKER_TYPE;

template <typename GfxFamily>
using MI_STORE_REG_MEM = typename GfxFamily::MI_STORE_REGISTER_MEM_CMD;

constexpr int32_t NUM_ALU_INST_FOR_READ_MODIFY_WRITE = 4;

constexpr int32_t L3SQC_BIT_LQSC_RO_PERF_DIS = 0x08000000;
constexpr int32_t L3SQC_REG4 = 0xB118;

constexpr int32_t GPGPU_WALKER_COOKIE_VALUE_BEFORE_WALKER = 0xFFFFFFFF;
constexpr int32_t GPGPU_WALKER_COOKIE_VALUE_AFTER_WALKER = 0x00000000;

constexpr int32_t CS_GPR_R0 = 0x2600;
constexpr int32_t CS_GPR_R1 = 0x2608;

constexpr int32_t ALU_OPCODE_LOAD = 0x080;
constexpr int32_t ALU_OPCODE_STORE = 0x180;
constexpr int32_t ALU_OPCODE_OR = 0x103;
constexpr int32_t ALU_OPCODE_AND = 0x102;

constexpr int32_t ALU_REGISTER_R_0 = 0x0;
constexpr int32_t ALU_REGISTER_R_1 = 0x1;
constexpr int32_t ALU_REGISTER_R_SRCA = 0x20;
constexpr int32_t ALU_REGISTER_R_SRCB = 0x21;
constexpr int32_t ALU_REGISTER_R_ACCU = 0x31;

constexpr uint32_t GP_THREAD_TIME_REG_ADDRESS_OFFSET_LOW = 0x23A8;

void computeWorkgroupSize1D(
    uint32_t maxWorkGroupSize,
    size_t workGroupSize[3],
    const size_t workItems[3],
    size_t simdSize);

void computeWorkgroupSizeND(
    WorkSizeInfo wsInfo,
    size_t workGroupSize[3],
    const size_t workItems[3],
    const uint32_t workDim);

void computeWorkgroupSize2D(
    uint32_t maxWorkGroupSize,
    size_t workGroupSize[3],
    const size_t workItems[3],
    size_t simdSize);

void computeWorkgroupSizeSquared(
    uint32_t maxWorkGroupSize,
    size_t workGroupSize[3],
    const size_t workItems[3],
    size_t simdSize,
    const uint32_t workDim);

Vec3<size_t> computeWorkgroupSize(
    const DispatchInfo &dispatchInfo);

Vec3<size_t> generateWorkgroupSize(
    const DispatchInfo &dispatchInfo);

Vec3<size_t> computeWorkgroupsNumber(
    const Vec3<size_t> gws,
    const Vec3<size_t> lws);

Vec3<size_t> generateWorkgroupsNumber(
    const Vec3<size_t> gws,
    const Vec3<size_t> lws);

Vec3<size_t> generateWorkgroupsNumber(
    const DispatchInfo &dispatchInfo);

inline uint32_t calculateDispatchDim(Vec3<size_t> dispatchSize, Vec3<size_t> dispatchOffset) {
    return std::max(1U, std::max(dispatchSize.getSimplifiedDim(), dispatchOffset.getSimplifiedDim()));
}

Vec3<size_t> canonizeWorkgroup(
    Vec3<size_t> workgroup);

void provideLocalWorkGroupSizeHints(Context *context, uint32_t maxWorkGroupSize, DispatchInfo dispatchInfo);

inline cl_uint computeDimensions(const size_t workItems[3]) {
    return (workItems[2] > 1) ? 3 : (workItems[1] > 1) ? 2 : 1;
}
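
// Example (illustrative, not part of the original header): for work items
// {64, 8, 1} computeDimensions returns 2, since only the first two components
// exceed 1; {64, 1, 1} yields 1. calculateDispatchDim similarly never reports
// fewer than 1 dimension, as it clamps the result with std::max(1U, ...).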

template <typename GfxFamily>
class GpgpuWalkerHelper {
  public:
    static void applyWADisableLSQCROPERFforOCL(LinearStream *pCommandStream,
                                               const Kernel &kernel,
                                               bool disablePerfMode);

    static size_t getSizeForWADisableLSQCROPERFforOCL(const Kernel *pKernel);

    static size_t setGpgpuWalkerThreadData(
        WALKER_TYPE<GfxFamily> *walkerCmd,
        const size_t globalOffsets[3],
        const size_t startWorkGroups[3],
        const size_t numWorkGroups[3],
        const size_t localWorkSizesIn[3],
        uint32_t simd,
        uint32_t workDim,
        bool localIdsGenerationByRuntime,
        bool inlineDataProgrammingRequired,
        const iOpenCL::SPatchThreadPayload &threadPayload,
        uint32_t requiredWorkgroupOrder);

    static void dispatchProfilingCommandsStart(
        TagNode<HwTimeStamps> &hwTimeStamps,
        LinearStream *commandStream);

    static void dispatchProfilingCommandsEnd(
        TagNode<HwTimeStamps> &hwTimeStamps,
        LinearStream *commandStream);

    static void dispatchPerfCountersCommandsStart(
        CommandQueue &commandQueue,
        TagNode<HwPerfCounter> &hwPerfCounter,
        LinearStream *commandStream);

    static void dispatchPerfCountersCommandsEnd(
        CommandQueue &commandQueue,
        TagNode<HwPerfCounter> &hwPerfCounter,
        LinearStream *commandStream);

    static void setupTimestampPacket(
        LinearStream *cmdStream,
        WALKER_TYPE<GfxFamily> *walkerCmd,
        TagNode<TimestampPacketStorage> *timestampPacketNode,
        TimestampPacketStorage::WriteOperationType writeOperationType);

    static void dispatchScheduler(
        LinearStream &commandStream,
        DeviceQueueHw<GfxFamily> &devQueueHw,
        PreemptionMode preemptionMode,
        SchedulerKernel &scheduler,
        IndirectHeap *ssh,
        IndirectHeap *dsh);

    static void adjustMiStoreRegMemMode(MI_STORE_REG_MEM<GfxFamily> *storeCmd);

  private:
    using PIPE_CONTROL = typename GfxFamily::PIPE_CONTROL;

    static void addAluReadModifyWriteRegister(
        LinearStream *pCommandStream,
        uint32_t aluRegister,
        uint32_t operation,
        uint32_t mask);
};

template <typename GfxFamily>
struct EnqueueOperation {
    using PIPE_CONTROL = typename GfxFamily::PIPE_CONTROL;

    static size_t getTotalSizeRequiredCS(uint32_t eventType, const CsrDependencies &csrDeps, bool reserveProfilingCmdsSpace, bool reservePerfCounters, bool blitEnqueue, CommandQueue &commandQueue, const MultiDispatchInfo &multiDispatchInfo);
    static size_t getSizeRequiredCS(uint32_t cmdType, bool reserveProfilingCmdsSpace, bool reservePerfCounters, CommandQueue &commandQueue, const Kernel *pKernel);
    static size_t getSizeRequiredForTimestampPacketWrite();

  private:
    static size_t getSizeRequiredCSKernel(bool reserveProfilingCmdsSpace, bool reservePerfCounters, CommandQueue &commandQueue, const Kernel *pKernel);
    static size_t getSizeRequiredCSNonKernel(bool reserveProfilingCmdsSpace, bool reservePerfCounters, CommandQueue &commandQueue);
};

template <typename GfxFamily, uint32_t eventType>
LinearStream &getCommandStream(CommandQueue &commandQueue, bool reserveProfilingCmdsSpace, bool reservePerfCounterCmdsSpace, const Kernel *pKernel) {
    auto expectedSizeCS = EnqueueOperation<GfxFamily>::getSizeRequiredCS(eventType, reserveProfilingCmdsSpace, reservePerfCounterCmdsSpace, commandQueue, pKernel);
    return commandQueue.getCS(expectedSizeCS);
}

template <typename GfxFamily, uint32_t eventType>
LinearStream &getCommandStream(CommandQueue &commandQueue, const CsrDependencies &csrDeps, bool reserveProfilingCmdsSpace, bool reservePerfCounterCmdsSpace,
                               bool blitEnqueue, const MultiDispatchInfo &multiDispatchInfo, Surface **surfaces, size_t numSurfaces) {
    size_t expectedSizeCS = EnqueueOperation<GfxFamily>::getTotalSizeRequiredCS(eventType, csrDeps, reserveProfilingCmdsSpace, reservePerfCounterCmdsSpace,
                                                                                blitEnqueue, commandQueue, multiDispatchInfo);
    return commandQueue.getCS(expectedSizeCS);
}

template <typename GfxFamily, IndirectHeap::Type heapType>
IndirectHeap &getIndirectHeap(CommandQueue &commandQueue, const MultiDispatchInfo &multiDispatchInfo) {
    size_t expectedSize = 0;
    IndirectHeap *ih = nullptr;

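    // Estimate how much space this enqueue needs in the requested heap type
    // before a heap is acquired further below.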
    // clang-format off
    switch (heapType) {
    case IndirectHeap::DYNAMIC_STATE:   expectedSize = HardwareCommandsHelper<GfxFamily>::getTotalSizeRequiredDSH(multiDispatchInfo); break;
    case IndirectHeap::INDIRECT_OBJECT: expectedSize = HardwareCommandsHelper<GfxFamily>::getTotalSizeRequiredIOH(multiDispatchInfo); break;
    case IndirectHeap::SURFACE_STATE:   expectedSize = HardwareCommandsHelper<GfxFamily>::getTotalSizeRequiredSSH(multiDispatchInfo); break;
    }
    // clang-format on

    if (Kernel *parentKernel = multiDispatchInfo.peekParentKernel()) {
        if (heapType == IndirectHeap::SURFACE_STATE) {
            expectedSize += HardwareCommandsHelper<GfxFamily>::template getSizeRequiredForExecutionModel<heapType>(*parentKernel);
        } else //if (heapType == IndirectHeap::DYNAMIC_STATE || heapType == IndirectHeap::INDIRECT_OBJECT)
        {
            DeviceQueueHw<GfxFamily> *pDevQueue = castToObject<DeviceQueueHw<GfxFamily>>(commandQueue.getContext().getDefaultDeviceQueue());
            DEBUG_BREAK_IF(pDevQueue == nullptr);
            ih = pDevQueue->getIndirectHeap(IndirectHeap::DYNAMIC_STATE);
        }
    }

    if (ih == nullptr)
        ih = &commandQueue.getIndirectHeap(heapType, expectedSize);

    return *ih;
}

} // namespace NEO
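
// Illustrative usage sketch (not part of the original header): enqueue paths
// typically instantiate these helpers with a concrete GFX family and event
// type, along the lines of
//   auto &ssh = getIndirectHeap<GfxFamily, IndirectHeap::SURFACE_STATE>(commandQueue, multiDispatchInfo);
//   auto &commandStream = getCommandStream<GfxFamily, CL_COMMAND_NDRANGE_KERNEL>(
//       commandQueue, csrDeps, false, false, false, multiDispatchInfo, surfaces, numSurfaces);
// The exact call sites and argument values vary per enqueue path.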