/*
 * Copyright (C) 2019-2023 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#pragma once

#include "shared/source/execution_environment/root_device_environment.h"
#include "shared/source/helpers/gfx_core_helper.h"
#include "shared/source/helpers/pipe_control_args.h"
#include "shared/source/helpers/simd_helper.h"
#include "shared/source/utilities/hw_timestamps.h"

#include "opencl/source/command_queue/gpgpu_walker_base.inl"

namespace NEO {

template <typename GfxFamily>
template <typename WalkerType>
inline size_t GpgpuWalkerHelper<GfxFamily>::setGpgpuWalkerThreadData(
    WalkerType *walkerCmd,
    const KernelDescriptor &kernelDescriptor,
    const size_t globalOffsets[3],
    const size_t startWorkGroups[3],
    const size_t numWorkGroups[3],
    const size_t localWorkSizesIn[3],
    uint32_t simd,
    uint32_t workDim,
    bool localIdsGenerationByRuntime,
    bool inlineDataProgrammingRequired,
    uint32_t requiredWorkgroupOrder) {
    auto localWorkSize = static_cast<uint32_t>(localWorkSizesIn[0] * localWorkSizesIn[1] * localWorkSizesIn[2]);

    auto threadsPerWorkGroup = getThreadsPerWG(simd, localWorkSize);
    walkerCmd->setThreadWidthCounterMaximum(threadsPerWorkGroup);

    walkerCmd->setThreadGroupIdXDimension(static_cast<uint32_t>(numWorkGroups[0]));
    walkerCmd->setThreadGroupIdYDimension(static_cast<uint32_t>(numWorkGroups[1]));
    walkerCmd->setThreadGroupIdZDimension(static_cast<uint32_t>(numWorkGroups[2]));

    // compute executionMask - to tell which SIMD lanes are active within a thread
    auto remainderSimdLanes = localWorkSize & (simd - 1);
    uint64_t executionMask = maxNBitValue(remainderSimdLanes);
    if (!executionMask)
        executionMask = ~executionMask;

    using SIMD_SIZE = typename DefaultWalkerType<GfxFamily>::SIMD_SIZE;

    walkerCmd->setRightExecutionMask(static_cast<uint32_t>(executionMask));
    walkerCmd->setBottomExecutionMask(static_cast<uint32_t>(0xffffffff));
    walkerCmd->setSimdSize(getSimdConfig<SIMD_SIZE>(simd));

    walkerCmd->setThreadGroupIdStartingX(static_cast<uint32_t>(startWorkGroups[0]));
    walkerCmd->setThreadGroupIdStartingY(static_cast<uint32_t>(startWorkGroups[1]));
    walkerCmd->setThreadGroupIdStartingResumeZ(static_cast<uint32_t>(startWorkGroups[2]));

    return localWorkSize;
}
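// Worked example for the execution-mask arithmetic above (illustrative only,
// not part of the dispatch path). It assumes simd is a power of two, which the
// `localWorkSize & (simd - 1)` remainder trick requires:
//
//   localWorkSize = 40, simd = 32:
//     remainderSimdLanes = 40 & 31 = 8
//     executionMask      = maxNBitValue(8) = 0xff
//     -> the rightmost (partially filled) thread runs with only lanes 0..7 active
//
//   localWorkSize = 64, simd = 32:
//     remainderSimdLanes = 64 & 31 = 0
//     executionMask      = maxNBitValue(0) = 0, flipped by `~` to all ones
//     -> the work-group size divides evenly, so every lane stays active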
template <typename GfxFamily>
template <typename WalkerType>
void GpgpuWalkerHelper<GfxFamily>::setupTimestampPacket(
    LinearStream *cmdStream,
    WalkerType *walkerCmd,
    TagNodeBase *timestampPacketNode,
    const RootDeviceEnvironment &rootDeviceEnvironment) {

    uint64_t address = TimestampPacketHelper::getContextEndGpuAddress(*timestampPacketNode);
    PipeControlArgs args;
    MemorySynchronizationCommands<GfxFamily>::addBarrierWithPostSyncOperation(
        *cmdStream,
        PostSyncMode::immediateData,
        address,
        0,
        rootDeviceEnvironment,
        args);
}

template <typename GfxFamily>
template <typename WalkerType>
size_t EnqueueOperation<GfxFamily>::getSizeRequiredCSKernel(bool reserveProfilingCmdsSpace, bool reservePerfCounters, CommandQueue &commandQueue, const Kernel *pKernel, const DispatchInfo &dispatchInfo) {
    size_t size = sizeof(typename GfxFamily::GPGPU_WALKER) + HardwareCommandsHelper<GfxFamily>::getSizeRequiredCS() +
                  sizeof(PIPE_CONTROL) * (MemorySynchronizationCommands<GfxFamily>::isBarrierWaRequired(commandQueue.getDevice().getRootDeviceEnvironment()) ? 2 : 1);
    size += PreemptionHelper::getPreemptionWaCsSize<GfxFamily>(commandQueue.getDevice());
    if (reserveProfilingCmdsSpace) {
        size += 2 * sizeof(PIPE_CONTROL) + 2 * sizeof(typename GfxFamily::MI_STORE_REGISTER_MEM);
    }
    size += PerformanceCounters::getGpuCommandsSize(commandQueue.getPerfCounters(), commandQueue.getGpgpuEngine().osContext->getEngineType(), reservePerfCounters);
    size += GpgpuWalkerHelper<GfxFamily>::getSizeForWADisableLSQCROPERFforOCL(pKernel);
    size += GpgpuWalkerHelper<GfxFamily>::getSizeForWaDisableRccRhwoOptimization(pKernel);

    return size;
}

template <typename GfxFamily>
size_t EnqueueOperation<GfxFamily>::getSizeRequiredForTimestampPacketWrite() {
    return sizeof(PIPE_CONTROL);
}

template <typename GfxFamily>
void GpgpuWalkerHelper<GfxFamily>::adjustMiStoreRegMemMode(MI_STORE_REG_MEM *storeCmd) {
}

template <typename GfxFamily>
void GpgpuWalkerHelper<GfxFamily>::dispatchProfilingCommandsStart(
    TagNodeBase &hwTimeStamps,
    LinearStream *commandStream,
    const RootDeviceEnvironment &rootDeviceEnvironment) {
    using MI_STORE_REGISTER_MEM = typename GfxFamily::MI_STORE_REGISTER_MEM;

    // PIPE_CONTROL with timestamp post-sync for the global start timestamp
    uint64_t timeStampAddress = hwTimeStamps.getGpuAddress() + offsetof(HwTimeStamps, globalStartTS);
    PipeControlArgs args;
    MemorySynchronizationCommands<GfxFamily>::addBarrierWithPostSyncOperation(
        *commandStream,
        PostSyncMode::timestamp,
        timeStampAddress,
        0llu,
        rootDeviceEnvironment,
        args);

    auto &gfxCoreHelper = rootDeviceEnvironment.getHelper<GfxCoreHelper>();
    if (!gfxCoreHelper.useOnlyGlobalTimestamps()) {
        // MI_STORE_REGISTER_MEM for the context-local start timestamp
        timeStampAddress = hwTimeStamps.getGpuAddress() + offsetof(HwTimeStamps, contextStartTS);

        // low part
        auto pMICmdLow = commandStream->getSpaceForCmd<MI_STORE_REGISTER_MEM>();
        MI_STORE_REGISTER_MEM cmd = GfxFamily::cmdInitStoreRegisterMem;
        adjustMiStoreRegMemMode(&cmd);
        cmd.setRegisterAddress(RegisterOffsets::gpThreadTimeRegAddressOffsetLow);
        cmd.setMemoryAddress(timeStampAddress);
        *pMICmdLow = cmd;
    }
}

template <typename GfxFamily>
void GpgpuWalkerHelper<GfxFamily>::dispatchProfilingCommandsEnd(
    TagNodeBase &hwTimeStamps,
    LinearStream *commandStream,
    const RootDeviceEnvironment &rootDeviceEnvironment) {
    using MI_STORE_REGISTER_MEM = typename GfxFamily::MI_STORE_REGISTER_MEM;

    // PIPE_CONTROL with timestamp post-sync for the global end timestamp
    uint64_t timeStampAddress = hwTimeStamps.getGpuAddress() + offsetof(HwTimeStamps, globalEndTS);
    PipeControlArgs args;
    MemorySynchronizationCommands<GfxFamily>::addBarrierWithPostSyncOperation(
        *commandStream,
        PostSyncMode::timestamp,
        timeStampAddress,
        0llu,
        rootDeviceEnvironment,
        args);

    auto &gfxCoreHelper = rootDeviceEnvironment.getHelper<GfxCoreHelper>();
    if (!gfxCoreHelper.useOnlyGlobalTimestamps()) {
        // MI_STORE_REGISTER_MEM for the context-local end timestamp
        timeStampAddress = hwTimeStamps.getGpuAddress() + offsetof(HwTimeStamps, contextEndTS);

        // low part
        auto pMICmdLow = commandStream->getSpaceForCmd<MI_STORE_REGISTER_MEM>();
        MI_STORE_REGISTER_MEM cmd = GfxFamily::cmdInitStoreRegisterMem;
        adjustMiStoreRegMemMode(&cmd);
        cmd.setRegisterAddress(RegisterOffsets::gpThreadTimeRegAddressOffsetLow);
        cmd.setMemoryAddress(timeStampAddress);
        *pMICmdLow = cmd;
    }
}

template <typename GfxFamily>
size_t EnqueueOperation<GfxFamily>::getSizeForCacheFlushAfterWalkerCommands(const Kernel &kernel, const CommandQueue &commandQueue) {
    return 0;
}

} // namespace NEO
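// Illustrative sketch (not part of the original file): how a consumer of the
// HwTimeStamps node filled by dispatchProfilingCommandsStart/End could derive
// kernel durations. The variable names below are hypothetical, and converting
// raw ticks to nanoseconds requires the device-specific timestamp period:
//
//   uint64_t globalTicks  = hwTimeStamps.globalEndTS - hwTimeStamps.globalStartTS;
//   uint64_t contextTicks = hwTimeStamps.contextEndTS - hwTimeStamps.contextStartTS;
//
// Note that only the low 32 bits of the context timestamp register are captured
// ("low part" above), so contextTicks is meaningful only within that range.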