/*
 * Copyright (C) 2019-2022 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "shared/source/aub_mem_dump/aub_mem_dump.h"
#include "shared/source/command_container/command_encoder.h"
#include "shared/source/execution_environment/root_device_environment.h"
#include "shared/source/gmm_helper/gmm.h"
#include "shared/source/gmm_helper/gmm_helper.h"
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/basic_math.h"
#include "shared/source/helpers/constants.h"
#include "shared/source/helpers/hw_helper.h"
#include "shared/source/helpers/hw_info.h"
#include "shared/source/helpers/pipe_control_args.h"
#include "shared/source/helpers/preamble.h"
#include "shared/source/helpers/timestamp_packet.h"
#include "shared/source/memory_manager/allocation_properties.h"
#include "shared/source/memory_manager/graphics_allocation.h"
#include "shared/source/os_interface/hw_info_config.h"
#include "shared/source/os_interface/os_interface.h"
#include "shared/source/utilities/tag_allocator.h"

namespace NEO {

template <typename GfxFamily>
const AuxTranslationMode HwHelperHw<GfxFamily>::defaultAuxTranslationMode = AuxTranslationMode::Builtin;

// Buffers above 4KB are considered worth compressing; a debug flag can override the heuristic.
template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isBufferSizeSuitableForCompression(const size_t size, const HardwareInfo &hwInfo) const {
    if (DebugManager.flags.OverrideBufferSuitableForRenderCompression.get() != -1) {
        return !!DebugManager.flags.OverrideBufferSuitableForRenderCompression.get();
    }
    return size > KB;
}

template <typename GfxFamily>
size_t HwHelperHw<GfxFamily>::getMax3dImageWidthOrHeight() const {
    return 16384;
}

template <typename GfxFamily>
uint64_t HwHelperHw<GfxFamily>::getMaxMemAllocSize() const {
    // With stateful messages we have an allocation cap of 4GB.
    // Reason to subtract 8KB is that the driver may pad the buffer with additional pages for overfetching.
    return (4ULL * MemoryConstants::gigaByte) - (8ULL * MemoryConstants::kiloByte);
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isStatelesToStatefullWithOffsetSupported() const {
    return true;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isL3Configurable(const HardwareInfo &hwInfo) {
    return PreambleHelper<GfxFamily>::isL3Configurable(hwInfo);
}

// Selects the SIP (system instruction pointer) kernel: plain CSR SIP when not debugging,
// otherwise a debug SIP (bindless variant behind a debug flag).
template <typename GfxFamily>
SipKernelType HwHelperHw<GfxFamily>::getSipKernelType(bool debuggingActive) const {
    if (!debuggingActive) {
        return SipKernelType::Csr;
    }
    return DebugManager.flags.UseBindlessDebugSip.get()
               ? SipKernelType::DbgBindless
               : SipKernelType::DbgCsr;
}

template <typename GfxFamily>
size_t HwHelperHw<GfxFamily>::getMaxBarrierRegisterPerSlice() const {
    return 32;
}

template <typename GfxFamily>
size_t HwHelperHw<GfxFamily>::getPaddingForISAAllocation() const {
    return 512;
}

template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::getPitchAlignmentForImage(const HardwareInfo *hwInfo) const {
    return 4u;
}

template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::getMaxNumSamplers() const {
    return 16;
}

template <typename GfxFamily>
const AubMemDump::LrcaHelper &HwHelperHw<GfxFamily>::getCsTraits(aub_stream::EngineType engineType) const {
    return *AUBFamilyMapper<GfxFamily>::csTraits[engineType];
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isFenceAllocationRequired(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
inline bool HwHelperHw<GfxFamily>::checkResourceCompatibility(GraphicsAllocation &graphicsAllocation) {
    return true;
}

// Programs a RENDER_SURFACE_STATE describing a raw buffer: encodes the (aligned) size into
// the Width/Height/Depth fields, picks MOCS based on cacheline alignment, and sets up
// compression (CCS aux) parameters when the allocation is compressed.
// NOTE: this member uses template parameter name `Family` (matches `typename Family::...` uses below).
template <typename Family>
void HwHelperHw<Family>::setRenderSurfaceStateForBuffer(const RootDeviceEnvironment &rootDeviceEnvironment,
                                                        void *surfaceStateBuffer,
                                                        size_t bufferSize,
                                                        uint64_t gpuVa,
                                                        size_t offset,
                                                        uint32_t pitch,
                                                        GraphicsAllocation *gfxAlloc,
                                                        bool isReadOnly,
                                                        uint32_t surfaceType,
                                                        bool forceNonAuxMode,
                                                        bool useL1Cache) {
    using RENDER_SURFACE_STATE = typename Family::RENDER_SURFACE_STATE;
    using SURFACE_FORMAT = typename RENDER_SURFACE_STATE::SURFACE_FORMAT;
    using AUXILIARY_SURFACE_MODE = typename RENDER_SURFACE_STATE::AUXILIARY_SURFACE_MODE;

    auto gmmHelper = rootDeviceEnvironment.getGmmHelper();
    auto surfaceState = reinterpret_cast<RENDER_SURFACE_STATE *>(surfaceStateBuffer);
    RENDER_SURFACE_STATE state = Family::cmdInitRenderSurfaceState;
    auto surfaceSize = alignUp(bufferSize, 4);

    // Surface length is encoded as (size - 1) split across Width/Height/Depth bitfields.
    SURFACE_STATE_BUFFER_LENGTH Length = {0};
    Length.Length = static_cast<uint32_t>(surfaceSize - 1);

    state.setWidth(Length.SurfaceState.Width + 1);
    state.setHeight(Length.SurfaceState.Height + 1);
    state.setDepth(Length.SurfaceState.Depth + 1);
    if (pitch) {
        state.setSurfacePitch(pitch);
    }

    // The graphics allocation for Host Ptr surface will be created in makeResident call
    // and GPU address is expected to be the same as CPU address.
    auto bufferStateAddress = (gfxAlloc != nullptr) ? gfxAlloc->getGpuAddress() : gpuVa;
    bufferStateAddress += offset;

    auto bufferStateSize = (gfxAlloc != nullptr) ? gfxAlloc->getUnderlyingBufferSize() : bufferSize;

    state.setSurfaceType(static_cast<typename RENDER_SURFACE_STATE::SURFACE_TYPE>(surfaceType));
    state.setSurfaceFormat(SURFACE_FORMAT::SURFACE_FORMAT_RAW);
    state.setSurfaceVerticalAlignment(RENDER_SURFACE_STATE::SURFACE_VERTICAL_ALIGNMENT_VALIGN_4);
    state.setSurfaceHorizontalAlignment(RENDER_SURFACE_STATE::SURFACE_HORIZONTAL_ALIGNMENT_HALIGN_DEFAULT);

    state.setTileMode(RENDER_SURFACE_STATE::TILE_MODE_LINEAR);
    state.setVerticalLineStride(0);
    state.setVerticalLineStrideOffset(0);

    // Cacheline-misaligned writable buffers need the misaligned MOCS variant.
    // NOTE(review): alignment template argument was lost in extraction; presumably
    // MemoryConstants::cacheLineSize — confirm against the upstream file.
    if ((isAligned<MemoryConstants::cacheLineSize>(bufferStateAddress) && isAligned<MemoryConstants::cacheLineSize>(bufferStateSize)) || isReadOnly) {
        state.setMemoryObjectControlState(gmmHelper->getMOCS(GMM_RESOURCE_USAGE_OCL_BUFFER));
    } else {
        state.setMemoryObjectControlState(gmmHelper->getMOCS(GMM_RESOURCE_USAGE_OCL_BUFFER_CACHELINE_MISALIGNED));
    }

    if (DebugManager.flags.OverrideMocsIndexForScratchSpace.get() != -1) {
        // MOCS index occupies the field above the LSB, hence the shift by 1.
        auto mocsIndex = static_cast<uint32_t>(DebugManager.flags.OverrideMocsIndexForScratchSpace.get()) << 1;
        state.setMemoryObjectControlState(mocsIndex);
    }

    state.setSurfaceBaseAddress(bufferStateAddress);

    bool isCompressionEnabled = gfxAlloc ? gfxAlloc->isCompressionEnabled() : false;
    if (isCompressionEnabled && !forceNonAuxMode) {
        // It's expected to not program pitch/qpitch/baseAddress for Aux surface in CCS scenarios.
        EncodeSurfaceState<Family>::setCoherencyType(&state, RENDER_SURFACE_STATE::COHERENCY_TYPE_GPU_COHERENT);
        EncodeSurfaceState<Family>::setBufferAuxParamsForCCS(&state);
    } else {
        EncodeSurfaceState<Family>::setCoherencyType(&state, RENDER_SURFACE_STATE::COHERENCY_TYPE_IA_COHERENT);
        state.setAuxiliarySurfaceMode(AUXILIARY_SURFACE_MODE::AUXILIARY_SURFACE_MODE_AUX_NONE);
    }

    setL1CachePolicy(useL1Cache, &state, rootDeviceEnvironment.getHardwareInfo());

    *surfaceState = state;
}

// Base implementation: no L1 cache policy programming (specialized per family where supported).
template <typename GfxFamily>
void NEO::HwHelperHw<GfxFamily>::setL1CachePolicy(bool useL1Cache, typename GfxFamily::RENDER_SURFACE_STATE *surfaceState, const HardwareInfo *hwInfo) {}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::getEnableLocalMemory(const HardwareInfo &hwInfo) const {
    if (DebugManager.flags.EnableLocalMemory.get() != -1) {
        return DebugManager.flags.EnableLocalMemory.get();
    } else if (DebugManager.flags.AUBDumpForceAllToLocalMemory.get()) {
        return true;
    }

    return OSInterface::osEnableLocalMemory && isLocalMemoryEnabled(hwInfo);
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::is1MbAlignmentSupported(const HardwareInfo &hwInfo, bool isCompressionEnabled) const {
    return false;
}

// Returns the aux translation mode, honoring the debug override but falling back to the
// builtin path when Blit is requested on hardware without blitter support.
template <typename GfxFamily>
AuxTranslationMode HwHelperHw<GfxFamily>::getAuxTranslationMode(const HardwareInfo &hwInfo) {
    auto mode = HwHelperHw<GfxFamily>::defaultAuxTranslationMode;
    if (DebugManager.flags.ForceAuxTranslationMode.get() != -1) {
        mode = static_cast<AuxTranslationMode>(DebugManager.flags.ForceAuxTranslationMode.get());
    }

    if (mode == AuxTranslationMode::Blit && !hwInfo.capabilityTable.blitterOperationsSupported) {
        DEBUG_BREAK_IF(true);
        mode = AuxTranslationMode::Builtin;
    }

    return mode;
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addPipeControlAndProgramPostSyncOperation(
    LinearStream &commandStream,
    POST_SYNC_OPERATION operation,
    uint64_t gpuAddress,
    uint64_t immediateData,
    const HardwareInfo &hwInfo,
    PipeControlArgs &args) {
    void *commandBuffer = commandStream.getSpace(
        MemorySynchronizationCommands<GfxFamily>::getSizeForPipeControlWithPostSyncOperation(hwInfo));

    MemorySynchronizationCommands<GfxFamily>::setPipeControlAndProgramPostSyncOperation(
        commandBuffer, operation, gpuAddress, immediateData, hwInfo, args);
}

// Emits, in order: pre-WA pipe control, the post-sync pipe control itself, then any
// family-specific additional synchronization. `commandsBuffer` is advanced in place.
template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setPipeControlAndProgramPostSyncOperation(
    void *&commandsBuffer,
    POST_SYNC_OPERATION operation,
    uint64_t gpuAddress,
    uint64_t immediateData,
    const HardwareInfo &hwInfo,
    PipeControlArgs &args) {
    MemorySynchronizationCommands<GfxFamily>::setPipeControlWA(commandsBuffer, gpuAddress, hwInfo);

    setPostSyncExtraProperties(args, hwInfo);
    MemorySynchronizationCommands<GfxFamily>::setPipeControlWithPostSync(commandsBuffer, operation, gpuAddress, immediateData, args);

    MemorySynchronizationCommands<GfxFamily>::setAdditionalSynchronization(commandsBuffer, gpuAddress, hwInfo);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setPipeControlWithPostSync(void *&commandsBuffer,
                                                                          POST_SYNC_OPERATION operation,
                                                                          uint64_t gpuAddress,
                                                                          uint64_t immediateData,
                                                                          PipeControlArgs &args) {
    PIPE_CONTROL pipeControl = GfxFamily::cmdInitPipeControl;
    setPipeControl(pipeControl, args);
    pipeControl.setPostSyncOperation(operation);
    // GPU address is split into low 48 bits (Address) and the high dword.
    pipeControl.setAddress(static_cast<uint32_t>(gpuAddress & 0x0000FFFFFFFFULL));
    pipeControl.setAddressHigh(static_cast<uint32_t>(gpuAddress >> 32));
    if (operation == POST_SYNC_OPERATION::POST_SYNC_OPERATION_WRITE_IMMEDIATE_DATA) {
        pipeControl.setImmediateData(immediateData);
    }
    *reinterpret_cast<PIPE_CONTROL *>(commandsBuffer) = pipeControl;
    commandsBuffer = ptrOffset(commandsBuffer, sizeof(PIPE_CONTROL));
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addPipeControlWithPostSync(
    LinearStream &commandStream,
    POST_SYNC_OPERATION operation,
    uint64_t gpuAddress,
    uint64_t immediateData,
    PipeControlArgs &args) {
    void *pipeControl = commandStream.getSpace(sizeof(PIPE_CONTROL));
    setPipeControlWithPostSync(pipeControl, operation, gpuAddress, immediateData, args);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addPipeControlWA(LinearStream &commandStream, uint64_t gpuAddress, const HardwareInfo &hwInfo) {
    size_t requiredSize = MemorySynchronizationCommands<GfxFamily>::getSizeForPipeControlWA(hwInfo);
    void *commandBuffer = commandStream.getSpace(requiredSize);
    setPipeControlWA(commandBuffer, gpuAddress, hwInfo);
}

// Workaround pipe control (emitted only on platforms that require it) followed by
// additional synchronization; `commandsBuffer` is advanced in place.
template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setPipeControlWA(void *&commandsBuffer, uint64_t gpuAddress, const HardwareInfo &hwInfo) {
    if (MemorySynchronizationCommands<GfxFamily>::isPipeControlWArequired(hwInfo)) {
        PIPE_CONTROL cmd = GfxFamily::cmdInitPipeControl;
        MemorySynchronizationCommands<GfxFamily>::setPipeControlWAFlags(cmd);
        *reinterpret_cast<PIPE_CONTROL *>(commandsBuffer) = cmd;
        commandsBuffer = ptrOffset(commandsBuffer, sizeof(PIPE_CONTROL));

        MemorySynchronizationCommands<GfxFamily>::setAdditionalSynchronization(commandsBuffer, gpuAddress, hwInfo);
    }
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addAdditionalSynchronization(LinearStream &commandStream, uint64_t gpuAddress, const HardwareInfo &hwInfo) {
    size_t requiredSize = MemorySynchronizationCommands<GfxFamily>::getSizeForSingleAdditionalSynchronization(hwInfo);
    void *commandBuffer = commandStream.getSpace(requiredSize);
    setAdditionalSynchronization(commandBuffer, gpuAddress, hwInfo);
}

// Copies the PipeControlArgs flags into the command; debug flags can force-flush or
// force-skip all caches (DoNotFlushCaches intentionally leaves tlbInvalidate untouched).
template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setPipeControl(typename GfxFamily::PIPE_CONTROL &pipeControl, PipeControlArgs &args) {
    pipeControl.setCommandStreamerStallEnable(true);
    pipeControl.setConstantCacheInvalidationEnable(args.constantCacheInvalidationEnable);
    pipeControl.setInstructionCacheInvalidateEnable(args.instructionCacheInvalidateEnable);
    pipeControl.setPipeControlFlushEnable(args.pipeControlFlushEnable);
    pipeControl.setRenderTargetCacheFlushEnable(args.renderTargetCacheFlushEnable);
    pipeControl.setStateCacheInvalidationEnable(args.stateCacheInvalidationEnable);
    pipeControl.setTextureCacheInvalidationEnable(args.textureCacheInvalidationEnable);
    pipeControl.setVfCacheInvalidationEnable(args.vfCacheInvalidationEnable);
    pipeControl.setTlbInvalidate(args.tlbInvalidation);
    pipeControl.setNotifyEnable(args.notifyEnable);
    pipeControl.setDcFlushEnable(args.dcFlushEnable);

    if constexpr (GfxFamily::isUsingGenericMediaStateClear) {
        pipeControl.setGenericMediaStateClear(args.genericMediaStateClear);
    }

    setPipeControlExtraProperties(pipeControl, args);

    if (DebugManager.flags.FlushAllCaches.get()) {
        pipeControl.setDcFlushEnable(true);
        pipeControl.setRenderTargetCacheFlushEnable(true);
        pipeControl.setInstructionCacheInvalidateEnable(true);
        pipeControl.setTextureCacheInvalidationEnable(true);
        pipeControl.setPipeControlFlushEnable(true);
        pipeControl.setVfCacheInvalidationEnable(true);
        pipeControl.setConstantCacheInvalidationEnable(true);
        pipeControl.setStateCacheInvalidationEnable(true);
        pipeControl.setTlbInvalidate(true);
    }
    if (DebugManager.flags.DoNotFlushCaches.get()) {
        pipeControl.setDcFlushEnable(false);
        pipeControl.setRenderTargetCacheFlushEnable(false);
        pipeControl.setInstructionCacheInvalidateEnable(false);
        pipeControl.setTextureCacheInvalidationEnable(false);
        pipeControl.setPipeControlFlushEnable(false);
        pipeControl.setVfCacheInvalidationEnable(false);
        pipeControl.setConstantCacheInvalidationEnable(false);
        pipeControl.setStateCacheInvalidationEnable(false);
    }
}

template <typename GfxFamily>
bool MemorySynchronizationCommands<GfxFamily>::getDcFlushEnable(bool isFlushPreferred, const HardwareInfo &hwInfo) {
    if (isFlushPreferred) {
        const auto &hwInfoConfig = *NEO::HwInfoConfig::get(hwInfo.platform.eProductFamily);
        return hwInfoConfig.isDcFlushAllowed();
    }
    return false;
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addPipeControl(LinearStream &commandStream, PipeControlArgs &args) {
    using PIPE_CONTROL = typename GfxFamily::PIPE_CONTROL;
    PIPE_CONTROL cmd = GfxFamily::cmdInitPipeControl;
    MemorySynchronizationCommands<GfxFamily>::setPipeControl(cmd, args);
    auto pipeControl = commandStream.getSpaceForCmd<PIPE_CONTROL>();
    *pipeControl = cmd;
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addPipeControlWithCSStallOnly(LinearStream &commandStream) {
    using PIPE_CONTROL = typename GfxFamily::PIPE_CONTROL;
    PIPE_CONTROL cmd = GfxFamily::cmdInitPipeControl;
    cmd.setCommandStreamerStallEnable(true);
    auto pipeControl = commandStream.getSpaceForCmd<PIPE_CONTROL>();
    *pipeControl = cmd;
}

template <typename GfxFamily>
size_t MemorySynchronizationCommands<GfxFamily>::getSizeForSinglePipeControl() {
    return sizeof(typename GfxFamily::PIPE_CONTROL);
}

template <typename GfxFamily>
size_t MemorySynchronizationCommands<GfxFamily>::getSizeForPipeControlWithPostSyncOperation(const HardwareInfo &hwInfo) {
    size_t size = getSizeForSinglePipeControl() +
                  getSizeForPipeControlWA(hwInfo) +
                  getSizeForSingleAdditionalSynchronization(hwInfo);
    return size;
}

template <typename GfxFamily>
size_t MemorySynchronizationCommands<GfxFamily>::getSizeForPipeControlWA(const HardwareInfo &hwInfo) {
    size_t size = 0;
    if (MemorySynchronizationCommands<GfxFamily>::isPipeControlWArequired(hwInfo)) {
        size = getSizeForSinglePipeControl() +
               getSizeForSingleAdditionalSynchronization(hwInfo);
    }
    return size;
}

// Base family: no additional synchronization commands required.
template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setAdditionalSynchronization(void *&commandsBuffer, uint64_t gpuAddress, const HardwareInfo &hwInfo) {
}

template <typename GfxFamily>
inline size_t MemorySynchronizationCommands<GfxFamily>::getSizeForSingleAdditionalSynchronization(const HardwareInfo &hwInfo) {
    return 0u;
}

template <typename GfxFamily>
inline size_t MemorySynchronizationCommands<GfxFamily>::getSizeForAdditonalSynchronization(const HardwareInfo &hwInfo) {
    return 0u;
}

template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::getMetricsLibraryGenId() const {
    return static_cast<uint32_t>(MetricsLibraryApi::ClientGen::Gen9);
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::tilingAllowed(bool isSharedContext, bool isImage1d, bool forceLinearStorage) {
    if (DebugManager.flags.ForceLinearImages.get() || forceLinearStorage || isSharedContext) {
        return false;
    }
    return !isImage1d;
}

// SLM sizes are programmed as powers of two with a 1KB minimum and a 64KB hard cap.
template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::alignSlmSize(uint32_t slmSize) {
    if (slmSize == 0u) {
        return 0u;
    }
    slmSize = std::max(slmSize, 1024u);
    slmSize = Math::nextPowerOfTwo(slmSize);
    UNRECOVERABLE_IF(slmSize > 64u * KB);
    return slmSize;
}

// Encodes the SLM size as log2(size) - 9 (1KB -> 1, 64KB -> 7); zero size yields zero.
template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::computeSlmValues(const HardwareInfo &hwInfo, uint32_t slmSize) {
    auto value = std::max(slmSize, 1024u);
    value = Math::nextPowerOfTwo(value);
    value = Math::getMinLsbSet(value);
    value = value - 9;
    DEBUG_BREAK_IF(value > 7);
    return value * !!slmSize;
}

template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::getBarriersCountFromHasBarriers(uint32_t hasBarriers) {
    return hasBarriers;
}

template <typename GfxFamily>
inline bool HwHelperHw<GfxFamily>::isOffsetToSkipSetFFIDGPWARequired(const HardwareInfo &hwInfo) const {
    return false;
}

// True when the current stepping lies in [lowestSteppingWithBug, steppingWithFix);
// returns false if either stepping cannot be mapped to a hw revision id.
template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isWorkaroundRequired(uint32_t lowestSteppingWithBug, uint32_t steppingWithFix, const HardwareInfo &hwInfo) const {
    const auto hwInfoConfig = HwInfoConfig::get(hwInfo.platform.eProductFamily);
    auto lowestHwRevIdWithBug = hwInfoConfig->getHwRevIdFromStepping(lowestSteppingWithBug, hwInfo);
    auto hwRevIdWithFix = hwInfoConfig->getHwRevIdFromStepping(steppingWithFix, hwInfo);
    if ((lowestHwRevIdWithBug == CommonConstants::invalidStepping) || (hwRevIdWithFix == CommonConstants::invalidStepping)) {
        return false;
    }
    return (lowestHwRevIdWithBug <= hwInfo.platform.usRevId && hwInfo.platform.usRevId < hwRevIdWithFix);
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isForceDefaultRCSEngineWARequired(const HardwareInfo &hwInfo) {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isWaDisableRccRhwoOptimizationRequired() const {
    return false;
}

template <typename GfxFamily>
inline uint32_t HwHelperHw<GfxFamily>::getMinimalSIMDSize() {
    return 8u;
}

// A blitter copy is needed for local-memory allocations the CPU cannot touch:
// either CPU access is disallowed by the platform, or the allocation is not lockable.
template <typename GfxFamily>
inline bool HwHelperHw<GfxFamily>::isBlitCopyRequiredForLocalMemory(const HardwareInfo &hwInfo, const GraphicsAllocation &allocation) const {
    return allocation.isAllocatedInLocalMemoryPool() &&
           (HwInfoConfig::get(hwInfo.platform.eProductFamily)->getLocalMemoryAccessMode(hwInfo) == LocalMemoryAccessMode::CpuAccessDisallowed ||
            !allocation.isAllocationLockable());
}

// Creates the timestamp-packet tag allocator; a debug flag can force 32- or 64-bit packets,
// otherwise the family's native TimestampPacketType is used.
template <typename GfxFamily>
std::unique_ptr<TagAllocatorBase> HwHelperHw<GfxFamily>::createTimestampPacketAllocator(const std::vector<uint32_t> &rootDeviceIndices, MemoryManager *memoryManager,
                                                                                        size_t initialTagCount, CommandStreamReceiverType csrType,
                                                                                        DeviceBitfield deviceBitfield) const {
    // Non-HW CSRs (AUB/TBX) keep nodes alive so dumps stay valid.
    bool doNotReleaseNodes = (csrType > CommandStreamReceiverType::CSR_HW) ||
                             DebugManager.flags.DisableTimestampPacketOptimizations.get();

    auto tagAlignment = getTimestampPacketAllocatorAlignment();

    if (DebugManager.flags.OverrideTimestampPacketSize.get() != -1) {
        if (DebugManager.flags.OverrideTimestampPacketSize.get() == 4) {
            using TimestampPackets32T = TimestampPackets<uint32_t, GfxFamily::timestampPacketCount>;
            return std::make_unique<TagAllocator<TimestampPackets32T>>(rootDeviceIndices, memoryManager, initialTagCount, tagAlignment, sizeof(TimestampPackets32T), doNotReleaseNodes, deviceBitfield);
        } else if (DebugManager.flags.OverrideTimestampPacketSize.get() == 8) {
            using TimestampPackets64T = TimestampPackets<uint64_t, GfxFamily::timestampPacketCount>;
            return std::make_unique<TagAllocator<TimestampPackets64T>>(rootDeviceIndices, memoryManager, initialTagCount, tagAlignment, sizeof(TimestampPackets64T), doNotReleaseNodes, deviceBitfield);
        } else {
            UNRECOVERABLE_IF(true);
        }
    }

    using TimestampPacketType = typename GfxFamily::TimestampPacketType;
    using TimestampPacketsT = TimestampPackets<TimestampPacketType, GfxFamily::timestampPacketCount>;

    return std::make_unique<TagAllocator<TimestampPacketsT>>(rootDeviceIndices, memoryManager, initialTagCount, tagAlignment, sizeof(TimestampPacketsT), doNotReleaseNodes, deviceBitfield);
}

template <typename GfxFamily>
size_t HwHelperHw<GfxFamily>::getTimestampPacketAllocatorAlignment() const {
    return MemoryConstants::cacheLineSize * 4;
}

template <typename GfxFamily>
size_t HwHelperHw<GfxFamily>::getSingleTimestampPacketSize() const {
    return HwHelperHw<GfxFamily>::getSingleTimestampPacketSizeHw();
}

template <typename GfxFamily>
size_t HwHelperHw<GfxFamily>::getSingleTimestampPacketSizeHw() {
    if (DebugManager.flags.OverrideTimestampPacketSize.get() != -1) {
        if (DebugManager.flags.OverrideTimestampPacketSize.get() == 4) {
            return TimestampPackets<uint32_t, GfxFamily::timestampPacketCount>::getSinglePacketSize();
        } else if (DebugManager.flags.OverrideTimestampPacketSize.get() == 8) {
            return TimestampPackets<uint64_t, GfxFamily::timestampPacketCount>::getSinglePacketSize();
        } else {
            UNRECOVERABLE_IF(true);
        }
    }

    return TimestampPackets<typename GfxFamily::TimestampPacketType, GfxFamily::timestampPacketCount>::getSinglePacketSize();
}

template <typename GfxFamily>
size_t MemorySynchronizationCommands<GfxFamily>::getSizeForFullCacheFlush() {
    return sizeof(typename GfxFamily::PIPE_CONTROL);
}

// Emits a single pipe control with every cache flush/invalidate bit set.
template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addFullCacheFlush(LinearStream &commandStream, const HardwareInfo &hwInfo) {
    using PIPE_CONTROL = typename GfxFamily::PIPE_CONTROL;

    PIPE_CONTROL *pipeControl = commandStream.getSpaceForCmd<PIPE_CONTROL>();
    PIPE_CONTROL cmd = GfxFamily::cmdInitPipeControl;

    PipeControlArgs args;
    args.dcFlushEnable = MemorySynchronizationCommands<GfxFamily>::getDcFlushEnable(true, hwInfo);
    args.renderTargetCacheFlushEnable = true;
    args.instructionCacheInvalidateEnable = true;
    args.textureCacheInvalidationEnable = true;
    args.pipeControlFlushEnable = true;
    args.constantCacheInvalidationEnable = true;
    args.stateCacheInvalidationEnable = true;
    args.tlbInvalidation = true;
    MemorySynchronizationCommands<GfxFamily>::setCacheFlushExtraProperties(args);
    MemorySynchronizationCommands<GfxFamily>::setPipeControl(cmd, args);
    *pipeControl = cmd;
}

// NOTE(review): StackVec capacities below were lost in extraction; 3 and 6 match the
// declarations in hw_helper.h upstream — confirm against the header.
template <typename GfxFamily>
const StackVec<size_t, 3> HwHelperHw<GfxFamily>::getDeviceSubGroupSizes() const {
    return {8, 16, 32};
}

template <typename GfxFamily>
const StackVec<uint32_t, 6> HwHelperHw<GfxFamily>::getThreadsPerEUConfigs() const {
    return {};
}

template <typename GfxFamily>
void HwHelperHw<GfxFamily>::setExtraAllocationData(AllocationData &allocationData, const AllocationProperties &properties, const HardwareInfo &hwInfo) const {}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isBankOverrideRequired(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
int32_t HwHelperHw<GfxFamily>::getDefaultThreadArbitrationPolicy() const {
    return 0;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::useOnlyGlobalTimestamps() const {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::useSystemMemoryPlacementForISA(const HardwareInfo &hwInfo) const {
    return !getEnableLocalMemory(hwInfo);
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isCpuImageTransferPreferred(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
bool MemorySynchronizationCommands<GfxFamily>::isPipeControlPriorToPipelineSelectWArequired(const HardwareInfo &hwInfo) {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isRcsAvailable(const HardwareInfo &hwInfo) const {
    return true;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isCooperativeDispatchSupported(const EngineGroupType engineGroupType, const HardwareInfo &hwInfo) const {
    return true;
}

template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::adjustMaxWorkGroupCount(uint32_t maxWorkGroupCount, const EngineGroupType engineGroupType,
                                                        const HardwareInfo &hwInfo, bool isEngineInstanced) const {
    return maxWorkGroupCount;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isKmdMigrationSupported(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isCooperativeEngineSupported(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isCopyOnlyEngineType(EngineGroupType type) const {
    return NEO::EngineGroupType::Copy == type;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isSipWANeeded(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isAdditionalFeatureFlagRequired(const FeatureTable *featureTable) const {
    return false;
}

template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::getDefaultRevisionId(const HardwareInfo &hwInfo) const {
    return 0u;
}

template <typename GfxFamily>
uint32_t HwHelperHw<GfxFamily>::getNumCacheRegions() const {
    return 0;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isSubDeviceEngineSupported(const HardwareInfo &hwInfo, const DeviceBitfield &deviceBitfield, aub_stream::EngineType engineType) const {
    return true;
}

template <typename GfxFamily>
size_t HwHelperHw<GfxFamily>::getPreemptionAllocationAlignment() const {
    return 256 * MemoryConstants::kiloByte;
}

template <typename GfxFamily>
void HwHelperHw<GfxFamily>::applyAdditionalCompressionSettings(Gmm &gmm, bool isNotCompressed) const {}

template <typename GfxFamily>
void HwHelperHw<GfxFamily>::applyRenderCompressionFlag(Gmm &gmm, uint32_t isCompressed) const {
    gmm.resourceParams.Flags.Info.RenderCompressed = isCompressed;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isEngineTypeRemappingToHwSpecificRequired() const {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isSipKernelAsHexadecimalArrayPreferred() const {
    return false;
}

template <typename GfxFamily>
void HwHelperHw<GfxFamily>::setSipKernelData(uint32_t *&sipKernelBinary, size_t &kernelBinarySize) const {
}

template <typename GfxFamily>
void HwHelperHw<GfxFamily>::adjustPreemptionSurfaceSize(size_t &csrSize) const {
}

template <typename GfxFamily>
void HwHelperHw<GfxFamily>::encodeBufferSurfaceState(EncodeSurfaceStateArgs &args) {
    EncodeSurfaceState<GfxFamily>::encodeBuffer(args);
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::disableL3CacheForDebug(const HardwareInfo &) const {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isRevisionSpecificBinaryBuiltinRequired() const {
    return false;
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::forceNonGpuCoherencyWA(bool requiresCoherency) const {
    return requiresCoherency;
}

template <typename GfxFamily>
size_t HwHelperHw<GfxFamily>::getBatchBufferEndSize() const {
    return sizeof(typename GfxFamily::MI_BATCH_BUFFER_END);
}

template <typename GfxFamily>
const void *HwHelperHw<GfxFamily>::getBatchBufferEndReference() const {
    return reinterpret_cast<const void *>(&GfxFamily::cmdInitBatchBufferEnd);
}

template <typename GfxFamily>
bool HwHelperHw<GfxFamily>::isPlatformFlushTaskEnabled(const HardwareInfo &hwInfo) const {
    const auto &hwInfoConfig = *NEO::HwInfoConfig::get(hwInfo.platform.eProductFamily);
    return hwInfoConfig.isFlushTaskAllowed();
}

} // namespace NEO