/*
 * Copyright (C) 2019-2022 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "shared/source/aub_mem_dump/aub_mem_dump.h"
#include "shared/source/command_container/command_encoder.h"
#include "shared/source/execution_environment/root_device_environment.h"
#include "shared/source/gmm_helper/gmm.h"
#include "shared/source/gmm_helper/gmm_helper.h"
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/basic_math.h"
#include "shared/source/helpers/constants.h"
#include "shared/source/helpers/hw_helper.h"
#include "shared/source/helpers/hw_info.h"
#include "shared/source/helpers/pipe_control_args.h"
#include "shared/source/helpers/preamble.h"
#include "shared/source/helpers/timestamp_packet.h"
#include "shared/source/memory_manager/allocation_properties.h"
#include "shared/source/memory_manager/graphics_allocation.h"
#include "shared/source/os_interface/hw_info_config.h"
#include "shared/source/os_interface/os_interface.h"
#include "shared/source/utilities/tag_allocator.h"

// Common (cross-generation) implementations of GfxCoreHelperHw and
// MemorySynchronizationCommands. Per-platform specializations override the
// defaults defined here.
//
// NOTE(review): this file was recovered from a copy whose template
// angle-bracket syntax had been stripped. All template headers, template
// argument lists and cast target types below were reconstructed from the
// surviving text and from the file's own naming conventions; spots where the
// arguments could not be derived from the visible text alone carry an
// explicit "verify" note.

namespace NEO {

template <typename GfxFamily>
const AuxTranslationMode GfxCoreHelperHw<GfxFamily>::defaultAuxTranslationMode = AuxTranslationMode::Builtin;

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isBufferSizeSuitableForCompression(const size_t size, const HardwareInfo &hwInfo) const {
    // Debug override wins: any value other than -1 forces the decision.
    if (DebugManager.flags.OverrideBufferSuitableForRenderCompression.get() != -1) {
        return !!DebugManager.flags.OverrideBufferSuitableForRenderCompression.get();
    }
    // Compressing very small buffers is not worth the aux-surface overhead.
    return size > KB;
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getMax3dImageWidthOrHeight() const {
    return 16384;
}

template <typename GfxFamily>
uint64_t GfxCoreHelperHw<GfxFamily>::getMaxMemAllocSize() const {
    // With stateful messages we have an allocation cap of 4GB
    // Reason to subtract 8KB is that driver may pad the buffer with addition pages for over fetching
    return (4ULL * MemoryConstants::gigaByte) - (8ULL * MemoryConstants::kiloByte);
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isStatelessToStatefulWithOffsetSupported() const {
    return true;
}

template <typename GfxFamily>
SipKernelType GfxCoreHelperHw<GfxFamily>::getSipKernelType(bool debuggingActive) const {
    // Without an active debugger the plain CSR SIP kernel is sufficient.
    if (!debuggingActive) {
        return SipKernelType::Csr;
    }
    return DebugManager.flags.UseBindlessDebugSip.get()
               ? SipKernelType::DbgBindless
               : SipKernelType::DbgCsr;
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getMaxBarrierRegisterPerSlice() const {
    return 32;
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getPaddingForISAAllocation() const {
    // Base padding of 512 bytes guards against instruction prefetch past the
    // end of the kernel ISA; the debug flag can extend it by whole pages.
    if (DebugManager.flags.ForceExtendedKernelIsaSize.get() >= 1) {
        return 512 + (MemoryConstants::pageSize * DebugManager.flags.ForceExtendedKernelIsaSize.get());
    }
    return 512;
}

template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::getPitchAlignmentForImage(const RootDeviceEnvironment &rootDeviceEnvironment) const {
    return 4u;
}

template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::getMaxNumSamplers() const {
    return 16;
}

template <typename GfxFamily>
const AubMemDump::LrcaHelper &GfxCoreHelperHw<GfxFamily>::getCsTraits(aub_stream::EngineType engineType) const {
    return *AUBFamilyMapper<GfxFamily>::csTraits[engineType];
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isFenceAllocationRequired(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
inline bool GfxCoreHelperHw<GfxFamily>::checkResourceCompatibility(GraphicsAllocation &graphicsAllocation) const {
    return true;
}

template <typename Family>
void GfxCoreHelperHw<Family>::setRenderSurfaceStateForScratchResource(const RootDeviceEnvironment &rootDeviceEnvironment,
                                                                      void *surfaceStateBuffer,
                                                                      size_t bufferSize,
                                                                      uint64_t gpuVa,
                                                                      size_t offset,
                                                                      uint32_t pitch,
                                                                      GraphicsAllocation *gfxAlloc,
                                                                      bool isReadOnly,
                                                                      uint32_t surfaceType,
                                                                      bool forceNonAuxMode,
                                                                      bool useL1Cache) const {
    using RENDER_SURFACE_STATE = typename Family::RENDER_SURFACE_STATE;
    using SURFACE_FORMAT = typename RENDER_SURFACE_STATE::SURFACE_FORMAT;
    using AUXILIARY_SURFACE_MODE = typename RENDER_SURFACE_STATE::AUXILIARY_SURFACE_MODE;

    auto gmmHelper = rootDeviceEnvironment.getGmmHelper();
    auto surfaceState = reinterpret_cast<RENDER_SURFACE_STATE *>(surfaceStateBuffer);
    RENDER_SURFACE_STATE state = Family::cmdInitRenderSurfaceState;
    auto surfaceSize = alignUp(bufferSize, 4);

    // RAW buffer surfaces encode their size as (length - 1) split across the
    // Width/Height/Depth bit-fields of SURFACE_STATE_BUFFER_LENGTH.
    SURFACE_STATE_BUFFER_LENGTH length = {0};
    length.Length = static_cast<uint32_t>(surfaceSize - 1);

    state.setWidth(length.SurfaceState.Width + 1);
    state.setHeight(length.SurfaceState.Height + 1);
    state.setDepth(length.SurfaceState.Depth + 1);
    if (pitch) {
        state.setSurfacePitch(pitch);
    }

    // The graphics allocation for Host Ptr surface will be created in makeResident call and GPU address is expected to be the same as CPU address
    auto bufferStateAddress = (gfxAlloc != nullptr) ? gfxAlloc->getGpuAddress() : gpuVa;
    bufferStateAddress += offset;

    auto bufferStateSize = (gfxAlloc != nullptr) ? gfxAlloc->getUnderlyingBufferSize() : bufferSize;

    state.setSurfaceType(static_cast<typename RENDER_SURFACE_STATE::SURFACE_TYPE>(surfaceType));

    state.setSurfaceFormat(SURFACE_FORMAT::SURFACE_FORMAT_RAW);
    state.setSurfaceVerticalAlignment(RENDER_SURFACE_STATE::SURFACE_VERTICAL_ALIGNMENT_VALIGN_4);
    state.setSurfaceHorizontalAlignment(RENDER_SURFACE_STATE::SURFACE_HORIZONTAL_ALIGNMENT_HALIGN_DEFAULT);

    state.setTileMode(RENDER_SURFACE_STATE::TILE_MODE_LINEAR);
    state.setVerticalLineStride(0);
    state.setVerticalLineStrideOffset(0);

    // Cacheline-misaligned writable buffers need the dedicated MOCS entry;
    // read-only surfaces are safe with the regular cached OCL buffer MOCS.
    // NOTE(review): the isAligned<> alignment argument was stripped in the
    // recovered copy; cacheline alignment is assumed here - verify.
    if ((isAligned<MemoryConstants::cacheLineSize>(bufferStateAddress) &&
         isAligned<MemoryConstants::cacheLineSize>(bufferStateSize)) ||
        isReadOnly) {
        state.setMemoryObjectControlState(gmmHelper->getMOCS(GMM_RESOURCE_USAGE_OCL_BUFFER));
    } else {
        state.setMemoryObjectControlState(gmmHelper->getMOCS(GMM_RESOURCE_USAGE_OCL_BUFFER_CACHELINE_MISALIGNED));
    }
    if (DebugManager.flags.OverrideMocsIndexForScratchSpace.get() != -1) {
        // MOCS index occupies the control-state field shifted left by one.
        auto mocsIndex = static_cast<uint32_t>(DebugManager.flags.OverrideMocsIndexForScratchSpace.get()) << 1;
        state.setMemoryObjectControlState(mocsIndex);
    }

    state.setSurfaceBaseAddress(bufferStateAddress);

    bool isCompressionEnabled = gfxAlloc ? gfxAlloc->isCompressionEnabled() : false;
    if (isCompressionEnabled && !forceNonAuxMode) {
        // Its expected to not program pitch/qpitch/baseAddress for Aux surface in CCS scenarios
        EncodeSurfaceState<Family>::setCoherencyType(&state, RENDER_SURFACE_STATE::COHERENCY_TYPE_GPU_COHERENT);
        EncodeSurfaceState<Family>::setBufferAuxParamsForCCS(&state);
    } else {
        EncodeSurfaceState<Family>::setCoherencyType(&state, RENDER_SURFACE_STATE::COHERENCY_TYPE_IA_COHERENT);
        state.setAuxiliarySurfaceMode(AUXILIARY_SURFACE_MODE::AUXILIARY_SURFACE_MODE_AUX_NONE);
    }
    setL1CachePolicy(useL1Cache, &state, rootDeviceEnvironment.getHardwareInfo());

    // Write the fully-built state into the caller's buffer in one shot.
    *surfaceState = state;
}

template <typename GfxFamily>
void NEO::GfxCoreHelperHw<GfxFamily>::setL1CachePolicy(bool useL1Cache, typename GfxFamily::RENDER_SURFACE_STATE *surfaceState, const HardwareInfo *hwInfo) const {
    // No-op in the base implementation; platforms with an L1 cache policy
    // field specialize this.
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::getEnableLocalMemory(const HardwareInfo &hwInfo) const {
    if (DebugManager.flags.EnableLocalMemory.get() != -1) {
        return DebugManager.flags.EnableLocalMemory.get();
    } else if (DebugManager.flags.AUBDumpForceAllToLocalMemory.get()) {
        return true;
    }
    // Both the OS interface and the platform must support local memory.
    return OSInterface::osEnableLocalMemory && isLocalMemoryEnabled(hwInfo);
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::is1MbAlignmentSupported(const HardwareInfo &hwInfo, bool isCompressionEnabled) const {
    return false;
}

template <typename GfxFamily>
AuxTranslationMode GfxCoreHelperHw<GfxFamily>::getAuxTranslationMode(const HardwareInfo &hwInfo) {
    auto mode = GfxCoreHelperHw<GfxFamily>::defaultAuxTranslationMode;
    if (DebugManager.flags.ForceAuxTranslationMode.get() != -1) {
        mode = static_cast<AuxTranslationMode>(DebugManager.flags.ForceAuxTranslationMode.get());
    }

    // Blit-based aux translation is only usable when the blitter is present;
    // fall back to the builtin-kernel path otherwise.
    if (mode == AuxTranslationMode::Blit && !hwInfo.capabilityTable.blitterOperationsSupported) {
        DEBUG_BREAK_IF(true);
        mode = AuxTranslationMode::Builtin;
    }

    return mode;
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addBarrierWithPostSyncOperation(LinearStream &commandStream, PostSyncMode postSyncMode, uint64_t gpuAddress, uint64_t immediateData, const HardwareInfo &hwInfo, PipeControlArgs &args) {
    // Reserve the worst-case space up front, then emit into it.
    void *commandBuffer = commandStream.getSpace(
        MemorySynchronizationCommands<GfxFamily>::getSizeForBarrierWithPostSyncOperation(hwInfo, args.tlbInvalidation));

    MemorySynchronizationCommands<GfxFamily>::setBarrierWithPostSyncOperation(commandBuffer, postSyncMode, gpuAddress, immediateData, hwInfo, args);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setBarrierWithPostSyncOperation(
    void *&commandsBuffer,
    PostSyncMode postSyncMode,
    uint64_t gpuAddress,
    uint64_t immediateData,
    const HardwareInfo &hwInfo,
    PipeControlArgs &args) {

    // Order matters: WA barrier first, then the post-synced barrier, then any
    // platform-specific additional synchronization. commandsBuffer is
    // advanced in place so the caller sees the end of the emitted commands.
    MemorySynchronizationCommands<GfxFamily>::setBarrierWa(commandsBuffer, gpuAddress, hwInfo);

    setPostSyncExtraProperties(args, hwInfo);
    MemorySynchronizationCommands<GfxFamily>::setSingleBarrier(commandsBuffer, postSyncMode, gpuAddress, immediateData, args);
    commandsBuffer = ptrOffset(commandsBuffer, getSizeForSingleBarrier(args.tlbInvalidation));

    MemorySynchronizationCommands<GfxFamily>::setAdditionalSynchronization(commandsBuffer, gpuAddress, false, hwInfo);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addSingleBarrier(LinearStream &commandStream, PipeControlArgs &args) {
    // Convenience overload: barrier without any post-sync write.
    addSingleBarrier(commandStream, PostSyncMode::NoWrite, 0, 0, args);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setSingleBarrier(void *commandsBuffer, PipeControlArgs &args) {
    setSingleBarrier(commandsBuffer, PostSyncMode::NoWrite, 0, 0, args);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addSingleBarrier(LinearStream &commandStream, PostSyncMode postSyncMode, uint64_t gpuAddress, uint64_t immediateData, PipeControlArgs &args) {
    auto barrier = commandStream.getSpace(MemorySynchronizationCommands<GfxFamily>::getSizeForSingleBarrier(args.tlbInvalidation));
    setSingleBarrier(barrier, postSyncMode, gpuAddress, immediateData, args);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setSingleBarrier(void *commandsBuffer, PostSyncMode postSyncMode, uint64_t gpuAddress, uint64_t immediateData, PipeControlArgs &args) {
    using PIPE_CONTROL = typename GfxFamily::PIPE_CONTROL;
    PIPE_CONTROL pipeControl = GfxFamily::cmdInitPipeControl;

    pipeControl.setCommandStreamerStallEnable(true);

    // A CS-stall-only barrier programs nothing else; emit and return early.
    if (args.csStallOnly) {
        *reinterpret_cast<PIPE_CONTROL *>(commandsBuffer) = pipeControl;
        return;
    }

    pipeControl.setConstantCacheInvalidationEnable(args.constantCacheInvalidationEnable);
    pipeControl.setInstructionCacheInvalidateEnable(args.instructionCacheInvalidateEnable);
    pipeControl.setPipeControlFlushEnable(args.pipeControlFlushEnable);
    pipeControl.setRenderTargetCacheFlushEnable(args.renderTargetCacheFlushEnable);
    pipeControl.setStateCacheInvalidationEnable(args.stateCacheInvalidationEnable);
    pipeControl.setTextureCacheInvalidationEnable(args.textureCacheInvalidationEnable);
    pipeControl.setVfCacheInvalidationEnable(args.vfCacheInvalidationEnable);
    pipeControl.setTlbInvalidate(args.tlbInvalidation);
    pipeControl.setNotifyEnable(args.notifyEnable);
    pipeControl.setDcFlushEnable(args.dcFlushEnable);
    pipeControl.setDepthCacheFlushEnable(args.depthCacheFlushEnable);
    pipeControl.setDepthStallEnable(args.depthStallEnable);
    pipeControl.setProtectedMemoryDisable(args.protectedMemoryDisable);

    if constexpr (GfxFamily::isUsingGenericMediaStateClear) {
        pipeControl.setGenericMediaStateClear(args.genericMediaStateClear);
    }

    setBarrierExtraProperties(&pipeControl, args);

    // Debug overrides are applied last so they win over args.
    if (DebugManager.flags.FlushAllCaches.get()) {
        pipeControl.setDcFlushEnable(true);
        pipeControl.setRenderTargetCacheFlushEnable(true);
        pipeControl.setInstructionCacheInvalidateEnable(true);
        pipeControl.setTextureCacheInvalidationEnable(true);
        pipeControl.setPipeControlFlushEnable(true);
        pipeControl.setVfCacheInvalidationEnable(true);
        pipeControl.setConstantCacheInvalidationEnable(true);
        pipeControl.setStateCacheInvalidationEnable(true);
        pipeControl.setTlbInvalidate(true);
    }
    if (DebugManager.flags.DoNotFlushCaches.get()) {
        pipeControl.setDcFlushEnable(false);
        pipeControl.setRenderTargetCacheFlushEnable(false);
        pipeControl.setInstructionCacheInvalidateEnable(false);
        pipeControl.setTextureCacheInvalidationEnable(false);
        pipeControl.setPipeControlFlushEnable(false);
        pipeControl.setVfCacheInvalidationEnable(false);
        pipeControl.setConstantCacheInvalidationEnable(false);
        pipeControl.setStateCacheInvalidationEnable(false);
    }

    if (postSyncMode != PostSyncMode::NoWrite) {
        // Post-sync GPU address is split into low 48 bits / high 32 bits.
        pipeControl.setAddress(static_cast<uint32_t>(gpuAddress & 0x0000FFFFFFFFULL));
        pipeControl.setAddressHigh(static_cast<uint32_t>(gpuAddress >> 32));
    }

    if (postSyncMode == PostSyncMode::Timestamp) {
        pipeControl.setPostSyncOperation(PIPE_CONTROL::POST_SYNC_OPERATION::POST_SYNC_OPERATION_WRITE_TIMESTAMP);
    } else if (postSyncMode == PostSyncMode::ImmediateData) {
        pipeControl.setPostSyncOperation(PIPE_CONTROL::POST_SYNC_OPERATION::POST_SYNC_OPERATION_WRITE_IMMEDIATE_DATA);
        pipeControl.setImmediateData(immediateData);
    }

    *reinterpret_cast<PIPE_CONTROL *>(commandsBuffer) = pipeControl;
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addBarrierWa(LinearStream &commandStream, uint64_t gpuAddress, const HardwareInfo &hwInfo) {
    size_t requiredSize = MemorySynchronizationCommands<GfxFamily>::getSizeForBarrierWa(hwInfo);
    void *commandBuffer = commandStream.getSpace(requiredSize);
    setBarrierWa(commandBuffer, gpuAddress, hwInfo);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setBarrierWa(void *&commandsBuffer, uint64_t gpuAddress, const HardwareInfo &hwInfo) {
    using PIPE_CONTROL = typename GfxFamily::PIPE_CONTROL;

    // Workaround barrier (if required on this platform) followed by the
    // platform's additional synchronization; commandsBuffer is advanced.
    if (MemorySynchronizationCommands<GfxFamily>::isBarrierWaRequired(hwInfo)) {
        PIPE_CONTROL cmd = GfxFamily::cmdInitPipeControl;
        MemorySynchronizationCommands<GfxFamily>::setBarrierWaFlags(&cmd);
        *reinterpret_cast<PIPE_CONTROL *>(commandsBuffer) = cmd;
        commandsBuffer = ptrOffset(commandsBuffer, sizeof(PIPE_CONTROL));

        MemorySynchronizationCommands<GfxFamily>::setAdditionalSynchronization(commandsBuffer, gpuAddress, false, hwInfo);
    }
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addAdditionalSynchronization(LinearStream &commandStream, uint64_t gpuAddress, bool acquire, const HardwareInfo &hwInfo) {
    size_t requiredSize = MemorySynchronizationCommands<GfxFamily>::getSizeForSingleAdditionalSynchronization(hwInfo);
    void *commandBuffer = commandStream.getSpace(requiredSize);
    setAdditionalSynchronization(commandBuffer, gpuAddress, acquire, hwInfo);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addAdditionalSynchronizationForDirectSubmission(LinearStream &commandStream, uint64_t gpuAddress, bool acquire, const HardwareInfo &hwInfo) {
    // Direct submission uses the regular additional synchronization by default.
    MemorySynchronizationCommands<GfxFamily>::addAdditionalSynchronization(commandStream, gpuAddress, acquire, hwInfo);
}

template <typename GfxFamily>
bool MemorySynchronizationCommands<GfxFamily>::getDcFlushEnable(bool isFlushPreferred, const HardwareInfo &hwInfo) {
    if (isFlushPreferred) {
        const auto &productHelper = *NEO::ProductHelper::get(hwInfo.platform.eProductFamily);
        return productHelper.isDcFlushAllowed();
    }
    return false;
}

template <typename GfxFamily>
size_t MemorySynchronizationCommands<GfxFamily>::getSizeForSingleBarrier(bool tlbInvalidationRequired) {
    return sizeof(typename GfxFamily::PIPE_CONTROL);
}

template <typename GfxFamily>
size_t MemorySynchronizationCommands<GfxFamily>::getSizeForBarrierWithPostSyncOperation(const HardwareInfo &hwInfo, bool tlbInvalidationRequired) {
    // Must mirror the emission order in setBarrierWithPostSyncOperation.
    size_t size = getSizeForSingleBarrier(tlbInvalidationRequired) +
                  getSizeForBarrierWa(hwInfo) +
                  getSizeForSingleAdditionalSynchronization(hwInfo);
    return size;
}

template <typename GfxFamily>
size_t MemorySynchronizationCommands<GfxFamily>::getSizeForBarrierWa(const HardwareInfo &hwInfo) {
    size_t size = 0;
    if (MemorySynchronizationCommands<GfxFamily>::isBarrierWaRequired(hwInfo)) {
        size = getSizeForSingleBarrier(false) +
               getSizeForSingleAdditionalSynchronization(hwInfo);
    }
    return size;
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::setAdditionalSynchronization(void *&commandsBuffer, uint64_t gpuAddress, bool acquire, const HardwareInfo &hwInfo) {
    // No additional synchronization needed in the base implementation.
}

template <typename GfxFamily>
inline size_t MemorySynchronizationCommands<GfxFamily>::getSizeForSingleAdditionalSynchronization(const HardwareInfo &hwInfo) {
    return 0u;
}

template <typename GfxFamily>
inline size_t MemorySynchronizationCommands<GfxFamily>::getSizeForSingleAdditionalSynchronizationForDirectSubmission(const HardwareInfo &hwInfo) {
    return MemorySynchronizationCommands<GfxFamily>::getSizeForSingleAdditionalSynchronization(hwInfo);
}

template <typename GfxFamily>
inline size_t MemorySynchronizationCommands<GfxFamily>::getSizeForAdditonalSynchronization(const HardwareInfo &hwInfo) {
    return 0u;
}

template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::getMetricsLibraryGenId() const {
    return static_cast<uint32_t>(MetricsLibraryApi::ClientGen::Gen9);
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isLinearStoragePreferred(bool isSharedContext, bool isImage1d, bool forceLinearStorage) const {
    if (DebugManager.flags.ForceLinearImages.get() || forceLinearStorage || isSharedContext || isImage1d) {
        return true;
    }
    return false;
}

template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::alignSlmSize(uint32_t slmSize) {
    if (slmSize == 0u) {
        return 0u;
    }
    // Hardware accepts power-of-two SLM sizes between 1 KB and 64 KB.
    slmSize = std::max(slmSize, 1024u);
    slmSize = Math::nextPowerOfTwo(slmSize);
    UNRECOVERABLE_IF(slmSize > 64u * KB);
    return slmSize;
}

template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::computeSlmValues(const HardwareInfo &hwInfo, uint32_t slmSize) {
    // Encode SLM size as log2(size) - 9, i.e. 1KB -> 1, 2KB -> 2, ... 64KB -> 7.
    auto value = std::max(slmSize, 1024u);
    value = Math::nextPowerOfTwo(value);
    value = Math::getMinLsbSet(value);
    value = value - 9;
    DEBUG_BREAK_IF(value > 7);
    // A zero slmSize must encode as 0 regardless of the computation above.
    return value * !!slmSize;
}

template <typename GfxFamily>
uint8_t GfxCoreHelperHw<GfxFamily>::getBarriersCountFromHasBarriers(uint8_t hasBarriers) const {
    return hasBarriers;
}

template <typename GfxFamily>
inline bool GfxCoreHelperHw<GfxFamily>::isOffsetToSkipSetFFIDGPWARequired(const HardwareInfo &hwInfo, const ProductHelper &productHelper) const {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isForceDefaultRCSEngineWARequired(const HardwareInfo &hwInfo) {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isWaDisableRccRhwoOptimizationRequired() const {
    return false;
}

template <typename GfxFamily>
inline uint32_t GfxCoreHelperHw<GfxFamily>::getMinimalSIMDSize() const {
    return 8u;
}

template <typename GfxFamily>
std::unique_ptr<TagAllocatorBase> GfxCoreHelperHw<GfxFamily>::createTimestampPacketAllocator(const RootDeviceIndicesContainer &rootDeviceIndices, MemoryManager *memoryManager,
                                                                                             size_t initialTagCount, CommandStreamReceiverType csrType, DeviceBitfield deviceBitfield) const {
    bool doNotReleaseNodes = (csrType > CommandStreamReceiverType::CSR_HW) ||
                             DebugManager.flags.DisableTimestampPacketOptimizations.get();

    auto tagAlignment = getTimestampPacketAllocatorAlignment();

    // NOTE(review): the TimestampPackets packet-count template argument was
    // stripped in the recovered copy; GfxFamily::timestampPacketCount is
    // assumed per the family-trait convention used elsewhere - verify.
    if (DebugManager.flags.OverrideTimestampPacketSize.get() != -1) {
        if (DebugManager.flags.OverrideTimestampPacketSize.get() == 4) {
            using TimestampPackets32T = TimestampPackets<uint32_t, GfxFamily::timestampPacketCount>;
            return std::make_unique<TagAllocator<TimestampPackets32T>>(rootDeviceIndices, memoryManager, initialTagCount, tagAlignment, sizeof(TimestampPackets32T), doNotReleaseNodes, deviceBitfield);
        } else if (DebugManager.flags.OverrideTimestampPacketSize.get() == 8) {
            using TimestampPackets64T = TimestampPackets<uint64_t, GfxFamily::timestampPacketCount>;
            return std::make_unique<TagAllocator<TimestampPackets64T>>(rootDeviceIndices, memoryManager, initialTagCount, tagAlignment, sizeof(TimestampPackets64T), doNotReleaseNodes, deviceBitfield);
        } else {
            UNRECOVERABLE_IF(true);
        }
    }

    using TimestampPacketType = typename GfxFamily::TimestampPacketType;
    using TimestampPacketsT = TimestampPackets<TimestampPacketType, GfxFamily::timestampPacketCount>;

    return std::make_unique<TagAllocator<TimestampPacketsT>>(rootDeviceIndices, memoryManager, initialTagCount, tagAlignment, sizeof(TimestampPacketsT), doNotReleaseNodes, deviceBitfield);
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getTimestampPacketAllocatorAlignment() const {
    return MemoryConstants::cacheLineSize * 4;
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getSingleTimestampPacketSize() const {
    return GfxCoreHelperHw<GfxFamily>::getSingleTimestampPacketSizeHw();
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getSingleTimestampPacketSizeHw() {
    if (DebugManager.flags.OverrideTimestampPacketSize.get() != -1) {
        if (DebugManager.flags.OverrideTimestampPacketSize.get() == 4) {
            return TimestampPackets<uint32_t, GfxFamily::timestampPacketCount>::getSinglePacketSize();
        } else if (DebugManager.flags.OverrideTimestampPacketSize.get() == 8) {
            return TimestampPackets<uint64_t, GfxFamily::timestampPacketCount>::getSinglePacketSize();
        } else {
            UNRECOVERABLE_IF(true);
        }
    }

    return TimestampPackets<typename GfxFamily::TimestampPacketType, GfxFamily::timestampPacketCount>::getSinglePacketSize();
}

template <typename GfxFamily>
size_t MemorySynchronizationCommands<GfxFamily>::getSizeForFullCacheFlush() {
    return MemorySynchronizationCommands<GfxFamily>::getSizeForSingleBarrier(true);
}

template <typename GfxFamily>
void MemorySynchronizationCommands<GfxFamily>::addFullCacheFlush(LinearStream &commandStream, const HardwareInfo &hwInfo) {
    PipeControlArgs args;

    args.dcFlushEnable = MemorySynchronizationCommands<GfxFamily>::getDcFlushEnable(true, hwInfo);
    args.renderTargetCacheFlushEnable = true;
    args.instructionCacheInvalidateEnable = true;
    args.textureCacheInvalidationEnable = true;
    args.pipeControlFlushEnable = true;
    args.constantCacheInvalidationEnable = true;
    args.stateCacheInvalidationEnable = true;
    args.tlbInvalidation = true;
    MemorySynchronizationCommands<GfxFamily>::setCacheFlushExtraProperties(args);
    MemorySynchronizationCommands<GfxFamily>::addSingleBarrier(commandStream, args);
}

template <typename GfxFamily>
const StackVec<size_t, 3> GfxCoreHelperHw<GfxFamily>::getDeviceSubGroupSizes() const {
    return {8, 16, 32};
}

template <typename GfxFamily>
const StackVec<uint32_t, 6> GfxCoreHelperHw<GfxFamily>::getThreadsPerEUConfigs() const {
    // NOTE(review): StackVec capacity reconstructed - verify against the
    // declaration in hw_helper.h.
    return {};
}

template <typename GfxFamily>
void GfxCoreHelperHw<GfxFamily>::setExtraAllocationData(AllocationData &allocationData, const AllocationProperties &properties, const HardwareInfo &hwInfo) const {}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isBankOverrideRequired(const HardwareInfo &hwInfo, const ProductHelper &productHelper) const {
    return false;
}

template <typename GfxFamily>
int32_t GfxCoreHelperHw<GfxFamily>::getDefaultThreadArbitrationPolicy() const {
    return 0;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::useOnlyGlobalTimestamps() const {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::useSystemMemoryPlacementForISA(const HardwareInfo &hwInfo) const {
    // ISA lives in system memory only when local memory is disabled.
    return !getEnableLocalMemory(hwInfo);
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isCpuImageTransferPreferred(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
bool MemorySynchronizationCommands<GfxFamily>::isBarrierlPriorToPipelineSelectWaRequired(const HardwareInfo &hwInfo) {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isRcsAvailable(const HardwareInfo &hwInfo) const {
    return true;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isCooperativeDispatchSupported(const EngineGroupType engineGroupType, const HardwareInfo &hwInfo) const {
    return true;
}

template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::adjustMaxWorkGroupCount(uint32_t maxWorkGroupCount, const EngineGroupType engineGroupType, const HardwareInfo &hwInfo, bool isEngineInstanced) const {
    return maxWorkGroupCount;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isKmdMigrationSupported(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isSipWANeeded(const HardwareInfo &hwInfo) const {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isAdditionalFeatureFlagRequired(const FeatureTable *featureTable) const {
    return false;
}

template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::getNumCacheRegions() const {
    return 0;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isSubDeviceEngineSupported(const HardwareInfo &hwInfo, const DeviceBitfield &deviceBitfield, aub_stream::EngineType engineType) const {
    return true;
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getPreemptionAllocationAlignment() const {
    return 256 * MemoryConstants::kiloByte;
}

template <typename GfxFamily>
void GfxCoreHelperHw<GfxFamily>::applyAdditionalCompressionSettings(Gmm &gmm, bool isNotCompressed) const {}

template <typename GfxFamily>
void GfxCoreHelperHw<GfxFamily>::applyRenderCompressionFlag(Gmm &gmm, uint32_t isCompressed) const {
    gmm.resourceParams.Flags.Info.RenderCompressed = isCompressed;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isEngineTypeRemappingToHwSpecificRequired() const {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isSipKernelAsHexadecimalArrayPreferred() const {
    return false;
}

template <typename GfxFamily>
void GfxCoreHelperHw<GfxFamily>::setSipKernelData(uint32_t *&sipKernelBinary, size_t &kernelBinarySize) const {
    // No-op by default; used only when the SIP kernel is embedded as a hex array.
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getSipKernelMaxDbgSurfaceSize(const HardwareInfo &hwInfo) const {
    return 24 * MB;
}

template <typename GfxFamily>
void GfxCoreHelperHw<GfxFamily>::adjustPreemptionSurfaceSize(size_t &csrSize) const {
    // No adjustment needed in the base implementation.
}

template <typename GfxFamily>
void GfxCoreHelperHw<GfxFamily>::encodeBufferSurfaceState(EncodeSurfaceStateArgs &args) const {
    EncodeSurfaceState<GfxFamily>::encodeBuffer(args);
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::disableL3CacheForDebug(const HardwareInfo &, const ProductHelper &productHelper) const {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isRevisionSpecificBinaryBuiltinRequired() const {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::forceNonGpuCoherencyWA(bool requiresCoherency) const {
    return requiresCoherency;
}

template <typename GfxFamily>
size_t GfxCoreHelperHw<GfxFamily>::getBatchBufferEndSize() const {
    return sizeof(typename GfxFamily::MI_BATCH_BUFFER_END);
}

template <typename GfxFamily>
const void *GfxCoreHelperHw<GfxFamily>::getBatchBufferEndReference() const {
    return reinterpret_cast<const void *>(&GfxFamily::cmdInitBatchBufferEnd);
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isPlatformFlushTaskEnabled(const HardwareInfo &hwInfo) const {
    const auto &productHelper = *NEO::ProductHelper::get(hwInfo.platform.eProductFamily);
    return productHelper.isFlushTaskAllowed();
}

template <typename GfxFamily>
uint64_t GfxCoreHelperHw<GfxFamily>::getPatIndex(CacheRegion cacheRegion, CachePolicy cachePolicy) const {
    // Platforms without PAT support must never reach this path.
    UNRECOVERABLE_IF(true);
    return -1;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::copyThroughLockedPtrEnabled(const HardwareInfo &hwInfo) const {
    if (DebugManager.flags.ExperimentalCopyThroughLock.get() != -1) {
        return DebugManager.flags.ExperimentalCopyThroughLock.get() == 1;
    }
    return false;
}

template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::getAmountOfAllocationsToFill() const {
    if (DebugManager.flags.SetAmountOfReusableAllocations.get() != -1) {
        return DebugManager.flags.SetAmountOfReusableAllocations.get();
    }
    return 0u;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isChipsetUniqueUUIDSupported() const {
    return false;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isTimestampShiftRequired() const {
    return true;
}

template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isRelaxedOrderingSupported() const {
    return false;
}

} // namespace NEO