2023-01-09 20:07:23 +08:00
/*
 * Copyright (C) 2023-2025 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */
# include "shared/source/os_interface/linux/xe/ioctl_helper_xe.h"
2024-01-09 21:40:40 +08:00
# include "shared/source/debugger/debugger.h"
# include "shared/source/execution_environment/execution_environment.h"
2023-01-09 20:07:23 +08:00
# include "shared/source/execution_environment/root_device_environment.h"
2023-08-07 20:32:02 +08:00
# include "shared/source/gmm_helper/gmm_helper.h"
2024-07-25 18:47:37 +08:00
# include "shared/source/helpers/aligned_memory.h"
2023-01-09 20:07:23 +08:00
# include "shared/source/helpers/basic_math.h"
# include "shared/source/helpers/common_types.h"
# include "shared/source/helpers/constants.h"
2023-09-18 18:49:16 +08:00
# include "shared/source/helpers/engine_control.h"
2024-01-09 17:54:33 +08:00
# include "shared/source/helpers/gfx_core_helper.h"
2023-01-09 20:07:23 +08:00
# include "shared/source/helpers/hw_info.h"
# include "shared/source/helpers/ptr_math.h"
# include "shared/source/helpers/string.h"
2024-03-19 07:41:10 +08:00
# include "shared/source/os_interface/linux/drm_buffer_object.h"
2023-01-09 20:07:23 +08:00
# include "shared/source/os_interface/linux/drm_neo.h"
2023-05-02 19:05:31 +08:00
# include "shared/source/os_interface/linux/engine_info.h"
2023-05-04 18:13:08 +08:00
# include "shared/source/os_interface/linux/memory_info.h"
2023-01-09 20:07:23 +08:00
# include "shared/source/os_interface/linux/os_context_linux.h"
2024-03-09 09:22:47 +08:00
# include "shared/source/os_interface/linux/sys_calls.h"
2024-08-01 17:59:15 +08:00
# include "shared/source/os_interface/linux/xe/xedrm.h"
2023-09-18 18:49:16 +08:00
# include "shared/source/os_interface/os_time.h"
2023-01-09 20:07:23 +08:00
2023-06-20 15:34:12 +08:00
# include <algorithm>
2023-01-09 20:07:23 +08:00
# include <iostream>
2024-04-15 19:09:00 +08:00
# include <limits>
2023-12-27 21:27:51 +08:00
# include <sstream>
2023-01-09 20:07:23 +08:00
# define STRINGIFY_ME(X) return #X
# define RETURN_ME(X) return X
2025-01-24 11:59:30 +08:00
# ifndef DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR
2025-03-13 06:47:22 +08:00
# define DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR (1 << 5)
2025-01-24 11:59:30 +08:00
# endif
2025-03-13 06:47:22 +08:00
# ifndef DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR
# define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 2)
2025-01-24 11:59:30 +08:00
# endif
2023-01-09 20:07:23 +08:00
namespace NEO {
const char * IoctlHelperXe : : xeGetClassName ( int className ) {
switch ( className ) {
case DRM_XE_ENGINE_CLASS_RENDER :
return " rcs " ;
case DRM_XE_ENGINE_CLASS_COPY :
return " bcs " ;
case DRM_XE_ENGINE_CLASS_VIDEO_DECODE :
return " vcs " ;
case DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE :
return " vecs " ;
case DRM_XE_ENGINE_CLASS_COMPUTE :
return " ccs " ;
}
2024-03-13 00:44:45 +08:00
return " Unknown class name " ;
2023-01-09 20:07:23 +08:00
}
2023-09-18 18:49:16 +08:00
const char * IoctlHelperXe : : xeGetBindOperationName ( int bindOperation ) {
switch ( bindOperation ) {
2023-11-27 21:16:31 +08:00
case DRM_XE_VM_BIND_OP_MAP :
2023-01-09 20:07:23 +08:00
return " MAP " ;
2023-11-27 21:16:31 +08:00
case DRM_XE_VM_BIND_OP_UNMAP :
2023-01-09 20:07:23 +08:00
return " UNMAP " ;
2023-11-27 21:16:31 +08:00
case DRM_XE_VM_BIND_OP_MAP_USERPTR :
2023-01-09 20:07:23 +08:00
return " MAP_USERPTR " ;
2023-11-27 21:16:31 +08:00
case DRM_XE_VM_BIND_OP_UNMAP_ALL :
2023-09-18 18:49:16 +08:00
return " UNMAP ALL " ;
2023-11-27 21:16:31 +08:00
case DRM_XE_VM_BIND_OP_PREFETCH :
2023-09-18 18:49:16 +08:00
return " PREFETCH " ;
2023-01-09 20:07:23 +08:00
}
2023-09-18 18:49:16 +08:00
return " Unknown operation " ;
}
2024-07-29 22:22:01 +08:00
std : : string IoctlHelperXe : : xeGetBindFlagNames ( int bindFlags ) {
if ( bindFlags = = 0 ) {
return " " ;
2023-09-18 18:49:16 +08:00
}
2024-07-29 22:22:01 +08:00
std : : string flags ;
if ( bindFlags & DRM_XE_VM_BIND_FLAG_READONLY ) {
bindFlags & = ~ DRM_XE_VM_BIND_FLAG_READONLY ;
flags + = " READONLY " ;
}
if ( bindFlags & DRM_XE_VM_BIND_FLAG_IMMEDIATE ) {
bindFlags & = ~ DRM_XE_VM_BIND_FLAG_IMMEDIATE ;
flags + = " IMMEDIATE " ;
}
if ( bindFlags & DRM_XE_VM_BIND_FLAG_NULL ) {
bindFlags & = ~ DRM_XE_VM_BIND_FLAG_NULL ;
flags + = " NULL " ;
}
if ( bindFlags & DRM_XE_VM_BIND_FLAG_DUMPABLE ) {
bindFlags & = ~ DRM_XE_VM_BIND_FLAG_DUMPABLE ;
flags + = " DUMPABLE " ;
}
if ( bindFlags ! = 0 ) {
flags + = " Unknown flag " ;
}
// Remove the trailing space
if ( ! flags . empty ( ) & & flags . back ( ) = = ' ' ) {
flags . pop_back ( ) ;
}
return flags ;
2023-01-09 20:07:23 +08:00
}
const char * IoctlHelperXe : : xeGetengineClassName ( uint32_t engineClass ) {
switch ( engineClass ) {
case DRM_XE_ENGINE_CLASS_RENDER :
return " DRM_XE_ENGINE_CLASS_RENDER " ;
case DRM_XE_ENGINE_CLASS_COPY :
return " DRM_XE_ENGINE_CLASS_COPY " ;
case DRM_XE_ENGINE_CLASS_VIDEO_DECODE :
return " DRM_XE_ENGINE_CLASS_VIDEO_DECODE " ;
case DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE :
return " DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE " ;
case DRM_XE_ENGINE_CLASS_COMPUTE :
return " DRM_XE_ENGINE_CLASS_COMPUTE " ;
default :
2024-03-13 00:44:45 +08:00
return " Unknown engine class " ;
2023-01-09 20:07:23 +08:00
}
}
// Construct the xe-specific ioctl helper; the base class stores a reference to drmArg.
IoctlHelperXe::IoctlHelperXe(Drm &drmArg) : IoctlHelper(drmArg) {
    xeLog("IoctlHelperXe::IoctlHelperXe\n", "");
}
2025-01-24 11:59:30 +08:00
// Queries the device id and revision through DRM_IOCTL_XE_DEVICE_QUERY using raw
// SysCalls::ioctl (this runs before the helper is fully initialized) and stores
// them in the mutable hardware info. Also enables shared-system allocations and
// page-fault support when both debug flags allow it and the device reports the
// CPU address mirror capability. Returns false on any query failure.
bool IoctlHelperXe::queryDeviceIdAndRevision(Drm &drm) {
    auto fileDescriptor = drm.getFileDescriptor();
    drm_xe_device_query queryConfig = {};
    queryConfig.query = DRM_XE_DEVICE_QUERY_CONFIG;
    // First pass with data == 0 only retrieves the required payload size.
    int ret = SysCalls::ioctl(fileDescriptor, DRM_IOCTL_XE_DEVICE_QUERY, &queryConfig);
    if (ret || queryConfig.size == 0) {
        printDebugString(debugManager.flags.PrintDebugMessages.get(), stderr, "%s", "FATAL: Cannot query size for device config!\n");
        return false;
    }
    // uint64_t-backed storage keeps drm_xe_query_config properly aligned.
    auto data = std::vector<uint64_t>(Math::divideAndRoundUp(sizeof(drm_xe_query_config) + sizeof(uint64_t) * queryConfig.size, sizeof(uint64_t)), 0);
    struct drm_xe_query_config *config = reinterpret_cast<struct drm_xe_query_config *>(data.data());
    queryConfig.data = castToUint64(config);
    ret = SysCalls::ioctl(fileDescriptor, DRM_IOCTL_XE_DEVICE_QUERY, &queryConfig);
    if (ret) {
        printDebugString(debugManager.flags.PrintDebugMessages.get(), stderr, "%s", "FATAL: Cannot query device ID and revision!\n");
        return false;
    }
    auto hwInfo = drm.getRootDeviceEnvironment().getMutableHardwareInfo();
    // Low 16 bits carry the device id, bits 16..23 the stepping/revision.
    hwInfo->platform.usDeviceID = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
    hwInfo->platform.usRevId = static_cast<int>((config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff);

    if ((debugManager.flags.EnableRecoverablePageFaults.get() != 0) && (debugManager.flags.EnableSharedSystemUsmSupport.get() != 0) && (config->info[DRM_XE_QUERY_CONFIG_FLAGS] & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)) {
        drm.setSharedSystemAllocEnable(true);
        drm.setPageFaultSupported(true);
    }
    return true;
}
2023-01-09 20:07:23 +08:00
// One-time setup of the xe helper: creates the EU-debug interface, queries the
// device config (address-space size, max exec-queue priority, CXL type), caches
// the raw hwconfig blob, and builds the GT-id <-> tile-id mapping tables from
// the GT list. Returns false when the config or GT-list queries fail.
bool IoctlHelperXe::initialize() {
    xeLog("IoctlHelperXe::initialize\n", "");

    euDebugInterface = EuDebugInterface::create(drm.getSysFsPciPath());

    drm_xe_device_query queryConfig = {};
    queryConfig.query = DRM_XE_DEVICE_QUERY_CONFIG;

    // First ioctl reports the payload size only.
    auto retVal = IoctlHelper::ioctl(DrmIoctl::query, &queryConfig);
    if (retVal != 0 || queryConfig.size == 0) {
        return false;
    }
    // uint64_t-backed storage keeps drm_xe_query_config properly aligned.
    auto data = std::vector<uint64_t>(Math::divideAndRoundUp(sizeof(drm_xe_query_config) + sizeof(uint64_t) * queryConfig.size, sizeof(uint64_t)), 0);
    struct drm_xe_query_config *config = reinterpret_cast<struct drm_xe_query_config *>(data.data());
    queryConfig.data = castToUint64(config);
    IoctlHelper::ioctl(DrmIoctl::query, &queryConfig);
    xeLog("DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID\t%#llx\n",
          config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID]);
    xeLog("REV_ID\t\t\t\t%#llx\n",
          (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff);
    xeLog("DEVICE_ID\t\t\t\t%#llx\n",
          config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff);
    xeLog("DRM_XE_QUERY_CONFIG_FLAGS\t\t\t%#llx\n",
          config->info[DRM_XE_QUERY_CONFIG_FLAGS]);
    xeLog("DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM\t%s\n",
          config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
                  DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM
              ? "ON"
              : "OFF");
    xeLog("DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT\t\t%#llx\n",
          config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT]);
    xeLog("DRM_XE_QUERY_CONFIG_VA_BITS\t\t%#llx\n",
          config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);

    xeLog("DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY\t\t%#llx\n",
          config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY]);
    maxExecQueuePriority = config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] & 0xffff;

    // Fetch and cache the raw hwconfig blob (size query, then data query).
    memset(&queryConfig, 0, sizeof(queryConfig));
    queryConfig.query = DRM_XE_DEVICE_QUERY_HWCONFIG;
    IoctlHelper::ioctl(DrmIoctl::query, &queryConfig);
    auto newSize = queryConfig.size / sizeof(uint32_t);
    hwconfig.resize(newSize);
    queryConfig.data = castToUint64(hwconfig.data());
    IoctlHelper::ioctl(DrmIoctl::query, &queryConfig);

    auto hwInfo = this->drm.getRootDeviceEnvironment().getMutableHardwareInfo();
    // VA_BITS defines the usable GPU address space: (1 << bits) - 1.
    hwInfo->capabilityTable.gpuAddressSpace = (1ull << config->info[DRM_XE_QUERY_CONFIG_VA_BITS]) - 1;

    hwInfo->capabilityTable.cxlType = 0;
    if (getCxlType() && config->num_params > *getCxlType()) {
        hwInfo->capabilityTable.cxlType = static_cast<uint32_t>(config->info[*getCxlType()]);
    }

    // queryGtListData owns the buffer; xeGtListData is a typed view into it.
    queryGtListData = queryData<uint64_t>(DRM_XE_DEVICE_QUERY_GT_LIST);
    if (queryGtListData.empty()) {
        return false;
    }
    xeGtListData = reinterpret_cast<drm_xe_query_gt_list *>(queryGtListData.data());

    // Grow the container as needed (new slots filled with invalidIndex) and
    // store value at index id.
    auto assignValue = [](auto &container, uint16_t id, uint16_t value) {
        if (container.size() < id + 1u) {
            container.resize(id + 1, invalidIndex);
        }
        container[id] = value;
    };

    // Build gt-id <-> tile-id lookup tables; media GTs get a separate table.
    gtIdToTileId.resize(xeGtListData->num_gt, invalidIndex);
    for (auto i = 0u; i < xeGtListData->num_gt; i++) {
        const auto &gt = xeGtListData->gt_list[i];
        if (gt.type == DRM_XE_QUERY_GT_TYPE_MAIN) {
            gtIdToTileId[gt.gt_id] = gt.tile_id;
            assignValue(tileIdToGtId, gt.tile_id, gt.gt_id);
        } else if (isMediaGt(gt.type)) {
            assignValue(mediaGtIdToTileId, gt.gt_id, gt.tile_id);
        }
    }
    querySupportedFeatures();
    return true;
}
2025-03-28 20:19:17 +08:00
bool IoctlHelperXe : : isMediaGt ( uint16_t gtType ) const {
return ( gtType = = DRM_XE_QUERY_GT_TYPE_MEDIA ) ;
}
2023-01-09 20:07:23 +08:00
// Destructor only logs; owned resources are released by member destructors.
IoctlHelperXe::~IoctlHelperXe() {
    xeLog("IoctlHelperXe::~IoctlHelperXe\n", "");
}
// The set-pair BO extension is not supported by this helper.
bool IoctlHelperXe::isSetPairAvailable() {
    return false;
}
2023-03-08 12:06:00 +08:00
// BO chunking is not supported by this helper.
bool IoctlHelperXe::isChunkingAvailable() {
    return false;
}
2023-01-09 20:07:23 +08:00
// VM_BIND is always available on the xe backend.
bool IoctlHelperXe::isVmBindAvailable() {
    return true;
}
2023-12-28 00:23:42 +08:00
// Not supported on xe; parameters are ignored and failure is always reported.
bool IoctlHelperXe::setDomainCpu(uint32_t handle, bool writeEnable) {
    return false;
}
2023-10-02 22:26:33 +08:00
// Generic two-pass DRM_XE device query: the first ioctl (data == 0) reports the
// payload size, the second fills the returned buffer. DataType selects the
// element type — and therefore the alignment — of the backing storage. If the
// size query fails, deviceQuery.size stays 0 and an empty vector is returned.
template <typename DataType>
std::vector<DataType> IoctlHelperXe::queryData(uint32_t queryId) {
    struct drm_xe_device_query deviceQuery = {};
    deviceQuery.query = queryId;
    IoctlHelper::ioctl(DrmIoctl::query, &deviceQuery);

    std::vector<DataType> retVal(Math::divideAndRoundUp(deviceQuery.size, sizeof(DataType)));
    deviceQuery.data = castToUint64(retVal.data());
    IoctlHelper::ioctl(DrmIoctl::query, &deviceQuery);
    return retVal;
}
// Explicit instantiations for the element types used in this file.
template std::vector<uint8_t> IoctlHelperXe::queryData(uint32_t queryId);
template std::vector<uint64_t> IoctlHelperXe::queryData(uint32_t queryId);
2023-05-02 19:05:31 +08:00
2024-08-07 20:40:07 +08:00
uint32_t IoctlHelperXe : : getNumEngines ( uint64_t * enginesData ) const {
return reinterpret_cast < struct drm_xe_query_engines * > ( enginesData ) - > num_engines ;
}
2023-05-02 19:05:31 +08:00
// Queries all HW engines from the KMD and groups the supported classes per tile.
// Also records the first engine matching the default engine class as the default
// engine, and fills multi-tile arch info when the feature flag is set.
// Returns an empty pointer when the engine query fails.
std::unique_ptr<EngineInfo> IoctlHelperXe::createEngineInfo(bool isSysmanEnabled) {
    auto enginesData = queryData<uint64_t>(DRM_XE_DEVICE_QUERY_ENGINES);

    if (enginesData.empty()) {
        return {};
    }

    auto queryEngines = reinterpret_cast<struct drm_xe_query_engines *>(enginesData.data());
    auto numberHwEngines = getNumEngines(enginesData.data());

    xeLog("numberHwEngines=%d\n", numberHwEngines);

    StackVec<std::vector<EngineCapabilities>, 2> enginesPerTile{};
    std::bitset<8> multiTileMask{};

    auto hwInfo = drm.getRootDeviceEnvironment().getMutableHardwareInfo();
    auto defaultEngineClass = getDefaultEngineClass(hwInfo->capabilityTable.defaultEngineType);

    // True when the container maps gtId to a valid tile id.
    auto containsGtId = [](const auto &container, uint16_t gtId) {
        return ((container.size() > gtId) && (container[gtId] != invalidIndex));
    };

    for (auto i = 0u; i < numberHwEngines; i++) {
        const auto &engine = queryEngines->engines[i].instance;
        // Resolve the engine's tile: main-GT mapping first, then media-GT;
        // engines on GTs absent from both maps are skipped entirely.
        uint16_t tile = 0;
        if (containsGtId(gtIdToTileId, engine.gt_id)) {
            tile = static_cast<uint16_t>(gtIdToTileId[engine.gt_id]);
        } else if (containsGtId(mediaGtIdToTileId, engine.gt_id)) {
            tile = static_cast<uint16_t>(mediaGtIdToTileId[engine.gt_id]);
        } else {
            continue;
        }

        multiTileMask.set(tile);
        EngineClassInstance engineClassInstance{};
        engineClassInstance.engineClass = engine.engine_class;
        engineClassInstance.engineInstance = engine.engine_instance;
        xeLog("\t%s:%d:%d %d\n", xeGetClassName(engineClassInstance.engineClass), engineClassInstance.engineInstance, engine.gt_id, tile);

        const bool isBaseEngineClass = engineClassInstance.engineClass == getDrmParamValue(DrmParam::engineClassCompute) ||
                                       engineClassInstance.engineClass == getDrmParamValue(DrmParam::engineClassRender) ||
                                       engineClassInstance.engineClass == getDrmParamValue(DrmParam::engineClassCopy);

        // Video engine classes are only exposed when sysman is enabled.
        const bool isSysmanEngineClass = isSysmanEnabled && (engineClassInstance.engineClass == getDrmParamValue(DrmParam::engineClassVideo) ||
                                                            engineClassInstance.engineClass == getDrmParamValue(DrmParam::engineClassVideoEnhance));
        if (isBaseEngineClass || isSysmanEngineClass || isExtraEngineClassAllowed(engineClassInstance.engineClass)) {
            if (enginesPerTile.size() <= tile) {
                enginesPerTile.resize(tile + 1);
            }
            enginesPerTile[tile].push_back({engineClassInstance, {}});
            if (!defaultEngine && engineClassInstance.engineClass == defaultEngineClass) {
                defaultEngine = std::make_unique<drm_xe_engine_class_instance>();
                *defaultEngine = engine;
            }
        }
    }
    // A device that exposes no engine of the default class is unusable.
    UNRECOVERABLE_IF(!defaultEngine);
    if (hwInfo->featureTable.flags.ftrMultiTileArch) {
        auto &multiTileArchInfo = hwInfo->gtSystemInfo.MultiTileArchInfo;
        multiTileArchInfo.IsValid = true;
        multiTileArchInfo.TileCount = multiTileMask.count();
        multiTileArchInfo.TileMask = static_cast<uint8_t>(multiTileMask.to_ulong());
    }

    return std::make_unique<EngineInfo>(&drm, enginesPerTile);
}
2024-04-15 19:09:00 +08:00
// Translate a kernel drm_xe_mem_region entry into the driver-side MemoryRegion
// descriptor, tagging it with the set of tiles it belongs to.
inline MemoryRegion createMemoryRegionFromXeMemRegion(const drm_xe_mem_region &xeMemRegion, std::bitset<4> tilesMask) {
    MemoryRegion region{};
    region.region.memoryClass = xeMemRegion.mem_class;
    region.region.memoryInstance = xeMemRegion.instance;
    region.probedSize = xeMemRegion.total_size;
    region.unallocatedSize = xeMemRegion.total_size - xeMemRegion.used;
    region.tilesMask = tilesMask;
    return region;
}
// Builds MemoryInfo from DRM_XE_DEVICE_QUERY_MEM_REGIONS. System memory is
// always inserted at the front of the region container; each VRAM region is
// tagged with the mask of tiles that report it as their near memory, and VRAM
// regions referenced by no tile are dropped. Returns an empty pointer when the
// query fails or no region remains.
std::unique_ptr<MemoryInfo> IoctlHelperXe::createMemoryInfo() {
    auto memUsageData = queryData<uint64_t>(DRM_XE_DEVICE_QUERY_MEM_REGIONS);

    if (memUsageData.empty()) {
        return {};
    }

    constexpr auto maxSupportedTilesNumber{4u};
    // regionTilesMask[i] = mask of tiles whose near memory is region instance i.
    std::array<std::bitset<maxSupportedTilesNumber>, 64> regionTilesMask{};
    for (auto i{0u}; i < xeGtListData->num_gt; i++) {
        const auto &gtEntry = xeGtListData->gt_list[i];
        if (gtEntry.type != DRM_XE_QUERY_GT_TYPE_MAIN) {
            continue;
        }

        uint64_t nearMemRegions{gtEntry.near_mem_regions};
        auto regionIndex{Math::log2(nearMemRegions)};
        regionTilesMask[regionIndex].set(gtEntry.tile_id);
    }
    MemoryInfo::RegionContainer regionsContainer{};

    auto xeMemRegionsData = reinterpret_cast<drm_xe_query_mem_regions *>(memUsageData.data());
    for (auto i = 0u; i < xeMemRegionsData->num_mem_regions; i++) {
        auto &xeMemRegion{xeMemRegionsData->mem_regions[i]};
        if (xeMemRegion.mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
            // Make sure sysmem is always put at the first position
            regionsContainer.insert(regionsContainer.begin(), createMemoryRegionFromXeMemRegion(xeMemRegion, 0u));
        } else {
            auto regionIndex = xeMemRegion.instance;
            UNRECOVERABLE_IF(regionIndex >= regionTilesMask.size());
            if (auto tilesMask = regionTilesMask[regionIndex]; tilesMask.any()) {
                regionsContainer.push_back(createMemoryRegionFromXeMemRegion(xeMemRegion, tilesMask));
            }
        }
    }
    if (regionsContainer.empty()) {
        return {};
    }
    return std::make_unique<MemoryInfo>(regionsContainer, drm);
}
size_t IoctlHelperXe : : getLocalMemoryRegionsSize ( const MemoryInfo * memoryInfo , uint32_t subDevicesCount , uint32_t tileMask ) const {
size_t size = 0 ;
for ( const auto & memoryRegion : memoryInfo - > getLocalMemoryRegions ( ) ) {
if ( ( memoryRegion . tilesMask & std : : bitset < 4 > { tileMask } ) . any ( ) ) {
size + = memoryRegion . probedSize ;
2023-10-19 19:23:16 +08:00
}
2023-05-04 18:13:08 +08:00
}
2024-04-15 19:09:00 +08:00
return size ;
2023-05-04 18:13:08 +08:00
}
2024-03-28 00:55:20 +08:00
void IoctlHelperXe : : setupIpVersion ( ) {
auto & rootDeviceEnvironment = drm . getRootDeviceEnvironment ( ) ;
auto hwInfo = rootDeviceEnvironment . getMutableHardwareInfo ( ) ;
if ( auto hwIpVersion = GtIpVersion { } ; queryHwIpVersion ( hwIpVersion ) ) {
hwInfo - > ipVersion . architecture = hwIpVersion . major ;
hwInfo - > ipVersion . release = hwIpVersion . minor ;
hwInfo - > ipVersion . revision = hwIpVersion . revision ;
} else {
xeLog ( " No HW IP version received from drm_xe_gt. Falling back to default value. " ) ;
IoctlHelper : : setupIpVersion ( ) ;
}
}
2024-08-06 21:04:01 +08:00
bool IoctlHelperXe : : queryHwIpVersion ( GtIpVersion & gtIpVersion ) {
auto gtListData = queryData < uint64_t > ( DRM_XE_DEVICE_QUERY_GT_LIST ) ;
if ( gtListData . empty ( ) ) {
return false ;
}
auto xeGtListData = reinterpret_cast < drm_xe_query_gt_list * > ( gtListData . data ( ) ) ;
for ( auto i = 0u ; i < xeGtListData - > num_gt ; i + + ) {
auto & gtEntry = xeGtListData - > gt_list [ i ] ;
if ( gtEntry . type = = DRM_XE_QUERY_GT_TYPE_MEDIA | | gtEntry . ip_ver_major = = 0u ) {
continue ;
}
gtIpVersion . major = gtEntry . ip_ver_major ;
gtIpVersion . minor = gtEntry . ip_ver_minor ;
gtIpVersion . revision = gtEntry . ip_ver_rev ;
return true ;
}
return false ;
}
2023-09-18 18:49:16 +08:00
// Reads a correlated GPU/CPU timestamp pair via DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
// on the default engine, using CLOCK_MONOTONIC_RAW for the CPU clock. The GPU
// counter is masked down to its valid bit width (the OS-time layer may override
// the width reported by the KMD). Returns false on null arguments or ioctl error.
bool IoctlHelperXe::setGpuCpuTimes(TimeStampData *pGpuCpuTime, OSTime *osTime) {
    if (pGpuCpuTime == nullptr || osTime == nullptr) {
        return false;
    }
    drm_xe_device_query deviceQuery = {};
    deviceQuery.query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES;
    // Size query first; the second ioctl below fills the payload.
    auto ret = IoctlHelper::ioctl(DrmIoctl::query, &deviceQuery);
    if (ret != 0) {
        xeLog("-> IoctlHelperXe::%s s=0x%lx r=%d\n", __FUNCTION__, deviceQuery.size, ret);
        return false;
    }
    std::vector<uint8_t> retVal(deviceQuery.size);
    deviceQuery.data = castToUint64(retVal.data());
    // The query is in/out: clockid and engine selection are written into the
    // same buffer the KMD fills with the cycle counts.
    drm_xe_query_engine_cycles *queryEngineCycles = reinterpret_cast<drm_xe_query_engine_cycles *>(retVal.data());
    queryEngineCycles->clockid = CLOCK_MONOTONIC_RAW;
    queryEngineCycles->eci = *this->defaultEngine;
    ret = IoctlHelper::ioctl(DrmIoctl::query, &deviceQuery);
    auto nValidBits = queryEngineCycles->width;
    if (osTime->getDeviceTimestampWidth() != 0) {
        nValidBits = osTime->getDeviceTimestampWidth();
    }
    auto gpuTimestampValidBits = maxNBitValue(nValidBits);
    auto gpuCycles = queryEngineCycles->engine_cycles & gpuTimestampValidBits;
    xeLog("-> IoctlHelperXe::%s [%d,%d] clockId=0x%x s=0x%lx nValidBits=0x%x gpuCycles=0x%x cpuTimeInNS=0x%x r=%d\n", __FUNCTION__,
          queryEngineCycles->eci.engine_class, queryEngineCycles->eci.engine_instance,
          queryEngineCycles->clockid, deviceQuery.size, nValidBits, gpuCycles, queryEngineCycles->cpu_timestamp, ret);
    pGpuCpuTime->gpuTimeStamp = gpuCycles;
    pGpuCpuTime->cpuTimeinNS = queryEngineCycles->cpu_timestamp;
    return ret == 0;
}
2023-06-20 15:34:12 +08:00
// Parses DRM_XE_DEVICE_QUERY_GT_TOPOLOGY into per-tile slice/subslice/EU/L3-bank
// information. Fills topologyMap with the enabled slice/subslice indices per tile
// and topologyData with the smallest per-tile counts (so a multi-tile device
// reports a configuration valid on every tile). Returns true only when at least
// one DSS mask (geometry or compute) was received.
bool IoctlHelperXe::getTopologyDataAndMap(const HardwareInfo &hwInfo, DrmQueryTopologyData &topologyData, TopologyMap &topologyMap) {

    auto queryGtTopology = queryData<uint8_t>(DRM_XE_DEVICE_QUERY_GT_TOPOLOGY);

    // Append each mask byte of one topology item to the destination vector.
    auto fillMask = [](std::vector<std::bitset<8>> &vec, drm_xe_query_topology_mask *topo) {
        for (uint32_t j = 0; j < topo->num_bytes; j++) {
            vec.push_back(topo->mask[j]);
        }
    };

    // Per-tile bitmasks, one vector of bytes per tile.
    StackVec<std::vector<std::bitset<8>>, 2> geomDss;
    StackVec<std::vector<std::bitset<8>>, 2> computeDss;
    StackVec<std::vector<std::bitset<8>>, 2> euDss;
    StackVec<std::vector<std::bitset<8>>, 2> l3Banks;

    auto topologySize = queryGtTopology.size();
    auto dataPtr = queryGtTopology.data();

    auto numTiles = tileIdToGtId.size();
    geomDss.resize(numTiles);
    computeDss.resize(numTiles);
    euDss.resize(numTiles);
    l3Banks.resize(numTiles);

    bool receivedDssInfo = false;
    // The query returns a packed stream of variable-size items; walk it one
    // drm_xe_query_topology_mask header (plus num_bytes of mask) at a time.
    while (topologySize >= sizeof(drm_xe_query_topology_mask)) {
        drm_xe_query_topology_mask *topo = reinterpret_cast<drm_xe_query_topology_mask *>(dataPtr);
        UNRECOVERABLE_IF(topo == nullptr);

        uint32_t gtId = topo->gt_id;
        auto tileId = gtIdToTileId[gtId];

        // Only GTs mapped to a tile (main GTs) contribute to topology.
        if (tileId != invalidIndex) {
            switch (topo->type) {
            case DRM_XE_TOPO_DSS_GEOMETRY:
                fillMask(geomDss[tileId], topo);
                receivedDssInfo = true;
                break;
            case DRM_XE_TOPO_DSS_COMPUTE:
                fillMask(computeDss[tileId], topo);
                receivedDssInfo = true;
                break;
            case DRM_XE_TOPO_L3_BANK:
                fillMask(l3Banks[tileId], topo);
                break;
            case DRM_XE_TOPO_EU_PER_DSS:
            case DRM_XE_TOPO_SIMD16_EU_PER_DSS:
                fillMask(euDss[tileId], topo);
                break;
            default:
                xeLog("Unhandle GT Topo type: %d\n", topo->type);
            }
        }
        uint32_t itemSize = sizeof(drm_xe_query_topology_mask) + topo->num_bytes;
        topologySize -= itemSize;
        dataPtr = ptrOffset(dataPtr, itemSize);
    }

    int sliceCount = 0;
    int subSliceCount = 0;
    int euPerDss = 0;
    int l3BankCount = 0;
    uint32_t hwMaxSubSliceCount = hwInfo.gtSystemInfo.MaxSubSlicesSupported;
    topologyData.maxSlices = hwInfo.gtSystemInfo.MaxSlicesSupported ? hwInfo.gtSystemInfo.MaxSlicesSupported : 1;
    topologyData.maxSubSlicesPerSlice = hwMaxSubSliceCount / topologyData.maxSlices;

    for (auto tileId = 0u; tileId < numTiles; tileId++) {
        int subSliceCountPerTile = 0;
        std::vector<int> sliceIndices;
        std::vector<int> subSliceIndices;

        int previouslyEnabledSlice = -1;

        // Walk a DSS bitmask, recording each enabled subslice and the slice it
        // implies (a slice is added once, on its first enabled subslice).
        auto processSubSliceInfo = [&](const std::vector<std::bitset<8>> &subSliceInfo) -> void {
            for (auto subSliceId = 0u; subSliceId < std::min(hwMaxSubSliceCount, static_cast<uint32_t>(subSliceInfo.size() * 8)); subSliceId++) {
                auto byte = subSliceId / 8;
                auto bit = subSliceId & 0b111;
                int sliceId = static_cast<int>(subSliceId / topologyData.maxSubSlicesPerSlice);
                if (subSliceInfo[byte].test(bit)) {
                    subSliceIndices.push_back(subSliceId);
                    subSliceCountPerTile++;
                    if (sliceId != previouslyEnabledSlice) {
                        previouslyEnabledSlice = sliceId;
                        sliceIndices.push_back(sliceId);
                    }
                }
            }
        };
        // Compute DSS masks take precedence; fall back to geometry DSS if empty.
        processSubSliceInfo(computeDss[tileId]);
        if (subSliceCountPerTile == 0) {
            processSubSliceInfo(geomDss[tileId]);
        }

        topologyMap[tileId].sliceIndices = std::move(sliceIndices);
        // Subslice indices are only stored when at most one slice is enabled.
        if (topologyMap[tileId].sliceIndices.size() < 2u) {
            topologyMap[tileId].subsliceIndices = std::move(subSliceIndices);
        }
        int sliceCountPerTile = static_cast<int>(topologyMap[tileId].sliceIndices.size());
        int euPerDssPerTile = 0;
        for (auto byte = 0u; byte < euDss[tileId].size(); byte++) {
            euPerDssPerTile += euDss[tileId][byte].count();
        }
        int l3BankCountPerTile = 0;
        for (auto byte = 0u; byte < l3Banks[tileId].size(); byte++) {
            l3BankCountPerTile += l3Banks[tileId][byte].count();
        }
        // pick smallest config
        sliceCount = (sliceCount == 0) ? sliceCountPerTile : std::min(sliceCount, sliceCountPerTile);
        subSliceCount = (subSliceCount == 0) ? subSliceCountPerTile : std::min(subSliceCount, subSliceCountPerTile);
        euPerDss = (euPerDss == 0) ? euPerDssPerTile : std::min(euPerDss, euPerDssPerTile);
        l3BankCount = (l3BankCount == 0) ? l3BankCountPerTile : std::min(l3BankCount, l3BankCountPerTile);
        // pick max config
        topologyData.maxEusPerSubSlice = std::max(topologyData.maxEusPerSubSlice, euPerDssPerTile);
    }
    topologyData.sliceCount = sliceCount;
    topologyData.subSliceCount = subSliceCount;
    topologyData.euCount = subSliceCount * euPerDss;
    topologyData.numL3Banks = l3BankCount;
    return receivedDssInfo;
}
2024-12-13 22:15:31 +08:00
void IoctlHelperXe : : updateBindInfo ( uint64_t userPtr ) {
2023-09-12 22:34:53 +08:00
std : : unique_lock < std : : mutex > lock ( xeLock ) ;
2024-12-13 22:15:31 +08:00
BindInfo b = { userPtr , 0 } ;
2023-09-12 22:34:53 +08:00
bindInfo . push_back ( b ) ;
}
2024-04-12 17:40:28 +08:00
uint16_t IoctlHelperXe : : getDefaultEngineClass ( const aub_stream : : EngineType & defaultEngineType ) {
2024-01-29 18:37:00 +08:00
if ( defaultEngineType = = aub_stream : : EngineType : : ENGINE_CCS ) {
2024-04-12 17:40:28 +08:00
return DRM_XE_ENGINE_CLASS_COMPUTE ;
2024-01-29 18:37:00 +08:00
} else if ( defaultEngineType = = aub_stream : : EngineType : : ENGINE_RCS ) {
2024-04-12 17:40:28 +08:00
return DRM_XE_ENGINE_CLASS_RENDER ;
2024-01-29 18:37:00 +08:00
} else {
/* So far defaultEngineType is either ENGINE_RCS or ENGINE_CCS */
UNRECOVERABLE_IF ( true ) ;
2024-04-12 17:40:28 +08:00
return 0 ;
2023-09-18 18:49:16 +08:00
}
}
2024-04-10 23:39:16 +08:00
/**
* @ brief returns caching policy for new allocation .
* For system memory caching policy is write - back , otherwise it ' s write - combined .
*
* @ param [ in ] allocationInSystemMemory flag that indicates if allocation will be allocated in system memory
*
* @ return returns caching policy defined as DRM_XE_GEM_CPU_CACHING_WC or DRM_XE_GEM_CPU_CACHING_WB
*/
2024-05-22 21:31:31 +08:00
uint16_t IoctlHelperXe : : getCpuCachingMode ( std : : optional < bool > isCoherent , bool allocationInSystemMemory ) const {
2023-12-06 21:47:26 +08:00
uint16_t cpuCachingMode = DRM_XE_GEM_CPU_CACHING_WC ;
2024-07-17 21:51:20 +08:00
if ( allocationInSystemMemory ) {
if ( ( isCoherent . value_or ( true ) = = true ) ) {
cpuCachingMode = DRM_XE_GEM_CPU_CACHING_WB ;
}
2024-04-10 23:39:16 +08:00
}
2023-12-06 21:47:26 +08:00
if ( debugManager . flags . OverrideCpuCaching . get ( ) ! = - 1 ) {
cpuCachingMode = debugManager . flags . OverrideCpuCaching . get ( ) ;
}
return cpuCachingMode ;
}
2024-05-22 21:31:31 +08:00
// Creates a GEM buffer object via DRM_IOCTL_XE_GEM_CREATE.
// Placement is a bitmask of memory instances built from memClassInstances;
// cpu_caching is derived from the coherency request and whether every region
// is system memory. patIndex, vmId, pairHandle, isChunked, numOfChunks and the
// memPolicy* parameters are accepted for interface compatibility but are not
// used on the Xe path (visible from this body).
// Returns the ioctl result (0 on success); 'handle' receives the new BO handle.
int IoctlHelperXe::createGemExt(const MemRegionsVec &memClassInstances, size_t allocSize, uint32_t &handle, uint64_t patIndex, std::optional<uint32_t> vmId, int32_t pairHandle, bool isChunked, uint32_t numOfChunks, std::optional<uint32_t> memPolicyMode, std::optional<std::vector<unsigned long>> memPolicyNodemask, std::optional<bool> isCoherent) {
    struct drm_xe_gem_create create = {};
    uint32_t regionsSize = static_cast<uint32_t>(memClassInstances.size());
    if (!regionsSize) {
        xeLog("memClassInstances empty !\n", "");
        return -1;
    }
    create.size = allocSize;
    // Last region is kept only for the trace message below.
    MemoryClassInstance mem = memClassInstances[regionsSize - 1];
    std::bitset<32> memoryInstances{};
    bool isSysMemOnly = true;
    for (const auto &memoryClassInstance : memClassInstances) {
        memoryInstances.set(memoryClassInstance.memoryInstance);
        if (memoryClassInstance.memoryClass != drm_xe_memory_class::DRM_XE_MEM_REGION_CLASS_SYSMEM) {
            isSysMemOnly = false;
        }
    }
    create.placement = static_cast<uint32_t>(memoryInstances.to_ulong());
    create.cpu_caching = this->getCpuCachingMode(isCoherent, isSysMemOnly);

    if (debugManager.flags.EnableDeferBacking.get()) {
        create.flags |= DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING;
    }

    printDebugString(debugManager.flags.PrintBOCreateDestroyResult.get(), stdout, "Performing DRM_IOCTL_XE_GEM_CREATE with {vmid=0x%x size=0x%lx flags=0x%x placement=0x%x caching=%hu }",
                     create.vm_id, create.size, create.flags, create.placement, create.cpu_caching);

    auto ret = IoctlHelper::ioctl(DrmIoctl::gemCreate, &create);
    handle = create.handle;

    printDebugString(debugManager.flags.PrintBOCreateDestroyResult.get(), stdout, "DRM_IOCTL_XE_GEM_CREATE has returned: %d BO-%u with size: %lu\n", ret, handle, create.size);

    xeLog(" -> IoctlHelperXe::%s [%d,%d] vmid=0x%x s=0x%lx f=0x%x p=0x%x h=0x%x c=%hu r=%d\n", __FUNCTION__,
          mem.memoryClass, mem.memoryInstance,
          create.vm_id, create.size, create.flags, create.placement, handle, create.cpu_caching, ret);
    return ret;
}
2024-05-22 21:31:31 +08:00
// Creates a GEM buffer object placed according to 'memoryBanks' (one bit per
// memory bank); memoryBanks == 0 falls back to whatever region the memory-info
// helper resolves for a zero mask. Returns the new BO handle (ioctl failure is
// only signalled through DEBUG_BREAK_IF, not the return value).
uint32_t IoctlHelperXe::createGem(uint64_t size, uint32_t memoryBanks, std::optional<bool> isCoherent) {
    struct drm_xe_gem_create create = {};
    create.size = size;
    auto pHwInfo = drm.getRootDeviceEnvironment().getHardwareInfo();
    auto memoryInfo = drm.getMemoryInfo();
    std::bitset<32> memoryInstances{};
    auto banks = std::bitset<4>(memoryBanks);
    size_t currentBank = 0;
    size_t i = 0;
    bool isSysMemOnly = true;
    // Visit every set bank bit; 'i' counts set bits already handled so the
    // loop ends once all of banks.count() bits have been translated.
    while (i < banks.count()) {
        if (banks.test(currentBank)) {
            auto regionClassAndInstance = memoryInfo->getMemoryRegionClassAndInstance(1u << currentBank, *pHwInfo);
            memoryInstances.set(regionClassAndInstance.memoryInstance);
            if (regionClassAndInstance.memoryClass != drm_xe_memory_class::DRM_XE_MEM_REGION_CLASS_SYSMEM) {
                isSysMemOnly = false;
            }
            i++;
        }
        currentBank++;
    }
    if (memoryBanks == 0) {
        auto regionClassAndInstance = memoryInfo->getMemoryRegionClassAndInstance(memoryBanks, *pHwInfo);
        memoryInstances.set(regionClassAndInstance.memoryInstance);
    }
    create.placement = static_cast<uint32_t>(memoryInstances.to_ulong());
    create.cpu_caching = this->getCpuCachingMode(isCoherent, isSysMemOnly);

    if (debugManager.flags.EnableDeferBacking.get()) {
        create.flags |= DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING;
    }

    printDebugString(debugManager.flags.PrintBOCreateDestroyResult.get(), stdout, "Performing DRM_IOCTL_XE_GEM_CREATE with {vmid=0x%x size=0x%lx flags=0x%x placement=0x%x caching=%hu }",
                     create.vm_id, create.size, create.flags, create.placement, create.cpu_caching);

    [[maybe_unused]] auto ret = ioctl(DrmIoctl::gemCreate, &create);

    printDebugString(debugManager.flags.PrintBOCreateDestroyResult.get(), stdout, "DRM_IOCTL_XE_GEM_CREATE has returned: %d BO-%u with size: %lu\n", ret, create.handle, create.size);

    xeLog(" -> IoctlHelperXe::%s vmid=0x%x s=0x%lx f=0x%x p=0x%x h=0x%x c=%hu r=%d\n", __FUNCTION__,
          create.vm_id, create.size, create.flags, create.placement, create.handle, create.cpu_caching, ret);
    DEBUG_BREAK_IF(ret != 0);
    return create.handle;
}
2024-12-04 23:54:56 +08:00
// CLOS (cache-region) allocation is not supported on the Xe path; always
// reports that no cache region was reserved.
CacheRegion IoctlHelperXe::closAlloc(CacheLevel cacheLevel) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return CacheRegion::none;
}
// CLOS way allocation is not supported on the Xe path; reports zero ways.
uint16_t IoctlHelperXe::closAllocWays(CacheRegion closIndex, uint16_t cacheLevel, uint16_t numWays) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
// CLOS free is a no-op on the Xe path; always reports CacheRegion::none.
CacheRegion IoctlHelperXe::closFree(CacheRegion closIndex) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return CacheRegion::none;
}
2024-04-26 01:45:21 +08:00
void IoctlHelperXe : : setupXeWaitUserFenceStruct ( void * arg , uint32_t ctxId , uint16_t op , uint64_t addr , uint64_t value , int64_t timeout ) {
auto waitUserFence = reinterpret_cast < drm_xe_wait_user_fence * > ( arg ) ;
waitUserFence - > addr = addr ;
waitUserFence - > op = op ;
waitUserFence - > value = value ;
waitUserFence - > mask = std : : numeric_limits < uint64_t > : : max ( ) ;
waitUserFence - > timeout = timeout ;
waitUserFence - > exec_queue_id = ctxId ;
}
2024-05-07 19:47:34 +08:00
// Thin wrapper over the Xe wait-user-fence ioctl. A zero fence address is a
// programming error. userInterrupt, externalInterruptId and
// allocForInterruptWait are accepted for interface compatibility but unused here.
int IoctlHelperXe::xeWaitUserFence(uint32_t ctxId, uint16_t op, uint64_t addr, uint64_t value, int64_t timeout, bool userInterrupt, uint32_t externalInterruptId, GraphicsAllocation *allocForInterruptWait) {
    UNRECOVERABLE_IF(addr == 0x0)
    drm_xe_wait_user_fence waitUserFence = {};
    setupXeWaitUserFenceStruct(&waitUserFence, ctxId, op, addr, value, timeout);
    auto retVal = IoctlHelper::ioctl(DrmIoctl::gemWaitUserFence, &waitUserFence);
    xeLog(" -> IoctlHelperXe::%s a=0x%llx v=0x%llx T=0x%llx F=0x%x ctx=0x%x retVal=0x%x\n", __FUNCTION__,
          addr, value, timeout, waitUserFence.flags, ctxId, retVal);
    return retVal;
}
// Waits for the 64-bit value at 'address' to become >= 'value'.
// Only 64-bit fences are supported (dataWidth must be ValueWidth::u64);
// address == 0 means "nothing to wait on" and succeeds immediately.
int IoctlHelperXe::waitUserFence(uint32_t ctxId, uint64_t address,
                                 uint64_t value, uint32_t dataWidth, int64_t timeout, uint16_t flags,
                                 bool userInterrupt, uint32_t externalInterruptId, GraphicsAllocation *allocForInterruptWait) {
    xeLog(" -> IoctlHelperXe::%s a=0x%llx v=0x%llx w=0x%x T=0x%llx F=0x%x ctx=0x%x\n", __FUNCTION__, address, value, dataWidth, timeout, flags, ctxId);
    UNRECOVERABLE_IF(dataWidth != static_cast<uint32_t>(Drm::ValueWidth::u64));
    if (address) {
        return xeWaitUserFence(ctxId, DRM_XE_UFENCE_WAIT_OP_GTE, address, value, timeout, userInterrupt, externalInterruptId, allocForInterruptWait);
    }
    return 0;
}
// Atomic-advise hints are not supported on the Xe path; returns 0 (no advise).
uint32_t IoctlHelperXe::getAtomicAdvise(bool isNonAtomic) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
2023-09-21 06:34:19 +08:00
// Atomic-access mode hints are not supported on the Xe path; returns 0.
uint32_t IoctlHelperXe::getAtomicAccess(AtomicAccessMode mode) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
2023-01-09 20:07:23 +08:00
// Preferred-location advise is not supported on the Xe path; returns 0.
uint32_t IoctlHelperXe::getPreferredLocationAdvise() {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
2023-04-21 08:36:45 +08:00
// No preferred-location region on the Xe path; always empty.
std::optional<MemoryClassInstance> IoctlHelperXe::getPreferredLocationRegion(PreferredLocation memoryLocation, uint32_t memoryInstance) {
    return std::nullopt;
}
2023-01-09 20:07:23 +08:00
// Xe has no vmAdvise attribute; the request is a no-op reported as success.
bool IoctlHelperXe::setVmBoAdvise(int32_t handle, uint32_t attribute, void *region) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    // There is no vmAdvise attribute in Xe, so return success
    return true;
}
2023-03-08 12:06:00 +08:00
// Chunked variant of setVmBoAdvise; equally a no-op on Xe, reported as success.
bool IoctlHelperXe::setVmBoAdviseForChunking(int32_t handle, uint64_t start, uint64_t length, uint32_t attribute, void *region) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    // There is no vmAdvise attribute in Xe, so return success
    return true;
}
2023-01-09 20:07:23 +08:00
// Prefetches a VM range to a memory region via a DRM_XE_VM_BIND_OP_PREFETCH
// bind. The low bits of 'region' (up to DeviceBitfield width) select the
// sub-device whose memory region is resolved and used as the prefetch target.
// Returns true on success, false if the vm_bind ioctl failed.
bool IoctlHelperXe::setVmPrefetch(uint64_t start, uint64_t length, uint32_t region, uint32_t vmId) {
    xeLog(" -> IoctlHelperXe::%s s=0x%llx l=0x%llx vmid=0x%x\n", __FUNCTION__, start, length, vmId);
    drm_xe_vm_bind bind = {};
    bind.vm_id = vmId;
    bind.num_binds = 1;
    bind.bind.range = length;
    bind.bind.addr = start;
    bind.bind.op = DRM_XE_VM_BIND_OP_PREFETCH;
    auto pHwInfo = this->drm.getRootDeviceEnvironment().getHardwareInfo();
    // Mask off everything but the sub-device id carried in 'region'.
    constexpr uint32_t subDeviceMaskSize = DeviceBitfield().size();
    constexpr uint32_t subDeviceMaskMax = (1u << subDeviceMaskSize) - 1u;
    uint32_t subDeviceId = region & subDeviceMaskMax;
    DeviceBitfield subDeviceMask = (1u << subDeviceId);
    MemoryClassInstance regionInstanceClass = this->drm.getMemoryInfo()->getMemoryRegionClassAndInstance(subDeviceMask, *pHwInfo);
    bind.bind.prefetch_mem_region_instance = regionInstanceClass.memoryInstance;

    int ret = IoctlHelper::ioctl(DrmIoctl::gemVmBind, &bind);
    xeLog(" vm=%d addr=0x%lx range=0x%lx region=0x%x operation=%d(%s) ret=%d\n",
          bind.vm_id,
          bind.bind.addr,
          bind.bind.range,
          bind.bind.prefetch_mem_region_instance,
          bind.bind.op,
          xeGetBindOperationName(bind.bind.op),
          ret);
    if (ret != 0) {
        xeLog("error: %s ret=%d\n", xeGetBindOperationName(bind.bind.op), ret);
        return false;
    }
    return true;
}
// No direct-submission context flag exists on the Xe path; returns 0.
uint32_t IoctlHelperXe::getDirectSubmissionFlag() {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
uint16_t IoctlHelperXe : : getWaitUserFenceSoftFlag ( ) {
xeLog ( " -> IoctlHelperXe::%s \n " , __FUNCTION__ ) ;
2023-06-19 20:59:19 +08:00
return 0 ;
2023-01-09 20:07:23 +08:00
} ;
2023-12-27 21:27:51 +08:00
// Stores the BO handle and its GPU address in the Xe exec-object payload.
// drmContextId, bindInfo and isMarkedForCapture are unused on the Xe path.
void IoctlHelperXe::fillExecObject(ExecObject &execObject, uint32_t handle, uint64_t gpuAddress, uint32_t drmContextId, bool bindInfo, bool isMarkedForCapture) {
    auto execObjectXe = reinterpret_cast<ExecObjectXe *>(execObject.data);
    execObjectXe->gpuAddress = gpuAddress;
    execObjectXe->handle = handle;
}
// Appends a one-line description of the exec object (handle + GPU address)
// to 'logger'; 'size' is unused on the Xe path.
void IoctlHelperXe::logExecObject(const ExecObject &execObject, std::stringstream &logger, size_t size) {
    auto execObjectXe = reinterpret_cast<const ExecObjectXe *>(execObject.data);
    logger << "ExecBufferXe = {handle: BO-" << execObjectXe->handle
           << ", address range: 0x" << reinterpret_cast<void *>(execObjectXe->gpuAddress) << "}\n";
}
// Fills the Xe exec-buffer payload: exec-object pointer, batch start offset
// and exec-queue id. bufferCount, size and flags are unused on the Xe path.
void IoctlHelperXe::fillExecBuffer(ExecBuffer &execBuffer, uintptr_t buffersPtr, uint32_t bufferCount, uint32_t startOffset, uint32_t size, uint64_t flags, uint32_t drmContextId) {
    auto execBufferXe = reinterpret_cast<ExecBufferXe *>(execBuffer.data);
    execBufferXe->execObject = reinterpret_cast<ExecObjectXe *>(buffersPtr);
    execBufferXe->startOffset = startOffset;
    execBufferXe->drmContextId = drmContextId;
}
void IoctlHelperXe : : logExecBuffer ( const ExecBuffer & execBuffer , std : : stringstream & logger ) {
auto execBufferXe = reinterpret_cast < const ExecBufferXe * > ( execBuffer . data ) ;
logger < < " ExecBufferXe { "
< < " exec object: " + std : : to_string ( reinterpret_cast < uintptr_t > ( execBufferXe - > execObject ) )
< < " , start offset: " + std : : to_string ( execBufferXe - > startOffset )
< < " , drm context id: " + std : : to_string ( execBufferXe - > drmContextId )
< < " } \n " ;
}
2023-01-09 20:07:23 +08:00
// Submits a batch buffer on its exec queue via DRM_IOCTL_XE_EXEC, attaching a
// single user-fence sync that signals 'counterValue' at 'completionGpuAddress'
// when the submission completes. A null execBuffer (or payload) is a no-op
// returning 0. Returns the ioctl result.
int IoctlHelperXe::execBuffer(ExecBuffer *execBuffer, uint64_t completionGpuAddress, TaskCountType counterValue) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    int ret = 0;
    if (execBuffer) {
        auto execBufferXe = reinterpret_cast<ExecBufferXe *>(execBuffer->data);
        if (execBufferXe) {
            auto execObject = execBufferXe->execObject;
            uint32_t engine = execBufferXe->drmContextId;
            xeLog("EXEC ofs=%d ctx=0x%x ptr=0x%p\n",
                  execBufferXe->startOffset, execBufferXe->drmContextId, execBufferXe->execObject);
            xeLog(" -> IoctlHelperXe::%s CA=0x%llx v=0x%x ctx=0x%x\n", __FUNCTION__,
                  completionGpuAddress, counterValue, engine);
            // Completion fence: kernel writes counterValue to the given GPU
            // address when the exec finishes.
            struct drm_xe_sync sync[1] = {};
            sync[0].type = DRM_XE_SYNC_TYPE_USER_FENCE;
            sync[0].flags = DRM_XE_SYNC_FLAG_SIGNAL;
            sync[0].addr = completionGpuAddress;
            sync[0].timeline_value = counterValue;
            struct drm_xe_exec exec = {};
            exec.exec_queue_id = engine;
            exec.num_syncs = 1;
            exec.syncs = reinterpret_cast<uintptr_t>(&sync);
            // Batch start = BO GPU address plus the recorded start offset.
            exec.address = execObject->gpuAddress + execBufferXe->startOffset;
            exec.num_batch_buffer = 1;
            ret = IoctlHelper::ioctl(DrmIoctl::gemExecbuffer2, &exec);
            xeLog("r=0x%x batch=0x%lx\n", ret, exec.address);
            if (debugManager.flags.PrintCompletionFenceUsage.get()) {
                std::cout << "Completion fence submitted."
                          << " GPU address: " << std::hex << completionGpuAddress << std::dec
                          << ", value: " << counterValue << std::endl;
            }
        }
    }
    return ret;
}
// Completion fences are supported whenever vm_bind is (always the case on Xe,
// but the decision is delegated to the caller-provided flag).
bool IoctlHelperXe::completionFenceExtensionSupported(const bool isVmBindAvailable) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return isVmBindAvailable;
}
2024-03-28 16:06:58 +08:00
uint64_t IoctlHelperXe : : getFlagsForVmBind ( bool bindCapture , bool bindImmediate , bool bindMakeResident , bool bindLock , bool readOnlyResource ) {
2024-07-25 18:47:37 +08:00
uint64_t flags = 0 ;
2024-03-28 16:06:58 +08:00
xeLog ( " -> IoctlHelperXe::%s %d %d %d %d %d \n " , __FUNCTION__ , bindCapture , bindImmediate , bindMakeResident , bindLock , readOnlyResource ) ;
2024-03-28 01:39:41 +08:00
if ( bindCapture ) {
2024-07-25 18:47:37 +08:00
flags | = DRM_XE_VM_BIND_FLAG_DUMPABLE ;
2024-03-28 01:39:41 +08:00
}
2024-09-24 21:39:34 +08:00
if ( bindImmediate ) {
2024-07-25 18:47:37 +08:00
flags | = DRM_XE_VM_BIND_FLAG_IMMEDIATE ;
}
2024-09-24 21:39:34 +08:00
if ( readOnlyResource ) {
2024-07-25 18:47:37 +08:00
flags | = DRM_XE_VM_BIND_FLAG_READONLY ;
}
2024-09-13 23:38:39 +08:00
if ( bindMakeResident ) {
flags | = DRM_XE_VM_BIND_FLAG_IMMEDIATE ;
}
2024-07-25 18:47:37 +08:00
return flags ;
2024-03-25 23:43:50 +08:00
}
2023-01-19 04:22:32 +08:00
// Engine/region distance queries are not supported on the Xe path; no-op success.
int IoctlHelperXe::queryDistances(std::vector<QueryItem> &queryItems, std::vector<DistanceInfo> &distanceInfos) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
2024-08-13 19:27:54 +08:00
// Page-fault (recoverable fault) support is not exposed on this path.
bool IoctlHelperXe::isPageFaultSupported() {
    xeLog(" -> IoctlHelperXe::%s %d\n", __FUNCTION__, false);
    return false;
}
// No EU-stall fd parameter is defined on this path; returns 0.
uint32_t IoctlHelperXe::getEuStallFdParameter() {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0u;
}
// VM-control region extensions are an i915 concept; not used on Xe (empty result).
std::unique_ptr<uint8_t[]> IoctlHelperXe::createVmControlExtRegion(const std::optional<MemoryClassInstance> &regionInstanceClass) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return {};
}
// Builds flags for drm_xe_vm_create. LR (long-running) mode is always set;
// fault mode is added for explicit page-fault use or when the debugger is
// enabled; a scratch page is requested unless disabled. useVmBind is unused
// (Xe always binds via vm_bind).
uint32_t IoctlHelperXe::getFlagsForVmCreate(bool disableScratch, bool enablePageFault, bool useVmBind) {
    xeLog(" -> IoctlHelperXe::%s %d,%d,%d\n", __FUNCTION__, disableScratch, enablePageFault, useVmBind);
    uint32_t flags = DRM_XE_VM_CREATE_FLAG_LR_MODE;
    bool debuggingEnabled = drm.getRootDeviceEnvironment().executionEnvironment.isDebuggingEnabled();
    if (enablePageFault || debuggingEnabled) {
        flags |= DRM_XE_VM_CREATE_FLAG_FAULT_MODE;
    }
    if (!disableScratch) {
        flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
    }
    return flags;
}
// Access-counter contexts are not supported on the Xe path; no-op success.
uint32_t IoctlHelperXe::createContextWithAccessCounters(GemContextCreateExt &gcc) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
// Cooperative contexts are not supported on the Xe path; no-op success.
uint32_t IoctlHelperXe::createCooperativeContext(GemContextCreateExt &gcc) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
// PAT is passed directly in the Xe bind struct, so this extension is a no-op.
void IoctlHelperXe::fillVmBindExtSetPat(VmBindExtSetPatT &vmBindExtSetPat, uint64_t patIndex, uint64_t nextExtension) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
}
// Encodes a user fence (address + value) into the opaque vmBindExtUserFence
// storage; the tag marks the payload as a UserFenceExtension so the vm_bind
// path can recognize and consume it. nextExtension is unused on Xe.
void IoctlHelperXe::fillVmBindExtUserFence(VmBindExtUserFenceT &vmBindExtUserFence, uint64_t fenceAddress, uint64_t fenceValue, uint64_t nextExtension) {
    xeLog(" -> IoctlHelperXe::%s 0x%lx 0x%lx\n", __FUNCTION__, fenceAddress, fenceValue);
    auto xeBindExtUserFence = reinterpret_cast<UserFenceExtension *>(vmBindExtUserFence);
    UNRECOVERABLE_IF(!xeBindExtUserFence);
    xeBindExtUserFence->tag = UserFenceExtension::tagValue;
    xeBindExtUserFence->addr = fenceAddress;
    xeBindExtUserFence->value = fenceValue;
}
2024-02-27 06:40:34 +08:00
void IoctlHelperXe : : setVmBindUserFence ( VmBindParams & vmBind , VmBindExtUserFenceT vmBindUserFence ) {
xeLog ( " -> IoctlHelperXe::%s \n " , __FUNCTION__ ) ;
vmBind . userFence = castToUint64 ( vmBindUserFence ) ;
return ;
}
2024-08-13 19:27:54 +08:00
// Xe has no vmAdvise atomic attribute; returns an empty optional.
std::optional<uint32_t> IoctlHelperXe::getVmAdviseAtomicAttribute() {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    // There is no vmAdvise attribute in Xe
    return {};
}
// Binds a VM range; delegates to xeVmBind with bindOp = true.
int IoctlHelperXe::vmBind(const VmBindParams &vmBindParams) {
    return xeVmBind(vmBindParams, true);
}
// Unbinds a VM range; delegates to xeVmBind with bindOp = false.
int IoctlHelperXe::vmUnbind(const VmBindParams &vmBindParams) {
    return xeVmBind(vmBindParams, false);
}
2024-02-26 15:57:36 +08:00
// Forwards reset-stats queries to the ioctl dispatcher (translated there to the
// exec-queue BAN property); 'status' and 'resetStatsFault' are unused on Xe.
int IoctlHelperXe::getResetStats(ResetStats &resetStats, uint32_t *status, ResetStatsFault *resetStatsFault) {
    return ioctl(DrmIoctl::getResetStats, &resetStats);
}
2023-01-09 20:07:23 +08:00
// UUID resource registration is not supported on the Xe path; empty result.
UuidRegisterResult IoctlHelperXe::registerUuid(const std::string &uuid, uint32_t uuidClass, uint64_t ptr, uint64_t size) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return {};
}
// String-class UUID registration is not supported on the Xe path; empty result.
UuidRegisterResult IoctlHelperXe::registerStringClassUuid(const std::string &uuid, uint64_t ptr, uint64_t size) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return {};
}
// UUID unregistration is a no-op on the Xe path; reports success.
int IoctlHelperXe::unregisterUuid(uint32_t handle) {
    xeLog(" -> IoctlHelperXe::%s\n", __FUNCTION__);
    return 0;
}
// i915-style per-context debug flags are not supported on Xe.
bool IoctlHelperXe::isContextDebugSupported() {
    return false;
}
// Setting a context debug flag is a no-op on Xe; reports success.
int IoctlHelperXe::setContextDebugFlag(uint32_t drmContextId) {
    return 0;
}
// Debugger attach is available on the Xe path.
bool IoctlHelperXe::isDebugAttachAvailable() {
    return true;
}
// Translates generic DrmParam identifiers into Xe uAPI constants (memory
// classes, engine classes, exec ring selectors). Unknown parameters fall back
// to getDrmParamValueBase, i.e. the raw enum value.
int IoctlHelperXe::getDrmParamValue(DrmParam drmParam) const {
    xeLog(" -> IoctlHelperXe::%s 0x%x %s\n", __FUNCTION__, drmParam, getDrmParamString(drmParam).c_str());
    switch (drmParam) {
    case DrmParam::memoryClassDevice:
        return DRM_XE_MEM_REGION_CLASS_VRAM;
    case DrmParam::memoryClassSystem:
        return DRM_XE_MEM_REGION_CLASS_SYSMEM;
    case DrmParam::engineClassRender:
        return DRM_XE_ENGINE_CLASS_RENDER;
    case DrmParam::engineClassCopy:
        return DRM_XE_ENGINE_CLASS_COPY;
    case DrmParam::engineClassVideo:
        return DRM_XE_ENGINE_CLASS_VIDEO_DECODE;
    case DrmParam::engineClassVideoEnhance:
        return DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE;
    case DrmParam::engineClassCompute:
        return DRM_XE_ENGINE_CLASS_COMPUTE;
    case DrmParam::engineClassInvalid:
        return -1;
    // Legacy exec ring selectors map onto Xe engine classes.
    case DrmParam::execDefault:
        return DRM_XE_ENGINE_CLASS_COMPUTE;
    case DrmParam::execBlt:
        return DRM_XE_ENGINE_CLASS_COPY;
    case DrmParam::execRender:
        return DRM_XE_ENGINE_CLASS_RENDER;
    default:
        return getDrmParamValueBase(drmParam);
    }
}
// Fallback translation: the DrmParam enum value itself.
int IoctlHelperXe::getDrmParamValueBase(DrmParam drmParam) const {
    return static_cast<int>(drmParam);
}
2023-01-19 04:22:32 +08:00
// Central ioctl dispatcher: translates legacy i915-style DrmIoctl requests and
// argument structs into their Xe uAPI equivalents. Some requests are emulated
// entirely in user space (getparam, query, gemUserptr, gemContextGetparam,
// gemContextSetparam); the rest repack the argument and forward to
// IoctlHelper::ioctl. Returns 0 on success, -1 for unsupported
// requests/parameters, otherwise the underlying ioctl result.
int IoctlHelperXe::ioctl(DrmIoctl request, void *arg) {
    int ret = -1;
    xeLog(" => IoctlHelperXe::%s 0x%x\n", __FUNCTION__, request);
    switch (request) {
    case DrmIoctl::getparam: {
        // Emulated locally: only the CS timestamp frequency is served, taken
        // from the cached GT list entry of the default engine's GT.
        auto getParam = reinterpret_cast<GetParam *>(arg);
        ret = 0;
        switch (getParam->param) {
        case static_cast<int>(DrmParam::paramCsTimestampFrequency): {
            *getParam->value = xeGtListData->gt_list[defaultEngine->gt_id].reference_clock;
        } break;
        default:
            ret = -1;
        }
        xeLog(" -> IoctlHelperXe::ioctl Getparam 0x%x/0x%x r=%d\n", getParam->param, *getParam->value, ret);
    } break;
    case DrmIoctl::query: {
        // Emulated locally: only the hwconfig-table query is supported. The
        // first pass (length == 0) reports the size; the second pass copies
        // the cached hwconfig blob into the caller's buffer.
        Query *query = static_cast<Query *>(arg);
        QueryItem *queryItems = reinterpret_cast<QueryItem *>(query->itemsPtr);
        for (auto i = 0u; i < query->numItems; i++) {
            auto &queryItem = queryItems[i];

            if (queryItem.queryId != static_cast<int>(DrmParam::queryHwconfigTable)) {
                xeLog("error: bad query 0x%x\n", queryItem.queryId);
                return -1;
            }
            auto queryDataSize = static_cast<int32_t>(hwconfig.size() * sizeof(uint32_t));
            if (queryItem.length == 0) {
                queryItem.length = queryDataSize;
            } else {
                UNRECOVERABLE_IF(queryItem.length != queryDataSize);
                memcpy_s(reinterpret_cast<void *>(queryItem.dataPtr),
                         queryItem.length, hwconfig.data(), queryItem.length);
            }
            xeLog(" -> IoctlHelperXe::ioctl Query id=0x%x f=0x%x len=%d\n",
                  static_cast<int>(queryItem.queryId), static_cast<int>(queryItem.flags), queryItem.length);
            ret = 0;
        }
    } break;
    case DrmIoctl::gemUserptr: {
        // Xe has no userptr ioctl: the pointer is only recorded in the local
        // bind table; the actual mapping happens later via vm_bind.
        GemUserPtr *d = static_cast<GemUserPtr *>(arg);
        updateBindInfo(d->userPtr);
        ret = 0;
        xeLog(" -> IoctlHelperXe::ioctl GemUserptr p=0x%llx s=0x%llx f=0x%x h=0x%x r=%d\n", d->userPtr,
              d->userSize, d->flags, d->handle, ret);
        xeShowBindTable();
    } break;
    case DrmIoctl::gemContextDestroy: {
        // Contexts map onto Xe exec queues.
        GemContextDestroy *d = static_cast<GemContextDestroy *>(arg);
        struct drm_xe_exec_queue_destroy destroy = {};
        destroy.exec_queue_id = d->contextId;
        ret = IoctlHelper::ioctl(request, &destroy);
        xeLog(" -> IoctlHelperXe::ioctl GemContextDestrory ctx=0x%x r=%d\n",
              d->contextId, ret);
    } break;
    case DrmIoctl::gemContextGetparam: {
        // Emulated locally: only GTT size is answered (address-space size + 1).
        GemContextParam *d = static_cast<GemContextParam *>(arg);
        auto addressSpace = drm.getRootDeviceEnvironment().getHardwareInfo()->capabilityTable.gpuAddressSpace;
        ret = 0;
        switch (d->param) {
        case static_cast<int>(DrmParam::contextParamGttSize):
            d->value = addressSpace + 1u;
            break;
        default:
            ret = -1;
            break;
        }
        xeLog(" -> IoctlHelperXe::ioctl GemContextGetparam r=%d\n", ret);
    } break;
    case DrmIoctl::gemContextSetparam: {
        // Emulated locally: the engine list is cached in contextParamEngine
        // for later exec-queue creation instead of being sent to the kernel.
        GemContextParam *gemContextParam = static_cast<GemContextParam *>(arg);
        switch (gemContextParam->param) {
        case static_cast<int>(DrmParam::contextParamEngines): {
            auto contextEngine = reinterpret_cast<ContextParamEngines<> *>(gemContextParam->value);
            if (!contextEngine || contextEngine->numEnginesInContext == 0) {
                break; // NOTE: leaves ret == -1 (treated as failure)
            }
            auto numEngines = contextEngine->numEnginesInContext;
            contextParamEngine.resize(numEngines);
            memcpy_s(contextParamEngine.data(), numEngines * sizeof(uint64_t), contextEngine->enginesData, numEngines * sizeof(uint64_t));
            ret = 0;
        } break;
        default:
            ret = -1;
            break;
        }
        xeLog(" -> IoctlHelperXe::ioctl GemContextSetparam r=%d\n", ret);
    } break;
    case DrmIoctl::gemClose: {
        std::unique_lock<std::mutex> lock(gemCloseLock);
        struct GemClose *d = static_cast<struct GemClose *>(arg);
        xeShowBindTable();
        bool isUserptr = false;
        // userptr-backed "BOs" have no kernel handle: remove them from the
        // local bind table instead of calling the kernel. If the userptr is
        // not found, the regular close ioctl is issued below.
        if (d->userptr) {
            // NOTE(review): inner 'lock' (xeLock) shadows the outer
            // gemCloseLock guard of the same name.
            std::unique_lock<std::mutex> lock(xeLock);
            for (unsigned int i = 0; i < bindInfo.size(); i++) {
                if (d->userptr == bindInfo[i].userptr) {
                    isUserptr = true;
                    xeLog(" removing 0x%x 0x%lx\n",
                          bindInfo[i].userptr,
                          bindInfo[i].addr);
                    bindInfo.erase(bindInfo.begin() + i);
                    ret = 0;
                    break;
                }
            }
        }
        if (!isUserptr) {
            ret = IoctlHelper::ioctl(request, arg);
        }
        xeLog(" -> IoctlHelperXe::ioctl GemClose h=0x%x r=%d\n", d->handle, ret);
    } break;
    case DrmIoctl::gemVmCreate: {
        // Repack into drm_xe_vm_create; flags were prepared by getFlagsForVmCreate.
        GemVmControl *vmControl = static_cast<GemVmControl *>(arg);
        struct drm_xe_vm_create args = {};
        args.flags = vmControl->flags;
        ret = IoctlHelper::ioctl(request, &args);
        vmControl->vmId = args.vm_id;
        xeLog(" -> IoctlHelperXe::ioctl gemVmCreate f=0x%x vmid=0x%x r=%d\n", vmControl->flags, vmControl->vmId, ret);
    } break;
    case DrmIoctl::gemVmDestroy: {
        GemVmControl *d = static_cast<GemVmControl *>(arg);
        struct drm_xe_vm_destroy args = {};
        args.vm_id = d->vmId;
        ret = IoctlHelper::ioctl(request, &args);
        xeLog(" -> IoctlHelperXe::ioctl GemVmDestroy vmid=0x%x r=%d\n", d->vmId, ret);
    } break;
    case DrmIoctl::gemMmapOffset: {
        GemMmapOffset *d = static_cast<GemMmapOffset *>(arg);
        struct drm_xe_gem_mmap_offset mmo = {};
        mmo.handle = d->handle;
        ret = IoctlHelper::ioctl(request, &mmo);
        d->offset = mmo.offset;
        xeLog(" -> IoctlHelperXe::ioctl GemMmapOffset h=0x%x o=0x%x f=0x%x r=%d\n",
              d->handle, d->offset, d->flags, ret);
    } break;
    case DrmIoctl::getResetStats: {
        // Mapped onto the exec-queue BAN property: a banned queue reports a
        // non-zero value, surfaced to callers as 'batchPending'.
        ResetStats *resetStats = static_cast<ResetStats *>(arg);
        drm_xe_exec_queue_get_property getProperty{};
        getProperty.exec_queue_id = resetStats->contextId;
        getProperty.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN;
        ret = IoctlHelper::ioctl(request, &getProperty);
        resetStats->batchPending = static_cast<uint32_t>(getProperty.value);
        xeLog(" -> IoctlHelperXe::ioctl GetResetStats ctx=0x%x r=%d value=%llu\n",
              resetStats->contextId, ret, getProperty.value);
    } break;
    case DrmIoctl::primeFdToHandle: {
        PrimeHandle *prime = static_cast<PrimeHandle *>(arg);
        ret = IoctlHelper::ioctl(request, arg);
        xeLog(" ->PrimeFdToHandle h=0x%x f=0x%x d=0x%x r=%d\n",
              prime->handle, prime->flags, prime->fileDescriptor, ret);
    } break;
    case DrmIoctl::primeHandleToFd: {
        PrimeHandle *prime = static_cast<PrimeHandle *>(arg);
        ret = IoctlHelper::ioctl(request, arg);
        xeLog(" ->PrimeHandleToFd h=0x%x f=0x%x d=0x%x r=%d\n",
              prime->handle, prime->flags, prime->fileDescriptor, ret);
    } break;
    case DrmIoctl::gemCreate: {
        // Pass-through: arg already is a drm_xe_gem_create (see createGem*).
        drm_xe_gem_create *gemCreate = static_cast<drm_xe_gem_create *>(arg);
        ret = IoctlHelper::ioctl(request, arg);
        xeLog(" -> IoctlHelperXe::ioctl GemCreate h=0x%x s=0x%lx p=0x%x f=0x%x vmid=0x%x r=%d\n",
              gemCreate->handle, gemCreate->size, gemCreate->placement, gemCreate->flags, gemCreate->vm_id, ret);
    } break;
    // Debugger and performance requests are delegated to dedicated helpers.
    case DrmIoctl::debuggerOpen: {
        ret = debuggerOpenIoctl(request, arg);
    } break;
    case DrmIoctl::metadataCreate: {
        ret = debuggerMetadataCreateIoctl(request, arg);
    } break;
    case DrmIoctl::metadataDestroy: {
        ret = debuggerMetadataDestroyIoctl(request, arg);
    } break;
    case DrmIoctl::perfQuery:
    case DrmIoctl::perfOpen: {
        ret = perfOpenIoctl(request, arg);
    } break;

    default:
        xeLog("Not handled 0x%x\n", request);
        UNRECOVERABLE_IF(true);
    }
    return ret;
}
void IoctlHelperXe : : xeShowBindTable ( ) {
2024-02-02 18:00:45 +08:00
if ( debugManager . flags . PrintXeLogs . get ( ) ) {
2023-07-03 19:14:02 +08:00
std : : unique_lock < std : : mutex > lock ( xeLock ) ;
2024-12-13 22:15:31 +08:00
xeLog ( " show bind: (<index> <userptr> <addr>) \n " , " " ) ;
2023-07-03 19:14:02 +08:00
for ( unsigned int i = 0 ; i < bindInfo . size ( ) ; i + + ) {
2024-12-13 22:15:31 +08:00
xeLog ( " %3d x%016lx x%016lx \n " , i ,
2023-07-03 19:14:02 +08:00
bindInfo [ i ] . userptr ,
2024-12-13 22:15:31 +08:00
bindInfo [ i ] . addr ) ;
2023-07-03 19:14:02 +08:00
}
2023-01-09 20:07:23 +08:00
}
}
2024-06-06 19:23:55 +08:00
int IoctlHelperXe : : createDrmContext ( Drm & drm , OsContextLinux & osContext , uint32_t drmVmId , uint32_t deviceIndex , bool allocateInterrupt ) {
2023-01-09 20:07:23 +08:00
uint32_t drmContextId = 0 ;
xeLog ( " createDrmContext VM=0x%x \n " , drmVmId ) ;
2024-10-08 21:23:41 +08:00
drm . bindDrmContext ( drmContextId , deviceIndex , osContext . getEngineType ( ) ) ;
2024-03-15 21:47:40 +08:00
2024-07-03 03:15:10 +08:00
UNRECOVERABLE_IF ( contextParamEngine . empty ( ) ) ;
2023-01-09 20:07:23 +08:00
2024-03-22 22:58:25 +08:00
std : : array < drm_xe_ext_set_property , maxContextSetProperties > extProperties { } ;
2024-07-03 03:15:10 +08:00
uint32_t extPropertyIndex { 0U } ;
setOptionalContextProperties ( drm , & extProperties , extPropertyIndex ) ;
2025-02-24 23:34:54 +08:00
setContextProperties ( osContext , deviceIndex , & extProperties , extPropertyIndex ) ;
2024-03-22 22:58:25 +08:00
2024-07-03 03:15:10 +08:00
drm_xe_exec_queue_create create { } ;
create . width = 1 ;
create . num_placements = contextParamEngine . size ( ) ;
create . vm_id = drmVmId ;
create . instances = castToUint64 ( contextParamEngine . data ( ) ) ;
create . extensions = ( extPropertyIndex > 0U ? castToUint64 ( extProperties . data ( ) ) : 0UL ) ;
applyContextFlags ( & create , allocateInterrupt ) ;
2024-01-09 17:54:33 +08:00
2023-12-12 16:48:32 +08:00
int ret = IoctlHelper : : ioctl ( DrmIoctl : : gemContextCreateExt , & create ) ;
2023-09-14 02:58:57 +08:00
drmContextId = create . exec_queue_id ;
2024-07-03 03:15:10 +08:00
2024-03-21 22:47:48 +08:00
xeLog ( " %s:%d (%d) vmid=0x%x ctx=0x%x r=0x%x \n " , xeGetClassName ( contextParamEngine [ 0 ] . engine_class ) ,
contextParamEngine [ 0 ] . engine_instance , create . num_placements , drmVmId , drmContextId , ret ) ;
2023-01-09 20:07:23 +08:00
if ( ret ! = 0 ) {
UNRECOVERABLE_IF ( true ) ;
}
return drmContextId ;
}
2023-09-18 18:49:16 +08:00
// Issues a single DRM_IOCTL_XE_VM_BIND (map on bind, unmap on unbind) described
// by vmBindParams, then waits for its completion through the user fence carried
// in vmBindParams.userFence.
// Returns 0 on success, the ioctl result on bind failure, or the wait result.
int IoctlHelperXe::xeVmBind(const VmBindParams &vmBindParams, bool isBind) {
    auto gmmHelper = drm.getRootDeviceEnvironment().getGmmHelper();
    int ret = -1;
    const char *operation = isBind ? "bind" : "unbind";
    uint64_t userptr = 0u;
    // Resolve whether this range is backed by a userptr allocation, using the
    // helper-owned bindInfo table (userptr <-> decanonized GPU VA). The lock
    // only covers the table lookup/update, not the ioctl itself.
    {
        std::unique_lock<std::mutex> lock(xeLock);
        if (isBind) {
            if (vmBindParams.userptr) {
                for (auto i = 0u; i < bindInfo.size(); i++) {
                    if (vmBindParams.userptr == bindInfo[i].userptr) {
                        userptr = bindInfo[i].userptr;
                        // Remember where this userptr is being mapped so the
                        // later unbind can find it by address.
                        bindInfo[i].addr = gmmHelper->decanonize(vmBindParams.start);
                        break;
                    }
                }
            }
        } else // unbind
        {
            // On unbind only the GPU VA is known; map it back to the userptr.
            auto address = gmmHelper->decanonize(vmBindParams.start);
            for (auto i = 0u; i < bindInfo.size(); i++) {
                if (address == bindInfo[i].addr) {
                    userptr = bindInfo[i].userptr;
                    break;
                }
            }
        }
    }

    drm_xe_vm_bind bind = {};
    bind.vm_id = vmBindParams.vmId;

    bind.num_binds = 1;

    bind.bind.range = vmBindParams.length;
    bind.bind.obj_offset = vmBindParams.offset;
    bind.bind.pat_index = static_cast<uint16_t>(vmBindParams.patIndex);
    bind.bind.extensions = vmBindParams.extensions;
    bind.bind.flags = static_cast<uint32_t>(vmBindParams.flags);

    drm_xe_sync sync[1] = {};
    if (vmBindParams.sharedSystemUsmBind == true) {
        // Shared-system USM bind targets address 0 (whole-VA semantics).
        bind.bind.addr = 0;
    } else {
        bind.bind.addr = gmmHelper->decanonize(vmBindParams.start);
    }
    bind.num_syncs = 1;
    // A user fence is mandatory: completion of the bind is signalled through it.
    UNRECOVERABLE_IF(vmBindParams.userFence == 0x0);
    auto xeBindExtUserFence = reinterpret_cast<UserFenceExtension *>(vmBindParams.userFence);
    UNRECOVERABLE_IF(xeBindExtUserFence->tag != UserFenceExtension::tagValue);
    sync[0].type = DRM_XE_SYNC_TYPE_USER_FENCE;
    sync[0].flags = DRM_XE_SYNC_FLAG_SIGNAL;
    sync[0].addr = xeBindExtUserFence->addr;
    sync[0].timeline_value = xeBindExtUserFence->value;
    bind.syncs = reinterpret_cast<uintptr_t>(&sync);

    if (isBind) {
        bind.bind.op = DRM_XE_VM_BIND_OP_MAP;
        bind.bind.obj = vmBindParams.handle;
        if (userptr) {
            // userptr-backed: map via MAP_USERPTR with no BO; the host pointer
            // travels in obj_offset, per the Xe uAPI.
            bind.bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR;
            bind.bind.obj = 0;
            bind.bind.obj_offset = userptr;
        }
    } else {
        if (vmBindParams.sharedSystemUsmEnabled) {
            // Use of MAP on unbind required for restoring the address space to the system allocator
            bind.bind.op = DRM_XE_VM_BIND_OP_MAP;
            bind.bind.flags |= DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR;
        } else {
            bind.bind.op = DRM_XE_VM_BIND_OP_UNMAP;
            if (userptr) {
                bind.bind.obj_offset = userptr;
            }
        }
        bind.bind.obj = 0;
    }

    ret = IoctlHelper::ioctl(DrmIoctl::gemVmBind, &bind);

    xeLog("vm=%d obj=0x%x off=0x%llx range=0x%llx addr=0x%llx operation=%d(%s) flags=%d(%s) nsy=%d pat=%hu ret=%d\n",
          bind.vm_id,
          bind.bind.obj,
          bind.bind.obj_offset,
          bind.bind.range,
          bind.bind.addr,
          bind.bind.op,
          xeGetBindOperationName(bind.bind.op),
          bind.bind.flags,
          xeGetBindFlagNames(bind.bind.flags).c_str(),
          bind.num_syncs,
          bind.bind.pat_index,
          ret);
    if (ret != 0) {
        xeLog("error: %s\n", operation);
        return ret;
    }

    // Wait for the fence the KMD signals once the (un)bind completed.
    // Infinite timeout under the debugger (binds may be held), 1s otherwise;
    // VmBindWaitUserFenceTimeout overrides both when set.
    constexpr auto oneSecTimeout = 1000000000ll;
    constexpr auto infiniteTimeout = -1;
    bool debuggingEnabled = drm.getRootDeviceEnvironment().executionEnvironment.isDebuggingEnabled();
    uint64_t timeout = debuggingEnabled ? infiniteTimeout : oneSecTimeout;
    if (debugManager.flags.VmBindWaitUserFenceTimeout.get() != -1) {
        timeout = debugManager.flags.VmBindWaitUserFenceTimeout.get();
    }
    return xeWaitUserFence(bind.exec_queue_id, DRM_XE_UFENCE_WAIT_OP_EQ,
                           sync[0].addr,
                           sync[0].timeline_value, timeout,
                           false, NEO::InterruptId::notUsed, nullptr);
}
std : : string IoctlHelperXe : : getDrmParamString ( DrmParam drmParam ) const {
switch ( drmParam ) {
2023-12-13 17:05:31 +08:00
case DrmParam : : contextCreateExtSetparam :
2023-01-09 20:07:23 +08:00
return " ContextCreateExtSetparam " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextCreateFlagsUseExtensions :
2023-01-09 20:07:23 +08:00
return " ContextCreateFlagsUseExtensions " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextEnginesExtLoadBalance :
2023-01-09 20:07:23 +08:00
return " ContextEnginesExtLoadBalance " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextParamEngines :
2023-01-09 20:07:23 +08:00
return " ContextParamEngines " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextParamGttSize :
2023-01-09 20:07:23 +08:00
return " ContextParamGttSize " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextParamPersistence :
2023-01-09 20:07:23 +08:00
return " ContextParamPersistence " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextParamPriority :
2023-01-09 20:07:23 +08:00
return " ContextParamPriority " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextParamRecoverable :
2023-01-09 20:07:23 +08:00
return " ContextParamRecoverable " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextParamSseu :
2023-01-09 20:07:23 +08:00
return " ContextParamSseu " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : contextParamVm :
2023-01-09 20:07:23 +08:00
return " ContextParamVm " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : engineClassRender :
2023-01-09 20:07:23 +08:00
return " EngineClassRender " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : engineClassCompute :
2023-01-09 20:07:23 +08:00
return " EngineClassCompute " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : engineClassCopy :
2023-01-09 20:07:23 +08:00
return " EngineClassCopy " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : engineClassVideo :
2023-01-09 20:07:23 +08:00
return " EngineClassVideo " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : engineClassVideoEnhance :
2023-01-09 20:07:23 +08:00
return " EngineClassVideoEnhance " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : engineClassInvalid :
2023-01-09 20:07:23 +08:00
return " EngineClassInvalid " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : engineClassInvalidNone :
2023-01-09 20:07:23 +08:00
return " EngineClassInvalidNone " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : execBlt :
2023-01-09 20:07:23 +08:00
return " ExecBlt " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : execDefault :
2023-01-09 20:07:23 +08:00
return " ExecDefault " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : execNoReloc :
2023-01-09 20:07:23 +08:00
return " ExecNoReloc " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : execRender :
2023-01-09 20:07:23 +08:00
return " ExecRender " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : memoryClassDevice :
2023-01-09 20:07:23 +08:00
return " MemoryClassDevice " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : memoryClassSystem :
2023-01-09 20:07:23 +08:00
return " MemoryClassSystem " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : mmapOffsetWb :
2023-01-09 20:07:23 +08:00
return " MmapOffsetWb " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : mmapOffsetWc :
2023-01-09 20:07:23 +08:00
return " MmapOffsetWc " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : paramHasPooledEu :
2023-01-09 20:07:23 +08:00
return " ParamHasPooledEu " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : paramEuTotal :
2023-01-09 20:07:23 +08:00
return " ParamEuTotal " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : paramSubsliceTotal :
2023-01-09 20:07:23 +08:00
return " ParamSubsliceTotal " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : paramMinEuInPool :
2023-01-09 20:07:23 +08:00
return " ParamMinEuInPool " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : paramCsTimestampFrequency :
2023-01-09 20:07:23 +08:00
return " ParamCsTimestampFrequency " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : paramHasVmBind :
2023-01-09 20:07:23 +08:00
return " ParamHasVmBind " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : paramHasPageFault :
2023-01-09 20:07:23 +08:00
return " ParamHasPageFault " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : queryEngineInfo :
2023-01-09 20:07:23 +08:00
return " QueryEngineInfo " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : queryHwconfigTable :
2023-01-09 20:07:23 +08:00
return " QueryHwconfigTable " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : queryComputeSlices :
2023-01-09 20:07:23 +08:00
return " QueryComputeSlices " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : queryMemoryRegions :
2023-01-09 20:07:23 +08:00
return " QueryMemoryRegions " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : queryTopologyInfo :
2023-01-09 20:07:23 +08:00
return " QueryTopologyInfo " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : tilingNone :
2023-01-09 20:07:23 +08:00
return " TilingNone " ;
2023-12-13 17:05:31 +08:00
case DrmParam : : tilingY :
2023-01-09 20:07:23 +08:00
return " TilingY " ;
default :
return " DrmParam::<missing> " ;
}
}
2024-04-12 18:50:47 +08:00
inline std::string getDirectoryWithFrequencyFiles(int tileId, int gtId) {
    // Sysfs directory holding the frequency files for a given tile/gt pair.
    std::string directory{"/device/tile"};
    directory += std::to_string(tileId);
    directory += "/gt";
    directory += std::to_string(gtId);
    directory += "/freq0";
    return directory;
}
2023-01-09 20:07:23 +08:00
std : : string IoctlHelperXe : : getFileForMaxGpuFrequency ( ) const {
2024-04-12 21:09:30 +08:00
return getFileForMaxGpuFrequencyOfSubDevice ( 0 /* tileId */ ) ;
2023-01-09 20:07:23 +08:00
}
2024-04-12 21:09:30 +08:00
std : : string IoctlHelperXe : : getFileForMaxGpuFrequencyOfSubDevice ( int tileId ) const {
return getDirectoryWithFrequencyFiles ( tileId , tileIdToGtId [ tileId ] ) + " /max_freq " ;
2023-01-09 20:07:23 +08:00
}
2024-04-12 21:09:30 +08:00
std : : string IoctlHelperXe : : getFileForMaxMemoryFrequencyOfSubDevice ( int tileId ) const {
return getDirectoryWithFrequencyFiles ( tileId , tileIdToGtId [ tileId ] ) + " /rp0_freq " ;
2023-01-09 20:07:23 +08:00
}
// Fabric latency/bandwidth query — not supported by the Xe ioctl helper.
// Always reports failure and leaves the out-parameters untouched, so callers
// must not read latency/bandwidth after a false return.
bool IoctlHelperXe::getFabricLatency(uint32_t fabricId, uint32_t &latency, uint32_t &bandwidth) {
    return false;
}
2023-03-17 21:00:44 +08:00
// Always true on Xe: VM bind/unbind completion is tracked through user fences
// (see xeVmBind), so callers must wait regardless of the bind direction.
bool IoctlHelperXe::isWaitBeforeBindRequired(bool bind) const {
    return true;
}
2023-09-12 22:57:55 +08:00
// No-op on the Xe path: the helper performs no set-tiling ioctl and reports
// success. NOTE(review): presumably tiling is conveyed through surface
// state/PAT on Xe rather than a GEM ioctl — confirm.
bool IoctlHelperXe::setGemTiling(void *setTiling) {
    return true;
}
// No-op on the Xe path: nothing is queried and the call reports success;
// the output structure passed in is left unmodified.
bool IoctlHelperXe::getGemTiling(void *setTiling) {
    return true;
}
2023-09-26 18:44:11 +08:00
// Always true for Xe: this helper requires immediate VM bind semantics
// (each bind is issued and waited on synchronously in xeVmBind).
bool IoctlHelperXe::isImmediateVmBindRequired() const {
    return true;
}
2024-03-21 22:47:48 +08:00
2025-02-03 18:27:00 +08:00
bool IoctlHelperXe : : makeResidentBeforeLockNeeded ( ) const {
2025-03-05 09:53:49 +08:00
auto makeResidentBeforeLockNeeded = false ;
if ( debugManager . flags . EnableDeferBacking . get ( ) ) {
makeResidentBeforeLockNeeded = true ;
2025-02-03 18:27:00 +08:00
}
return makeResidentBeforeLockNeeded ;
}
2024-03-21 22:47:48 +08:00
void IoctlHelperXe : : insertEngineToContextParams ( ContextParamEngines < > & contextParamEngines , uint32_t engineId , const EngineClassInstance * engineClassInstance , uint32_t tileId , bool hasVirtualEngines ) {
auto engines = reinterpret_cast < drm_xe_engine_class_instance * > ( contextParamEngines . enginesData ) ;
if ( engineClassInstance ) {
engines [ engineId ] . engine_class = engineClassInstance - > engineClass ;
engines [ engineId ] . engine_instance = engineClassInstance - > engineInstance ;
2024-04-15 19:26:13 +08:00
engines [ engineId ] . gt_id = tileIdToGtId [ tileId ] ;
2024-03-21 22:47:48 +08:00
contextParamEngines . numEnginesInContext = std : : max ( contextParamEngines . numEnginesInContext , engineId + 1 ) ;
}
}
2024-03-19 07:41:10 +08:00
void IoctlHelperXe : : registerBOBindHandle ( Drm * drm , DrmAllocation * drmAllocation ) {
DrmResourceClass resourceClass = DrmResourceClass : : maxSize ;
switch ( drmAllocation - > getAllocationType ( ) ) {
case AllocationType : : debugContextSaveArea :
resourceClass = DrmResourceClass : : contextSaveArea ;
break ;
case AllocationType : : debugSbaTrackingBuffer :
resourceClass = DrmResourceClass : : sbaTrackingBuffer ;
break ;
case AllocationType : : debugModuleArea :
resourceClass = DrmResourceClass : : moduleHeapDebugArea ;
break ;
2025-01-07 06:12:14 +08:00
case AllocationType : : kernelIsa :
if ( drmAllocation - > storageInfo . tileInstanced ) {
auto & bos = drmAllocation - > getBOs ( ) ;
for ( auto bo : bos ) {
if ( ! bo ) {
continue ;
}
bo - > setRegisteredBindHandleCookie ( drmAllocation - > storageInfo . subDeviceBitfield . to_ulong ( ) ) ;
}
}
return ;
2024-03-19 07:41:10 +08:00
default :
return ;
}
uint64_t gpuAddress = drmAllocation - > getGpuAddress ( ) ;
auto handle = drm - > registerResource ( resourceClass , & gpuAddress , sizeof ( gpuAddress ) ) ;
drmAllocation - > addRegisteredBoBindHandle ( handle ) ;
auto & bos = drmAllocation - > getBOs ( ) ;
for ( auto bo : bos ) {
if ( ! bo ) {
continue ;
}
bo - > addBindExtHandle ( handle ) ;
bo - > markForCapture ( ) ;
bo - > requireImmediateBinding ( true ) ;
}
}
2024-04-17 00:19:33 +08:00
// Exporting a VM as a file descriptor is not supported by the Xe helper;
// returns false and leaves *fd unmodified.
bool IoctlHelperXe::getFdFromVmExport(uint32_t vmId, uint32_t flags, int32_t *fd) {
    return false;
}
2024-07-03 03:15:10 +08:00
void IoctlHelperXe : : setOptionalContextProperties ( Drm & drm , void * extProperties , uint32_t & extIndexInOut ) {
auto & ext = * reinterpret_cast < std : : array < drm_xe_ext_set_property , maxContextSetProperties > * > ( extProperties ) ;
if ( ( contextParamEngine [ 0 ] . engine_class = = DRM_XE_ENGINE_CLASS_RENDER ) | | ( contextParamEngine [ 0 ] . engine_class = = DRM_XE_ENGINE_CLASS_COMPUTE ) ) {
2024-09-18 01:01:05 +08:00
if ( drm . getRootDeviceEnvironment ( ) . executionEnvironment . isDebuggingEnabled ( ) ) {
2024-07-03 03:15:10 +08:00
ext [ extIndexInOut ] . base . next_extension = 0 ;
ext [ extIndexInOut ] . base . name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY ;
2024-09-18 01:01:05 +08:00
ext [ extIndexInOut ] . property = getEudebugExtProperty ( ) ;
2025-02-05 04:24:41 +08:00
ext [ extIndexInOut ] . value = getEudebugExtPropertyValue ( ) ;
2024-07-03 03:15:10 +08:00
extIndexInOut + + ;
}
}
}
2025-02-24 23:34:54 +08:00
void IoctlHelperXe : : setContextProperties ( const OsContextLinux & osContext , uint32_t deviceIndex , void * extProperties , uint32_t & extIndexInOut ) {
2024-04-17 00:19:33 +08:00
auto & ext = * reinterpret_cast < std : : array < drm_xe_ext_set_property , maxContextSetProperties > * > ( extProperties ) ;
if ( osContext . isLowPriority ( ) ) {
ext [ extIndexInOut ] . base . name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY ;
ext [ extIndexInOut ] . property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY ;
ext [ extIndexInOut ] . value = 0 ;
if ( extIndexInOut > 0 ) {
ext [ extIndexInOut - 1 ] . base . next_extension = castToUint64 ( & ext [ extIndexInOut ] ) ;
}
extIndexInOut + + ;
}
}
unsigned int IoctlHelperXe : : getIoctlRequestValue ( DrmIoctl ioctlRequest ) const {
xeLog ( " -> IoctlHelperXe::%s 0x%x \n " , __FUNCTION__ , ioctlRequest ) ;
switch ( ioctlRequest ) {
case DrmIoctl : : gemClose :
RETURN_ME ( DRM_IOCTL_GEM_CLOSE ) ;
case DrmIoctl : : gemVmCreate :
RETURN_ME ( DRM_IOCTL_XE_VM_CREATE ) ;
case DrmIoctl : : gemVmDestroy :
RETURN_ME ( DRM_IOCTL_XE_VM_DESTROY ) ;
case DrmIoctl : : gemMmapOffset :
RETURN_ME ( DRM_IOCTL_XE_GEM_MMAP_OFFSET ) ;
case DrmIoctl : : gemCreate :
RETURN_ME ( DRM_IOCTL_XE_GEM_CREATE ) ;
case DrmIoctl : : gemExecbuffer2 :
RETURN_ME ( DRM_IOCTL_XE_EXEC ) ;
case DrmIoctl : : gemVmBind :
RETURN_ME ( DRM_IOCTL_XE_VM_BIND ) ;
case DrmIoctl : : query :
RETURN_ME ( DRM_IOCTL_XE_DEVICE_QUERY ) ;
case DrmIoctl : : gemContextCreateExt :
RETURN_ME ( DRM_IOCTL_XE_EXEC_QUEUE_CREATE ) ;
case DrmIoctl : : gemContextDestroy :
RETURN_ME ( DRM_IOCTL_XE_EXEC_QUEUE_DESTROY ) ;
case DrmIoctl : : gemWaitUserFence :
RETURN_ME ( DRM_IOCTL_XE_WAIT_USER_FENCE ) ;
case DrmIoctl : : primeFdToHandle :
RETURN_ME ( DRM_IOCTL_PRIME_FD_TO_HANDLE ) ;
case DrmIoctl : : primeHandleToFd :
RETURN_ME ( DRM_IOCTL_PRIME_HANDLE_TO_FD ) ;
2024-04-24 16:23:11 +08:00
case DrmIoctl : : getResetStats :
RETURN_ME ( DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY ) ;
2024-04-17 00:19:33 +08:00
case DrmIoctl : : debuggerOpen :
case DrmIoctl : : metadataCreate :
case DrmIoctl : : metadataDestroy :
return getIoctlRequestValueDebugger ( ioctlRequest ) ;
2024-05-30 03:35:26 +08:00
case DrmIoctl : : perfOpen :
case DrmIoctl : : perfEnable :
case DrmIoctl : : perfDisable :
2025-03-12 02:21:47 +08:00
case DrmIoctl : : perfQuery :
2024-05-30 03:35:26 +08:00
return getIoctlRequestValuePerf ( ioctlRequest ) ;
2024-04-17 00:19:33 +08:00
default :
UNRECOVERABLE_IF ( true ) ;
return 0 ;
}
}
2024-05-30 03:35:26 +08:00
int IoctlHelperXe : : ioctl ( int fd , DrmIoctl request , void * arg ) {
return NEO : : SysCalls : : ioctl ( fd , getIoctlRequestValue ( request ) , arg ) ;
}
2024-04-17 00:19:33 +08:00
std : : string IoctlHelperXe : : getIoctlString ( DrmIoctl ioctlRequest ) const {
switch ( ioctlRequest ) {
case DrmIoctl : : gemClose :
STRINGIFY_ME ( DRM_IOCTL_GEM_CLOSE ) ;
case DrmIoctl : : gemVmCreate :
STRINGIFY_ME ( DRM_IOCTL_XE_VM_CREATE ) ;
case DrmIoctl : : gemVmDestroy :
STRINGIFY_ME ( DRM_IOCTL_XE_VM_DESTROY ) ;
case DrmIoctl : : gemMmapOffset :
STRINGIFY_ME ( DRM_IOCTL_XE_GEM_MMAP_OFFSET ) ;
case DrmIoctl : : gemCreate :
STRINGIFY_ME ( DRM_IOCTL_XE_GEM_CREATE ) ;
case DrmIoctl : : gemExecbuffer2 :
STRINGIFY_ME ( DRM_IOCTL_XE_EXEC ) ;
case DrmIoctl : : gemVmBind :
STRINGIFY_ME ( DRM_IOCTL_XE_VM_BIND ) ;
case DrmIoctl : : query :
STRINGIFY_ME ( DRM_IOCTL_XE_DEVICE_QUERY ) ;
case DrmIoctl : : gemContextCreateExt :
STRINGIFY_ME ( DRM_IOCTL_XE_EXEC_QUEUE_CREATE ) ;
case DrmIoctl : : gemContextDestroy :
STRINGIFY_ME ( DRM_IOCTL_XE_EXEC_QUEUE_DESTROY ) ;
case DrmIoctl : : gemWaitUserFence :
STRINGIFY_ME ( DRM_IOCTL_XE_WAIT_USER_FENCE ) ;
case DrmIoctl : : primeFdToHandle :
STRINGIFY_ME ( DRM_IOCTL_PRIME_FD_TO_HANDLE ) ;
case DrmIoctl : : primeHandleToFd :
STRINGIFY_ME ( DRM_IOCTL_PRIME_HANDLE_TO_FD ) ;
case DrmIoctl : : debuggerOpen :
STRINGIFY_ME ( DRM_IOCTL_XE_EUDEBUG_CONNECT ) ;
case DrmIoctl : : metadataCreate :
STRINGIFY_ME ( DRM_IOCTL_XE_DEBUG_METADATA_CREATE ) ;
case DrmIoctl : : metadataDestroy :
STRINGIFY_ME ( DRM_IOCTL_XE_DEBUG_METADATA_DESTROY ) ;
2024-04-24 16:23:11 +08:00
case DrmIoctl : : getResetStats :
STRINGIFY_ME ( DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY ) ;
2024-04-17 00:19:33 +08:00
default :
return " ??? " ;
}
}
2024-07-25 18:47:37 +08:00
void IoctlHelperXe : : querySupportedFeatures ( ) {
2024-08-13 19:27:54 +08:00
auto checkVmCreateFlagsSupport = [ & ] ( uint32_t flags ) - > bool {
struct drm_xe_vm_create vmCreate = { } ;
vmCreate . flags = flags ;
2024-09-24 21:39:34 +08:00
auto ret = IoctlHelper : : ioctl ( DrmIoctl : : gemVmCreate , & vmCreate ) ;
2024-08-13 19:27:54 +08:00
if ( ret = = 0 ) {
struct drm_xe_vm_destroy vmDestroy = { } ;
vmDestroy . vm_id = vmCreate . vm_id ;
ret = IoctlHelper : : ioctl ( DrmIoctl : : gemVmDestroy , & vmDestroy ) ;
DEBUG_BREAK_IF ( ret ! = 0 ) ;
return true ;
}
return false ;
} ;
supportedFeatures . flags . pageFault = checkVmCreateFlagsSupport ( DRM_XE_VM_CREATE_FLAG_LR_MODE | DRM_XE_VM_CREATE_FLAG_FAULT_MODE ) ;
2024-07-25 18:47:37 +08:00
} ;
2025-01-24 11:59:30 +08:00
2023-10-05 06:57:00 +08:00
} // namespace NEO