2017-12-21 07:45:38 +08:00
/*
2022-01-20 02:14:10 +08:00
* Copyright ( C ) 2018 - 2022 Intel Corporation
2017-12-21 07:45:38 +08:00
*
2018-09-18 15:11:08 +08:00
* SPDX - License - Identifier : MIT
2017-12-21 07:45:38 +08:00
*
*/
2020-02-24 05:44:01 +08:00
# include "shared/source/os_interface/linux/drm_memory_manager.h"
2019-02-27 18:39:32 +08:00
2020-02-24 05:44:01 +08:00
# include "shared/source/command_stream/command_stream_receiver.h"
# include "shared/source/execution_environment/execution_environment.h"
# include "shared/source/execution_environment/root_device_environment.h"
2022-02-04 21:50:19 +08:00
# include "shared/source/gmm_helper/cache_settings_helper.h"
2022-02-10 02:03:05 +08:00
# include "shared/source/gmm_helper/client_context/gmm_client_context.h"
2020-02-24 05:44:01 +08:00
# include "shared/source/gmm_helper/gmm.h"
# include "shared/source/gmm_helper/gmm_helper.h"
# include "shared/source/gmm_helper/resource_info.h"
2020-08-27 14:55:09 +08:00
# include "shared/source/helpers/heap_assigner.h"
2020-02-24 05:44:01 +08:00
# include "shared/source/helpers/hw_info.h"
# include "shared/source/helpers/ptr_math.h"
2021-09-08 22:16:07 +08:00
# include "shared/source/helpers/string.h"
2020-02-24 05:44:01 +08:00
# include "shared/source/helpers/surface_format_info.h"
2022-02-01 07:29:01 +08:00
# include "shared/source/memory_manager/allocation_properties.h"
2020-02-24 05:44:01 +08:00
# include "shared/source/memory_manager/host_ptr_manager.h"
2022-03-22 00:02:12 +08:00
# include "shared/source/memory_manager/memory_banks.h"
# include "shared/source/memory_manager/memory_pool.h"
2020-02-24 05:44:01 +08:00
# include "shared/source/memory_manager/residency.h"
# include "shared/source/os_interface/linux/allocator_helper.h"
2022-07-27 00:39:17 +08:00
# include "shared/source/os_interface/linux/drm_allocation.h"
# include "shared/source/os_interface/linux/drm_buffer_object.h"
# include "shared/source/os_interface/linux/drm_gem_close_worker.h"
2020-05-08 16:04:06 +08:00
# include "shared/source/os_interface/linux/drm_memory_operations_handler.h"
2022-07-27 00:39:17 +08:00
# include "shared/source/os_interface/linux/drm_neo.h"
2022-05-18 01:16:13 +08:00
# include "shared/source/os_interface/linux/drm_wrappers.h"
2020-02-24 05:44:01 +08:00
# include "shared/source/os_interface/linux/os_context_linux.h"
2021-05-21 07:17:57 +08:00
# include "shared/source/os_interface/os_interface.h"
2017-12-21 07:45:38 +08:00
2019-02-27 18:39:32 +08:00
# include <cstring>
# include <iostream>
2020-07-17 19:40:52 +08:00
# include <memory>
2017-12-21 07:45:38 +08:00
2019-03-26 18:59:46 +08:00
namespace NEO {
2017-12-21 07:45:38 +08:00
2019-03-25 20:12:55 +08:00
// Constructor: registers GPU-VA alignment candidates (64KB always; 2MB and a
// debug-flag custom alignment optionally) and then runs initialize() with the
// requested gem-close-worker mode.
DrmMemoryManager::DrmMemoryManager(gemCloseWorkerMode mode,
                                   bool forcePinAllowed,
                                   bool validateHostPtrMemory,
                                   ExecutionEnvironment &executionEnvironment) : MemoryManager(executionEnvironment),
                                                                                forcePinEnabled(forcePinAllowed),
                                                                                validateHostPtrMemory(validateHostPtrMemory) {
    // 64KB pages are always a candidate, any amount of wastage accepted.
    alignmentSelector.addCandidateAlignment(MemoryConstants::pageSize64k, true, AlignmentSelector::anyWastage, HeapIndex::HEAP_STANDARD64KB);

    if (DebugManager.flags.AlignLocalMemoryVaTo2MB.get() != 0) {
        alignmentSelector.addCandidateAlignment(MemoryConstants::pageSize2Mb, false, AlignmentSelector::anyWastage, HeapIndex::HEAP_STANDARD2MB);
    }

    // Optional experiment: a caller-chosen alignment routed to the heap that fits it.
    const auto debugCustomAlignment = static_cast<size_t>(DebugManager.flags.ExperimentalEnableCustomLocalMemoryAlignment.get());
    if (debugCustomAlignment > 0) {
        const auto targetHeap = debugCustomAlignment >= MemoryConstants::pageSize2Mb ? HeapIndex::HEAP_STANDARD2MB : HeapIndex::HEAP_STANDARD64KB;
        alignmentSelector.addCandidateAlignment(debugCustomAlignment, true, AlignmentSelector::anyWastage, targetHeap);
    }

    initialize(mode);
}
void DrmMemoryManager : : initialize ( gemCloseWorkerMode mode ) {
2021-06-28 21:10:21 +08:00
bool disableGemCloseWorker = true ;
2019-10-22 16:26:23 +08:00
for ( uint32_t rootDeviceIndex = 0 ; rootDeviceIndex < gfxPartitions . size ( ) ; + + rootDeviceIndex ) {
2020-02-18 20:29:30 +08:00
auto gpuAddressSpace = executionEnvironment . rootDeviceEnvironments [ rootDeviceIndex ] - > getHardwareInfo ( ) - > capabilityTable . gpuAddressSpace ;
2020-09-18 22:19:41 +08:00
if ( ! getGfxPartition ( rootDeviceIndex ) - > init ( gpuAddressSpace , getSizeToReserve ( ) , rootDeviceIndex , gfxPartitions . size ( ) , heapAssigner . apiAllowExternalHeapForSshAndDsh ) ) {
initialized = false ;
return ;
}
2020-09-10 18:36:44 +08:00
localMemAllocs . emplace_back ( ) ;
2021-06-28 21:10:21 +08:00
disableGemCloseWorker & = getDrm ( rootDeviceIndex ) . isVmBindAvailable ( ) ;
2019-10-22 16:26:23 +08:00
}
2020-10-01 17:48:08 +08:00
2021-06-28 21:10:21 +08:00
if ( disableGemCloseWorker ) {
2021-02-19 20:05:32 +08:00
mode = gemCloseWorkerMode : : gemCloseWorkerInactive ;
}
2020-10-01 17:48:08 +08:00
if ( DebugManager . flags . EnableGemCloseWorker . get ( ) ! = - 1 ) {
mode = DebugManager . flags . EnableGemCloseWorker . get ( ) ? gemCloseWorkerMode : : gemCloseWorkerActive : gemCloseWorkerMode : : gemCloseWorkerInactive ;
}
2017-12-21 07:45:38 +08:00
if ( mode ! = gemCloseWorkerMode : : gemCloseWorkerInactive ) {
gemCloseWorker . reset ( new DrmGemCloseWorker ( * this ) ) ;
}
2020-02-21 20:41:02 +08:00
for ( uint32_t rootDeviceIndex = 0 ; rootDeviceIndex < gfxPartitions . size ( ) ; + + rootDeviceIndex ) {
if ( forcePinEnabled | | validateHostPtrMemory ) {
2020-04-29 02:30:50 +08:00
auto cpuAddrBo = alignedMallocWrapper ( MemoryConstants : : pageSize , MemoryConstants : : pageSize ) ;
2020-05-13 02:37:26 +08:00
UNRECOVERABLE_IF ( cpuAddrBo = = nullptr ) ;
2020-04-29 02:30:50 +08:00
// Preprogram the Bo with MI_BATCH_BUFFER_END and MI_NOOP. This BO will be used as the last BB in a series to indicate the end of submission.
reinterpret_cast < uint32_t * > ( cpuAddrBo ) [ 0 ] = 0x05000000 ; // MI_BATCH_BUFFER_END
reinterpret_cast < uint32_t * > ( cpuAddrBo ) [ 1 ] = 0 ; // MI_NOOP
memoryForPinBBs . push_back ( cpuAddrBo ) ;
2020-02-24 19:46:03 +08:00
DEBUG_BREAK_IF ( memoryForPinBBs [ rootDeviceIndex ] = = nullptr ) ;
2020-02-21 20:41:02 +08:00
}
2021-10-07 15:26:03 +08:00
pinBBs . push_back ( createRootDeviceBufferObject ( rootDeviceIndex ) ) ;
2017-12-21 07:45:38 +08:00
}
2020-09-18 22:19:41 +08:00
initialized = true ;
2017-12-21 07:45:38 +08:00
}
2021-10-07 15:26:03 +08:00
// Wraps the preprogrammed pinning page for the given root device in a
// userptr BufferObject. Returns nullptr when pinning/validation is disabled
// or when the userptr creation fails (in which case the backing CPU memory
// is released as well).
BufferObject *DrmMemoryManager::createRootDeviceBufferObject(uint32_t rootDeviceIndex) {
    BufferObject *pinBB = nullptr;
    if (forcePinEnabled || validateHostPtrMemory) {
        pinBB = allocUserptr(reinterpret_cast<uintptr_t>(memoryForPinBBs[rootDeviceIndex]), MemoryConstants::pageSize, rootDeviceIndex);
        if (pinBB) {
            if (isLimitedRange(rootDeviceIndex)) {
                // Limited-range devices need an explicitly reserved GPU VA.
                auto reservedSize = pinBB->peekSize();
                pinBB->setAddress(acquireGpuRange(reservedSize, rootDeviceIndex, HeapIndex::HEAP_STANDARD));
                UNRECOVERABLE_IF(reservedSize < pinBB->peekSize());
            }
        } else {
            alignedFreeWrapper(memoryForPinBBs[rootDeviceIndex]);
            memoryForPinBBs[rootDeviceIndex] = nullptr;
            DEBUG_BREAK_IF(true);
            // Host-pointer validation cannot work without the pin BB.
            UNRECOVERABLE_IF(validateHostPtrMemory);
        }
    }
    return pinBB;
}
void DrmMemoryManager : : createDeviceSpecificMemResources ( uint32_t rootDeviceIndex ) {
pinBBs [ rootDeviceIndex ] = createRootDeviceBufferObject ( rootDeviceIndex ) ;
}
2017-12-21 07:45:38 +08:00
// Destructor: frees the CPU pages that backed the pinning buffer objects.
// The BOs themselves are released earlier via commonCleanup()/releaseBufferObject().
DrmMemoryManager::~DrmMemoryManager() {
    for (auto &pinBBMemory : memoryForPinBBs) {
        if (pinBBMemory) {
            MemoryManager::alignedFreeWrapper(pinBBMemory);
        }
    }
}
2021-10-07 15:26:03 +08:00
void DrmMemoryManager : : releaseDeviceSpecificMemResources ( uint32_t rootDeviceIndex ) {
return releaseBufferObject ( rootDeviceIndex ) ;
}
void DrmMemoryManager : : releaseBufferObject ( uint32_t rootDeviceIndex ) {
if ( auto bo = pinBBs [ rootDeviceIndex ] ) {
if ( isLimitedRange ( rootDeviceIndex ) ) {
2021-10-21 19:49:50 +08:00
releaseGpuRange ( reinterpret_cast < void * > ( bo - > peekAddress ( ) ) , bo - > peekSize ( ) , rootDeviceIndex ) ;
2021-10-07 15:26:03 +08:00
}
DrmMemoryManager : : unreference ( bo , true ) ;
pinBBs [ rootDeviceIndex ] = nullptr ;
}
}
2020-02-12 00:48:40 +08:00
void DrmMemoryManager : : commonCleanup ( ) {
2017-12-21 07:45:38 +08:00
if ( gemCloseWorker ) {
2021-12-22 21:14:52 +08:00
gemCloseWorker - > close ( true ) ;
2017-12-21 07:45:38 +08:00
}
2020-04-29 02:30:50 +08:00
for ( uint32_t rootDeviceIndex = 0 ; rootDeviceIndex < pinBBs . size ( ) ; + + rootDeviceIndex ) {
2021-10-07 15:26:03 +08:00
releaseBufferObject ( rootDeviceIndex ) ;
2019-08-05 19:34:29 +08:00
}
2020-02-21 20:41:02 +08:00
pinBBs . clear ( ) ;
2017-12-21 07:45:38 +08:00
}
2019-03-26 18:59:46 +08:00
void DrmMemoryManager : : eraseSharedBufferObject ( NEO : : BufferObject * bo ) {
2017-12-21 07:45:38 +08:00
auto it = std : : find ( sharingBufferObjects . begin ( ) , sharingBufferObjects . end ( ) , bo ) ;
DEBUG_BREAK_IF ( it = = sharingBufferObjects . end ( ) ) ;
2021-10-21 19:49:50 +08:00
releaseGpuRange ( reinterpret_cast < void * > ( ( * it ) - > peekAddress ( ) ) , ( * it ) - > peekUnmapSize ( ) , this - > getRootDeviceIndex ( bo - > peekDrm ( ) ) ) ;
2017-12-21 07:45:38 +08:00
sharingBufferObjects . erase ( it ) ;
}
2019-03-26 18:59:46 +08:00
void DrmMemoryManager : : pushSharedBufferObject ( NEO : : BufferObject * bo ) {
2021-10-21 19:49:50 +08:00
bo - > markAsReusableAllocation ( ) ;
2017-12-21 07:45:38 +08:00
sharingBufferObjects . push_back ( bo ) ;
}
2019-03-26 18:59:46 +08:00
// Drops one reference on a BO. With synchronousDestroy the caller spins
// until it holds the last reference, guaranteeing destruction happens here.
// Reusable (shared) BOs are erased from the registry under the manager
// mutex before the GEM handle is closed. Returns the post-decrement
// reference count, or uint32_t(-1) for a null BO.
uint32_t DrmMemoryManager::unreference(NEO::BufferObject *bo, bool synchronousDestroy) {
    if (!bo)
        return -1;

    if (synchronousDestroy) {
        // Busy-wait until we are the last owner.
        while (bo->getRefCount() > 1)
            ;
    }

    std::unique_lock<std::mutex> registryLock(mtx, std::defer_lock);
    if (bo->peekIsReusableAllocation()) {
        // Only shared BOs touch the registry, so only they need the lock.
        registryLock.lock();
    }

    uint32_t remainingRefs = bo->unreference();
    if (remainingRefs == 1) {
        if (bo->peekIsReusableAllocation()) {
            eraseSharedBufferObject(bo);
        }
        bo->close();
        if (registryLock) {
            registryLock.unlock();
        }
        delete bo;
    }
    return remainingRefs;
}
2021-03-10 06:41:46 +08:00
// Reserves a GPU VA range of at least `size` bytes from the requested heap
// of the device's gfx partition; `size` may be updated by the allocator.
// Returns the canonized address.
uint64_t DrmMemoryManager::acquireGpuRange(size_t &size, uint32_t rootDeviceIndex, HeapIndex heapIndex) {
    auto partition = getGfxPartition(rootDeviceIndex);
    return getGmmHelper(rootDeviceIndex)->canonize(partition->heapAllocate(heapIndex, size));
}
2019-10-22 16:26:23 +08:00
void DrmMemoryManager : : releaseGpuRange ( void * address , size_t unmapSize , uint32_t rootDeviceIndex ) {
2018-11-16 02:43:12 +08:00
uint64_t graphicsAddress = static_cast < uint64_t > ( reinterpret_cast < uintptr_t > ( address ) ) ;
2022-04-26 03:34:32 +08:00
auto gmmHelper = getGmmHelper ( rootDeviceIndex ) ;
graphicsAddress = gmmHelper - > decanonize ( graphicsAddress ) ;
2019-10-22 16:26:23 +08:00
auto gfxPartition = getGfxPartition ( rootDeviceIndex ) ;
2019-07-29 23:50:46 +08:00
gfxPartition - > freeGpuAddressRange ( graphicsAddress , unmapSize ) ;
2018-11-16 02:43:12 +08:00
}
2021-02-03 22:53:13 +08:00
bool DrmMemoryManager : : isKmdMigrationAvailable ( uint32_t rootDeviceIndex ) {
auto hwInfo = executionEnvironment . rootDeviceEnvironments [ rootDeviceIndex ] - > getHardwareInfo ( ) ;
auto & hwHelper = NEO : : HwHelper : : get ( hwInfo - > platform . eRenderCoreFamily ) ;
2021-07-01 23:18:23 +08:00
auto useKmdMigration = hwHelper . isKmdMigrationSupported ( * hwInfo ) ;
2021-02-03 22:53:13 +08:00
if ( DebugManager . flags . UseKmdMigration . get ( ) ! = - 1 ) {
useKmdMigration = DebugManager . flags . UseKmdMigration . get ( ) ;
}
return useKmdMigration ;
}
2021-10-11 17:27:26 +08:00
bool DrmMemoryManager : : setMemAdvise ( GraphicsAllocation * gfxAllocation , MemAdviseFlags flags , uint32_t rootDeviceIndex ) {
auto drmAllocation = static_cast < DrmAllocation * > ( gfxAllocation ) ;
2021-10-07 07:22:22 +08:00
2021-10-11 17:27:26 +08:00
return drmAllocation - > setMemAdvise ( & this - > getDrm ( rootDeviceIndex ) , flags ) ;
2021-10-07 07:22:22 +08:00
}
2022-03-22 23:54:48 +08:00
bool DrmMemoryManager : : setMemPrefetch ( GraphicsAllocation * gfxAllocation , uint32_t subDeviceId , uint32_t rootDeviceIndex ) {
2022-03-02 21:38:28 +08:00
auto drmAllocation = static_cast < DrmAllocation * > ( gfxAllocation ) ;
2022-07-29 22:28:59 +08:00
auto osContextLinux = static_cast < OsContextLinux * > ( registeredEngines [ defaultEngineIndex [ rootDeviceIndex ] ] . osContext ) ;
auto vmHandleId = subDeviceId ;
auto retVal = drmAllocation - > bindBOs ( osContextLinux , vmHandleId , nullptr , true ) ;
if ( retVal ! = 0 ) {
DEBUG_BREAK_IF ( true ) ;
return false ;
}
2022-03-02 21:38:28 +08:00
2022-03-22 23:54:48 +08:00
return drmAllocation - > setMemPrefetch ( & this - > getDrm ( rootDeviceIndex ) , subDeviceId ) ;
2022-03-02 21:38:28 +08:00
}
2022-06-07 01:09:35 +08:00
// Creates a GEM userptr object over existing CPU memory and wraps it in a
// BufferObject whose GPU address initially mirrors the CPU address.
// Returns nullptr if the ioctl or the BufferObject allocation fails.
NEO::BufferObject *DrmMemoryManager::allocUserptr(uintptr_t address, size_t size, uint32_t rootDeviceIndex) {
    GemUserPtr userptr = {};
    userptr.userPtr = address;
    userptr.userSize = size;

    auto &drm = this->getDrm(rootDeviceIndex);
    auto ioctlHelper = drm.getIoctlHelper();

    if (ioctlHelper->ioctl(DrmIoctl::GemUserptr, &userptr) != 0) {
        return nullptr;
    }

    PRINT_DEBUG_STRING(DebugManager.flags.PrintBOCreateDestroyResult.get(), stdout, "Created new BO with GEM_USERPTR, handle: BO-%d\n", userptr.handle);

    auto patIndex = drm.getPatIndex(nullptr, AllocationType::EXTERNAL_HOST_PTR, CacheRegion::Default, CachePolicy::WriteBack, false);
    auto bufferObject = new (std::nothrow) BufferObject(&drm, patIndex, userptr.handle, size, maxOsContextCount);
    if (!bufferObject) {
        DEBUG_BREAK_IF(true);
        return nullptr;
    }
    bufferObject->setAddress(address);

    return bufferObject;
}
2019-02-25 21:11:34 +08:00
void DrmMemoryManager : : emitPinningRequest ( BufferObject * bo , const AllocationData & allocationData ) const {
2021-04-20 20:24:04 +08:00
auto rootDeviceIndex = allocationData . rootDeviceIndex ;
if ( forcePinEnabled & & pinBBs . at ( rootDeviceIndex ) ! = nullptr & & allocationData . flags . forcePin & & allocationData . size > = this - > pinThreshold ) {
pinBBs . at ( rootDeviceIndex ) - > pin ( & bo , 1 , registeredEngines [ defaultEngineIndex [ rootDeviceIndex ] ] . osContext , 0 , getDefaultDrmContextId ( rootDeviceIndex ) ) ;
2019-02-25 21:11:34 +08:00
}
}
2022-07-27 00:39:17 +08:00
// Builds a DrmAllocation around pre-populated host-pointer fragments.
// The GPU address is the canonized host pointer; no BO is attached at this
// level (the fragments carry their own). Returns nullptr if the requested
// cache region cannot be applied.
GraphicsAllocation *DrmMemoryManager::createGraphicsAllocation(OsHandleStorage &handleStorage, const AllocationData &allocationData) {
    auto hostPtr = const_cast<void *>(allocationData.hostPtr);
    auto canonizedGpuAddress = getGmmHelper(allocationData.rootDeviceIndex)->canonize(castToUint64(hostPtr));
    auto allocation = std::make_unique<DrmAllocation>(allocationData.rootDeviceIndex, allocationData.type, nullptr, hostPtr,
                                                      canonizedGpuAddress, allocationData.size, MemoryPool::System4KBPages);
    allocation->fragmentsStorage = handleStorage;
    if (!allocation->setCacheRegion(&this->getDrm(allocationData.rootDeviceIndex), static_cast<CacheRegion>(allocationData.cacheRegion))) {
        return nullptr;
    }
    return allocation.release();
}
2022-07-27 00:39:17 +08:00
// Routes aligned system-memory allocations: debugger context-save / SBA
// tracking buffers spanning multiple sub-devices get one host allocation
// per sub-device; everything else takes the regular path.
GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryWithAlignment(const AllocationData &allocationData) {
    const bool isDebugSurface = allocationData.type == NEO::AllocationType::DEBUG_CONTEXT_SAVE_AREA ||
                                allocationData.type == NEO::AllocationType::DEBUG_SBA_TRACKING_BUFFER;
    if (isDebugSurface && allocationData.storageInfo.subDeviceBitfield.count() > 1) {
        return createMultiHostAllocation(allocationData);
    }
    return allocateGraphicsMemoryWithAlignmentImpl(allocationData);
}
// Core aligned system-memory allocation. Rounds size/alignment up to the
// userptr granularity, reserves a GPU VA range when the device is
// limited-range or the allocation is SVM_CPU (padding the reservation so a
// properly aligned address exists inside it), and records the reservation
// on the resulting allocation for later release.
DrmAllocation *DrmMemoryManager::allocateGraphicsMemoryWithAlignmentImpl(const AllocationData &allocationData) {
    const size_t minAlignment = getUserptrAlignment();
    size_t effectiveAlignment = alignUp(std::max(allocationData.alignment, minAlignment), minAlignment);
    // When size == 0 allocate allocationAlignment
    // It's needed to prevent overlapping pages with user pointers
    size_t effectiveSize = std::max(alignUp(allocationData.size, minAlignment), minAlignment);

    uint64_t gpuReservationAddress = 0;
    uint64_t alignedGpuAddress = 0;
    size_t alignedStorageSize = effectiveSize;
    size_t alignedVirtualAdressRangeSize = effectiveSize;
    auto svmCpuAllocation = allocationData.type == AllocationType::SVM_CPU;
    if (svmCpuAllocation) {
        // add padding in case reserved addr is not aligned
        alignedStorageSize = alignUp(effectiveSize, effectiveAlignment);
        alignedVirtualAdressRangeSize = alignedStorageSize + effectiveAlignment;
    }

    // if limitedRangeAlloction is enabled, memory allocation for bo in the limited Range heap is required
    if ((isLimitedRange(allocationData.rootDeviceIndex) || svmCpuAllocation) && !allocationData.flags.isUSMHostAllocation) {
        gpuReservationAddress = acquireGpuRange(alignedVirtualAdressRangeSize, allocationData.rootDeviceIndex, HeapIndex::HEAP_STANDARD);
        if (!gpuReservationAddress) {
            return nullptr;
        }

        alignedGpuAddress = gpuReservationAddress;
        if (svmCpuAllocation) {
            alignedGpuAddress = alignUp(gpuReservationAddress, effectiveAlignment);
        }
    }

    auto drmAllocation = createAllocWithAlignment(allocationData, effectiveSize, effectiveAlignment, alignedStorageSize, alignedGpuAddress);
    if (drmAllocation != nullptr) {
        drmAllocation->setReservedAddressRange(reinterpret_cast<void *>(gpuReservationAddress), alignedVirtualAdressRangeSize);
    }
    return drmAllocation;
}
2019-02-08 03:29:30 +08:00
2020-10-05 15:57:50 +08:00
// Allocates aligned host memory, wraps it in a userptr BO, optionally zeros
// it and places it at the pre-reserved GPU address, then produces the
// DrmAllocation that owns both the CPU pointer and the BO. All intermediate
// failures unwind the CPU allocation.
DrmAllocation *DrmMemoryManager::createAllocWithAlignmentFromUserptr(const AllocationData &allocationData, size_t size, size_t alignment, size_t alignedSVMSize, uint64_t gpuAddress) {
    auto cpuPtr = alignedMallocWrapper(size, alignment);
    if (!cpuPtr) {
        return nullptr;
    }

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(cpuPtr), size, allocationData.rootDeviceIndex));
    if (!bo) {
        alignedFreeWrapper(cpuPtr);
        return nullptr;
    }

    zeroCpuMemoryIfRequested(allocationData, cpuPtr, size);
    obtainGpuAddress(allocationData, bo.get(), gpuAddress);
    emitPinningRequest(bo.get(), allocationData);

    auto canonizedGpuAddress = getGmmHelper(allocationData.rootDeviceIndex)->canonize(bo->peekAddress());
    auto allocation = std::make_unique<DrmAllocation>(allocationData.rootDeviceIndex, allocationData.type, bo.get(), cpuPtr,
                                                      canonizedGpuAddress, size, MemoryPool::System4KBPages);
    allocation->setDriverAllocatedCpuPtr(cpuPtr);
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuAddress), alignedSVMSize);
    if (!allocation->setCacheRegion(&this->getDrm(allocationData.rootDeviceIndex), static_cast<CacheRegion>(allocationData.cacheRegion))) {
        alignedFreeWrapper(cpuPtr);
        return nullptr;
    }

    // Ownership of the BO transfers to the allocation.
    bo.release();
    return allocation.release();
}
2020-10-05 15:57:50 +08:00
void DrmMemoryManager : : obtainGpuAddress ( const AllocationData & allocationData , BufferObject * bo , uint64_t gpuAddress ) {
2022-02-04 21:59:01 +08:00
if ( ( isLimitedRange ( allocationData . rootDeviceIndex ) | | allocationData . type = = AllocationType : : SVM_CPU ) & &
2020-10-05 15:57:50 +08:00
! allocationData . flags . isUSMHostAllocation ) {
2021-10-21 19:49:50 +08:00
bo - > setAddress ( gpuAddress ) ;
2020-10-05 15:57:50 +08:00
}
}
2022-07-27 00:39:17 +08:00
// Creates a USM host allocation over a caller-provided host pointer:
// wraps it in a userptr BO, reserves a GPU VA on limited-range devices,
// optionally pins, and hands ownership of the BO to the DrmAllocation.
GraphicsAllocation *DrmMemoryManager::allocateUSMHostGraphicsMemory(const AllocationData &allocationData) {
    const size_t minAlignment = getUserptrAlignment();
    // When size == 0 allocate allocationAlignment
    // It's needed to prevent overlapping pages with user pointers
    size_t alignedSize = std::max(alignUp(allocationData.size, minAlignment), minAlignment);

    void *hostPtr = const_cast<void *>(allocationData.hostPtr);
    DEBUG_BREAK_IF(nullptr == hostPtr);

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(hostPtr),
                                                                         alignedSize,
                                                                         allocationData.rootDeviceIndex));
    if (!bo) {
        return nullptr;
    }

    // if limitedRangeAlloction is enabled, memory allocation for bo in the limited Range heap is required
    uint64_t gpuAddress = 0;
    if (isLimitedRange(allocationData.rootDeviceIndex)) {
        gpuAddress = acquireGpuRange(alignedSize, allocationData.rootDeviceIndex, HeapIndex::HEAP_STANDARD);
        if (!gpuAddress) {
            return nullptr;
        }
        bo->setAddress(gpuAddress);
    }

    emitPinningRequest(bo.get(), allocationData);

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex,
                                        allocationData.type,
                                        bo.get(),
                                        hostPtr,
                                        bo->peekAddress(),
                                        alignedSize,
                                        MemoryPool::System4KBPages);
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuAddress), alignedSize);
    bo.release();
    return allocation;
}
2022-07-27 00:39:17 +08:00
// Delegates to the base-class host-pointer path, then emits a pinning
// request unless host-pointer validation already covers it.
GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryWithHostPtr(const AllocationData &allocationData) {
    auto allocation = static_cast<DrmAllocation *>(MemoryManager::allocateGraphicsMemoryWithHostPtr(allocationData));
    if (allocation != nullptr && !validateHostPtrMemory) {
        emitPinningRequest(allocation->getBO(), allocationData);
    }
    return allocation;
}
2020-07-01 16:38:19 +08:00
// Allocates host memory placed at a caller-specified GPU VA (taken from
// allocationData.gpuAddress, which must be non-zero). Multi-sub-device SBA
// tracking buffers are redirected to per-host allocations instead.
GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryWithGpuVa(const AllocationData &allocationData) {
    if (allocationData.type == NEO::AllocationType::DEBUG_SBA_TRACKING_BUFFER &&
        allocationData.storageInfo.subDeviceBitfield.count() > 1) {
        return createMultiHostAllocation(allocationData);
    }

    auto osContextLinux = static_cast<OsContextLinux *>(allocationData.osContext);

    const size_t minAlignment = getUserptrAlignment();
    size_t alignedSize = alignUp(allocationData.size, minAlignment);

    auto cpuPtr = alignedMallocWrapper(alignedSize, minAlignment);
    if (!cpuPtr)
        return nullptr;

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(cpuPtr), alignedSize, allocationData.rootDeviceIndex));
    if (!bo) {
        alignedFreeWrapper(cpuPtr);
        return nullptr;
    }

    UNRECOVERABLE_IF(allocationData.gpuAddress == 0);
    bo->setAddress(allocationData.gpuAddress);

    // Pin through the device's pin BB on the provided context when eligible.
    BufferObject *boPtr = bo.get();
    if (forcePinEnabled && pinBBs.at(allocationData.rootDeviceIndex) != nullptr && alignedSize >= this->pinThreshold) {
        pinBBs.at(allocationData.rootDeviceIndex)->pin(&boPtr, 1, osContextLinux, 0, osContextLinux->getDrmContextIds()[0]);
    }

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), cpuPtr, bo->peekAddress(), alignedSize, MemoryPool::System4KBPages);
    allocation->setDriverAllocatedCpuPtr(cpuPtr);
    bo.release();
    return allocation;
}
2022-07-27 00:39:17 +08:00
// Maps an arbitrary (page-unaligned) host pointer for GPU access: aligns
// the pointer down to a page, reserves a GPU VA range for the whole-page
// span, wraps it in a userptr BO, and optionally validates the host memory
// through the pin BB. The page offset of the original pointer is recorded
// on the allocation. Failures release the VA reservation.
GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryForNonSvmHostPtr(const AllocationData &allocationData) {
    if (allocationData.size == 0 || !allocationData.hostPtr)
        return nullptr;

    auto alignedPtr = alignDown(allocationData.hostPtr, MemoryConstants::pageSize);
    auto alignedSize = alignSizeWholePage(allocationData.hostPtr, allocationData.size);
    auto realAllocationSize = alignedSize;
    auto offsetInPage = ptrDiff(allocationData.hostPtr, alignedPtr);
    auto rootDeviceIndex = allocationData.rootDeviceIndex;

    auto gpuVirtualAddress = acquireGpuRange(alignedSize, rootDeviceIndex, HeapIndex::HEAP_STANDARD);
    if (!gpuVirtualAddress) {
        return nullptr;
    }

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(alignedPtr), realAllocationSize, rootDeviceIndex));
    if (!bo) {
        releaseGpuRange(reinterpret_cast<void *>(gpuVirtualAddress), alignedSize, rootDeviceIndex);
        return nullptr;
    }
    bo->setAddress(gpuVirtualAddress);

    if (validateHostPtrMemory) {
        auto boPtr = bo.get();
        // VM handle = lowest set bit of the sub-device mask.
        auto vmHandleId = Math::getMinLsbSet(static_cast<uint32_t>(allocationData.storageInfo.subDeviceBitfield.to_ulong()));
        auto defaultContext = getDefaultEngineContext(rootDeviceIndex, allocationData.storageInfo.subDeviceBitfield);
        int validationResult = pinBBs.at(rootDeviceIndex)->validateHostPtr(&boPtr, 1, defaultContext, vmHandleId, static_cast<OsContextLinux *>(defaultContext)->getDrmContextIds()[0]);
        if (validationResult != 0) {
            unreference(bo.release(), true);
            releaseGpuRange(reinterpret_cast<void *>(gpuVirtualAddress), alignedSize, rootDeviceIndex);
            return nullptr;
        }
    }

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), const_cast<void *>(allocationData.hostPtr),
                                        gpuVirtualAddress, allocationData.size, MemoryPool::System4KBPages);
    allocation->setAllocationOffset(offsetInPage);
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuVirtualAddress), alignedSize);
    bo.release();
    return allocation;
}
2022-07-27 00:39:17 +08:00
// 64KB-page allocations are not supported on this path; always nullptr.
GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemory64kb(const AllocationData &allocationData) {
    return nullptr;
}
2021-08-11 18:36:04 +08:00
// Allocates CPU-inaccessible memory through the kernel-mode driver
// (GEM_CREATE): builds a Gmm describing the resource, reserves a GPU VA in
// the 64KB heap, creates the BO over the new GEM handle, and returns a
// DrmAllocation owning both Gmm and BO.
GraphicsAllocation *DrmMemoryManager::allocateMemoryByKMD(const AllocationData &allocationData) {
    auto hwInfo = executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getHardwareInfo();
    StorageInfo systemMemoryStorageInfo = {};
    auto gmm = std::make_unique<Gmm>(executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getGmmHelper(), allocationData.hostPtr,
                                     allocationData.size, 0u, CacheSettingsHelper::getGmmUsageType(allocationData.type, allocationData.flags.uncacheable, *hwInfo), false, systemMemoryStorageInfo, true);

    size_t bufferSize = allocationData.size;
    uint64_t gpuRange = acquireGpuRange(bufferSize, allocationData.rootDeviceIndex, HeapIndex::HEAP_STANDARD64KB);

    GemCreate create{};
    create.size = bufferSize;

    auto &drm = getDrm(allocationData.rootDeviceIndex);
    auto ioctlHelper = drm.getIoctlHelper();

    [[maybe_unused]] auto ret = ioctlHelper->ioctl(DrmIoctl::GemCreate, &create);
    DEBUG_BREAK_IF(ret != 0);

    auto patIndex = drm.getPatIndex(gmm.get(), allocationData.type, CacheRegion::Default, CachePolicy::WriteBack, false);
    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(new BufferObject(&drm, patIndex, create.handle, bufferSize, maxOsContextCount));
    bo->setAddress(gpuRange);

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), nullptr, gpuRange, bufferSize, MemoryPool::SystemCpuInaccessible);
    allocation->setDefaultGmm(gmm.release());
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuRange), bufferSize);

    bo.release();
    return allocation;
}
2019-01-22 19:40:17 +08:00
GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryForImageImpl(const AllocationData &allocationData, std::unique_ptr<Gmm> gmm) {
    // Linear images need no tiling setup; reuse the plain aligned-memory path
    // and attach the Gmm to the resulting allocation.
    if (allocationData.imgInfo->linearStorage) {
        auto linearAllocation = allocateGraphicsMemoryWithAlignment(allocationData);
        if (linearAllocation) {
            linearAllocation->setDefaultGmm(gmm.release());
        }
        return linearAllocation;
    }

    const auto imageSize = allocationData.imgInfo->size;
    uint64_t gpuAddressRange = acquireGpuRange(imageSize, allocationData.rootDeviceIndex, HeapIndex::HEAP_STANDARD);

    GemCreate createArgs{};
    createArgs.size = imageSize;

    auto &drm = this->getDrm(allocationData.rootDeviceIndex);
    auto ioctlHelper = drm.getIoctlHelper();

    [[maybe_unused]] auto createResult = ioctlHelper->ioctl(DrmIoctl::GemCreate, &createArgs);
    DEBUG_BREAK_IF(createResult != 0);

    auto patIndex = drm.getPatIndex(gmm.get(), allocationData.type, CacheRegion::Default, CachePolicy::WriteBack, false);
    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(new (std::nothrow) BufferObject(&drm, patIndex, createArgs.handle, imageSize, maxOsContextCount));
    if (!bo) {
        return nullptr;
    }
    bo->setAddress(gpuAddressRange);

    // Images on this path are Y-tiled; row pitch comes from the queried image info.
    [[maybe_unused]] auto tilingApplied = bo->setTiling(ioctlHelper->getDrmParamValue(DrmParam::TilingY), static_cast<uint32_t>(allocationData.imgInfo->rowPitch));
    DEBUG_BREAK_IF(tilingApplied != true);

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), nullptr, gpuAddressRange, imageSize, MemoryPool::SystemCpuInaccessible);
    allocation->setDefaultGmm(gmm.release());
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuAddressRange), imageSize);

    bo.release(); // ownership transferred to the DrmAllocation
    return allocation;
}
2022-07-27 00:39:17 +08:00
GraphicsAllocation * DrmMemoryManager : : allocate32BitGraphicsMemoryImpl ( const AllocationData & allocationData , bool useLocalMemory ) {
2020-08-27 14:55:09 +08:00
auto hwInfo = executionEnvironment . rootDeviceEnvironments [ allocationData . rootDeviceIndex ] - > getHardwareInfo ( ) ;
2020-10-21 16:48:09 +08:00
auto allocatorToUse = heapAssigner . get32BitHeapIndex ( allocationData . type , useLocalMemory , * hwInfo , allocationData . flags . use32BitFrontWindow ) ;
2018-02-28 22:12:10 +08:00
2018-12-21 17:16:27 +08:00
if ( allocationData . hostPtr ) {
uintptr_t inputPtr = reinterpret_cast < uintptr_t > ( allocationData . hostPtr ) ;
auto allocationSize = alignSizeWholePage ( allocationData . hostPtr , allocationData . size ) ;
2017-12-21 07:45:38 +08:00
auto realAllocationSize = allocationSize ;
2019-10-22 16:26:23 +08:00
auto gfxPartition = getGfxPartition ( allocationData . rootDeviceIndex ) ;
2019-07-29 23:50:46 +08:00
auto gpuVirtualAddress = gfxPartition - > heapAllocate ( allocatorToUse , realAllocationSize ) ;
2017-12-21 07:45:38 +08:00
if ( ! gpuVirtualAddress ) {
return nullptr ;
}
2018-12-21 17:16:27 +08:00
auto alignedUserPointer = reinterpret_cast < uintptr_t > ( alignDown ( allocationData . hostPtr , MemoryConstants : : pageSize ) ) ;
2017-12-21 07:45:38 +08:00
auto inputPointerOffset = inputPtr - alignedUserPointer ;
2022-06-07 01:09:35 +08:00
std : : unique_ptr < BufferObject , BufferObject : : Deleter > bo ( allocUserptr ( alignedUserPointer , allocationSize , allocationData . rootDeviceIndex ) ) ;
2017-12-21 07:45:38 +08:00
if ( ! bo ) {
2019-07-29 23:50:46 +08:00
gfxPartition - > heapFree ( allocatorToUse , gpuVirtualAddress , realAllocationSize ) ;
2017-12-21 07:45:38 +08:00
return nullptr ;
}
2021-10-21 00:16:59 +08:00
bo - > setAddress ( gpuVirtualAddress ) ;
2022-05-09 20:51:13 +08:00
auto gmmHelper = getGmmHelper ( allocationData . rootDeviceIndex ) ;
2022-05-30 22:18:50 +08:00
auto canonizedGpuAddress = gmmHelper - > canonize ( ptrOffset ( gpuVirtualAddress , inputPointerOffset ) ) ;
2022-04-29 21:28:15 +08:00
auto allocation = new DrmAllocation ( allocationData . rootDeviceIndex , allocationData . type , bo . get ( ) , const_cast < void * > ( allocationData . hostPtr ) ,
2022-05-30 22:18:50 +08:00
canonizedGpuAddress ,
2019-11-04 23:03:30 +08:00
allocationSize , MemoryPool : : System4KBPagesWith32BitGpuAddressing ) ;
2019-02-27 21:59:46 +08:00
allocation - > set32BitAllocation ( true ) ;
2022-04-29 21:28:15 +08:00
allocation - > setGpuBaseAddress ( gmmHelper - > canonize ( gfxPartition - > getHeapBase ( allocatorToUse ) ) ) ;
2019-08-05 19:34:29 +08:00
allocation - > setReservedAddressRange ( reinterpret_cast < void * > ( gpuVirtualAddress ) , realAllocationSize ) ;
2020-07-17 19:40:52 +08:00
bo . release ( ) ;
2019-02-08 03:29:30 +08:00
return allocation ;
2017-12-21 07:45:38 +08:00
}
2018-12-21 17:16:27 +08:00
size_t alignedAllocationSize = alignUp ( allocationData . size , MemoryConstants : : pageSize ) ;
2017-12-21 07:45:38 +08:00
auto allocationSize = alignedAllocationSize ;
2019-10-22 16:26:23 +08:00
auto gfxPartition = getGfxPartition ( allocationData . rootDeviceIndex ) ;
2022-07-09 06:53:42 +08:00
auto gpuVA = gfxPartition - > heapAllocate ( allocatorToUse , allocationSize ) ;
2017-12-21 07:45:38 +08:00
2020-10-14 15:50:07 +08:00
if ( ! gpuVA ) {
2017-12-21 07:45:38 +08:00
return nullptr ;
}
2020-08-28 14:37:26 +08:00
auto ptrAlloc = alignedMallocWrapper ( alignedAllocationSize , getUserptrAlignment ( ) ) ;
2019-04-25 16:32:56 +08:00
2019-06-25 22:13:45 +08:00
if ( ! ptrAlloc ) {
2020-10-14 15:50:07 +08:00
gfxPartition - > heapFree ( allocatorToUse , gpuVA , allocationSize ) ;
2019-06-25 22:13:45 +08:00
return nullptr ;
2018-12-21 01:47:43 +08:00
}
2022-06-07 01:09:35 +08:00
std : : unique_ptr < BufferObject , BufferObject : : Deleter > bo ( allocUserptr ( reinterpret_cast < uintptr_t > ( ptrAlloc ) , alignedAllocationSize , allocationData . rootDeviceIndex ) ) ;
2017-12-21 07:45:38 +08:00
if ( ! bo ) {
2019-06-25 22:13:45 +08:00
alignedFreeWrapper ( ptrAlloc ) ;
2020-10-14 15:50:07 +08:00
gfxPartition - > heapFree ( allocatorToUse , gpuVA , allocationSize ) ;
2017-12-21 07:45:38 +08:00
return nullptr ;
}
2021-10-21 00:16:59 +08:00
bo - > setAddress ( gpuVA ) ;
2022-05-09 20:51:13 +08:00
auto gmmHelper = getGmmHelper ( allocationData . rootDeviceIndex ) ;
2018-02-28 22:12:10 +08:00
2019-04-25 16:32:56 +08:00
// softpin to the GPU address, res if it uses limitedRange Allocation
2022-05-30 22:18:50 +08:00
auto canonizedGpuAddress = gmmHelper - > canonize ( gpuVA ) ;
2022-04-29 21:28:15 +08:00
auto allocation = new DrmAllocation ( allocationData . rootDeviceIndex , allocationData . type , bo . get ( ) , ptrAlloc ,
2022-05-30 22:18:50 +08:00
canonizedGpuAddress , alignedAllocationSize ,
2019-11-04 23:03:30 +08:00
MemoryPool : : System4KBPagesWith32BitGpuAddressing ) ;
2018-12-21 01:47:43 +08:00
2019-02-27 21:59:46 +08:00
allocation - > set32BitAllocation ( true ) ;
2022-04-29 21:28:15 +08:00
allocation - > setGpuBaseAddress ( gmmHelper - > canonize ( gfxPartition - > getHeapBase ( allocatorToUse ) ) ) ;
2019-06-25 22:13:45 +08:00
allocation - > setDriverAllocatedCpuPtr ( ptrAlloc ) ;
2020-10-14 15:50:07 +08:00
allocation - > setReservedAddressRange ( reinterpret_cast < void * > ( gpuVA ) , allocationSize ) ;
2020-07-17 19:40:52 +08:00
bo . release ( ) ;
2019-02-08 03:29:30 +08:00
return allocation ;
2017-12-21 07:45:38 +08:00
}
2021-09-11 17:37:42 +08:00
BufferObject *DrmMemoryManager::findAndReferenceSharedBufferObject(int boHandle, uint32_t rootDeviceIndex) {
    // Look up an already-imported BO by (handle, device); take an extra
    // reference on a hit so the caller co-owns it. Caller must hold mtx.
    for (const auto &sharedBo : sharingBufferObjects) {
        if (sharedBo->getHandle() == boHandle && sharedBo->getRootDeviceIndex() == rootDeviceIndex) {
            sharedBo->reference();
            return sharedBo;
        }
    }
    return nullptr;
}
2022-10-21 06:18:08 +08:00
GraphicsAllocation *DrmMemoryManager::createGraphicsAllocationFromMultipleSharedHandles(const std::vector<osHandle> &handles, AllocationProperties &properties, bool requireSpecificBitness, bool isHostIpcAllocation) {
    // Imports a set of dma-buf fds as one multi-BO allocation. Each fd is turned
    // into (or matched against) a BufferObject; newly created BOs get consecutive
    // GPU VA assigned from one contiguous range.
    BufferObjects bos;
    std::vector<size_t> sizes;
    size_t totalSize = 0;
    std::unique_lock<std::mutex> lock(mtx);
    uint32_t i = 0;
    if (handles.size() != 1) {
        properties.multiStorageResource = true;
    }
    auto &drm = this->getDrm(properties.rootDeviceIndex);
    bool areBosSharedObjects = true;
    auto ioctlHelper = drm.getIoctlHelper();

    for (auto handle : handles) {
        PrimeHandle openFd = {0, 0, 0};
        openFd.fileDescriptor = handle;

        auto ret = ioctlHelper->ioctl(DrmIoctl::PrimeFdToHandle, &openFd);

        if (ret != 0) {
            [[maybe_unused]] int err = errno;
            PRINT_DEBUG_STRING(DebugManager.flags.PrintDebugMessages.get(), stderr, "ioctl(PRIME_FD_TO_HANDLE) failed with %d. errno=%d(%s)\n", ret, err, strerror(err));
            return nullptr;
        }
        auto boHandle = openFd.handle;
        auto bo = findAndReferenceSharedBufferObject(boHandle, properties.rootDeviceIndex);
        if (bo == nullptr) {
            areBosSharedObjects = false;
            size_t size = lseekFunction(handle, 0, SEEK_END); // dma-buf size via lseek
            totalSize += size;
            auto patIndex = drm.getPatIndex(nullptr, properties.allocationType, CacheRegion::Default, CachePolicy::WriteBack, false);
            bo = new (std::nothrow) BufferObject(&drm, patIndex, boHandle, size, maxOsContextCount);
            // FIX: nothrow new can return nullptr; previously it was dereferenced unchecked.
            if (bo == nullptr) {
                return nullptr;
            }
            bo->setRootDeviceIndex(properties.rootDeviceIndex);
            i++;
        }
        bos.push_back(bo);
        sizes.push_back(bo->peekSize());
    }
    auto gfxPartition = getGfxPartition(properties.rootDeviceIndex);
    auto prefer57bitAddressing = (gfxPartition->getHeapLimit(HeapIndex::HEAP_EXTENDED) > 0);
    auto heapIndex = prefer57bitAddressing ? HeapIndex::HEAP_EXTENDED : HeapIndex::HEAP_STANDARD2MB;
    auto gpuRange = acquireGpuRange(totalSize, properties.rootDeviceIndex, heapIndex);
    lock.unlock();
    AllocationData allocationData;
    properties.size = totalSize;
    getAllocationData(allocationData, properties, nullptr, createStorageInfoFromProperties(properties));
    auto drmAllocation = new DrmAllocation(properties.rootDeviceIndex,
                                           handles.size(),
                                           properties.allocationType,
                                           bos,
                                           nullptr,
                                           gpuRange,
                                           totalSize,
                                           MemoryPool::LocalMemory);
    drmAllocation->storageInfo = allocationData.storageInfo;
    auto gmmHelper = executionEnvironment.rootDeviceEnvironments[properties.rootDeviceIndex]->getGmmHelper();
    for (i = 0u; i < handles.size(); i++) {
        auto bo = bos[i];
        // Restrict each handle's Gmm to its own memory bank.
        StorageInfo limitedStorageInfo = allocationData.storageInfo;
        limitedStorageInfo.memoryBanks &= (1u << (i % handles.size()));
        auto gmm = new Gmm(gmmHelper,
                           nullptr,
                           bo->peekSize(),
                           0u,
                           CacheSettingsHelper::getGmmUsageType(drmAllocation->getAllocationType(), false, *gmmHelper->getHardwareInfo()),
                           false,
                           limitedStorageInfo, // FIX: was allocationData.storageInfo, leaving limitedStorageInfo dead
                           true);
        drmAllocation->setGmm(gmm, i);
        if (areBosSharedObjects == false) {
            bo->setAddress(gpuRange);
            gpuRange += bo->peekSize();
            bo->setUnmapSize(sizes[i]);
            pushSharedBufferObject(bo);
        }
        drmAllocation->getBufferObjectToModify(i) = bo;
    }
    return drmAllocation;
}
2021-05-24 14:14:30 +08:00
GraphicsAllocation * DrmMemoryManager : : createGraphicsAllocationFromSharedHandle ( osHandle handle , const AllocationProperties & properties , bool requireSpecificBitness , bool isHostIpcAllocation ) {
if ( isHostIpcAllocation ) {
2021-06-09 08:12:25 +08:00
return createUSMHostAllocationFromSharedHandle ( handle , properties , false ) ;
2021-05-24 14:14:30 +08:00
}
2018-08-29 20:50:36 +08:00
std : : unique_lock < std : : mutex > lock ( mtx ) ;
2017-12-21 07:45:38 +08:00
2022-05-25 00:13:02 +08:00
PrimeHandle openFd { } ;
openFd . fileDescriptor = handle ;
2018-08-14 19:14:06 +08:00
2022-04-20 03:24:19 +08:00
auto & drm = this - > getDrm ( properties . rootDeviceIndex ) ;
2022-06-29 01:56:14 +08:00
auto ioctlHelper = drm . getIoctlHelper ( ) ;
2022-04-20 03:24:19 +08:00
2022-06-30 00:49:29 +08:00
auto ret = ioctlHelper - > ioctl ( DrmIoctl : : PrimeFdToHandle , & openFd ) ;
2018-08-14 19:14:06 +08:00
if ( ret ! = 0 ) {
2021-10-22 19:43:24 +08:00
[[maybe_unused]] int err = errno ;
2020-09-25 17:24:15 +08:00
PRINT_DEBUG_STRING ( DebugManager . flags . PrintDebugMessages . get ( ) , stderr , " ioctl(PRIME_FD_TO_HANDLE) failed with %d. errno=%d(%s) \n " , ret , err , strerror ( err ) ) ;
2021-10-22 19:43:24 +08:00
2018-08-14 19:14:06 +08:00
return nullptr ;
}
2017-12-21 07:45:38 +08:00
auto boHandle = openFd . handle ;
2021-09-11 17:37:42 +08:00
auto bo = findAndReferenceSharedBufferObject ( boHandle , properties . rootDeviceIndex ) ;
2017-12-21 07:45:38 +08:00
if ( bo = = nullptr ) {
size_t size = lseekFunction ( handle , 0 , SEEK_END ) ;
2021-03-02 19:53:34 +08:00
2022-04-20 03:24:19 +08:00
auto patIndex = drm . getPatIndex ( nullptr , properties . allocationType , CacheRegion : : Default , CachePolicy : : WriteBack , false ) ;
bo = new ( std : : nothrow ) BufferObject ( & drm , patIndex , boHandle , size , maxOsContextCount ) ;
2017-12-21 07:45:38 +08:00
if ( ! bo ) {
return nullptr ;
}
2022-11-08 02:39:23 +08:00
auto getHeapIndex = [ & ] {
if ( requireSpecificBitness & & this - > force32bitAllocations ) {
return HeapIndex : : HEAP_EXTERNAL ;
}
auto gfxPartition = getGfxPartition ( properties . rootDeviceIndex ) ;
auto prefer57bitAddressing = ( gfxPartition - > getHeapLimit ( HeapIndex : : HEAP_EXTENDED ) > 0 ) ;
if ( prefer57bitAddressing ) {
return HeapIndex : : HEAP_EXTENDED ;
}
if ( isLocalMemorySupported ( properties . rootDeviceIndex ) ) {
return HeapIndex : : HEAP_STANDARD2MB ;
}
return HeapIndex : : HEAP_STANDARD ;
} ;
auto heapIndex = getHeapIndex ( ) ;
2021-03-11 21:11:44 +08:00
auto gpuRange = acquireGpuRange ( size , properties . rootDeviceIndex , heapIndex ) ;
2021-03-02 19:53:34 +08:00
bo - > setAddress ( gpuRange ) ;
bo - > setUnmapSize ( size ) ;
2021-09-11 17:37:42 +08:00
bo - > setRootDeviceIndex ( properties . rootDeviceIndex ) ;
2021-03-02 19:53:34 +08:00
2018-08-29 20:50:36 +08:00
pushSharedBufferObject ( bo ) ;
2018-08-14 19:14:06 +08:00
}
2017-12-21 07:45:38 +08:00
2018-08-29 20:50:36 +08:00
lock . unlock ( ) ;
2022-06-03 19:48:45 +08:00
auto gmmHelper = getGmmHelper ( properties . rootDeviceIndex ) ;
auto canonizedGpuAddress = gmmHelper - > canonize ( castToUint64 ( reinterpret_cast < void * > ( bo - > peekAddress ( ) ) ) ) ;
2021-10-21 19:49:50 +08:00
auto drmAllocation = new DrmAllocation ( properties . rootDeviceIndex , properties . allocationType , bo , reinterpret_cast < void * > ( bo - > peekAddress ( ) ) , bo - > peekSize ( ) ,
2022-06-03 19:48:45 +08:00
handle , MemoryPool : : SystemCpuInaccessible , canonizedGpuAddress ) ;
2017-12-21 07:45:38 +08:00
if ( requireSpecificBitness & & this - > force32bitAllocations ) {
2019-02-27 21:59:46 +08:00
drmAllocation - > set32BitAllocation ( true ) ;
2022-05-09 20:51:13 +08:00
auto gmmHelper = getGmmHelper ( properties . rootDeviceIndex ) ;
2022-04-29 21:28:15 +08:00
drmAllocation - > setGpuBaseAddress ( gmmHelper - > canonize ( getExternalHeapBaseAddress ( properties . rootDeviceIndex , drmAllocation - > isAllocatedInLocalMemoryPool ( ) ) ) ) ;
2017-12-21 07:45:38 +08:00
}
2019-04-02 16:53:22 +08:00
if ( properties . imgInfo ) {
2022-05-16 22:41:00 +08:00
GemGetTiling getTiling { } ;
2019-04-02 16:53:22 +08:00
getTiling . handle = boHandle ;
2022-06-29 01:56:14 +08:00
auto ioctlHelper = drm . getIoctlHelper ( ) ;
2022-06-30 00:49:29 +08:00
ret = ioctlHelper - > ioctl ( DrmIoctl : : GemGetTiling , & getTiling ) ;
2019-04-02 16:53:22 +08:00
2020-02-21 05:37:44 +08:00
if ( ret = = 0 ) {
2022-06-21 23:41:40 +08:00
auto ioctlHelper = drm . getIoctlHelper ( ) ;
if ( getTiling . tilingMode = = static_cast < uint32_t > ( ioctlHelper - > getDrmParamValue ( DrmParam : : TilingNone ) ) ) {
2020-02-21 05:37:44 +08:00
properties . imgInfo - > linearStorage = true ;
}
2019-08-26 15:27:30 +08:00
}
2022-04-27 18:20:10 +08:00
Gmm * gmm = new Gmm ( executionEnvironment . rootDeviceEnvironments [ properties . rootDeviceIndex ] - > getGmmHelper ( ) , * properties . imgInfo ,
2021-12-02 20:21:33 +08:00
createStorageInfoFromProperties ( properties ) , properties . flags . preferCompressed ) ;
2022-04-20 03:24:19 +08:00
2019-04-02 16:53:22 +08:00
drmAllocation - > setDefaultGmm ( gmm ) ;
2022-04-20 03:24:19 +08:00
bo - > setPatIndex ( drm . getPatIndex ( gmm , properties . allocationType , CacheRegion : : Default , CachePolicy : : WriteBack , false ) ) ;
2019-04-02 16:53:22 +08:00
}
2017-12-21 07:45:38 +08:00
return drmAllocation ;
}
2021-03-18 23:16:58 +08:00
void DrmMemoryManager : : closeSharedHandle ( GraphicsAllocation * gfxAllocation ) {
DrmAllocation * drmAllocation = static_cast < DrmAllocation * > ( gfxAllocation ) ;
if ( drmAllocation - > peekSharedHandle ( ) ! = Sharing : : nonSharedResource ) {
closeFunction ( drmAllocation - > peekSharedHandle ( ) ) ;
drmAllocation - > setSharedHandle ( Sharing : : nonSharedResource ) ;
}
2020-11-23 05:46:47 +08:00
}
2018-05-08 16:00:23 +08:00
void DrmMemoryManager : : addAllocationToHostPtrManager ( GraphicsAllocation * gfxAllocation ) {
DrmAllocation * drmMemory = static_cast < DrmAllocation * > ( gfxAllocation ) ;
2021-09-23 23:09:21 +08:00
2018-05-08 16:00:23 +08:00
FragmentStorage fragment = { } ;
fragment . driverAllocation = true ;
fragment . fragmentCpuPointer = gfxAllocation - > getUnderlyingBuffer ( ) ;
fragment . fragmentSize = alignUp ( gfxAllocation - > getUnderlyingBufferSize ( ) , MemoryConstants : : pageSize ) ;
2021-04-03 01:01:51 +08:00
auto osHandle = new OsHandleLinux ( ) ;
osHandle - > bo = drmMemory - > getBO ( ) ;
fragment . osInternalStorage = osHandle ;
2020-08-17 22:38:21 +08:00
fragment . residency = new ResidencyData ( maxOsContextCount ) ;
2020-07-07 14:41:26 +08:00
hostPtrManager - > storeFragment ( gfxAllocation - > getRootDeviceIndex ( ) , fragment ) ;
2018-05-08 16:00:23 +08:00
}
void DrmMemoryManager : : removeAllocationFromHostPtrManager ( GraphicsAllocation * gfxAllocation ) {
auto buffer = gfxAllocation - > getUnderlyingBuffer ( ) ;
2020-07-07 14:41:26 +08:00
auto fragment = hostPtrManager - > getFragment ( { buffer , gfxAllocation - > getRootDeviceIndex ( ) } ) ;
2018-05-08 16:00:23 +08:00
if ( fragment & & fragment - > driverAllocation ) {
OsHandle * osStorageToRelease = fragment - > osInternalStorage ;
2018-08-27 17:33:25 +08:00
ResidencyData * residencyDataToRelease = fragment - > residency ;
2020-07-07 14:41:26 +08:00
if ( hostPtrManager - > releaseHostPtr ( gfxAllocation - > getRootDeviceIndex ( ) , buffer ) ) {
2018-05-08 16:00:23 +08:00
delete osStorageToRelease ;
2018-08-27 17:33:25 +08:00
delete residencyDataToRelease ;
2018-05-08 16:00:23 +08:00
}
}
}
2017-12-21 07:45:38 +08:00
void DrmMemoryManager : : freeGraphicsMemoryImpl ( GraphicsAllocation * gfxAllocation ) {
2022-03-08 04:41:32 +08:00
freeGraphicsMemoryImpl ( gfxAllocation , false ) ;
2022-03-02 11:43:59 +08:00
}
void DrmMemoryManager : : freeGraphicsMemoryImpl ( GraphicsAllocation * gfxAllocation , bool isImported ) {
2021-08-06 20:57:10 +08:00
if ( DebugManager . flags . DoNotFreeResources . get ( ) ) {
return ;
}
2020-09-17 19:27:32 +08:00
DrmAllocation * drmAlloc = static_cast < DrmAllocation * > ( gfxAllocation ) ;
2020-09-10 18:36:44 +08:00
this - > unregisterAllocation ( gfxAllocation ) ;
2020-07-02 17:49:46 +08:00
for ( auto & engine : this - > registeredEngines ) {
auto memoryOperationsInterface = static_cast < DrmMemoryOperationsHandler * > ( executionEnvironment . rootDeviceEnvironments [ gfxAllocation - > getRootDeviceIndex ( ) ] - > memoryOperationsInterface . get ( ) ) ;
memoryOperationsInterface - > evictWithinOsContext ( engine . osContext , * gfxAllocation ) ;
}
2020-05-08 16:04:06 +08:00
2021-04-09 18:57:55 +08:00
if ( drmAlloc - > getMmapPtr ( ) ) {
this - > munmapFunction ( drmAlloc - > getMmapPtr ( ) , drmAlloc - > getMmapSize ( ) ) ;
}
2020-04-21 19:16:45 +08:00
for ( auto handleId = 0u ; handleId < gfxAllocation - > getNumGmms ( ) ; handleId + + ) {
delete gfxAllocation - > getGmm ( handleId ) ;
2019-03-21 21:01:08 +08:00
}
2017-12-21 07:45:38 +08:00
if ( gfxAllocation - > fragmentsStorage . fragmentCount ) {
cleanGraphicsMemoryCreatedFromHostPtr ( gfxAllocation ) ;
2019-08-27 21:33:58 +08:00
} else {
2019-09-02 03:36:15 +08:00
auto & bos = static_cast < DrmAllocation * > ( gfxAllocation ) - > getBOs ( ) ;
for ( auto bo : bos ) {
2021-10-21 19:49:50 +08:00
unreference ( bo , bo & & bo - > peekIsReusableAllocation ( ) ? false : true ) ;
2019-09-02 03:36:15 +08:00
}
2022-03-02 11:43:59 +08:00
if ( isImported = = false ) {
closeSharedHandle ( gfxAllocation ) ;
}
2017-12-21 07:45:38 +08:00
}
2019-08-05 19:34:29 +08:00
2019-10-22 16:26:23 +08:00
releaseGpuRange ( gfxAllocation - > getReservedAddressPtr ( ) , gfxAllocation - > getReservedAddressSize ( ) , gfxAllocation - > getRootDeviceIndex ( ) ) ;
2019-08-27 21:33:58 +08:00
alignedFreeWrapper ( gfxAllocation - > getDriverAllocatedCpuPtr ( ) ) ;
2019-08-05 19:34:29 +08:00
2020-09-17 19:27:32 +08:00
drmAlloc - > freeRegisteredBOBindExtHandles ( & getDrm ( drmAlloc - > getRootDeviceIndex ( ) ) ) ;
2017-12-21 07:45:38 +08:00
delete gfxAllocation ;
2018-02-27 06:23:43 +08:00
}
2017-12-21 07:45:38 +08:00
2019-03-04 21:50:26 +08:00
void DrmMemoryManager : : handleFenceCompletion ( GraphicsAllocation * allocation ) {
2022-01-20 02:14:10 +08:00
auto & drm = this - > getDrm ( allocation - > getRootDeviceIndex ( ) ) ;
if ( drm . isVmBindAvailable ( ) ) {
if ( drm . completionFenceSupport ( ) ) {
waitOnCompletionFence ( allocation ) ;
} else {
waitForEnginesCompletion ( * allocation ) ;
}
2021-04-29 16:58:16 +08:00
} else {
static_cast < DrmAllocation * > ( allocation ) - > getBO ( ) - > wait ( - 1 ) ;
}
2019-03-04 21:50:26 +08:00
}
2020-10-21 16:50:53 +08:00
GraphicsAllocation *DrmMemoryManager::createGraphicsAllocationFromExistingStorage(AllocationProperties &properties, void *ptr, MultiGraphicsAllocation &multiGraphicsAllocation) {
    // If the default allocation was mmap-backed, re-import its internal handle
    // as a USM host allocation at the requested pointer; otherwise allocate
    // fresh memory over the given pointer.
    auto defaultAlloc = multiGraphicsAllocation.getDefaultGraphicsAllocation();
    if (defaultAlloc && static_cast<DrmAllocation *>(defaultAlloc)->getMmapPtr()) {
        properties.size = defaultAlloc->getUnderlyingBufferSize();
        properties.gpuAddress = castToUint64(ptr);

        auto internalHandle = defaultAlloc->peekInternalHandle(this);
        return createUSMHostAllocationFromSharedHandle(static_cast<osHandle>(internalHandle), properties, true);
    }
    return allocateGraphicsMemoryWithProperties(properties, ptr);
}
2020-01-07 14:42:40 +08:00
uint64_t DrmMemoryManager : : getSystemSharedMemory ( uint32_t rootDeviceIndex ) {
2017-12-21 07:45:38 +08:00
uint64_t hostMemorySize = MemoryConstants : : pageSize * ( uint64_t ) ( sysconf ( _SC_PHYS_PAGES ) ) ;
2022-06-28 22:16:14 +08:00
uint64_t gpuMemorySize = 0u ;
2021-10-22 19:43:24 +08:00
2022-06-28 22:16:14 +08:00
[[maybe_unused]] auto ret = getDrm ( rootDeviceIndex ) . queryGttSize ( gpuMemorySize ) ;
DEBUG_BREAK_IF ( ret ! = 0 ) ;
2017-12-21 07:45:38 +08:00
return std : : min ( hostMemorySize , gpuMemorySize ) ;
}
2021-07-27 22:24:20 +08:00
double DrmMemoryManager : : getPercentOfGlobalMemoryAvailable ( uint32_t rootDeviceIndex ) {
if ( isLocalMemorySupported ( rootDeviceIndex ) ) {
return 0.95 ;
}
return 0.8 ;
}
2020-01-07 14:42:40 +08:00
MemoryManager::AllocationStatus DrmMemoryManager::populateOsHandles(OsHandleStorage &handleStorage, uint32_t rootDeviceIndex) {
    // Create userptr BOs for every fragment that does not yet have OS storage,
    // optionally validate (pin) the new pointers, then publish the fragments
    // to the host-pointer manager.
    BufferObject *createdBos[maxFragmentsCount];
    uint32_t createdBoCount = 0;
    uint32_t createdFragmentIndices[maxFragmentsCount];

    for (unsigned int i = 0; i < maxFragmentsCount; i++) {
        auto &fragmentData = handleStorage.fragmentStorageData[i];
        // If there is no fragment it means it already exists.
        if (!fragmentData.osHandleStorage && fragmentData.fragmentSize) {
            auto osHandle = new OsHandleLinux();
            fragmentData.osHandleStorage = osHandle;
            fragmentData.residency = new ResidencyData(maxOsContextCount);

            osHandle->bo = allocUserptr(reinterpret_cast<uintptr_t>(fragmentData.cpuPtr),
                                        fragmentData.fragmentSize, rootDeviceIndex);
            if (!osHandle->bo) {
                fragmentData.freeTheFragment = true;
                return AllocationStatus::Error;
            }

            createdBos[createdBoCount] = osHandle->bo;
            createdFragmentIndices[createdBoCount] = i;
            createdBoCount++;
        }
    }

    if (validateHostPtrMemory) {
        int pinResult = pinBBs.at(rootDeviceIndex)->validateHostPtr(createdBos, createdBoCount, registeredEngines[defaultEngineIndex[rootDeviceIndex]].osContext, 0, getDefaultDrmContextId(rootDeviceIndex));
        if (pinResult == EFAULT) {
            // Host pointer is not accessible by the GPU; mark everything we
            // created for cleanup and report the specific failure.
            for (uint32_t j = 0; j < createdBoCount; j++) {
                handleStorage.fragmentStorageData[createdFragmentIndices[j]].freeTheFragment = true;
            }
            return AllocationStatus::InvalidHostPointer;
        }
        if (pinResult != 0) {
            return AllocationStatus::Error;
        }
    }

    for (uint32_t j = 0; j < createdBoCount; j++) {
        hostPtrManager->storeFragment(rootDeviceIndex, handleStorage.fragmentStorageData[createdFragmentIndices[j]]);
    }
    return AllocationStatus::Success;
}
2018-03-27 20:01:04 +08:00
2019-11-15 16:59:48 +08:00
void DrmMemoryManager : : cleanOsHandles ( OsHandleStorage & handleStorage , uint32_t rootDeviceIndex ) {
2018-10-22 20:20:05 +08:00
for ( unsigned int i = 0 ; i < maxFragmentsCount ; i + + ) {
2017-12-21 07:45:38 +08:00
if ( handleStorage . fragmentStorageData [ i ] . freeTheFragment ) {
2021-04-03 01:01:51 +08:00
auto osHandle = static_cast < OsHandleLinux * > ( handleStorage . fragmentStorageData [ i ] . osHandleStorage ) ;
if ( osHandle - > bo ) {
BufferObject * search = osHandle - > bo ;
2017-12-21 07:45:38 +08:00
search - > wait ( - 1 ) ;
2021-10-22 19:43:24 +08:00
[[maybe_unused]] auto refCount = unreference ( search , true ) ;
2017-12-21 07:45:38 +08:00
DEBUG_BREAK_IF ( refCount ! = 1u ) ;
}
delete handleStorage . fragmentStorageData [ i ] . osHandleStorage ;
2018-03-27 20:01:04 +08:00
handleStorage . fragmentStorageData [ i ] . osHandleStorage = nullptr ;
2017-12-21 07:45:38 +08:00
delete handleStorage . fragmentStorageData [ i ] . residency ;
2018-03-27 20:01:04 +08:00
handleStorage . fragmentStorageData [ i ] . residency = nullptr ;
2017-12-21 07:45:38 +08:00
}
}
}
2018-02-27 06:23:43 +08:00
bool DrmMemoryManager : : setDomainCpu ( GraphicsAllocation & graphicsAllocation , bool writeEnable ) {
2022-02-07 22:27:53 +08:00
DEBUG_BREAK_IF ( writeEnable ) ; // unsupported path (for CPU writes call SW_FINISH ioctl in unlockResource)
2018-02-27 06:23:43 +08:00
auto bo = static_cast < DrmAllocation * > ( & graphicsAllocation ) - > getBO ( ) ;
if ( bo = = nullptr )
return false ;
2022-06-29 01:56:14 +08:00
auto & drm = this - > getDrm ( graphicsAllocation . getRootDeviceIndex ( ) ) ;
auto ioctlHelper = drm . getIoctlHelper ( ) ;
2022-07-25 22:47:08 +08:00
return ioctlHelper - > setDomainCpu ( bo - > peekHandle ( ) , writeEnable ) ;
2018-02-27 06:23:43 +08:00
}
2019-01-24 18:51:33 +08:00
void * DrmMemoryManager : : lockResourceImpl ( GraphicsAllocation & graphicsAllocation ) {
auto cpuPtr = graphicsAllocation . getUnderlyingBuffer ( ) ;
2018-02-27 06:23:43 +08:00
if ( cpuPtr ! = nullptr ) {
2021-12-01 05:04:54 +08:00
[[maybe_unused]] auto success = setDomainCpu ( graphicsAllocation , false ) ;
2018-02-27 06:23:43 +08:00
DEBUG_BREAK_IF ( ! success ) ;
return cpuPtr ;
}
2019-01-24 18:51:33 +08:00
auto bo = static_cast < DrmAllocation & > ( graphicsAllocation ) . getBO ( ) ;
2018-02-27 06:23:43 +08:00
2022-06-07 01:43:28 +08:00
if ( graphicsAllocation . getAllocationType ( ) = = AllocationType : : WRITE_COMBINED ) {
auto addr = lockBufferObject ( bo ) ;
auto alignedAddr = alignUp ( addr , MemoryConstants : : pageSize64k ) ;
auto notUsedSize = ptrDiff ( alignedAddr , addr ) ;
// call unmap to free the unaligned pages preceding the BO allocation and
// adjust the pointer in the CPU mapping to the beginning of the BO allocation
munmapFunction ( addr , notUsedSize ) ;
bo - > setLockedAddress ( alignedAddr ) ;
return bo - > peekLockedAddress ( ) ;
2018-03-20 17:49:09 +08:00
}
2018-02-27 06:23:43 +08:00
2022-06-07 01:43:28 +08:00
return lockBufferObject ( bo ) ;
2018-02-27 06:23:43 +08:00
}
2019-01-24 18:51:33 +08:00
void DrmMemoryManager : : unlockResourceImpl ( GraphicsAllocation & graphicsAllocation ) {
2022-06-07 01:43:28 +08:00
return unlockBufferObject ( static_cast < DrmAllocation & > ( graphicsAllocation ) . getBO ( ) ) ;
2018-02-27 06:23:43 +08:00
}
2019-07-08 02:33:17 +08:00
2022-06-29 01:56:14 +08:00
int DrmMemoryManager : : obtainFdFromHandle ( int boHandle , uint32_t rootDeviceIndex ) {
2022-07-27 02:11:27 +08:00
auto & drm = this - > getDrm ( rootDeviceIndex ) ;
auto ioctlHelper = drm . getIoctlHelper ( ) ;
2022-05-25 00:13:02 +08:00
PrimeHandle openFd { } ;
2019-06-06 22:26:47 +08:00
2022-07-27 02:11:27 +08:00
openFd . flags = ioctlHelper - > getFlagsForPrimeHandleToFd ( ) ;
2019-06-06 22:26:47 +08:00
openFd . handle = boHandle ;
2022-06-30 00:49:29 +08:00
ioctlHelper - > ioctl ( DrmIoctl : : PrimeHandleToFd , & openFd ) ;
2019-06-06 22:26:47 +08:00
2022-05-25 00:13:02 +08:00
return openFd . fileDescriptor ;
2019-06-06 22:26:47 +08:00
}
2019-07-17 21:38:14 +08:00
2021-04-20 20:24:04 +08:00
uint32_t DrmMemoryManager : : getDefaultDrmContextId ( uint32_t rootDeviceIndex ) const {
auto osContextLinux = static_cast < OsContextLinux * > ( registeredEngines [ defaultEngineIndex [ rootDeviceIndex ] ] . osContext ) ;
2019-10-22 19:29:39 +08:00
return osContextLinux - > getDrmContextIds ( ) [ 0 ] ;
2019-07-17 21:38:14 +08:00
}
2019-08-05 19:34:29 +08:00
2020-08-28 14:37:26 +08:00
size_t DrmMemoryManager : : getUserptrAlignment ( ) {
auto alignment = MemoryConstants : : allocationAlignment ;
if ( DebugManager . flags . ForceUserptrAlignment . get ( ) ! = - 1 ) {
alignment = DebugManager . flags . ForceUserptrAlignment . get ( ) * MemoryConstants : : kiloByte ;
}
return alignment ;
}
2020-01-07 14:42:40 +08:00
Drm &DrmMemoryManager::getDrm(uint32_t rootDeviceIndex) const {
    // Resolve the Drm driver-model instance owned by the given root device.
    auto &rootDeviceEnvironment = *this->executionEnvironment.rootDeviceEnvironments[rootDeviceIndex];
    return *rootDeviceEnvironment.osInterface->getDriverModel()->as<Drm>();
}
2020-03-19 17:41:35 +08:00
uint32_t DrmMemoryManager : : getRootDeviceIndex ( const Drm * drm ) {
auto rootDeviceCount = this - > executionEnvironment . rootDeviceEnvironments . size ( ) ;
for ( auto rootDeviceIndex = 0u ; rootDeviceIndex < rootDeviceCount ; rootDeviceIndex + + ) {
if ( & getDrm ( rootDeviceIndex ) = = drm ) {
return rootDeviceIndex ;
}
}
2020-03-27 22:21:18 +08:00
return CommonConstants : : unspecifiedDeviceIndex ;
2020-03-19 17:41:35 +08:00
}
2020-07-01 16:38:19 +08:00
// Reserves a GPU virtual address range of the requested size from the
// standard heap of the given root device.
AddressRange DrmMemoryManager::reserveGpuAddress(size_t size, uint32_t rootDeviceIndex) {
    auto reservedVa = acquireGpuRange(size, rootDeviceIndex, HeapIndex::HEAP_STANDARD);
    return AddressRange{reservedVa, size};
}
void DrmMemoryManager : : freeGpuAddress ( AddressRange addressRange , uint32_t rootDeviceIndex ) {
releaseGpuRange ( reinterpret_cast < void * > ( addressRange . address ) , addressRange . size , rootDeviceIndex ) ;
}
2020-09-10 18:36:44 +08:00
std : : unique_lock < std : : mutex > DrmMemoryManager : : acquireAllocLock ( ) {
return std : : unique_lock < std : : mutex > ( this - > allocMutex ) ;
}
std : : vector < GraphicsAllocation * > & DrmMemoryManager : : getSysMemAllocs ( ) {
return this - > sysMemAllocs ;
}
std : : vector < GraphicsAllocation * > & DrmMemoryManager : : getLocalMemAllocs ( uint32_t rootDeviceIndex ) {
return this - > localMemAllocs [ rootDeviceIndex ] ;
}
2022-02-08 20:14:24 +08:00
void DrmMemoryManager : : makeAllocationResident ( GraphicsAllocation * allocation ) {
if ( DebugManager . flags . MakeEachAllocationResident . get ( ) = = 1 ) {
auto drmAllocation = static_cast < DrmAllocation * > ( allocation ) ;
for ( uint32_t i = 0 ; getDrm ( allocation - > getRootDeviceIndex ( ) ) . getVirtualMemoryAddressSpace ( i ) > 0u ; i + + ) {
drmAllocation - > makeBOsResident ( registeredEngines [ defaultEngineIndex [ allocation - > getRootDeviceIndex ( ) ] ] . osContext , i , nullptr , true ) ;
getDrm ( allocation - > getRootDeviceIndex ( ) ) . waitForBind ( i ) ;
}
}
}
2020-09-10 18:36:44 +08:00
void DrmMemoryManager : : registerSysMemAlloc ( GraphicsAllocation * allocation ) {
2022-02-08 20:14:24 +08:00
makeAllocationResident ( allocation ) ;
2020-09-10 18:36:44 +08:00
std : : lock_guard < std : : mutex > lock ( this - > allocMutex ) ;
this - > sysMemAllocs . push_back ( allocation ) ;
}
void DrmMemoryManager : : registerLocalMemAlloc ( GraphicsAllocation * allocation , uint32_t rootDeviceIndex ) {
2022-02-08 20:14:24 +08:00
makeAllocationResident ( allocation ) ;
2020-09-10 18:36:44 +08:00
std : : lock_guard < std : : mutex > lock ( this - > allocMutex ) ;
this - > localMemAllocs [ rootDeviceIndex ] . push_back ( allocation ) ;
}
2022-02-08 20:14:24 +08:00
2020-09-10 18:36:44 +08:00
void DrmMemoryManager : : unregisterAllocation ( GraphicsAllocation * allocation ) {
std : : lock_guard < std : : mutex > lock ( this - > allocMutex ) ;
sysMemAllocs . erase ( std : : remove ( sysMemAllocs . begin ( ) , sysMemAllocs . end ( ) , allocation ) ,
sysMemAllocs . end ( ) ) ;
localMemAllocs [ allocation - > getRootDeviceIndex ( ) ] . erase ( std : : remove ( localMemAllocs [ allocation - > getRootDeviceIndex ( ) ] . begin ( ) ,
localMemAllocs [ allocation - > getRootDeviceIndex ( ) ] . end ( ) ,
allocation ) ,
localMemAllocs [ allocation - > getRootDeviceIndex ( ) ] . end ( ) ) ;
}
2020-09-17 19:27:32 +08:00
2020-10-09 16:48:37 +08:00
void DrmMemoryManager : : registerAllocationInOs ( GraphicsAllocation * allocation ) {
2020-11-19 22:11:37 +08:00
if ( allocation & & getDrm ( allocation - > getRootDeviceIndex ( ) ) . resourceRegistrationEnabled ( ) ) {
2020-09-17 19:27:32 +08:00
auto drmAllocation = static_cast < DrmAllocation * > ( allocation ) ;
drmAllocation - > registerBOBindExtHandle ( & getDrm ( drmAllocation - > getRootDeviceIndex ( ) ) ) ;
2021-03-10 07:02:59 +08:00
if ( isAllocationTypeToCapture ( drmAllocation - > getAllocationType ( ) ) ) {
drmAllocation - > markForCapture ( ) ;
}
2020-09-17 19:27:32 +08:00
}
}
2021-05-28 01:44:47 +08:00
std : : unique_ptr < MemoryManager > DrmMemoryManager : : create ( ExecutionEnvironment & executionEnvironment ) {
2021-08-03 22:10:30 +08:00
bool validateHostPtr = true ;
if ( DebugManager . flags . EnableHostPtrValidation . get ( ) ! = - 1 ) {
validateHostPtr = DebugManager . flags . EnableHostPtrValidation . get ( ) ;
}
2021-05-28 01:44:47 +08:00
return std : : make_unique < DrmMemoryManager > ( gemCloseWorkerMode : : gemCloseWorkerActive ,
DebugManager . flags . EnableForcePin . get ( ) ,
2021-08-03 22:10:30 +08:00
validateHostPtr ,
2021-05-28 01:44:47 +08:00
executionEnvironment ) ;
}
2021-09-13 18:04:37 +08:00
uint64_t DrmMemoryManager : : getLocalMemorySize ( uint32_t rootDeviceIndex , uint32_t deviceBitfield ) {
auto memoryInfo = getDrm ( rootDeviceIndex ) . getMemoryInfo ( ) ;
if ( ! memoryInfo ) {
return 0 ;
}
auto hwInfo = executionEnvironment . rootDeviceEnvironments [ rootDeviceIndex ] - > getHardwareInfo ( ) ;
uint32_t subDevicesCount = HwHelper : : getSubDevicesCount ( hwInfo ) ;
size_t size = 0 ;
for ( uint32_t i = 0 ; i < subDevicesCount ; i + + ) {
auto memoryBank = ( 1 < < i ) ;
if ( deviceBitfield & memoryBank ) {
size + = memoryInfo - > getMemoryRegionSize ( memoryBank ) ;
}
}
return size ;
}
2021-09-08 22:16:07 +08:00
bool DrmMemoryManager : : copyMemoryToAllocation ( GraphicsAllocation * graphicsAllocation , size_t destinationOffset , const void * memoryToCopy , size_t sizeToCopy ) {
2022-11-03 21:18:23 +08:00
if ( graphicsAllocation - > getUnderlyingBuffer ( ) & & graphicsAllocation - > storageInfo . getNumBanks ( ) = = 1 ) {
2021-09-08 22:16:07 +08:00
return MemoryManager : : copyMemoryToAllocation ( graphicsAllocation , destinationOffset , memoryToCopy , sizeToCopy ) ;
}
2021-10-21 19:16:19 +08:00
return copyMemoryToAllocationBanks ( graphicsAllocation , destinationOffset , memoryToCopy , sizeToCopy , maxNBitValue ( graphicsAllocation - > storageInfo . getNumBanks ( ) ) ) ;
}
bool DrmMemoryManager : : copyMemoryToAllocationBanks ( GraphicsAllocation * graphicsAllocation , size_t destinationOffset , const void * memoryToCopy , size_t sizeToCopy , DeviceBitfield handleMask ) {
2022-06-02 05:13:52 +08:00
if ( MemoryPoolHelper : : isSystemMemoryPool ( graphicsAllocation - > getMemoryPool ( ) ) ) {
2021-10-21 19:16:19 +08:00
return false ;
}
2021-09-08 22:16:07 +08:00
auto drmAllocation = static_cast < DrmAllocation * > ( graphicsAllocation ) ;
2021-10-29 09:53:31 +08:00
for ( auto handleId = 0u ; handleId < graphicsAllocation - > storageInfo . getNumBanks ( ) ; handleId + + ) {
2021-10-21 19:16:19 +08:00
if ( ! handleMask . test ( handleId ) ) {
continue ;
}
2022-06-07 01:43:28 +08:00
auto ptr = lockBufferObject ( drmAllocation - > getBOs ( ) [ handleId ] ) ;
2021-09-08 22:16:07 +08:00
if ( ! ptr ) {
return false ;
}
memcpy_s ( ptrOffset ( ptr , destinationOffset ) , graphicsAllocation - > getUnderlyingBufferSize ( ) - destinationOffset , memoryToCopy , sizeToCopy ) ;
2022-06-07 01:43:28 +08:00
this - > unlockBufferObject ( drmAllocation - > getBOs ( ) [ handleId ] ) ;
2021-09-08 22:16:07 +08:00
}
return true ;
}
2022-06-07 01:43:28 +08:00
void DrmMemoryManager : : unlockBufferObject ( BufferObject * bo ) {
2021-09-08 22:16:07 +08:00
if ( bo = = nullptr )
return ;
2021-10-21 19:49:50 +08:00
releaseReservedCpuAddressRange ( bo - > peekLockedAddress ( ) , bo - > peekSize ( ) , this - > getRootDeviceIndex ( bo - > peekDrm ( ) ) ) ;
2021-09-08 22:16:07 +08:00
[[maybe_unused]] auto ret = munmapFunction ( bo - > peekLockedAddress ( ) , bo - > peekSize ( ) ) ;
DEBUG_BREAK_IF ( ret ! = 0 ) ;
bo - > setLockedAddress ( nullptr ) ;
}
2021-09-13 18:04:37 +08:00
2022-04-27 18:20:10 +08:00
// Builds one Gmm per colour chunk for a multi-bank ("coloured") allocation.
// For ChunkSizeBased colouring the number of handles equals the number of
// colouring-granularity chunks; otherwise one handle per memory bank.
void createColouredGmms(GmmHelper *gmmHelper, DrmAllocation &allocation, const StorageInfo &storageInfo, bool compression) {
    DEBUG_BREAK_IF(storageInfo.colouringPolicy == ColouringPolicy::DeviceCountBased &&
                   storageInfo.colouringGranularity != MemoryConstants::pageSize64k);

    auto remainingSize = alignUp(allocation.getUnderlyingBufferSize(), storageInfo.colouringGranularity);
    auto handles = storageInfo.getNumBanks();
    auto banksCnt = storageInfo.getTotalBanksCnt();

    if (storageInfo.colouringPolicy == ColouringPolicy::ChunkSizeBased) {
        handles = static_cast<uint32_t>(remainingSize / storageInfo.colouringGranularity);
        allocation.resizeGmms(handles);
    }

    /* This logic is to colour resource as equally as possible.
    Divide size by number of devices and align result up to 64kb page, then subtract it from whole size and allocate it on the first tile. First tile has it's chunk.
    In the following iteration divide rest of a size by remaining devices and again subtract it.
    Notice that if allocation size (in pages) is not divisible by 4 then remainder can be equal to 1, 2, 3 and by using this algorithm it can be spread efficiently.

    For example: 18 pages allocation and 4 devices. Page size is 64kb.
    Divide by 4 and align up to page size and result is 5 pages. After subtract, remaining size is 13 pages.
    Now divide 13 by 3 and align up - result is 5 pages. After subtract, remaining size is 8 pages.
    Divide 8 by 2 - result is 4 pages.
    In last iteration remaining 4 pages go to last tile.
    18 pages is coloured to (5, 5, 4, 4).

    It was tested and doesn't require any debug */
    for (auto handleId = 0u; handleId < handles; handleId++) {
        auto chunkSize = alignUp(remainingSize / (handles - handleId), storageInfo.colouringGranularity);
        remainingSize -= chunkSize;

        StorageInfo limitedStorageInfo = storageInfo;
        // Restrict this chunk's Gmm to a single bank, cycling through banks.
        limitedStorageInfo.memoryBanks &= (1u << (handleId % banksCnt));

        auto gmm = new Gmm(gmmHelper,
                           nullptr,
                           chunkSize,
                           0u,
                           CacheSettingsHelper::getGmmUsageType(allocation.getAllocationType(), false, *gmmHelper->getHardwareInfo()),
                           compression,
                           limitedStorageInfo,
                           true);
        allocation.setGmm(gmm, handleId);
    }
}
2022-04-27 18:20:10 +08:00
// Builds one full-size Gmm per memory bank of the allocation (non-coloured
// multi-bank case). Each Gmm is limited to its own bank and page-table
// visibility bit.
void fillGmmsInAllocation(GmmHelper *gmmHelper, DrmAllocation *allocation, const StorageInfo &storageInfo) {
    const auto alignedSize = alignUp(allocation->getUnderlyingBufferSize(), MemoryConstants::pageSize64k);

    for (auto handleId = 0u; handleId < storageInfo.getNumBanks(); handleId++) {
        StorageInfo perBankStorageInfo = storageInfo;
        perBankStorageInfo.memoryBanks &= 1u << handleId;
        perBankStorageInfo.pageTablesVisibility &= 1u << handleId;

        auto gmm = new Gmm(gmmHelper, nullptr, alignedSize, 0u,
                           CacheSettingsHelper::getGmmUsageType(allocation->getAllocationType(), false, *gmmHelper->getHardwareInfo()),
                           false, perBankStorageInfo, true);
        allocation->setGmm(gmm, handleId);
    }
}
2022-02-04 21:59:01 +08:00
uint64_t getGpuAddress ( const AlignmentSelector & alignmentSelector , HeapAssigner & heapAssigner , const HardwareInfo & hwInfo , AllocationType allocType , GfxPartition * gfxPartition ,
2022-06-08 16:23:02 +08:00
size_t & sizeAllocated , const void * hostPtr , bool resource48Bit , bool useFrontWindow , GmmHelper & gmmHelper ) {
2021-09-15 23:46:29 +08:00
uint64_t gpuAddress = 0 ;
switch ( allocType ) {
2022-02-04 21:59:01 +08:00
case AllocationType : : SVM_GPU :
2021-09-15 23:46:29 +08:00
gpuAddress = reinterpret_cast < uint64_t > ( hostPtr ) ;
sizeAllocated = 0 ;
break ;
2022-02-04 21:59:01 +08:00
case AllocationType : : KERNEL_ISA :
case AllocationType : : KERNEL_ISA_INTERNAL :
case AllocationType : : INTERNAL_HEAP :
case AllocationType : : DEBUG_MODULE_AREA : {
2021-09-15 23:46:29 +08:00
auto heap = heapAssigner . get32BitHeapIndex ( allocType , true , hwInfo , useFrontWindow ) ;
2022-08-29 21:30:21 +08:00
size_t alignment = 0 ;
if ( DebugManager . flags . ExperimentalEnableCustomLocalMemoryAlignment . get ( ) ! = - 1 ) {
alignment = static_cast < size_t > ( DebugManager . flags . ExperimentalEnableCustomLocalMemoryAlignment . get ( ) ) ;
}
gpuAddress = gmmHelper . canonize ( gfxPartition - > heapAllocateWithCustomAlignment ( heap , sizeAllocated , alignment ) ) ;
2021-09-15 23:46:29 +08:00
} break ;
2022-02-04 21:59:01 +08:00
case AllocationType : : WRITE_COMBINED :
2021-09-15 23:46:29 +08:00
sizeAllocated = 0 ;
break ;
default :
AlignmentSelector : : CandidateAlignment alignment = alignmentSelector . selectAlignment ( sizeAllocated ) ;
if ( gfxPartition - > getHeapLimit ( HeapIndex : : HEAP_EXTENDED ) > 0 & & ! resource48Bit ) {
2022-11-09 01:32:18 +08:00
auto alignSize = true ;
if ( DebugManager . flags . UseHighAlignmentForHeapExtended . get ( ) ! = - 1 ) {
alignSize = ! ! DebugManager . flags . UseHighAlignmentForHeapExtended . get ( ) ;
}
if ( alignSize ) {
alignment . alignment = Math : : nextPowerOfTwo ( sizeAllocated ) ;
}
2021-09-15 23:46:29 +08:00
alignment . heap = HeapIndex : : HEAP_EXTENDED ;
}
2022-05-11 22:59:23 +08:00
gpuAddress = gmmHelper . canonize ( gfxPartition - > heapAllocateWithCustomAlignment ( alignment . heap , sizeAllocated , alignment . alignment ) ) ;
2021-09-15 23:46:29 +08:00
break ;
}
return gpuAddress ;
}
2022-04-11 11:40:18 +08:00
void DrmMemoryManager : : cleanupBeforeReturn ( const AllocationData & allocationData , GfxPartition * gfxPartition , DrmAllocation * drmAllocation , GraphicsAllocation * graphicsAllocation , uint64_t & gpuAddress , size_t & sizeAllocated ) {
for ( auto bo : drmAllocation - > getBOs ( ) ) {
delete bo ;
}
for ( auto handleId = 0u ; handleId < allocationData . storageInfo . getNumBanks ( ) ; handleId + + ) {
delete graphicsAllocation - > getGmm ( handleId ) ;
}
2022-04-26 03:34:32 +08:00
auto gmmHelper = getGmmHelper ( allocationData . rootDeviceIndex ) ;
gfxPartition - > freeGpuAddressRange ( gmmHelper - > decanonize ( gpuAddress ) , sizeAllocated ) ;
2022-04-11 11:40:18 +08:00
}
2021-09-15 23:46:29 +08:00
GraphicsAllocation * DrmMemoryManager : : allocateGraphicsMemoryInDevicePool ( const AllocationData & allocationData , AllocationStatus & status ) {
status = AllocationStatus : : RetryInNonDevicePool ;
if ( ! this - > localMemorySupported [ allocationData . rootDeviceIndex ] | |
allocationData . flags . useSystemMemory | |
( allocationData . flags . allow32Bit & & this - > force32bitAllocations ) | |
2022-02-04 21:59:01 +08:00
allocationData . type = = AllocationType : : SHARED_RESOURCE_COPY ) {
2021-09-15 23:46:29 +08:00
return nullptr ;
}
2022-02-04 21:59:01 +08:00
if ( allocationData . type = = AllocationType : : UNIFIED_SHARED_MEMORY ) {
2021-09-15 23:46:29 +08:00
auto allocation = this - > createSharedUnifiedMemoryAllocation ( allocationData ) ;
status = allocation ? AllocationStatus : : Success : AllocationStatus : : Error ;
return allocation ;
}
2022-02-10 02:03:05 +08:00
auto hwInfo = executionEnvironment . rootDeviceEnvironments [ allocationData . rootDeviceIndex ] - > getHardwareInfo ( ) ;
2021-09-15 23:46:29 +08:00
std : : unique_ptr < Gmm > gmm ;
size_t sizeAligned = 0 ;
auto numHandles = allocationData . storageInfo . getNumBanks ( ) ;
bool createSingleHandle = 1 = = numHandles ;
2022-05-11 22:59:23 +08:00
auto gmmHelper = getGmmHelper ( allocationData . rootDeviceIndex ) ;
2022-02-04 21:59:01 +08:00
if ( allocationData . type = = AllocationType : : IMAGE ) {
2021-09-15 23:46:29 +08:00
allocationData . imgInfo - > useLocalMemory = true ;
2022-05-11 22:59:23 +08:00
gmm = std : : make_unique < Gmm > ( gmmHelper , * allocationData . imgInfo ,
2021-12-03 21:52:16 +08:00
allocationData . storageInfo , allocationData . flags . preferCompressed ) ;
2021-09-15 23:46:29 +08:00
sizeAligned = alignUp ( allocationData . imgInfo - > size , MemoryConstants : : pageSize64k ) ;
} else {
2022-02-04 21:59:01 +08:00
if ( allocationData . type = = AllocationType : : WRITE_COMBINED ) {
2021-09-15 23:46:29 +08:00
sizeAligned = alignUp ( allocationData . size + MemoryConstants : : pageSize64k , 2 * MemoryConstants : : megaByte ) + 2 * MemoryConstants : : megaByte ;
} else {
sizeAligned = alignUp ( allocationData . size , MemoryConstants : : pageSize64k ) ;
}
if ( createSingleHandle ) {
2022-02-10 02:03:05 +08:00
2022-05-11 22:59:23 +08:00
gmm = std : : make_unique < Gmm > ( gmmHelper ,
2021-09-15 23:46:29 +08:00
nullptr ,
sizeAligned ,
0u ,
2022-02-10 02:03:05 +08:00
CacheSettingsHelper : : getGmmUsageType ( allocationData . type , ! ! allocationData . flags . uncacheable , * hwInfo ) ,
2021-12-03 21:52:16 +08:00
allocationData . flags . preferCompressed ,
2022-02-07 22:27:53 +08:00
allocationData . storageInfo ,
true ) ;
2021-09-15 23:46:29 +08:00
}
}
auto sizeAllocated = sizeAligned ;
auto gfxPartition = getGfxPartition ( allocationData . rootDeviceIndex ) ;
2022-07-09 06:53:42 +08:00
auto gpuAddress = getGpuAddress ( this - > alignmentSelector , this - > heapAssigner , * hwInfo ,
allocationData . type , gfxPartition , sizeAllocated ,
allocationData . hostPtr , allocationData . flags . resource48Bit , allocationData . flags . use32BitFrontWindow , * gmmHelper ) ;
2022-05-30 22:18:50 +08:00
auto canonizedGpuAddress = gmmHelper - > canonize ( gpuAddress ) ;
auto allocation = std : : make_unique < DrmAllocation > ( allocationData . rootDeviceIndex , numHandles , allocationData . type , nullptr , nullptr , canonizedGpuAddress , sizeAligned , MemoryPool : : LocalMemory ) ;
2022-04-11 11:40:18 +08:00
DrmAllocation * drmAllocation = static_cast < DrmAllocation * > ( allocation . get ( ) ) ;
GraphicsAllocation * graphicsAllocation = static_cast < GraphicsAllocation * > ( allocation . get ( ) ) ;
2021-09-15 23:46:29 +08:00
if ( createSingleHandle ) {
allocation - > setDefaultGmm ( gmm . release ( ) ) ;
} else if ( allocationData . storageInfo . multiStorage ) {
2022-05-11 22:59:23 +08:00
createColouredGmms ( gmmHelper ,
2021-09-15 23:46:29 +08:00
* allocation ,
allocationData . storageInfo ,
2021-12-03 21:52:16 +08:00
allocationData . flags . preferCompressed ) ;
2021-09-15 23:46:29 +08:00
} else {
2022-05-11 22:59:23 +08:00
fillGmmsInAllocation ( gmmHelper , allocation . get ( ) , allocationData . storageInfo ) ;
2021-09-15 23:46:29 +08:00
}
allocation - > storageInfo = allocationData . storageInfo ;
allocation - > setFlushL3Required ( allocationData . flags . flushL3 ) ;
allocation - > setUncacheable ( allocationData . flags . uncacheable ) ;
allocation - > setReservedAddressRange ( reinterpret_cast < void * > ( gpuAddress ) , sizeAllocated ) ;
if ( ! createDrmAllocation ( & getDrm ( allocationData . rootDeviceIndex ) , allocation . get ( ) , gpuAddress , maxOsContextCount ) ) {
for ( auto handleId = 0u ; handleId < allocationData . storageInfo . getNumBanks ( ) ; handleId + + ) {
delete allocation - > getGmm ( handleId ) ;
}
2022-04-26 03:34:32 +08:00
gfxPartition - > freeGpuAddressRange ( gmmHelper - > decanonize ( gpuAddress ) , sizeAllocated ) ;
2021-09-15 23:46:29 +08:00
status = AllocationStatus : : Error ;
return nullptr ;
}
2022-02-04 21:59:01 +08:00
if ( allocationData . type = = AllocationType : : WRITE_COMBINED ) {
2021-09-15 23:46:29 +08:00
auto cpuAddress = lockResource ( allocation . get ( ) ) ;
2022-04-11 11:40:18 +08:00
if ( ! cpuAddress ) {
cleanupBeforeReturn ( allocationData , gfxPartition , drmAllocation , graphicsAllocation , gpuAddress , sizeAllocated ) ;
status = AllocationStatus : : Error ;
return nullptr ;
}
2021-09-15 23:46:29 +08:00
auto alignedCpuAddress = alignDown ( cpuAddress , 2 * MemoryConstants : : megaByte ) ;
auto offset = ptrDiff ( cpuAddress , alignedCpuAddress ) ;
2022-06-07 04:34:20 +08:00
auto gmmHelper = getGmmHelper ( allocationData . rootDeviceIndex ) ;
auto canonizedGpuAddress = gmmHelper - > canonize ( reinterpret_cast < uint64_t > ( alignedCpuAddress ) ) ;
2021-09-15 23:46:29 +08:00
allocation - > setAllocationOffset ( offset ) ;
2022-06-07 04:34:20 +08:00
allocation - > setCpuPtrAndGpuAddress ( cpuAddress , canonizedGpuAddress ) ;
2021-09-15 23:46:29 +08:00
DEBUG_BREAK_IF ( allocation - > storageInfo . multiStorage ) ;
allocation - > getBO ( ) - > setAddress ( reinterpret_cast < uint64_t > ( cpuAddress ) ) ;
}
if ( allocationData . flags . requiresCpuAccess ) {
auto cpuAddress = lockResource ( allocation . get ( ) ) ;
2022-04-11 11:40:18 +08:00
if ( ! cpuAddress ) {
cleanupBeforeReturn ( allocationData , gfxPartition , drmAllocation , graphicsAllocation , gpuAddress , sizeAllocated ) ;
status = AllocationStatus : : Error ;
return nullptr ;
}
2022-06-07 04:34:20 +08:00
auto gmmHelper = getGmmHelper ( allocationData . rootDeviceIndex ) ;
auto canonizedGpuAddress = gmmHelper - > canonize ( gpuAddress ) ;
allocation - > setCpuPtrAndGpuAddress ( cpuAddress , canonizedGpuAddress ) ;
2021-09-15 23:46:29 +08:00
}
if ( heapAssigner . useInternal32BitHeap ( allocationData . type ) ) {
2022-04-29 21:28:15 +08:00
allocation - > setGpuBaseAddress ( gmmHelper - > canonize ( getInternalHeapBaseAddress ( allocationData . rootDeviceIndex , true ) ) ) ;
2021-09-15 23:46:29 +08:00
}
if ( ! allocation - > setCacheRegion ( & getDrm ( allocationData . rootDeviceIndex ) , static_cast < CacheRegion > ( allocationData . cacheRegion ) ) ) {
2022-04-11 11:40:18 +08:00
cleanupBeforeReturn ( allocationData , gfxPartition , drmAllocation , graphicsAllocation , gpuAddress , sizeAllocated ) ;
2021-09-15 23:46:29 +08:00
status = AllocationStatus : : Error ;
return nullptr ;
}
status = AllocationStatus : : Success ;
return allocation . release ( ) ;
}
2022-04-20 03:24:19 +08:00
// Creates a GEM buffer object placed in the memory region(s) selected by
// memoryBanks (multi-region GEM-create when more than one bank is set).
// Returns nullptr when memory info is unavailable or creation fails.
// pairHandle (-1 when unused) links the BO with a previously created one.
BufferObject *DrmMemoryManager::createBufferObjectInMemoryRegion(Drm *drm, Gmm *gmm, AllocationType allocationType, uint64_t gpuAddress,
                                                                 size_t size, uint32_t memoryBanks, size_t maxOsContextCount, int32_t pairHandle) {
    auto memoryInfo = drm->getMemoryInfo();
    if (!memoryInfo) {
        return nullptr;
    }

    uint32_t handle = 0;
    uint32_t createResult = 0;
    const auto bankBits = std::bitset<4>(memoryBanks);
    if (bankBits.count() > 1) {
        createResult = memoryInfo->createGemExtWithMultipleRegions(memoryBanks, size, handle);
    } else {
        createResult = memoryInfo->createGemExtWithSingleRegion(memoryBanks, size, handle, pairHandle);
    }
    if (createResult != 0) {
        return nullptr;
    }

    auto patIndex = drm->getPatIndex(gmm, allocationType, CacheRegion::Default, CachePolicy::WriteBack, false);
    auto bo = new (std::nothrow) BufferObject(drm, patIndex, handle, size, maxOsContextCount);
    if (!bo) {
        return nullptr;
    }
    bo->setAddress(gpuAddress);
    return bo;
}
2021-09-14 19:29:11 +08:00
bool DrmMemoryManager : : createDrmAllocation ( Drm * drm , DrmAllocation * allocation , uint64_t gpuAddress , size_t maxOsContextCount ) {
2021-10-04 23:23:42 +08:00
BufferObjects bos { } ;
2021-09-14 19:29:11 +08:00
auto & storageInfo = allocation - > storageInfo ;
auto boAddress = gpuAddress ;
auto currentBank = 0u ;
2021-10-04 23:23:42 +08:00
auto iterationOffset = 0u ;
auto banksCnt = storageInfo . getTotalBanksCnt ( ) ;
2022-06-28 20:36:40 +08:00
auto useKmdMigrationForBuffers = ( AllocationType : : BUFFER = = allocation - > getAllocationType ( ) & & ( DebugManager . flags . UseKmdMigrationForBuffers . get ( ) > 0 ) ) ;
2021-10-04 23:23:42 +08:00
auto handles = storageInfo . getNumBanks ( ) ;
if ( storageInfo . colouringPolicy = = ColouringPolicy : : ChunkSizeBased ) {
handles = allocation - > getNumGmms ( ) ;
allocation - > resizeBufferObjects ( handles ) ;
bos . resize ( handles ) ;
}
2022-02-01 07:29:01 +08:00
allocation - > setNumHandles ( handles ) ;
2021-10-04 23:23:42 +08:00
2022-09-20 15:07:59 +08:00
int32_t pairHandle = - 1 ;
2021-10-04 23:23:42 +08:00
for ( auto handleId = 0u ; handleId < handles ; handleId + + , currentBank + + ) {
if ( currentBank = = banksCnt ) {
currentBank = 0 ;
iterationOffset + = banksCnt ;
}
2022-06-28 20:36:40 +08:00
auto memoryBanks = static_cast < uint32_t > ( storageInfo . memoryBanks . to_ulong ( ) ) ;
if ( ! useKmdMigrationForBuffers ) {
if ( storageInfo . getNumBanks ( ) > 1 ) {
// check if we have this bank, if not move to next one
// we may have holes in memoryBanks that we need to skip i.e. memoryBanks 1101 and 3 handle allocation
while ( ! ( memoryBanks & ( 1u < < currentBank ) ) ) {
currentBank + + ;
}
memoryBanks & = 1u < < currentBank ;
2021-09-14 19:29:11 +08:00
}
}
2022-04-20 03:24:19 +08:00
auto gmm = allocation - > getGmm ( handleId ) ;
auto boSize = alignUp ( gmm - > gmmResourceInfo - > getSizeAllocation ( ) , MemoryConstants : : pageSize64k ) ;
2022-09-20 15:07:59 +08:00
bos [ handleId ] = createBufferObjectInMemoryRegion ( drm , gmm , allocation - > getAllocationType ( ) , boAddress , boSize , memoryBanks , maxOsContextCount , pairHandle ) ;
2021-09-14 19:29:11 +08:00
if ( nullptr = = bos [ handleId ] ) {
return false ;
}
2021-10-04 23:23:42 +08:00
allocation - > getBufferObjectToModify ( currentBank + iterationOffset ) = bos [ handleId ] ;
2021-09-14 19:29:11 +08:00
if ( storageInfo . multiStorage ) {
2022-04-08 18:32:44 +08:00
boAddress + = boSize ;
2021-09-14 19:29:11 +08:00
}
2022-09-20 15:07:59 +08:00
// only support pairing of handles with PRELIM_I915_PARAM_SET_PAIR for implicit scaling scenarios, which
// have 2 handles
if ( AllocationType : : BUFFER = = allocation - > getAllocationType ( ) & & handles = = 2 & & drm - > getSetPairAvailable ( ) ) {
pairHandle = bos [ handleId ] - > peekHandle ( ) ;
}
2021-09-14 19:29:11 +08:00
}
2021-10-21 01:07:51 +08:00
if ( storageInfo . colouringPolicy = = ColouringPolicy : : MappingBased ) {
2022-04-08 18:32:44 +08:00
auto size = alignUp ( allocation - > getUnderlyingBufferSize ( ) , storageInfo . colouringGranularity ) ;
auto chunks = static_cast < uint32_t > ( size / storageInfo . colouringGranularity ) ;
2021-10-21 01:07:51 +08:00
auto granularity = storageInfo . colouringGranularity ;
for ( uint32_t boHandle = 0 ; boHandle < handles ; boHandle + + ) {
bos [ boHandle ] - > setColourWithBind ( ) ;
bos [ boHandle ] - > setColourChunk ( granularity ) ;
2022-04-08 18:32:44 +08:00
bos [ boHandle ] - > reserveAddressVector ( alignUp ( chunks , handles ) / handles ) ;
2021-10-21 01:07:51 +08:00
}
auto boHandle = 0u ;
auto colourAddress = gpuAddress ;
2022-04-08 18:32:44 +08:00
for ( auto chunk = 0u ; chunk < chunks ; chunk + + ) {
2021-10-21 01:07:51 +08:00
if ( boHandle = = handles ) {
boHandle = 0u ;
}
bos [ boHandle ] - > addColouringAddress ( colourAddress ) ;
colourAddress + = granularity ;
boHandle + + ;
}
}
2021-09-14 19:29:11 +08:00
return true ;
}
2021-11-30 01:48:55 +08:00
bool DrmMemoryManager : : retrieveMmapOffsetForBufferObject ( uint32_t rootDeviceIndex , BufferObject & bo , uint64_t flags , uint64_t & offset ) {
constexpr uint64_t mmapOffsetFixed = 4 ;
2022-05-20 00:56:01 +08:00
GemMmapOffset mmapOffset = { } ;
2021-11-30 01:48:55 +08:00
mmapOffset . handle = bo . peekHandle ( ) ;
mmapOffset . flags = isLocalMemorySupported ( rootDeviceIndex ) ? mmapOffsetFixed : flags ;
2022-06-29 01:56:14 +08:00
auto & drm = this - > getDrm ( rootDeviceIndex ) ;
auto ioctlHelper = drm . getIoctlHelper ( ) ;
2022-06-30 00:49:29 +08:00
auto ret = ioctlHelper - > ioctl ( DrmIoctl : : GemMmapOffset , & mmapOffset ) ;
2021-11-30 01:48:55 +08:00
if ( ret ! = 0 & & isLocalMemorySupported ( rootDeviceIndex ) ) {
mmapOffset . flags = flags ;
2022-06-30 00:49:29 +08:00
ret = ioctlHelper - > ioctl ( DrmIoctl : : GemMmapOffset , & mmapOffset ) ;
2021-11-30 01:48:55 +08:00
}
if ( ret ! = 0 ) {
int err = drm . getErrno ( ) ;
PRINT_DEBUG_STRING ( DebugManager . flags . PrintDebugMessages . get ( ) , stderr , " ioctl(DRM_IOCTL_I915_GEM_MMAP_OFFSET) failed with %d. errno=%d(%s) \n " , ret , err , strerror ( err ) ) ;
DEBUG_BREAK_IF ( ret ! = 0 ) ;
return false ;
}
offset = mmapOffset . offset ;
return true ;
}
2022-02-04 21:59:01 +08:00
bool DrmMemoryManager : : allocationTypeForCompletionFence ( AllocationType allocationType ) {
2022-01-21 02:13:07 +08:00
int32_t overrideAllowAllAllocations = DebugManager . flags . UseDrmCompletionFenceForAllAllocations . get ( ) ;
bool allowAllAllocations = overrideAllowAllAllocations = = - 1 ? false : ! ! overrideAllowAllAllocations ;
if ( allowAllAllocations ) {
return true ;
}
2022-02-04 21:59:01 +08:00
if ( allocationType = = AllocationType : : COMMAND_BUFFER | |
allocationType = = AllocationType : : RING_BUFFER | |
allocationType = = AllocationType : : SEMAPHORE_BUFFER | |
allocationType = = AllocationType : : TAG_BUFFER ) {
2022-01-20 02:14:10 +08:00
return true ;
}
return false ;
}
void DrmMemoryManager : : waitOnCompletionFence ( GraphicsAllocation * allocation ) {
auto allocationType = allocation - > getAllocationType ( ) ;
if ( allocationTypeForCompletionFence ( allocationType ) ) {
for ( auto & engine : getRegisteredEngines ( ) ) {
OsContext * osContext = engine . osContext ;
CommandStreamReceiver * csr = engine . commandStreamReceiver ;
auto osContextId = osContext - > getContextId ( ) ;
2022-03-30 20:40:09 +08:00
auto allocationTaskCount = csr - > getCompletionValue ( * allocation ) ;
uint64_t completionFenceAddress = csr - > getCompletionAddress ( ) ;
2022-01-21 02:13:07 +08:00
if ( completionFenceAddress = = 0 ) {
continue ;
}
2022-01-20 02:14:10 +08:00
if ( allocation - > isUsedByOsContext ( osContextId ) ) {
2022-01-21 02:13:07 +08:00
Drm & drm = getDrm ( csr - > getRootDeviceIndex ( ) ) ;
2022-04-25 19:49:22 +08:00
drm . waitOnUserFences ( static_cast < const OsContextLinux & > ( * osContext ) , completionFenceAddress , allocationTaskCount , csr - > getActivePartitions ( ) , csr - > getPostSyncWriteOffset ( ) ) ;
2022-01-20 02:14:10 +08:00
}
}
} else {
waitForEnginesCompletion ( * allocation ) ;
}
}
2022-02-16 20:50:32 +08:00
DrmAllocation *DrmMemoryManager::createAllocWithAlignment(const AllocationData &allocationData, size_t size, size_t alignment, size_t alignedSize, uint64_t gpuAddress) {
    auto &drm = this->getDrm(allocationData.rootDeviceIndex);
    bool useBooMmap = drm.getMemoryInfo() && allocationData.useMmapObject;

    if (DebugManager.flags.EnableBOMmapCreate.get() != -1) {
        useBooMmap = DebugManager.flags.EnableBOMmapCreate.get();
    }

    if (useBooMmap) {
        // Reserve an anonymous, inaccessible VA range large enough to carve an aligned window out of it.
        auto totalSizeToAlloc = alignedSize + alignment;
        auto cpuPointer = this->mmapFunction(0, totalSizeToAlloc, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (cpuPointer == MAP_FAILED) {
            // Fix: alignUp() on MAP_FAILED would be undefined; bail out early like createSharedUnifiedMemoryAllocation does.
            PRINT_DEBUG_STRING(DebugManager.flags.PrintDebugMessages.get(), stderr, "%s", "mmap return of MAP_FAILED\n");
            return nullptr;
        }

        auto cpuBasePointer = cpuPointer;
        cpuPointer = alignUp(cpuPointer, alignment);
        auto pointerDiff = ptrDiff(cpuPointer, cpuBasePointer);

        std::unique_ptr<BufferObject, BufferObject::Deleter> bo(this->createBufferObjectInMemoryRegion(&drm, nullptr, allocationData.type,
                                                                                                      reinterpret_cast<uintptr_t>(cpuPointer), alignedSize, 0u, maxOsContextCount, -1));
        if (!bo) {
            this->munmapFunction(cpuBasePointer, totalSizeToAlloc);
            return nullptr;
        }

        uint64_t offset = 0;
        auto ioctlHelper = drm.getIoctlHelper();
        uint64_t mmapOffsetWb = ioctlHelper->getDrmParamValue(DrmParam::MmapOffsetWb);
        if (!retrieveMmapOffsetForBufferObject(allocationData.rootDeviceIndex, *bo, mmapOffsetWb, offset)) {
            // Fix: unmap the whole anonymous reservation from its base. The previous code unmapped the
            // aligned-up pointer with the caller-provided size, leaking the alignment slack and passing
            // a base/length pair that was never what mmap returned.
            this->munmapFunction(cpuBasePointer, totalSizeToAlloc);
            return nullptr;
        }

        // Map the BO content over the aligned window (MAP_FIXED keeps the reserved address).
        [[maybe_unused]] auto retPtr = this->mmapFunction(cpuPointer, alignedSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, drm.getFileDescriptor(), static_cast<off_t>(offset));
        DEBUG_BREAK_IF(retPtr != cpuPointer);

        obtainGpuAddress(allocationData, bo.get(), gpuAddress);
        emitPinningRequest(bo.get(), allocationData);

        auto gmmHelper = getGmmHelper(allocationData.rootDeviceIndex);
        auto canonizedGpuAddress = gmmHelper->canonize(bo->peekAddress());
        auto allocation = std::make_unique<DrmAllocation>(allocationData.rootDeviceIndex, allocationData.type, bo.get(), cpuPointer, canonizedGpuAddress, alignedSize, MemoryPool::System4KBPages);
        allocation->setMmapPtr(cpuPointer);
        allocation->setMmapSize(alignedSize);
        if (pointerDiff != 0) {
            // The slack below the aligned window is returned to the OS when the allocation is destroyed.
            allocation->registerMemoryToUnmap(cpuBasePointer, pointerDiff, this->munmapFunction);
        }
        // The slack above the aligned window is not needed anymore; release it now.
        [[maybe_unused]] int retCode = this->munmapFunction(ptrOffset(cpuPointer, alignedSize), alignment - pointerDiff);
        DEBUG_BREAK_IF(retCode != 0);
        allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuAddress), alignedSize);

        if (!allocation->setCacheRegion(&drm, static_cast<CacheRegion>(allocationData.cacheRegion))) {
            if (pointerDiff == 0) {
                allocation->registerMemoryToUnmap(cpuBasePointer, totalSizeToAlloc, this->munmapFunction);
            }
            return nullptr;
        }
        bo.release();

        allocation->isShareableHostMemory = true;

        return allocation.release();
    } else {
        return createAllocWithAlignmentFromUserptr(allocationData, size, alignment, alignedSize, gpuAddress);
    }
}
2022-06-07 01:43:28 +08:00
void * DrmMemoryManager : : lockBufferObject ( BufferObject * bo ) {
2022-02-16 20:50:32 +08:00
if ( bo = = nullptr ) {
return nullptr ;
}
2022-07-27 01:33:02 +08:00
auto drm = bo - > peekDrm ( ) ;
auto rootDeviceIndex = this - > getRootDeviceIndex ( drm ) ;
2022-02-16 20:50:32 +08:00
2022-07-27 01:33:02 +08:00
auto ioctlHelper = drm - > getIoctlHelper ( ) ;
uint64_t mmapOffsetWc = ioctlHelper - > getDrmParamValue ( DrmParam : : MmapOffsetWc ) ;
2022-02-16 20:50:32 +08:00
uint64_t offset = 0 ;
2022-07-27 01:33:02 +08:00
if ( ! retrieveMmapOffsetForBufferObject ( rootDeviceIndex , * bo , mmapOffsetWc , offset ) ) {
2022-02-16 20:50:32 +08:00
return nullptr ;
}
2022-07-27 01:33:02 +08:00
auto addr = mmapFunction ( nullptr , bo - > peekSize ( ) , PROT_WRITE | PROT_READ , MAP_SHARED , drm - > getFileDescriptor ( ) , static_cast < off_t > ( offset ) ) ;
2022-02-16 20:50:32 +08:00
DEBUG_BREAK_IF ( addr = = MAP_FAILED ) ;
2022-04-11 11:40:18 +08:00
if ( addr = = MAP_FAILED ) {
PRINT_DEBUG_STRING ( DebugManager . flags . PrintDebugMessages . get ( ) , stderr , " %s " , " mmap return of MAP_FAILED \n " ) ;
return nullptr ;
}
2022-02-16 20:50:32 +08:00
bo - > setLockedAddress ( addr ) ;
return bo - > peekLockedAddress ( ) ;
}
2022-03-22 00:02:12 +08:00
void createMemoryRegionsForSharedAllocation ( const HardwareInfo & hwInfo , MemoryInfo & memoryInfo , const AllocationData & allocationData , MemRegionsVec & memRegions ) {
auto memoryBanks = allocationData . storageInfo . memoryBanks ;
if ( allocationData . usmInitialPlacement = = GraphicsAllocation : : UsmInitialPlacement : : CPU ) {
2022-08-29 21:30:21 +08:00
// System memory region
2022-03-22 00:02:12 +08:00
auto regionClassAndInstance = memoryInfo . getMemoryRegionClassAndInstance ( 0u , hwInfo ) ;
memRegions . push_back ( regionClassAndInstance ) ;
}
2022-08-29 21:30:21 +08:00
// All local memory regions
2022-03-22 00:02:12 +08:00
size_t currentBank = 0 ;
size_t i = 0 ;
while ( i < memoryBanks . count ( ) ) {
if ( memoryBanks . test ( currentBank ) ) {
auto regionClassAndInstance = memoryInfo . getMemoryRegionClassAndInstance ( 1u < < currentBank , hwInfo ) ;
memRegions . push_back ( regionClassAndInstance ) ;
i + + ;
}
currentBank + + ;
}
if ( allocationData . usmInitialPlacement = = GraphicsAllocation : : UsmInitialPlacement : : GPU ) {
2022-08-29 21:30:21 +08:00
// System memory region
2022-03-22 00:02:12 +08:00
auto regionClassAndInstance = memoryInfo . getMemoryRegionClassAndInstance ( 0u , hwInfo ) ;
memRegions . push_back ( regionClassAndInstance ) ;
}
}
GraphicsAllocation *DrmMemoryManager::createSharedUnifiedMemoryAllocation(const AllocationData &allocationData) {
    auto &drm = this->getDrm(allocationData.rootDeviceIndex);
    auto ioctlHelper = drm.getIoctlHelper();

    const auto vmAdviseAttribute = ioctlHelper->getVmAdviseAtomicAttribute();
    if (vmAdviseAttribute == 0) {
        return nullptr;
    }

    auto memoryInfo = drm.getMemoryInfo();
    const bool useBooMmap = memoryInfo && allocationData.useMmapObject;
    if (!useBooMmap) {
        return nullptr;
    }

    auto size = allocationData.size;
    auto alignment = allocationData.alignment;

    // Reserve an anonymous, inaccessible VA range; the aligned window inside it receives the BO mappings.
    auto totalSizeToAlloc = size + alignment;
    auto cpuPointer = this->mmapFunction(0, totalSizeToAlloc, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (cpuPointer == MAP_FAILED) {
        PRINT_DEBUG_STRING(DebugManager.flags.PrintDebugMessages.get(), stderr, "%s", "mmap return of MAP_FAILED\n");
        return nullptr;
    }
    auto cpuBasePointer = cpuPointer;
    cpuPointer = alignUp(cpuPointer, alignment);

    auto pHwInfo = drm.getRootDeviceEnvironment().getHardwareInfo();
    MemRegionsVec memRegions;
    createMemoryRegionsForSharedAllocation(*pHwInfo, *memoryInfo, allocationData, memRegions);

    BufferObjects bos{};
    // Fix: every failure path must release the whole anonymous reservation (from its base, not the
    // aligned-up pointer) AND delete any BufferObjects already created by earlier loop iterations;
    // the previous code unmapped the wrong pointer on createGemExt failure and leaked earlier BOs.
    auto cleanupOnError = [&]() {
        this->munmapFunction(cpuBasePointer, totalSizeToAlloc);
        for (auto bo : bos) {
            delete bo;
        }
    };

    auto currentAddress = cpuPointer;
    auto remainingSize = size;
    // One BO per bank only when multiple banks are selected and the debug flag does not disable it.
    auto getNumHandles = [](uint32_t numBanks) -> uint32_t {
        return (numBanks > 1) && (DebugManager.flags.CreateKmdMigratedSharedAllocationWithMultipleBOs.get() != 0) ? numBanks : 1u;
    };

    auto handles = getNumHandles(allocationData.storageInfo.getNumBanks());
    for (auto handleId = 0u; handleId < handles; handleId++) {
        uint32_t handle = 0;

        // Split the remaining size evenly across the remaining handles, rounded up to 64KB pages.
        auto currentSize = alignUp(remainingSize / (handles - handleId), MemoryConstants::pageSize64k);
        if (currentSize == 0) {
            break;
        }

        auto ret = memoryInfo->createGemExt(memRegions, currentSize, handle, {}, -1);
        if (ret) {
            cleanupOnError();
            return nullptr;
        }

        auto patIndex = drm.getPatIndex(nullptr, allocationData.type, CacheRegion::Default, CachePolicy::WriteBack, false);
        std::unique_ptr<BufferObject, BufferObject::Deleter> bo(new BufferObject(&drm, patIndex, handle, currentSize, maxOsContextCount));

        if (!ioctlHelper->setVmBoAdvise(bo->peekHandle(), vmAdviseAttribute, nullptr)) {
            cleanupOnError();
            return nullptr;
        }

        uint64_t mmapOffsetWb = ioctlHelper->getDrmParamValue(DrmParam::MmapOffsetWb);
        uint64_t offset = 0;
        if (!retrieveMmapOffsetForBufferObject(allocationData.rootDeviceIndex, *bo, mmapOffsetWb, offset)) {
            cleanupOnError();
            return nullptr;
        }

        // Map this BO's content over its slice of the reserved window.
        this->mmapFunction(currentAddress, currentSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, drm.getFileDescriptor(), static_cast<off_t>(offset));

        bo->setAddress(castToUint64(currentAddress));
        bos.push_back(bo.release());
        currentAddress = reinterpret_cast<void *>(castToUint64(currentAddress) + currentSize);
        remainingSize -= currentSize;
    }

    auto gmmHelper = getGmmHelper(allocationData.rootDeviceIndex);
    auto canonizedGpuAddress = gmmHelper->canonize(reinterpret_cast<uintptr_t>(cpuPointer));
    auto allocation = std::make_unique<DrmAllocation>(allocationData.rootDeviceIndex, allocationData.type, bos, cpuPointer, canonizedGpuAddress, size, MemoryPool::System4KBPages);
    allocation->setMmapPtr(cpuBasePointer);
    allocation->setMmapSize(totalSizeToAlloc);

    if (!allocation->setCacheRegion(&drm, static_cast<CacheRegion>(allocationData.cacheRegion))) {
        cleanupOnError();
        return nullptr;
    }

    if (handles > 1) {
        allocation->storageInfo = allocationData.storageInfo;
    }

    return allocation.release();
}
DrmAllocation *DrmMemoryManager::createUSMHostAllocationFromSharedHandle(osHandle handle, const AllocationProperties &properties, bool hasMappedPtr) {
    PrimeHandle openFd{};
    openFd.fileDescriptor = handle;

    auto &drm = this->getDrm(properties.rootDeviceIndex);
    auto patIndex = drm.getPatIndex(nullptr, properties.allocationType, CacheRegion::Default, CachePolicy::WriteBack, false);
    auto ioctlHelper = drm.getIoctlHelper();

    // Import the dma-buf fd into a GEM handle.
    auto ret = ioctlHelper->ioctl(DrmIoctl::PrimeFdToHandle, &openFd);
    if (ret != 0) {
        int err = drm.getErrno();
        PRINT_DEBUG_STRING(DebugManager.flags.PrintDebugMessages.get(), stderr, "ioctl(PRIME_FD_TO_HANDLE) failed with %d. errno=%d(%s)\n", ret, err, strerror(err));
        DEBUG_BREAK_IF(ret != 0);
        return nullptr;
    }

    // Deduplicated tail shared by three return paths: wrap a BO into a CPU-inaccessible
    // DrmAllocation placed at the BO's GPU address. The allocation takes ownership of the BO.
    auto wrapBoIntoCpuInaccessibleAllocation = [&](BufferObject *bo) -> DrmAllocation * {
        auto gmmHelper = getGmmHelper(properties.rootDeviceIndex);
        auto canonizedGpuAddress = gmmHelper->canonize(castToUint64(reinterpret_cast<void *>(bo->peekAddress())));
        return new DrmAllocation(properties.rootDeviceIndex, properties.allocationType, bo, reinterpret_cast<void *>(bo->peekAddress()), bo->peekSize(),
                                 handle, MemoryPool::SystemCpuInaccessible, canonizedGpuAddress);
    };

    if (hasMappedPtr) {
        auto bo = new BufferObject(&drm, patIndex, openFd.handle, properties.size, maxOsContextCount);
        bo->setAddress(properties.gpuAddress);
        return wrapBoIntoCpuInaccessibleAllocation(bo);
    }

    const bool useBooMmap = drm.getMemoryInfo() && properties.useMmapObject;
    if (!useBooMmap) {
        auto bo = new BufferObject(&drm, patIndex, openFd.handle, properties.size, maxOsContextCount);
        bo->setAddress(properties.gpuAddress);
        return wrapBoIntoCpuInaccessibleAllocation(bo);
    }

    auto boHandle = openFd.handle;
    auto bo = findAndReferenceSharedBufferObject(boHandle, properties.rootDeviceIndex);
    if (bo == nullptr) {
        // First import of this handle: create a BO, reserve a VA range and map the BO content into it.
        size_t size = lseekFunction(handle, 0, SEEK_END);
        bo = new BufferObject(&drm, patIndex, boHandle, size, maxOsContextCount);

        void *cpuPointer = this->mmapFunction(0, size, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (cpuPointer == MAP_FAILED) {
            PRINT_DEBUG_STRING(DebugManager.flags.PrintDebugMessages.get(), stderr, "%s", "mmap return of MAP_FAILED\n");
            delete bo;
            return nullptr;
        }
        bo->setAddress(reinterpret_cast<uintptr_t>(cpuPointer));

        uint64_t mmapOffsetWb = ioctlHelper->getDrmParamValue(DrmParam::MmapOffsetWb);
        uint64_t offset = 0;
        if (!retrieveMmapOffsetForBufferObject(properties.rootDeviceIndex, *bo, mmapOffsetWb, offset)) {
            this->munmapFunction(cpuPointer, size);
            delete bo;
            return nullptr;
        }

        [[maybe_unused]] auto retPtr = this->mmapFunction(cpuPointer, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, drm.getFileDescriptor(), static_cast<off_t>(offset));
        DEBUG_BREAK_IF(retPtr != cpuPointer);

        AllocationData allocationData = {};
        allocationData.rootDeviceIndex = properties.rootDeviceIndex;
        allocationData.size = size;
        emitPinningRequest(bo, allocationData);

        bo->setUnmapSize(size);
        bo->setRootDeviceIndex(properties.rootDeviceIndex);
        pushSharedBufferObject(bo);

        auto drmAllocation = new DrmAllocation(properties.rootDeviceIndex, properties.allocationType, bo, cpuPointer, bo->peekAddress(), bo->peekSize(), MemoryPool::System4KBPages);
        drmAllocation->setMmapPtr(cpuPointer);
        drmAllocation->setMmapSize(size);
        drmAllocation->setReservedAddressRange(cpuPointer, size);
        drmAllocation->setCacheRegion(&drm, static_cast<CacheRegion>(properties.cacheRegion));

        return drmAllocation;
    }

    // The BO was already imported and referenced: reuse it.
    return wrapBoIntoCpuInaccessibleAllocation(bo);
}
2022-07-27 00:39:17 +08:00
bool DrmMemoryManager : : allowIndirectAllocationsAsPack ( uint32_t rootDeviceIndex ) {
return this - > getDrm ( rootDeviceIndex ) . isVmBindAvailable ( ) ;
}
2022-03-22 00:02:12 +08:00
2019-03-26 18:59:46 +08:00
} // namespace NEO