/*
 * Copyright (C) 2018-2021 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "shared/source/os_interface/linux/drm_memory_manager.h"

#include "shared/source/command_stream/command_stream_receiver.h"
#include "shared/source/execution_environment/execution_environment.h"
#include "shared/source/execution_environment/root_device_environment.h"
#include "shared/source/gmm_helper/gmm.h"
#include "shared/source/gmm_helper/gmm_helper.h"
#include "shared/source/gmm_helper/resource_info.h"
#include "shared/source/helpers/heap_assigner.h"
#include "shared/source/helpers/hw_info.h"
#include "shared/source/helpers/ptr_math.h"
#include "shared/source/helpers/string.h"
#include "shared/source/helpers/surface_format_info.h"
#include "shared/source/memory_manager/host_ptr_manager.h"
#include "shared/source/memory_manager/residency.h"
#include "shared/source/os_interface/linux/allocator_helper.h"
#include "shared/source/os_interface/linux/drm_memory_operations_handler.h"
#include "shared/source/os_interface/linux/os_context_linux.h"
#include "shared/source/os_interface/os_interface.h"

#include "drm/i915_drm.h"

#include <cstring>
#include <iostream>
#include <memory>

namespace NEO {
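
// DrmMemoryManager is the Linux (i915 DRM) implementation of MemoryManager.
// The constructor registers the candidate GPU VA alignments (64KB always,
// 2MB when enabled via debug flags) that the alignment selector later
// chooses from when allocating.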
DrmMemoryManager::DrmMemoryManager(gemCloseWorkerMode mode,
                                   bool forcePinAllowed,
                                   bool validateHostPtrMemory,
                                   ExecutionEnvironment &executionEnvironment) : MemoryManager(executionEnvironment),
                                                                                 forcePinEnabled(forcePinAllowed),
                                                                                 validateHostPtrMemory(validateHostPtrMemory) {
    alignmentSelector.addCandidateAlignment(MemoryConstants::pageSize64k, true, AlignmentSelector::anyWastage, HeapIndex::HEAP_STANDARD64KB);
    if (DebugManager.flags.AlignLocalMemoryVaTo2MB.get() != 0) {
        alignmentSelector.addCandidateAlignment(MemoryConstants::pageSize2Mb, false, AlignmentSelector::anyWastage, HeapIndex::HEAP_STANDARD2MB);
    }
    const size_t customAlignment = static_cast<size_t>(DebugManager.flags.ExperimentalEnableCustomLocalMemoryAlignment.get());
    if (customAlignment > 0) {
        const auto heapIndex = customAlignment >= MemoryConstants::pageSize2Mb ? HeapIndex::HEAP_STANDARD2MB : HeapIndex::HEAP_STANDARD64KB;
        alignmentSelector.addCandidateAlignment(customAlignment, true, AlignmentSelector::anyWastage, heapIndex);
    }

    initialize(mode);
}
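
// Initializes per-root-device state: the gfx partitions (GPU VA heaps),
// the local memory allocation registries and the pinning BOs. The gem close
// worker is forced inactive when every root device supports VM_BIND, unless
// the EnableGemCloseWorker debug flag overrides the mode.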
void DrmMemoryManager::initialize(gemCloseWorkerMode mode) {
    bool disableGemCloseWorker = true;

    for (uint32_t rootDeviceIndex = 0; rootDeviceIndex < gfxPartitions.size(); ++rootDeviceIndex) {
        auto gpuAddressSpace = executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]->getHardwareInfo()->capabilityTable.gpuAddressSpace;
        if (!getGfxPartition(rootDeviceIndex)->init(gpuAddressSpace, getSizeToReserve(), rootDeviceIndex, gfxPartitions.size(), heapAssigner.apiAllowExternalHeapForSshAndDsh)) {
            initialized = false;
            return;
        }
        localMemAllocs.emplace_back();
        disableGemCloseWorker &= getDrm(rootDeviceIndex).isVmBindAvailable();
    }

    MemoryManager::virtualPaddingAvailable = true;

    if (disableGemCloseWorker) {
        mode = gemCloseWorkerMode::gemCloseWorkerInactive;
    }

    if (DebugManager.flags.EnableGemCloseWorker.get() != -1) {
        mode = DebugManager.flags.EnableGemCloseWorker.get() ? gemCloseWorkerMode::gemCloseWorkerActive : gemCloseWorkerMode::gemCloseWorkerInactive;
    }

    if (mode != gemCloseWorkerMode::gemCloseWorkerInactive) {
        gemCloseWorker.reset(new DrmGemCloseWorker(*this));
    }

    for (uint32_t rootDeviceIndex = 0; rootDeviceIndex < gfxPartitions.size(); ++rootDeviceIndex) {
        if (forcePinEnabled || validateHostPtrMemory) {
            auto cpuAddrBo = alignedMallocWrapper(MemoryConstants::pageSize, MemoryConstants::pageSize);
            UNRECOVERABLE_IF(cpuAddrBo == nullptr);
            // Preprogram the Bo with MI_BATCH_BUFFER_END and MI_NOOP. This BO will be used as the last BB in a series to indicate the end of submission.
            reinterpret_cast<uint32_t *>(cpuAddrBo)[0] = 0x05000000; // MI_BATCH_BUFFER_END
            reinterpret_cast<uint32_t *>(cpuAddrBo)[1] = 0;         // MI_NOOP
            memoryForPinBBs.push_back(cpuAddrBo);
            DEBUG_BREAK_IF(memoryForPinBBs[rootDeviceIndex] == nullptr);
        }
        pinBBs.push_back(createRootDeviceBufferObject(rootDeviceIndex));
    }

    initialized = true;
}

BufferObject *DrmMemoryManager::createRootDeviceBufferObject(uint32_t rootDeviceIndex) {
    BufferObject *bo = nullptr;
    if (forcePinEnabled || validateHostPtrMemory) {
        bo = allocUserptr(reinterpret_cast<uintptr_t>(memoryForPinBBs[rootDeviceIndex]), MemoryConstants::pageSize, 0, rootDeviceIndex);
        if (bo) {
            if (isLimitedRange(rootDeviceIndex)) {
                auto boSize = bo->peekSize();
                bo->setAddress(acquireGpuRange(boSize, rootDeviceIndex, HeapIndex::HEAP_STANDARD));
                UNRECOVERABLE_IF(boSize < bo->peekSize());
            }
        } else {
            alignedFreeWrapper(memoryForPinBBs[rootDeviceIndex]);
            memoryForPinBBs[rootDeviceIndex] = nullptr;
            DEBUG_BREAK_IF(true);
            UNRECOVERABLE_IF(validateHostPtrMemory);
        }
    }
    return bo;
}

void DrmMemoryManager::createDeviceSpecificMemResources(uint32_t rootDeviceIndex) {
    pinBBs[rootDeviceIndex] = createRootDeviceBufferObject(rootDeviceIndex);
}

DrmMemoryManager::~DrmMemoryManager() {
    for (auto &memoryForPinBB : memoryForPinBBs) {
        if (memoryForPinBB) {
            MemoryManager::alignedFreeWrapper(memoryForPinBB);
        }
    }
}

void DrmMemoryManager::releaseDeviceSpecificMemResources(uint32_t rootDeviceIndex) {
    return releaseBufferObject(rootDeviceIndex);
}

void DrmMemoryManager::releaseBufferObject(uint32_t rootDeviceIndex) {
    if (auto bo = pinBBs[rootDeviceIndex]) {
        if (isLimitedRange(rootDeviceIndex)) {
            releaseGpuRange(reinterpret_cast<void *>(bo->peekAddress()), bo->peekSize(), rootDeviceIndex);
        }
        DrmMemoryManager::unreference(bo, true);
        pinBBs[rootDeviceIndex] = nullptr;
    }
}

void DrmMemoryManager::commonCleanup() {
    if (gemCloseWorker) {
        gemCloseWorker->close(false);
    }

    for (uint32_t rootDeviceIndex = 0; rootDeviceIndex < pinBBs.size(); ++rootDeviceIndex) {
        releaseBufferObject(rootDeviceIndex);
    }
    pinBBs.clear();
}

void DrmMemoryManager::eraseSharedBufferObject(NEO::BufferObject *bo) {
    auto it = std::find(sharingBufferObjects.begin(), sharingBufferObjects.end(), bo);
    DEBUG_BREAK_IF(it == sharingBufferObjects.end());
    releaseGpuRange(reinterpret_cast<void *>((*it)->peekAddress()), (*it)->peekUnmapSize(), this->getRootDeviceIndex(bo->peekDrm()));
    sharingBufferObjects.erase(it);
}

void DrmMemoryManager::pushSharedBufferObject(NEO::BufferObject *bo) {
    bo->markAsReusableAllocation();
    sharingBufferObjects.push_back(bo);
}
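
// Drops one reference from the BO. synchronousDestroy spins until this is the
// last outstanding reference. When the final reference is released, a shared
// (reusable) BO is removed from the sharing cache under the mutex, the GEM
// handle is closed and the object is deleted.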
uint32_t DrmMemoryManager::unreference(NEO::BufferObject *bo, bool synchronousDestroy) {
    if (!bo)
        return -1;

    if (synchronousDestroy) {
        while (bo->getRefCount() > 1)
            ;
    }

    std::unique_lock<std::mutex> lock(mtx, std::defer_lock);
    if (bo->peekIsReusableAllocation()) {
        lock.lock();
    }

    uint32_t r = bo->unreference();

    if (r == 1) {
        if (bo->peekIsReusableAllocation()) {
            eraseSharedBufferObject(bo);
        }

        bo->close();

        if (lock) {
            lock.unlock();
        }

        delete bo;
    }
    return r;
}
uint64_t DrmMemoryManager::acquireGpuRange(size_t &size, uint32_t rootDeviceIndex, HeapIndex heapIndex) {
    auto gfxPartition = getGfxPartition(rootDeviceIndex);
    return GmmHelper::canonize(gfxPartition->heapAllocate(heapIndex, size));
}

void DrmMemoryManager::releaseGpuRange(void *address, size_t unmapSize, uint32_t rootDeviceIndex) {
    uint64_t graphicsAddress = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(address));
    graphicsAddress = GmmHelper::decanonize(graphicsAddress);
    auto gfxPartition = getGfxPartition(rootDeviceIndex);
    gfxPartition->freeGpuAddressRange(graphicsAddress, unmapSize);
}

bool DrmMemoryManager::isKmdMigrationAvailable(uint32_t rootDeviceIndex) {
    auto hwInfo = executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]->getHardwareInfo();
    auto &hwHelper = NEO::HwHelper::get(hwInfo->platform.eRenderCoreFamily);

    auto useKmdMigration = hwHelper.isKmdMigrationSupported(*hwInfo);

    if (DebugManager.flags.UseKmdMigration.get() != -1) {
        useKmdMigration = DebugManager.flags.UseKmdMigration.get();
    }

    return useKmdMigration;
}

bool DrmMemoryManager::setMemAdvise(GraphicsAllocation *gfxAllocation, MemAdviseFlags flags, uint32_t rootDeviceIndex) {
    auto drmAllocation = static_cast<DrmAllocation *>(gfxAllocation);

    return drmAllocation->setMemAdvise(&this->getDrm(rootDeviceIndex), flags);
}
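
// Wraps DRM_IOCTL_I915_GEM_USERPTR: exposes `size` bytes of already allocated
// host memory at `address` as a GEM buffer object. The BO's GPU address is
// initially set equal to the host address.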
NEO::BufferObject *DrmMemoryManager::allocUserptr(uintptr_t address, size_t size, uint64_t flags, uint32_t rootDeviceIndex) {
    drm_i915_gem_userptr userptr = {};
    userptr.user_ptr = address;
    userptr.user_size = size;
    userptr.flags = static_cast<uint32_t>(flags);

    if (this->getDrm(rootDeviceIndex).ioctl(DRM_IOCTL_I915_GEM_USERPTR, &userptr) != 0) {
        return nullptr;
    }

    PRINT_DEBUG_STRING(DebugManager.flags.PrintBOCreateDestroyResult.get(), stdout, "Created new BO with GEM_USERPTR, handle: BO-%d\n", userptr.handle);

    auto res = new (std::nothrow) BufferObject(&getDrm(rootDeviceIndex), userptr.handle, size, maxOsContextCount);
    if (!res) {
        DEBUG_BREAK_IF(true);
        return nullptr;
    }
    res->setAddress(address);

    return res;
}

void DrmMemoryManager::emitPinningRequest(BufferObject *bo, const AllocationData &allocationData) const {
    auto rootDeviceIndex = allocationData.rootDeviceIndex;
    if (forcePinEnabled && pinBBs.at(rootDeviceIndex) != nullptr && allocationData.flags.forcePin && allocationData.size >= this->pinThreshold) {
        pinBBs.at(rootDeviceIndex)->pin(&bo, 1, registeredEngines[defaultEngineIndex[rootDeviceIndex]].osContext, 0, getDefaultDrmContextId(rootDeviceIndex));
    }
}

DrmAllocation *DrmMemoryManager::createGraphicsAllocation(OsHandleStorage &handleStorage, const AllocationData &allocationData) {
    auto hostPtr = const_cast<void *>(allocationData.hostPtr);
    auto allocation = std::make_unique<DrmAllocation>(allocationData.rootDeviceIndex, allocationData.type, nullptr, hostPtr, castToUint64(hostPtr), allocationData.size, MemoryPool::System4KBPages);
    allocation->fragmentsStorage = handleStorage;
    if (!allocation->setCacheRegion(&this->getDrm(allocationData.rootDeviceIndex), static_cast<CacheRegion>(allocationData.cacheRegion))) {
        return nullptr;
    }
    return allocation.release();
}
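
// DEBUG_CONTEXT_SAVE_AREA allocations are created as multi-host allocations;
// everything else takes the alignment-aware userptr path below.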
DrmAllocation *DrmMemoryManager::allocateGraphicsMemoryWithAlignment(const AllocationData &allocationData) {
    if (allocationData.type == NEO::GraphicsAllocation::AllocationType::DEBUG_CONTEXT_SAVE_AREA) {
        return createMultiHostAllocation(allocationData);
    }

    return allocateGraphicsMemoryWithAlignmentImpl(allocationData);
}

DrmAllocation *DrmMemoryManager::allocateGraphicsMemoryWithAlignmentImpl(const AllocationData &allocationData) {
    const size_t minAlignment = getUserptrAlignment();
    size_t cAlignment = alignUp(std::max(allocationData.alignment, minAlignment), minAlignment);
    // When size == 0 allocate allocationAlignment
    // It's needed to prevent overlapping pages with user pointers
    size_t cSize = std::max(alignUp(allocationData.size, minAlignment), minAlignment);

    uint64_t gpuReservationAddress = 0;
    uint64_t alignedGpuAddress = 0;
    size_t alignedStorageSize = cSize;
    size_t alignedVirtualAdressRangeSize = cSize;
    auto svmCpuAllocation = allocationData.type == GraphicsAllocation::AllocationType::SVM_CPU;
    if (svmCpuAllocation) {
        // add padding in case the reserved address is not aligned
        alignedStorageSize = alignUp(cSize, cAlignment);
        alignedVirtualAdressRangeSize = alignedStorageSize + cAlignment;
    }

    // if limitedRangeAllocation is enabled, memory allocation for the bo in the limited range heap is required
    if ((isLimitedRange(allocationData.rootDeviceIndex) || svmCpuAllocation) && !allocationData.flags.isUSMHostAllocation) {
        gpuReservationAddress = acquireGpuRange(alignedVirtualAdressRangeSize, allocationData.rootDeviceIndex, HeapIndex::HEAP_STANDARD);
        if (!gpuReservationAddress) {
            return nullptr;
        }

        alignedGpuAddress = gpuReservationAddress;
        if (svmCpuAllocation) {
            alignedGpuAddress = alignUp(gpuReservationAddress, cAlignment);
        }
    }

    auto drmAllocation = createAllocWithAlignment(allocationData, cSize, cAlignment, alignedStorageSize, alignedGpuAddress);
    if (drmAllocation != nullptr) {
        drmAllocation->setReservedAddressRange(reinterpret_cast<void *>(gpuReservationAddress), alignedVirtualAdressRangeSize);
    }

    return drmAllocation;
}
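
// Backs the allocation with aligned malloc'd host memory wrapped in a userptr
// BO; on any failure every intermediate resource is released again.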
DrmAllocation *DrmMemoryManager::createAllocWithAlignmentFromUserptr(const AllocationData &allocationData, size_t size, size_t alignment, size_t alignedSVMSize, uint64_t gpuAddress) {
    auto res = alignedMallocWrapper(size, alignment);
    if (!res) {
        return nullptr;
    }

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(res), size, 0, allocationData.rootDeviceIndex));
    if (!bo) {
        alignedFreeWrapper(res);
        return nullptr;
    }

    zeroCpuMemoryIfRequested(allocationData, res, size);
    obtainGpuAddress(allocationData, bo.get(), gpuAddress);
    emitPinningRequest(bo.get(), allocationData);

    auto allocation = std::make_unique<DrmAllocation>(allocationData.rootDeviceIndex, allocationData.type, bo.get(), res, bo->peekAddress(), size, MemoryPool::System4KBPages);
    allocation->setDriverAllocatedCpuPtr(res);
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuAddress), alignedSVMSize);
    if (!allocation->setCacheRegion(&this->getDrm(allocationData.rootDeviceIndex), static_cast<CacheRegion>(allocationData.cacheRegion))) {
        alignedFreeWrapper(res);
        return nullptr;
    }

    bo.release();

    return allocation.release();
}

void DrmMemoryManager::obtainGpuAddress(const AllocationData &allocationData, BufferObject *bo, uint64_t gpuAddress) {
    if ((isLimitedRange(allocationData.rootDeviceIndex) || allocationData.type == GraphicsAllocation::AllocationType::SVM_CPU) &&
        !allocationData.flags.isUSMHostAllocation) {
        bo->setAddress(gpuAddress);
    }
}
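
// USM host allocations reuse the caller-provided host pointer: it is wrapped
// in a userptr BO and, on limited-range platforms, bound to a GPU VA reserved
// from HEAP_STANDARD.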
DrmAllocation *DrmMemoryManager::allocateUSMHostGraphicsMemory(const AllocationData &allocationData) {
    const size_t minAlignment = getUserptrAlignment();
    // When size == 0 allocate allocationAlignment
    // It's needed to prevent overlapping pages with user pointers
    size_t cSize = std::max(alignUp(allocationData.size, minAlignment), minAlignment);

    void *bufferPtr = const_cast<void *>(allocationData.hostPtr);
    DEBUG_BREAK_IF(nullptr == bufferPtr);

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(bufferPtr),
                                                                         cSize,
                                                                         0,
                                                                         allocationData.rootDeviceIndex));
    if (!bo) {
        return nullptr;
    }

    // if limitedRangeAllocation is enabled, memory allocation for the bo in the limited range heap is required
    uint64_t gpuAddress = 0;
    if (isLimitedRange(allocationData.rootDeviceIndex)) {
        gpuAddress = acquireGpuRange(cSize, allocationData.rootDeviceIndex, HeapIndex::HEAP_STANDARD);
        if (!gpuAddress) {
            return nullptr;
        }
        bo->setAddress(gpuAddress);
    }

    emitPinningRequest(bo.get(), allocationData);

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex,
                                        allocationData.type,
                                        bo.get(),
                                        bufferPtr,
                                        bo->peekAddress(),
                                        cSize,
                                        MemoryPool::System4KBPages);
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuAddress), cSize);

    bo.release();
    return allocation;
}

DrmAllocation *DrmMemoryManager::allocateGraphicsMemoryWithHostPtr(const AllocationData &allocationData) {
    auto res = static_cast<DrmAllocation *>(MemoryManager::allocateGraphicsMemoryWithHostPtr(allocationData));

    if (res != nullptr && !validateHostPtrMemory) {
        emitPinningRequest(res->getBO(), allocationData);
    }
    return res;
}

GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryWithGpuVa(const AllocationData &allocationData) {
    auto osContextLinux = static_cast<OsContextLinux *>(allocationData.osContext);

    const size_t minAlignment = getUserptrAlignment();
    size_t alignedSize = alignUp(allocationData.size, minAlignment);

    auto res = alignedMallocWrapper(alignedSize, minAlignment);
    if (!res)
        return nullptr;

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(res), alignedSize, 0, allocationData.rootDeviceIndex));
    if (!bo) {
        alignedFreeWrapper(res);
        return nullptr;
    }

    UNRECOVERABLE_IF(allocationData.gpuAddress == 0);
    bo->setAddress(allocationData.gpuAddress);

    BufferObject *boPtr = bo.get();
    if (forcePinEnabled && pinBBs.at(allocationData.rootDeviceIndex) != nullptr && alignedSize >= this->pinThreshold) {
        pinBBs.at(allocationData.rootDeviceIndex)->pin(&boPtr, 1, osContextLinux, 0, osContextLinux->getDrmContextIds()[0]);
    }

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), res, bo->peekAddress(), alignedSize, MemoryPool::System4KBPages);
    allocation->setDriverAllocatedCpuPtr(res);
    bo.release();
    return allocation;
}
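
// Wraps an arbitrary (page-unaligned) host pointer: the pointer is aligned
// down to a page boundary, a GPU range covering whole pages is reserved, and
// the in-page offset is stored on the allocation. With host-pointer
// validation enabled, the BO is additionally verified through the pinning BB.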
DrmAllocation *DrmMemoryManager::allocateGraphicsMemoryForNonSvmHostPtr(const AllocationData &allocationData) {
    if (allocationData.size == 0 || !allocationData.hostPtr)
        return nullptr;

    auto alignedPtr = alignDown(allocationData.hostPtr, MemoryConstants::pageSize);
    auto alignedSize = alignSizeWholePage(allocationData.hostPtr, allocationData.size);
    auto realAllocationSize = alignedSize;
    auto offsetInPage = ptrDiff(allocationData.hostPtr, alignedPtr);
    auto rootDeviceIndex = allocationData.rootDeviceIndex;

    auto gpuVirtualAddress = acquireGpuRange(alignedSize, rootDeviceIndex, HeapIndex::HEAP_STANDARD);
    if (!gpuVirtualAddress) {
        return nullptr;
    }

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(alignedPtr), realAllocationSize, 0, rootDeviceIndex));
    if (!bo) {
        releaseGpuRange(reinterpret_cast<void *>(gpuVirtualAddress), alignedSize, rootDeviceIndex);
        return nullptr;
    }

    bo->setAddress(gpuVirtualAddress);

    if (validateHostPtrMemory) {
        auto boPtr = bo.get();
        auto vmHandleId = Math::getMinLsbSet(static_cast<uint32_t>(allocationData.storageInfo.subDeviceBitfield.to_ulong()));
        int result = pinBBs.at(rootDeviceIndex)->validateHostPtr(&boPtr, 1, registeredEngines[defaultEngineIndex[rootDeviceIndex]].osContext, vmHandleId, getDefaultDrmContextId(rootDeviceIndex));
        if (result != 0) {
            unreference(bo.release(), true);
            releaseGpuRange(reinterpret_cast<void *>(gpuVirtualAddress), alignedSize, rootDeviceIndex);
            return nullptr;
        }
    }

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), const_cast<void *>(allocationData.hostPtr),
                                        gpuVirtualAddress, allocationData.size, MemoryPool::System4KBPages);
    allocation->setAllocationOffset(offsetInPage);

    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuVirtualAddress), alignedSize);
    bo.release();
    return allocation;
}

DrmAllocation *DrmMemoryManager::allocateGraphicsMemory64kb(const AllocationData &allocationData) {
    return nullptr;
}
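
// Allocates through the kernel driver (DRM_IOCTL_I915_GEM_CREATE) instead of
// a userptr, so the memory is CPU-inaccessible; the BO gets a GPU VA from the
// 64KB heap and a Gmm describing the resource.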
GraphicsAllocation *DrmMemoryManager::allocateMemoryByKMD(const AllocationData &allocationData) {
    auto gmm = std::make_unique<Gmm>(executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getGmmClientContext(), allocationData.hostPtr, allocationData.size, 0u, false);
    size_t bufferSize = allocationData.size;
    uint64_t gpuRange = acquireGpuRange(bufferSize, allocationData.rootDeviceIndex, HeapIndex::HEAP_STANDARD64KB);

    drm_i915_gem_create create = {0, 0, 0};
    create.size = bufferSize;

    [[maybe_unused]] auto ret = this->getDrm(allocationData.rootDeviceIndex).ioctl(DRM_IOCTL_I915_GEM_CREATE, &create);
    DEBUG_BREAK_IF(ret != 0);

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(new BufferObject(&getDrm(allocationData.rootDeviceIndex), create.handle, bufferSize, maxOsContextCount));
    bo->setAddress(gpuRange);

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), nullptr, gpuRange, bufferSize, MemoryPool::SystemCpuInaccessible);
    allocation->setDefaultGmm(gmm.release());

    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuRange), bufferSize);
    bo.release();
    return allocation;
}

GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryForImageImpl(const AllocationData &allocationData, std::unique_ptr<Gmm> gmm) {
    if (allocationData.imgInfo->linearStorage) {
        auto alloc = allocateGraphicsMemoryWithAlignment(allocationData);
        if (alloc) {
            alloc->setDefaultGmm(gmm.release());
        }
        return alloc;
    }

    uint64_t gpuRange = acquireGpuRange(allocationData.imgInfo->size, allocationData.rootDeviceIndex, HeapIndex::HEAP_STANDARD);

    drm_i915_gem_create create = {0, 0, 0};
    create.size = allocationData.imgInfo->size;

    [[maybe_unused]] auto ret = this->getDrm(allocationData.rootDeviceIndex).ioctl(DRM_IOCTL_I915_GEM_CREATE, &create);
    DEBUG_BREAK_IF(ret != 0);

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(new (std::nothrow) BufferObject(&getDrm(allocationData.rootDeviceIndex), create.handle, allocationData.imgInfo->size, maxOsContextCount));
    if (!bo) {
        return nullptr;
    }
    bo->setAddress(gpuRange);

    [[maybe_unused]] auto ret2 = bo->setTiling(I915_TILING_Y, static_cast<uint32_t>(allocationData.imgInfo->rowPitch));
    DEBUG_BREAK_IF(ret2 != true);

    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), nullptr, gpuRange, allocationData.imgInfo->size, MemoryPool::SystemCpuInaccessible);
    allocation->setDefaultGmm(gmm.release());

    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuRange), allocationData.imgInfo->size);
    bo.release();
    return allocation;
}

DrmAllocation *DrmMemoryManager::allocate32BitGraphicsMemoryImpl(const AllocationData &allocationData, bool useLocalMemory) {
    auto hwInfo = executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getHardwareInfo();
    auto allocatorToUse = heapAssigner.get32BitHeapIndex(allocationData.type, useLocalMemory, *hwInfo, allocationData.flags.use32BitFrontWindow);

    if (allocationData.hostPtr) {
        uintptr_t inputPtr = reinterpret_cast<uintptr_t>(allocationData.hostPtr);
        auto allocationSize = alignSizeWholePage(allocationData.hostPtr, allocationData.size);
        auto realAllocationSize = allocationSize;
        auto gfxPartition = getGfxPartition(allocationData.rootDeviceIndex);
        auto gpuVirtualAddress = gfxPartition->heapAllocate(allocatorToUse, realAllocationSize);
        if (!gpuVirtualAddress) {
            return nullptr;
        }
        auto alignedUserPointer = reinterpret_cast<uintptr_t>(alignDown(allocationData.hostPtr, MemoryConstants::pageSize));
        auto inputPointerOffset = inputPtr - alignedUserPointer;

        std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(alignedUserPointer, allocationSize, 0, allocationData.rootDeviceIndex));
        if (!bo) {
            gfxPartition->heapFree(allocatorToUse, gpuVirtualAddress, realAllocationSize);
            return nullptr;
        }

        bo->setAddress(gpuVirtualAddress);

        auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), const_cast<void *>(allocationData.hostPtr), GmmHelper::canonize(ptrOffset(gpuVirtualAddress, inputPointerOffset)),
                                            allocationSize, MemoryPool::System4KBPagesWith32BitGpuAddressing);
        allocation->set32BitAllocation(true);
        allocation->setGpuBaseAddress(GmmHelper::canonize(gfxPartition->getHeapBase(allocatorToUse)));
        allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuVirtualAddress), realAllocationSize);
        bo.release();
        return allocation;
    }

    size_t alignedAllocationSize = alignUp(allocationData.size, MemoryConstants::pageSize);
    auto allocationSize = alignedAllocationSize;
    auto gfxPartition = getGfxPartition(allocationData.rootDeviceIndex);
    auto gpuVA = gfxPartition->heapAllocate(allocatorToUse, allocationSize);

    if (!gpuVA) {
        return nullptr;
    }

    auto ptrAlloc = alignedMallocWrapper(alignedAllocationSize, getUserptrAlignment());

    if (!ptrAlloc) {
        gfxPartition->heapFree(allocatorToUse, gpuVA, allocationSize);
        return nullptr;
    }

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(reinterpret_cast<uintptr_t>(ptrAlloc), alignedAllocationSize, 0, allocationData.rootDeviceIndex));
    if (!bo) {
        alignedFreeWrapper(ptrAlloc);
        gfxPartition->heapFree(allocatorToUse, gpuVA, allocationSize);
        return nullptr;
    }

    bo->setAddress(gpuVA);

    // softpin the BO to the GPU address reserved from the 32-bit heap
    auto allocation = new DrmAllocation(allocationData.rootDeviceIndex, allocationData.type, bo.get(), ptrAlloc, GmmHelper::canonize(gpuVA), alignedAllocationSize,
                                        MemoryPool::System4KBPagesWith32BitGpuAddressing);

    allocation->set32BitAllocation(true);
    allocation->setGpuBaseAddress(GmmHelper::canonize(gfxPartition->getHeapBase(allocatorToUse)));
    allocation->setDriverAllocatedCpuPtr(ptrAlloc);
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuVA), allocationSize);
    bo.release();
    return allocation;
}

BufferObject *DrmMemoryManager::findAndReferenceSharedBufferObject(int boHandle) {
    BufferObject *bo = nullptr;
    for (const auto &i : sharingBufferObjects) {
        if (i->peekHandle() == boHandle) {
            bo = i;
            bo->reference();
            break;
        }
    }

    return bo;
}
GraphicsAllocation *DrmMemoryManager::createGraphicsAllocationFromSharedHandle(osHandle handle, const AllocationProperties &properties, bool requireSpecificBitness, bool isHostIpcAllocation) {
    if (isHostIpcAllocation) {
        return createUSMHostAllocationFromSharedHandle(handle, properties, false);
    }

    std::unique_lock<std::mutex> lock(mtx);

    drm_prime_handle openFd = {0, 0, 0};
    openFd.fd = handle;

    auto ret = this->getDrm(properties.rootDeviceIndex).ioctl(DRM_IOCTL_PRIME_FD_TO_HANDLE, &openFd);

    if (ret != 0) {
        [[maybe_unused]] int err = errno;
        PRINT_DEBUG_STRING(DebugManager.flags.PrintDebugMessages.get(), stderr, "ioctl(PRIME_FD_TO_HANDLE) failed with %d. errno=%d(%s)\n", ret, err, strerror(err));
        DEBUG_BREAK_IF(ret != 0);

        return nullptr;
    }

    auto boHandle = openFd.handle;
    auto bo = findAndReferenceSharedBufferObject(boHandle);

    if (bo == nullptr) {
        size_t size = lseekFunction(handle, 0, SEEK_END);

        bo = new (std::nothrow) BufferObject(&getDrm(properties.rootDeviceIndex), boHandle, size, maxOsContextCount);

        if (!bo) {
            return nullptr;
        }

        auto heapIndex = isLocalMemorySupported(properties.rootDeviceIndex) ? HeapIndex::HEAP_STANDARD2MB : HeapIndex::HEAP_STANDARD;
        if (requireSpecificBitness && this->force32bitAllocations) {
            heapIndex = HeapIndex::HEAP_EXTERNAL;
        }
        auto gpuRange = acquireGpuRange(size, properties.rootDeviceIndex, heapIndex);

        bo->setAddress(gpuRange);
        bo->setUnmapSize(size);

        pushSharedBufferObject(bo);
    }

    lock.unlock();

    auto drmAllocation = new DrmAllocation(properties.rootDeviceIndex, properties.allocationType, bo, reinterpret_cast<void *>(bo->peekAddress()), bo->peekSize(),
                                           handle, MemoryPool::SystemCpuInaccessible);

    if (requireSpecificBitness && this->force32bitAllocations) {
        drmAllocation->set32BitAllocation(true);
        drmAllocation->setGpuBaseAddress(GmmHelper::canonize(getExternalHeapBaseAddress(properties.rootDeviceIndex, drmAllocation->isAllocatedInLocalMemoryPool())));
    }

    if (properties.imgInfo) {
        drm_i915_gem_get_tiling getTiling = {0};
        getTiling.handle = boHandle;
        ret = this->getDrm(properties.rootDeviceIndex).ioctl(DRM_IOCTL_I915_GEM_GET_TILING, &getTiling);

        if (ret == 0) {
            if (getTiling.tiling_mode == I915_TILING_NONE) {
                properties.imgInfo->linearStorage = true;
            }
        }

        Gmm *gmm = new Gmm(executionEnvironment.rootDeviceEnvironments[properties.rootDeviceIndex]->getGmmClientContext(), *properties.imgInfo,
                           createStorageInfoFromProperties(properties), properties.flags.preferCompressed);
        drmAllocation->setDefaultGmm(gmm);
    }
    return drmAllocation;
}

void DrmMemoryManager::closeSharedHandle(GraphicsAllocation *gfxAllocation) {
    DrmAllocation *drmAllocation = static_cast<DrmAllocation *>(gfxAllocation);
    if (drmAllocation->peekSharedHandle() != Sharing::nonSharedResource) {
        closeFunction(drmAllocation->peekSharedHandle());
        drmAllocation->setSharedHandle(Sharing::nonSharedResource);
    }
}

GraphicsAllocation *DrmMemoryManager::createPaddedAllocation(GraphicsAllocation *inputGraphicsAllocation, size_t sizeWithPadding) {
    uint64_t gpuRange = 0llu;

    auto rootDeviceIndex = inputGraphicsAllocation->getRootDeviceIndex();
    gpuRange = acquireGpuRange(sizeWithPadding, rootDeviceIndex, HeapIndex::HEAP_STANDARD);

    auto srcPtr = inputGraphicsAllocation->getUnderlyingBuffer();
    auto srcSize = inputGraphicsAllocation->getUnderlyingBufferSize();
    auto alignedSrcSize = alignUp(srcSize, MemoryConstants::pageSize);
    auto alignedPtr = (uintptr_t)alignDown(srcPtr, MemoryConstants::pageSize);
    auto offset = (uintptr_t)srcPtr - alignedPtr;

    std::unique_ptr<BufferObject, BufferObject::Deleter> bo(allocUserptr(alignedPtr, alignedSrcSize, 0, rootDeviceIndex));
    if (!bo) {
        return nullptr;
    }
    bo->setAddress(gpuRange);
    auto allocation = new DrmAllocation(rootDeviceIndex, inputGraphicsAllocation->getAllocationType(), bo.get(), srcPtr, GmmHelper::canonize(ptrOffset(gpuRange, offset)), sizeWithPadding,
                                        inputGraphicsAllocation->getMemoryPool());
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuRange), sizeWithPadding);
    bo.release();
    return allocation;
}

void DrmMemoryManager::addAllocationToHostPtrManager(GraphicsAllocation *gfxAllocation) {
    DrmAllocation *drmMemory = static_cast<DrmAllocation *>(gfxAllocation);

    FragmentStorage fragment = {};
    fragment.driverAllocation = true;
    fragment.fragmentCpuPointer = gfxAllocation->getUnderlyingBuffer();
    fragment.fragmentSize = alignUp(gfxAllocation->getUnderlyingBufferSize(), MemoryConstants::pageSize);

    auto osHandle = new OsHandleLinux();
    osHandle->bo = drmMemory->getBO();

    fragment.osInternalStorage = osHandle;
    fragment.residency = new ResidencyData(maxOsContextCount);
    hostPtrManager->storeFragment(gfxAllocation->getRootDeviceIndex(), fragment);
}

void DrmMemoryManager::removeAllocationFromHostPtrManager(GraphicsAllocation *gfxAllocation) {
    auto buffer = gfxAllocation->getUnderlyingBuffer();
    auto fragment = hostPtrManager->getFragment({buffer, gfxAllocation->getRootDeviceIndex()});
    if (fragment && fragment->driverAllocation) {
        OsHandle *osStorageToRelease = fragment->osInternalStorage;
        ResidencyData *residencyDataToRelease = fragment->residency;
        if (hostPtrManager->releaseHostPtr(gfxAllocation->getRootDeviceIndex(), buffer)) {
            delete osStorageToRelease;
            delete residencyDataToRelease;
        }
    }
}
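
// Frees every resource owned by the allocation: evicts it from all registered
// OS contexts, unmaps mmap'd storage, deletes its Gmms, releases host-ptr
// fragments or BOs, then returns any reserved GPU VA range.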
void DrmMemoryManager::freeGraphicsMemoryImpl(GraphicsAllocation *gfxAllocation) {
    if (DebugManager.flags.DoNotFreeResources.get()) {
        return;
    }
    DrmAllocation *drmAlloc = static_cast<DrmAllocation *>(gfxAllocation);
    this->unregisterAllocation(gfxAllocation);

    for (auto &engine : this->registeredEngines) {
        auto memoryOperationsInterface = static_cast<DrmMemoryOperationsHandler *>(executionEnvironment.rootDeviceEnvironments[gfxAllocation->getRootDeviceIndex()]->memoryOperationsInterface.get());
        memoryOperationsInterface->evictWithinOsContext(engine.osContext, *gfxAllocation);
    }

    if (drmAlloc->getMmapPtr()) {
        this->munmapFunction(drmAlloc->getMmapPtr(), drmAlloc->getMmapSize());
    }

    for (auto handleId = 0u; handleId < gfxAllocation->getNumGmms(); handleId++) {
        delete gfxAllocation->getGmm(handleId);
    }

    if (gfxAllocation->fragmentsStorage.fragmentCount) {
        cleanGraphicsMemoryCreatedFromHostPtr(gfxAllocation);
    } else {
        auto &bos = static_cast<DrmAllocation *>(gfxAllocation)->getBOs();
        for (auto bo : bos) {
            unreference(bo, bo && bo->peekIsReusableAllocation() ? false : true);
        }
        closeSharedHandle(gfxAllocation);
    }

    releaseGpuRange(gfxAllocation->getReservedAddressPtr(), gfxAllocation->getReservedAddressSize(), gfxAllocation->getRootDeviceIndex());
    alignedFreeWrapper(gfxAllocation->getDriverAllocatedCpuPtr());

    drmAlloc->freeRegisteredBOBindExtHandles(&getDrm(drmAlloc->getRootDeviceIndex()));

    delete gfxAllocation;
}

void DrmMemoryManager::handleFenceCompletion(GraphicsAllocation *allocation) {
    if (this->getDrm(allocation->getRootDeviceIndex()).isVmBindAvailable()) {
        waitForEnginesCompletion(*allocation);
    } else {
        static_cast<DrmAllocation *>(allocation)->getBO()->wait(-1);
    }
}

GraphicsAllocation *DrmMemoryManager::createGraphicsAllocationFromExistingStorage(AllocationProperties &properties, void *ptr, MultiGraphicsAllocation &multiGraphicsAllocation) {
    auto defaultAlloc = multiGraphicsAllocation.getDefaultGraphicsAllocation();
    if (static_cast<DrmAllocation *>(defaultAlloc)->getMmapPtr()) {
        properties.size = defaultAlloc->getUnderlyingBufferSize();
        properties.gpuAddress = castToUint64(ptr);

        auto internalHandle = defaultAlloc->peekInternalHandle(this);
        return createUSMHostAllocationFromSharedHandle(static_cast<osHandle>(internalHandle), properties, true);
    } else {
        return allocateGraphicsMemoryWithProperties(properties, ptr);
    }
}
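
// Shared system memory is bounded by both host RAM and the GPU's GTT size
// (I915_CONTEXT_PARAM_GTT_SIZE), so report the smaller of the two.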
uint64_t DrmMemoryManager::getSystemSharedMemory(uint32_t rootDeviceIndex) {
    uint64_t hostMemorySize = MemoryConstants::pageSize * (uint64_t)(sysconf(_SC_PHYS_PAGES));

    drm_i915_gem_context_param getContextParam = {};
    getContextParam.param = I915_CONTEXT_PARAM_GTT_SIZE;
    [[maybe_unused]] auto ret = getDrm(rootDeviceIndex).ioctl(DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &getContextParam);
    DEBUG_BREAK_IF(ret != 0);

    uint64_t gpuMemorySize = getContextParam.value;

    return std::min(hostMemorySize, gpuMemorySize);
}

double DrmMemoryManager::getPercentOfGlobalMemoryAvailable(uint32_t rootDeviceIndex) {
    if (isLocalMemorySupported(rootDeviceIndex)) {
        return 0.95;
    }
    return 0.8;
}
MemoryManager::AllocationStatus DrmMemoryManager::populateOsHandles(OsHandleStorage &handleStorage, uint32_t rootDeviceIndex) {
    BufferObject *allocatedBos[maxFragmentsCount];
    uint32_t numberOfBosAllocated = 0;
    uint32_t indexesOfAllocatedBos[maxFragmentsCount];

    for (unsigned int i = 0; i < maxFragmentsCount; i++) {
        // If there is no fragment it means it already exists.
        if (!handleStorage.fragmentStorageData[i].osHandleStorage && handleStorage.fragmentStorageData[i].fragmentSize) {
            auto osHandle = new OsHandleLinux();
            handleStorage.fragmentStorageData[i].osHandleStorage = osHandle;
            handleStorage.fragmentStorageData[i].residency = new ResidencyData(maxOsContextCount);

            osHandle->bo = allocUserptr((uintptr_t)handleStorage.fragmentStorageData[i].cpuPtr,
                                        handleStorage.fragmentStorageData[i].fragmentSize,
                                        0, rootDeviceIndex);
            if (!osHandle->bo) {
                handleStorage.fragmentStorageData[i].freeTheFragment = true;
                return AllocationStatus::Error;
            }

            allocatedBos[numberOfBosAllocated] = osHandle->bo;
            indexesOfAllocatedBos[numberOfBosAllocated] = i;
            numberOfBosAllocated++;
        }
    }

    if (validateHostPtrMemory) {
        int result = pinBBs.at(rootDeviceIndex)->validateHostPtr(allocatedBos, numberOfBosAllocated, registeredEngines[defaultEngineIndex[rootDeviceIndex]].osContext, 0, getDefaultDrmContextId(rootDeviceIndex));

        if (result == EFAULT) {
            for (uint32_t i = 0; i < numberOfBosAllocated; i++) {
                handleStorage.fragmentStorageData[indexesOfAllocatedBos[i]].freeTheFragment = true;
            }
            return AllocationStatus::InvalidHostPointer;
        } else if (result != 0) {
            return AllocationStatus::Error;
        }
    }

    for (uint32_t i = 0; i < numberOfBosAllocated; i++) {
        hostPtrManager->storeFragment(rootDeviceIndex, handleStorage.fragmentStorageData[indexesOfAllocatedBos[i]]);
    }

    return AllocationStatus::Success;
}

void DrmMemoryManager::cleanOsHandles(OsHandleStorage &handleStorage, uint32_t rootDeviceIndex) {
    for (unsigned int i = 0; i < maxFragmentsCount; i++) {
        if (handleStorage.fragmentStorageData[i].freeTheFragment) {
            auto osHandle = static_cast<OsHandleLinux *>(handleStorage.fragmentStorageData[i].osHandleStorage);
            if (osHandle->bo) {
                BufferObject *search = osHandle->bo;
                search->wait(-1);
                [[maybe_unused]] auto refCount = unreference(search, true);
                DEBUG_BREAK_IF(refCount != 1u);
            }
            delete handleStorage.fragmentStorageData[i].osHandleStorage;
            handleStorage.fragmentStorageData[i].osHandleStorage = nullptr;
            delete handleStorage.fragmentStorageData[i].residency;
            handleStorage.fragmentStorageData[i].residency = nullptr;
        }
    }
}

bool DrmMemoryManager::setDomainCpu(GraphicsAllocation &graphicsAllocation, bool writeEnable) {
    DEBUG_BREAK_IF(writeEnable); // unsupported path (for CPU writes call SW_FINISH ioctl in unlockResource)

    auto bo = static_cast<DrmAllocation *>(&graphicsAllocation)->getBO();
    if (bo == nullptr)
        return false;

    // move a buffer object to the CPU read, and possibly write domain, including waiting on flushes to occur
    drm_i915_gem_set_domain set_domain = {};
    set_domain.handle = bo->peekHandle();
    set_domain.read_domains = I915_GEM_DOMAIN_CPU;
    set_domain.write_domain = writeEnable ? I915_GEM_DOMAIN_CPU : 0;

    return getDrm(graphicsAllocation.getRootDeviceIndex()).ioctl(DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0;
}
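
// Locking maps the BO for CPU access: allocations that already have a CPU
// pointer only need a set-domain call, otherwise the BO is mapped through
// DRM_IOCTL_I915_GEM_MMAP. Local-memory allocations take a separate,
// device-specific path.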
void *DrmMemoryManager::lockResourceImpl(GraphicsAllocation &graphicsAllocation) {
    if (MemoryPool::LocalMemory == graphicsAllocation.getMemoryPool()) {
        return lockResourceInLocalMemoryImpl(graphicsAllocation);
    }

    auto cpuPtr = graphicsAllocation.getUnderlyingBuffer();
    if (cpuPtr != nullptr) {
        [[maybe_unused]] auto success = setDomainCpu(graphicsAllocation, false);
        DEBUG_BREAK_IF(!success);
        return cpuPtr;
    }

    auto bo = static_cast<DrmAllocation &>(graphicsAllocation).getBO();
    if (bo == nullptr)
        return nullptr;

    drm_i915_gem_mmap mmap_arg = {};
    mmap_arg.handle = bo->peekHandle();
    mmap_arg.size = bo->peekSize();
    if (getDrm(graphicsAllocation.getRootDeviceIndex()).ioctl(DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) != 0) {
        return nullptr;
    }

    bo->setLockedAddress(reinterpret_cast<void *>(mmap_arg.addr_ptr));

    [[maybe_unused]] auto success = setDomainCpu(graphicsAllocation, false);
    DEBUG_BREAK_IF(!success);
    return bo->peekLockedAddress();
}

void DrmMemoryManager::unlockResourceImpl(GraphicsAllocation &graphicsAllocation) {
    if (MemoryPool::LocalMemory == graphicsAllocation.getMemoryPool()) {
        return unlockResourceInLocalMemoryImpl(static_cast<DrmAllocation &>(graphicsAllocation).getBO());
    }

    auto cpuPtr = graphicsAllocation.getUnderlyingBuffer();
    if (cpuPtr != nullptr) {
        return;
    }

    auto bo = static_cast<DrmAllocation &>(graphicsAllocation).getBO();
    if (bo == nullptr)
        return;

    releaseReservedCpuAddressRange(bo->peekLockedAddress(), bo->peekSize(), graphicsAllocation.getRootDeviceIndex());

    bo->setLockedAddress(nullptr);
}

int DrmMemoryManager::obtainFdFromHandle(int boHandle, uint32_t rootDeviceindex) {
    drm_prime_handle openFd = {0, 0, 0};

    openFd.flags = DRM_CLOEXEC | DRM_RDWR;
    openFd.handle = boHandle;

    getDrm(rootDeviceindex).ioctl(DRM_IOCTL_PRIME_HANDLE_TO_FD, &openFd);

    return openFd.fd;
}

uint32_t DrmMemoryManager::getDefaultDrmContextId(uint32_t rootDeviceIndex) const {
    auto osContextLinux = static_cast<OsContextLinux *>(registeredEngines[defaultEngineIndex[rootDeviceIndex]].osContext);
    return osContextLinux->getDrmContextIds()[0];
}
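
// The userptr alignment defaults to MemoryConstants::allocationAlignment; the
// ForceUserptrAlignment debug flag overrides it in kilobytes (for example, a
// value of 64 yields a 64KB alignment).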
size_t DrmMemoryManager::getUserptrAlignment() {
    auto alignment = MemoryConstants::allocationAlignment;

    if (DebugManager.flags.ForceUserptrAlignment.get() != -1) {
        alignment = DebugManager.flags.ForceUserptrAlignment.get() * MemoryConstants::kiloByte;
    }

    return alignment;
}

Drm &DrmMemoryManager::getDrm(uint32_t rootDeviceIndex) const {
    return *this->executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]->osInterface->getDriverModel()->as<Drm>();
}

uint32_t DrmMemoryManager::getRootDeviceIndex(const Drm *drm) {
    auto rootDeviceCount = this->executionEnvironment.rootDeviceEnvironments.size();

    for (auto rootDeviceIndex = 0u; rootDeviceIndex < rootDeviceCount; rootDeviceIndex++) {
        if (&getDrm(rootDeviceIndex) == drm) {
            return rootDeviceIndex;
        }
    }
    return CommonConstants::unspecifiedDeviceIndex;
}

AddressRange DrmMemoryManager::reserveGpuAddress(size_t size, uint32_t rootDeviceIndex) {
    auto gpuVa = acquireGpuRange(size, rootDeviceIndex, HeapIndex::HEAP_STANDARD);
    return AddressRange{gpuVa, size};
}

void DrmMemoryManager::freeGpuAddress(AddressRange addressRange, uint32_t rootDeviceIndex) {
    releaseGpuRange(reinterpret_cast<void *>(addressRange.address), addressRange.size, rootDeviceIndex);
}
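
/* Usage sketch for the reserve/free pair above (illustrative only; assumes
   rootDeviceIndex 0 is valid and that a zero address signals a failed reservation):
     AddressRange range = memoryManager->reserveGpuAddress(MemoryConstants::pageSize64k, 0u);
     if (range.address != 0) {
         // ... hand range.address to an external allocator ...
         memoryManager->freeGpuAddress(range, 0u);
     }
*/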

std::unique_lock<std::mutex> DrmMemoryManager::acquireAllocLock() {
    return std::unique_lock<std::mutex>(this->allocMutex);
}

std::vector<GraphicsAllocation *> &DrmMemoryManager::getSysMemAllocs() {
    return this->sysMemAllocs;
}

std::vector<GraphicsAllocation *> &DrmMemoryManager::getLocalMemAllocs(uint32_t rootDeviceIndex) {
    return this->localMemAllocs[rootDeviceIndex];
}

void DrmMemoryManager::registerSysMemAlloc(GraphicsAllocation *allocation) {
    std::lock_guard<std::mutex> lock(this->allocMutex);
    this->sysMemAllocs.push_back(allocation);
}

void DrmMemoryManager::registerLocalMemAlloc(GraphicsAllocation *allocation, uint32_t rootDeviceIndex) {
    std::lock_guard<std::mutex> lock(this->allocMutex);
    this->localMemAllocs[rootDeviceIndex].push_back(allocation);
}

void DrmMemoryManager::unregisterAllocation(GraphicsAllocation *allocation) {
    std::lock_guard<std::mutex> lock(this->allocMutex);
    sysMemAllocs.erase(std::remove(sysMemAllocs.begin(), sysMemAllocs.end(), allocation),
                       sysMemAllocs.end());
    localMemAllocs[allocation->getRootDeviceIndex()].erase(std::remove(localMemAllocs[allocation->getRootDeviceIndex()].begin(),
                                                                       localMemAllocs[allocation->getRootDeviceIndex()].end(),
                                                                       allocation),
                                                           localMemAllocs[allocation->getRootDeviceIndex()].end());
}

void DrmMemoryManager::registerAllocationInOs(GraphicsAllocation *allocation) {
    if (allocation && getDrm(allocation->getRootDeviceIndex()).resourceRegistrationEnabled()) {
        auto drmAllocation = static_cast<DrmAllocation *>(allocation);
        drmAllocation->registerBOBindExtHandle(&getDrm(drmAllocation->getRootDeviceIndex()));

        if (isAllocationTypeToCapture(drmAllocation->getAllocationType())) {
            drmAllocation->markForCapture();
        }
    }
}

std::unique_ptr<MemoryManager> DrmMemoryManager::create(ExecutionEnvironment &executionEnvironment) {
    bool validateHostPtr = true;
    if (DebugManager.flags.EnableHostPtrValidation.get() != -1) {
        validateHostPtr = DebugManager.flags.EnableHostPtrValidation.get();
    }

    return std::make_unique<DrmMemoryManager>(gemCloseWorkerMode::gemCloseWorkerActive,
                                              DebugManager.flags.EnableForcePin.get(),
                                              validateHostPtr,
                                              executionEnvironment);
}

uint64_t DrmMemoryManager::getLocalMemorySize(uint32_t rootDeviceIndex, uint32_t deviceBitfield) {
    auto memoryInfo = getDrm(rootDeviceIndex).getMemoryInfo();
    if (!memoryInfo) {
        return 0;
    }

    auto hwInfo = executionEnvironment.rootDeviceEnvironments[rootDeviceIndex]->getHardwareInfo();
    uint32_t subDevicesCount = HwHelper::getSubDevicesCount(hwInfo);
    size_t size = 0;

    for (uint32_t i = 0; i < subDevicesCount; i++) {
        auto memoryBank = (1 << i);
        if (deviceBitfield & memoryBank) {
            size += memoryInfo->getMemoryRegionSize(memoryBank);
        }
    }
    return size;
}
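
/* Worked example: on a root device with two sub-devices, deviceBitfield = 0b11 sums
   the region sizes of memory banks 0b01 and 0b10, while deviceBitfield = 0b10
   reports only the second bank's size. */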

void *DrmMemoryManager::lockResourceInLocalMemoryImpl(GraphicsAllocation &graphicsAllocation) {
    if (!isLocalMemorySupported(graphicsAllocation.getRootDeviceIndex())) {
        return nullptr;
    }
    auto bo = static_cast<DrmAllocation &>(graphicsAllocation).getBO();
    if (graphicsAllocation.getAllocationType() == GraphicsAllocation::AllocationType::WRITE_COMBINED) {
        auto addr = lockResourceInLocalMemoryImpl(bo);
        auto alignedAddr = alignUp(addr, MemoryConstants::pageSize64k);
        auto notUsedSize = ptrDiff(alignedAddr, addr);
        // call unmap to free the unaligned pages preceding the BO allocation and
        // adjust the pointer in the CPU mapping to the beginning of the BO allocation
        munmapFunction(addr, notUsedSize);
        bo->setLockedAddress(alignedAddr);
        return bo->peekLockedAddress();
    }
    return lockResourceInLocalMemoryImpl(bo);
}
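
/* Example of the WRITE_COMBINED trimming above: if the CPU mapping starts 16 KB past
   a 64 KB boundary, alignUp moves alignedAddr 48 KB forward, munmapFunction releases
   those leading 48 KB, and the locked address then coincides with the start of the
   64 KB-aligned BO payload. */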

bool DrmMemoryManager::copyMemoryToAllocation(GraphicsAllocation *graphicsAllocation, size_t destinationOffset, const void *memoryToCopy, size_t sizeToCopy) {
    if (graphicsAllocation->getUnderlyingBuffer() || !isLocalMemorySupported(graphicsAllocation->getRootDeviceIndex())) {
        return MemoryManager::copyMemoryToAllocation(graphicsAllocation, destinationOffset, memoryToCopy, sizeToCopy);
    }
    return copyMemoryToAllocationBanks(graphicsAllocation, destinationOffset, memoryToCopy, sizeToCopy, maxNBitValue(graphicsAllocation->storageInfo.getNumBanks()));
}

bool DrmMemoryManager::copyMemoryToAllocationBanks(GraphicsAllocation *graphicsAllocation, size_t destinationOffset, const void *memoryToCopy, size_t sizeToCopy, DeviceBitfield handleMask) {
    if (MemoryPool::isSystemMemoryPool(graphicsAllocation->getMemoryPool())) {
        return false;
    }
    auto drmAllocation = static_cast<DrmAllocation *>(graphicsAllocation);
    for (auto handleId = 0u; handleId < graphicsAllocation->storageInfo.getNumBanks(); handleId++) {
        if (!handleMask.test(handleId)) {
            continue;
        }
        auto ptr = lockResourceInLocalMemoryImpl(drmAllocation->getBOs()[handleId]);
        if (!ptr) {
            return false;
        }
        memcpy_s(ptrOffset(ptr, destinationOffset), graphicsAllocation->getUnderlyingBufferSize() - destinationOffset, memoryToCopy, sizeToCopy);
        this->unlockResourceInLocalMemoryImpl(drmAllocation->getBOs()[handleId]);
    }
    return true;
}
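
/* Usage sketch (illustrative): filling only banks 0 and 2 of a multi-bank
   allocation, assuming it spans at least three banks:
     DeviceBitfield mask{0b101};
     memoryManager->copyMemoryToAllocationBanks(allocation, 0u, data, dataSize, mask);
   Banks whose bit is clear in the mask are skipped entirely. */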

void DrmMemoryManager::unlockResourceInLocalMemoryImpl(BufferObject *bo) {
    if (bo == nullptr)
        return;

    releaseReservedCpuAddressRange(bo->peekLockedAddress(), bo->peekSize(), this->getRootDeviceIndex(bo->peekDrm()));

    [[maybe_unused]] auto ret = munmapFunction(bo->peekLockedAddress(), bo->peekSize());
    DEBUG_BREAK_IF(ret != 0);
    bo->setLockedAddress(nullptr);
}

void createColouredGmms(GmmClientContext *clientContext, DrmAllocation &allocation, const StorageInfo &storageInfo, bool compression) {
    DEBUG_BREAK_IF(storageInfo.colouringPolicy == ColouringPolicy::DeviceCountBased && storageInfo.colouringGranularity != MemoryConstants::pageSize64k);

    auto remainingSize = alignUp(allocation.getUnderlyingBufferSize(), storageInfo.colouringGranularity);
    auto handles = storageInfo.getNumBanks();
    auto banksCnt = storageInfo.getTotalBanksCnt();

    if (storageInfo.colouringPolicy == ColouringPolicy::ChunkSizeBased) {
        handles = static_cast<uint32_t>(remainingSize / storageInfo.colouringGranularity);
        allocation.resizeGmms(handles);
    }

    /* This logic colours the resource as evenly as possible.
       Divide the size by the number of devices and align the result up to a 64 KB page,
       then subtract that chunk from the whole size and place it on the first tile, so
       the first tile has its chunk. In each following iteration, divide the remaining
       size by the remaining devices and subtract again. Note that if the allocation
       size (in pages) is not divisible by 4, the remainder can be 1, 2, or 3 pages,
       and this algorithm spreads it evenly.
       For example: an 18-page allocation across 4 devices, with a 64 KB page size.
       Divide by 4 and align up to a page: 5 pages. After subtracting, 13 pages remain.
       Divide 13 by 3 and align up: 5 pages. After subtracting, 8 pages remain.
       Divide 8 by 2: 4 pages.
       The last iteration leaves the remaining 4 pages on the last tile.
       So 18 pages are coloured as (5, 5, 4, 4).
       This behaviour is covered by tests and needs no extra debug checks. */
    for (auto handleId = 0u; handleId < handles; handleId++) {
        auto currentSize = alignUp(remainingSize / (handles - handleId), storageInfo.colouringGranularity);
        remainingSize -= currentSize;
        StorageInfo limitedStorageInfo = storageInfo;
        limitedStorageInfo.memoryBanks &= (1u << (handleId % banksCnt));
        auto gmm = new Gmm(clientContext,
                           nullptr,
                           currentSize,
                           0u,
                           false,
                           compression,
                           false,
                           limitedStorageInfo);
        allocation.setGmm(gmm, handleId);
    }
}
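
/* Worked example for the ChunkSizeBased path: a 1 MB allocation with a 256 KB
   colouring granularity yields handles = 4, and each iteration carves
   alignUp(remainingSize / (handles - handleId), granularity) = 256 KB for bank
   (handleId % banksCnt). */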

void fillGmmsInAllocation(GmmClientContext *clientContext, DrmAllocation *allocation, const StorageInfo &storageInfo) {
    auto alignedSize = alignUp(allocation->getUnderlyingBufferSize(), MemoryConstants::pageSize64k);
    for (auto handleId = 0u; handleId < storageInfo.getNumBanks(); handleId++) {
        StorageInfo limitedStorageInfo = storageInfo;
        limitedStorageInfo.memoryBanks &= 1u << handleId;
        limitedStorageInfo.pageTablesVisibility &= 1u << handleId;
        auto gmm = new Gmm(clientContext, nullptr, alignedSize, 0u, false, false, false, limitedStorageInfo);
        allocation->setGmm(gmm, handleId);
    }
}

uint64_t getGpuAddress(const AlignmentSelector &alignmentSelector, HeapAssigner &heapAssigner, const HardwareInfo &hwInfo, GraphicsAllocation::AllocationType allocType, GfxPartition *gfxPartition,
                       size_t &sizeAllocated, const void *hostPtr, bool resource48Bit, bool useFrontWindow) {
    uint64_t gpuAddress = 0;
    switch (allocType) {
    case GraphicsAllocation::AllocationType::SVM_GPU:
        gpuAddress = reinterpret_cast<uint64_t>(hostPtr);
        sizeAllocated = 0;
        break;
    case GraphicsAllocation::AllocationType::KERNEL_ISA:
    case GraphicsAllocation::AllocationType::KERNEL_ISA_INTERNAL:
    case GraphicsAllocation::AllocationType::INTERNAL_HEAP:
    case GraphicsAllocation::AllocationType::DEBUG_MODULE_AREA: {
        auto heap = heapAssigner.get32BitHeapIndex(allocType, true, hwInfo, useFrontWindow);
        gpuAddress = GmmHelper::canonize(gfxPartition->heapAllocate(heap, sizeAllocated));
    } break;
    case GraphicsAllocation::AllocationType::WRITE_COMBINED:
        sizeAllocated = 0;
        break;
    default:
        AlignmentSelector::CandidateAlignment alignment = alignmentSelector.selectAlignment(sizeAllocated);
        if (gfxPartition->getHeapLimit(HeapIndex::HEAP_EXTENDED) > 0 && !resource48Bit) {
            alignment.heap = HeapIndex::HEAP_EXTENDED;
        }
        gpuAddress = GmmHelper::canonize(gfxPartition->heapAllocateWithCustomAlignment(alignment.heap, sizeAllocated, alignment.alignment));
        break;
    }
    return gpuAddress;
}
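
/* Summary of the heap selection above: SVM_GPU reuses the host pointer as the GPU
   address; kernel ISA, internal heap, and debug-module allocations come from a
   32-bit internal heap (front window when requested); WRITE_COMBINED defers address
   assignment (sizeAllocated = 0); everything else takes an alignment-matched
   standard heap, upgraded to HEAP_EXTENDED when the partition exposes it and 48-bit
   addressing is not required. */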

GraphicsAllocation *DrmMemoryManager::allocateGraphicsMemoryInDevicePool(const AllocationData &allocationData, AllocationStatus &status) {
    status = AllocationStatus::RetryInNonDevicePool;
    if (!this->localMemorySupported[allocationData.rootDeviceIndex] ||
        allocationData.flags.useSystemMemory ||
        (allocationData.flags.allow32Bit && this->force32bitAllocations) ||
        allocationData.type == GraphicsAllocation::AllocationType::SHARED_RESOURCE_COPY) {
        return nullptr;
    }

    if (allocationData.type == GraphicsAllocation::AllocationType::UNIFIED_SHARED_MEMORY) {
        auto allocation = this->createSharedUnifiedMemoryAllocation(allocationData);
        status = allocation ? AllocationStatus::Success : AllocationStatus::Error;
        return allocation;
    }

    std::unique_ptr<Gmm> gmm;
    size_t sizeAligned = 0;
    auto numHandles = allocationData.storageInfo.getNumBanks();
    bool createSingleHandle = 1 == numHandles;
    if (allocationData.type == GraphicsAllocation::AllocationType::IMAGE) {
        allocationData.imgInfo->useLocalMemory = true;
        gmm = std::make_unique<Gmm>(executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getGmmClientContext(), *allocationData.imgInfo,
                                    allocationData.storageInfo, allocationData.flags.preferRenderCompressed);
        sizeAligned = alignUp(allocationData.imgInfo->size, MemoryConstants::pageSize64k);
    } else {
        if (allocationData.type == GraphicsAllocation::AllocationType::WRITE_COMBINED) {
            sizeAligned = alignUp(allocationData.size + MemoryConstants::pageSize64k, 2 * MemoryConstants::megaByte) + 2 * MemoryConstants::megaByte;
        } else {
            sizeAligned = alignUp(allocationData.size, MemoryConstants::pageSize64k);
        }
        if (createSingleHandle) {
            gmm = std::make_unique<Gmm>(executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getGmmClientContext(),
                                        nullptr,
                                        sizeAligned,
                                        0u,
                                        allocationData.flags.uncacheable,
                                        allocationData.flags.preferRenderCompressed,
                                        false,
                                        allocationData.storageInfo);
        }
    }

    auto sizeAllocated = sizeAligned;
    auto gfxPartition = getGfxPartition(allocationData.rootDeviceIndex);
    auto hwInfo = executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getHardwareInfo();
    auto gpuAddress = getGpuAddress(this->alignmentSelector, this->heapAssigner, *hwInfo,
                                    allocationData.type, gfxPartition, sizeAllocated,
                                    allocationData.hostPtr, allocationData.flags.resource48Bit, allocationData.flags.use32BitFrontWindow);

    auto allocation = std::make_unique<DrmAllocation>(allocationData.rootDeviceIndex, numHandles, allocationData.type, nullptr, nullptr, gpuAddress, sizeAligned, MemoryPool::LocalMemory);
    if (createSingleHandle) {
        allocation->setDefaultGmm(gmm.release());
    } else if (allocationData.storageInfo.multiStorage) {
        createColouredGmms(executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getGmmClientContext(),
                           *allocation,
                           allocationData.storageInfo,
                           allocationData.flags.preferRenderCompressed);
    } else {
        fillGmmsInAllocation(executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getGmmClientContext(), allocation.get(), allocationData.storageInfo);
    }
    allocation->storageInfo = allocationData.storageInfo;
    allocation->setFlushL3Required(allocationData.flags.flushL3);
    allocation->setUncacheable(allocationData.flags.uncacheable);
    allocation->setReservedAddressRange(reinterpret_cast<void *>(gpuAddress), sizeAllocated);

    if (!createDrmAllocation(&getDrm(allocationData.rootDeviceIndex), allocation.get(), gpuAddress, maxOsContextCount)) {
        for (auto handleId = 0u; handleId < allocationData.storageInfo.getNumBanks(); handleId++) {
            delete allocation->getGmm(handleId);
        }
        gfxPartition->freeGpuAddressRange(GmmHelper::decanonize(gpuAddress), sizeAllocated);
        status = AllocationStatus::Error;
        return nullptr;
    }

    if (allocationData.type == GraphicsAllocation::AllocationType::WRITE_COMBINED) {
        auto cpuAddress = lockResource(allocation.get());
        auto alignedCpuAddress = alignDown(cpuAddress, 2 * MemoryConstants::megaByte);
        auto offset = ptrDiff(cpuAddress, alignedCpuAddress);
        allocation->setAllocationOffset(offset);
        allocation->setCpuPtrAndGpuAddress(cpuAddress, reinterpret_cast<uint64_t>(alignedCpuAddress));
        DEBUG_BREAK_IF(allocation->storageInfo.multiStorage);
        allocation->getBO()->setAddress(reinterpret_cast<uint64_t>(cpuAddress));
    }

    if (allocationData.flags.requiresCpuAccess) {
        auto cpuAddress = lockResource(allocation.get());
        allocation->setCpuPtrAndGpuAddress(cpuAddress, gpuAddress);
    }

    if (heapAssigner.useInternal32BitHeap(allocationData.type)) {
        allocation->setGpuBaseAddress(GmmHelper::canonize(getInternalHeapBaseAddress(allocationData.rootDeviceIndex, true)));
    }

    if (!allocation->setCacheRegion(&getDrm(allocationData.rootDeviceIndex), static_cast<CacheRegion>(allocationData.cacheRegion))) {
        for (auto bo : allocation->getBOs()) {
            delete bo;
        }
        for (auto handleId = 0u; handleId < allocationData.storageInfo.getNumBanks(); handleId++) {
            delete allocation->getGmm(handleId);
        }
        gfxPartition->freeGpuAddressRange(GmmHelper::decanonize(gpuAddress), sizeAllocated);
        status = AllocationStatus::Error;
        return nullptr;
    }

    status = AllocationStatus::Success;
    return allocation.release();
}

BufferObject *DrmMemoryManager::createBufferObjectInMemoryRegion(Drm *drm,
                                                                 uint64_t gpuAddress,
                                                                 size_t size,
                                                                 uint32_t memoryBanks,
                                                                 size_t maxOsContextCount) {
    auto memoryInfo = drm->getMemoryInfo();
    if (!memoryInfo) {
        return nullptr;
    }

    uint32_t handle = 0;
    auto ret = memoryInfo->createGemExtWithSingleRegion(drm, memoryBanks, size, handle);
    if (ret != 0) {
        return nullptr;
    }

    auto bo = new (std::nothrow) BufferObject(drm, handle, size, maxOsContextCount);
    if (!bo) {
        return nullptr;
    }

    bo->setAddress(gpuAddress);
    return bo;
}
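
/* Note: the BO is backed by a GEM object created in a single target memory region
   (selected by memoryBanks) via MemoryInfo::createGemExtWithSingleRegion, and is
   returned with its GPU virtual address preassigned; binding and destruction remain
   the caller's responsibility. */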

bool DrmMemoryManager::createDrmAllocation(Drm *drm, DrmAllocation *allocation, uint64_t gpuAddress, size_t maxOsContextCount) {
    BufferObjects bos{};
    auto &storageInfo = allocation->storageInfo;
    auto boAddress = gpuAddress;
    auto currentBank = 0u;
    auto iterationOffset = 0u;
    auto banksCnt = storageInfo.getTotalBanksCnt();

    auto handles = storageInfo.getNumBanks();
    if (storageInfo.colouringPolicy == ColouringPolicy::ChunkSizeBased) {
        handles = allocation->getNumGmms();
        allocation->resizeBufferObjects(handles);
        bos.resize(handles);
    }

    for (auto handleId = 0u; handleId < handles; handleId++, currentBank++) {
        if (currentBank == banksCnt) {
            currentBank = 0;
            iterationOffset += banksCnt;
        }
        uint32_t memoryBanks = static_cast<uint32_t>(storageInfo.memoryBanks.to_ulong());
        if (storageInfo.getNumBanks() > 1) {
            // check that this bank is present; if not, move to the next one.
            // memoryBanks may contain holes that need to be skipped,
            // e.g. memoryBanks 0b1101 with a 3-handle allocation
            while (!(memoryBanks & (1u << currentBank))) {
                currentBank++;
            }
            memoryBanks &= 1u << currentBank;
        }
        auto boSize = alignUp(allocation->getGmm(handleId)->gmmResourceInfo->getSizeAllocation(), MemoryConstants::pageSize64k);
        bos[handleId] = createBufferObjectInMemoryRegion(drm, boAddress, boSize, memoryBanks, maxOsContextCount);
        if (nullptr == bos[handleId]) {
            return false;
        }
        allocation->getBufferObjectToModify(currentBank + iterationOffset) = bos[handleId];
        if (storageInfo.multiStorage) {
            boAddress += boSize;
        }
    }

    if (storageInfo.colouringPolicy == ColouringPolicy::MappingBased) {
        auto size = alignUp(allocation->getUnderlyingBufferSize(), storageInfo.colouringGranularity);
        auto chunks = static_cast<uint32_t>(size / storageInfo.colouringGranularity);
        auto granularity = storageInfo.colouringGranularity;

        for (uint32_t boHandle = 0; boHandle < handles; boHandle++) {
            bos[boHandle]->setColourWithBind();
            bos[boHandle]->setColourChunk(granularity);
            bos[boHandle]->reserveAddressVector(alignUp(chunks, handles) / handles);
        }

        auto boHandle = 0u;
        auto colourAddress = gpuAddress;
        for (auto chunk = 0u; chunk < chunks; chunk++) {
            if (boHandle == handles) {
                boHandle = 0u;
            }
            bos[boHandle]->addColouringAddress(colourAddress);
            colourAddress += granularity;
            boHandle++;
        }
    }

    return true;
}
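
/* Worked example for the MappingBased path above: 8 chunks coloured across 2 BOs are
   assigned round-robin, so bos[0] receives the addresses of chunks 0, 2, 4, 6 and
   bos[1] those of chunks 1, 3, 5, 7, each address advancing by colouringGranularity. */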

bool DrmMemoryManager::retrieveMmapOffsetForBufferObject(uint32_t rootDeviceIndex, BufferObject &bo, uint64_t flags, uint64_t &offset) {
    constexpr uint64_t mmapOffsetFixed = 4;

    drm_i915_gem_mmap_offset mmapOffset = {};
    mmapOffset.handle = bo.peekHandle();
    mmapOffset.flags = isLocalMemorySupported(rootDeviceIndex) ? mmapOffsetFixed : flags;
    auto &drm = getDrm(rootDeviceIndex);
    auto ret = drm.ioctl(DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmapOffset);
    if (ret != 0 && isLocalMemorySupported(rootDeviceIndex)) {
        mmapOffset.flags = flags;
        ret = drm.ioctl(DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmapOffset);
    }
    if (ret != 0) {
        int err = drm.getErrno();
        PRINT_DEBUG_STRING(DebugManager.flags.PrintDebugMessages.get(), stderr, "ioctl(DRM_IOCTL_I915_GEM_MMAP_OFFSET) failed with %d. errno=%d(%s)\n", ret, err, strerror(err));
        DEBUG_BREAK_IF(ret != 0);
        return false;
    }

    offset = mmapOffset.offset;
    return true;
}
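
/* Note on the fallback above: on local-memory platforms the fixed mapping mode is
   tried first (value 4, matching I915_MMAP_OFFSET_FIXED in i915_drm.h); if the
   kernel rejects it, the caller-provided flags are retried once before the failure
   is logged and reported. */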
} // namespace NEO