refactor: correct naming of enum class constants 4/n

Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
Author: Mateusz Jablonski
Date: 2023-12-12 11:37:31 +00:00
Committed by: Compute-Runtime-Automation
Parent: 01dd503e47
Commit: 432142c574
86 changed files with 314 additions and 314 deletions

@@ -61,7 +61,7 @@ const SipKernel &BuiltIns::getSipKernel(SipKernelType type, Device &device) {
}
sipBuiltIn.first.reset(new SipKernel(type, sipAllocation, std::move(stateSaveAreaHeader), std::move(sipBinary)));
- if (rootDeviceEnvironment.executionEnvironment.getDebuggingMode() == DebuggingMode::Offline) {
+ if (rootDeviceEnvironment.executionEnvironment.getDebuggingMode() == DebuggingMode::offline) {
sipBuiltIn.first->parseBinaryForContextId();
}
};

@@ -16,7 +16,7 @@ bool SipKernel::initSipKernel(SipKernelType type, Device &device) {
}
const SipKernel &SipKernel::getSipKernel(Device &device, OsContext *context) {
- if (context && device.getExecutionEnvironment()->getDebuggingMode() == NEO::DebuggingMode::Offline) {
+ if (context && device.getExecutionEnvironment()->getDebuggingMode() == NEO::DebuggingMode::offline) {
return SipKernel::getBindlessDebugSipKernel(device, context);
} else {
return SipKernel::getSipKernelImpl(device);

@@ -81,7 +81,7 @@ CommandContainer::ErrorCode CommandContainer::initialize(Device *device, Allocat
auto cmdBufferAllocation = this->obtainNextCommandBufferAllocation();
if (!cmdBufferAllocation) {
- return ErrorCode::OUT_OF_DEVICE_MEMORY;
+ return ErrorCode::outOfDeviceMemory;
}
cmdBufferAllocations.push_back(cmdBufferAllocation);
@@ -108,7 +108,7 @@ CommandContainer::ErrorCode CommandContainer::initialize(Device *device, Allocat
auto cmdBufferAllocationHost = this->obtainNextCommandBufferAllocation(true);
if (!cmdBufferAllocationHost) {
- return ErrorCode::OUT_OF_DEVICE_MEMORY;
+ return ErrorCode::outOfDeviceMemory;
}
secondaryCommandStreamForImmediateCmdList = std::make_unique<LinearStream>(cmdBufferAllocationHost->getUnderlyingBuffer(),
usableSize, this, this->selectedBbCmdSize);
@@ -135,7 +135,7 @@ CommandContainer::ErrorCode CommandContainer::initialize(Device *device, Allocat
device->getRootDeviceIndex());
if (!allocationIndirectHeaps[i]) {
- return ErrorCode::OUT_OF_DEVICE_MEMORY;
+ return ErrorCode::outOfDeviceMemory;
}
residencyContainer.push_back(allocationIndirectHeaps[i]);
@@ -157,7 +157,7 @@ CommandContainer::ErrorCode CommandContainer::initialize(Device *device, Allocat
iddBlock = nullptr;
nextIddInBlock = this->getNumIddPerBlock();
}
- return ErrorCode::SUCCESS;
+ return ErrorCode::success;
}
void CommandContainer::addToResidencyContainer(GraphicsAllocation *alloc) {
@@ -579,7 +579,7 @@ bool CommandContainer::skipHeapAllocationCreation(HeapType heapType) {
bool skipCreation = (globalBindlessHeapsEnabled && IndirectHeap::Type::SURFACE_STATE == heapType) ||
this->immediateCmdListSharedHeap(heapType) ||
(!hardwareInfo.capabilityTable.supportsImages && IndirectHeap::Type::DYNAMIC_STATE == heapType) ||
- (this->heapAddressModel != HeapAddressModel::PrivateHeaps);
+ (this->heapAddressModel != HeapAddressModel::privateHeaps);
return skipCreation;
}

@@ -61,8 +61,8 @@ struct HeapReserveArguments {
class CommandContainer : public NonCopyableOrMovableClass {
public:
enum class ErrorCode {
- SUCCESS = 0,
- OUT_OF_DEVICE_MEMORY = 1
+ success = 0,
+ outOfDeviceMemory = 1
};
static constexpr size_t defaultListCmdBufferSize = 1u * MemoryConstants ::megaByte;
@@ -251,7 +251,7 @@ class CommandContainer : public NonCopyableOrMovableClass {
uint32_t dirtyHeaps = std::numeric_limits<uint32_t>::max();
uint32_t numIddsPerBlock = 64;
- HeapAddressModel heapAddressModel = HeapAddressModel::PrivateHeaps;
+ HeapAddressModel heapAddressModel = HeapAddressModel::privateHeaps;
uint32_t slmSize = std::numeric_limits<uint32_t>::max();
uint32_t nextIddInBlock = 0;

@@ -16,9 +16,9 @@ class CommandStreamReceiver;
class CsrDependencies {
public:
enum class DependenciesType {
- OnCsr,
- OutOfCsr,
- All
+ onCsr,
+ outOfCsr,
+ all
};
StackVec<TimestampPacketContainer *, 32> multiRootTimeStampSyncContainer;

@@ -40,25 +40,25 @@ class Debugger {
};
enum class DebuggingMode : uint32_t {
- Disabled,
- Online,
- Offline
+ disabled,
+ online,
+ offline
};
inline DebuggingMode getDebuggingMode(uint32_t programDebugging) {
switch (programDebugging) {
case 1: {
- return DebuggingMode::Online;
+ return DebuggingMode::online;
}
case 2: {
- return DebuggingMode::Offline;
+ return DebuggingMode::offline;
}
case 0:
default: {
- return DebuggingMode::Disabled;
+ return DebuggingMode::disabled;
}
}
}
static_assert(std::is_standard_layout<Debugger::SbaAddresses>::value);
} // namespace NEO

@@ -24,8 +24,8 @@ bool DebuggerL0::initDebuggingInOs(NEO::OSInterface *osInterface) {
const bool vmBindAvailable = drm->isVmBindAvailable();
const bool perContextVms = drm->isPerContextVMRequired();
- const bool allowDebug = (drm->getRootDeviceEnvironment().executionEnvironment.getDebuggingMode() == DebuggingMode::Offline) ||
- (perContextVms && drm->getRootDeviceEnvironment().executionEnvironment.getDebuggingMode() == DebuggingMode::Online);
+ const bool allowDebug = (drm->getRootDeviceEnvironment().executionEnvironment.getDebuggingMode() == DebuggingMode::offline) ||
+ (perContextVms && drm->getRootDeviceEnvironment().executionEnvironment.getDebuggingMode() == DebuggingMode::online);
if (vmBindAvailable && allowDebug) {
drm->registerResourceClasses();
@@ -39,7 +39,7 @@ bool DebuggerL0::initDebuggingInOs(NEO::OSInterface *osInterface) {
}
void DebuggerL0::initSbaTrackingMode() {
- if (device->getExecutionEnvironment()->getDebuggingMode() == DebuggingMode::Offline) {
+ if (device->getExecutionEnvironment()->getDebuggingMode() == DebuggingMode::offline) {
singleAddressSpaceSbaTracking = true;
} else {
singleAddressSpaceSbaTracking = false;

@@ -98,7 +98,7 @@ Drm *Drm::create(std::unique_ptr<HwDeviceIdDrm> &&hwDeviceId, RootDeviceEnvironm
drm->queryPageFaultSupport();
if (rootDeviceEnvironment.executionEnvironment.isDebuggingEnabled()) {
- if (drm->getRootDeviceEnvironment().executionEnvironment.getDebuggingMode() == DebuggingMode::Offline) {
+ if (drm->getRootDeviceEnvironment().executionEnvironment.getDebuggingMode() == DebuggingMode::offline) {
drm->setPerContextVMRequired(false);
} else {
if (drm->isVmBindAvailable()) {

@@ -41,7 +41,7 @@ class ExecutionEnvironment : public ReferenceTrackedObject<ExecutionEnvironment>
debuggingEnabledMode = debuggingMode;
}
DebuggingMode getDebuggingMode() const { return debuggingEnabledMode; }
- bool isDebuggingEnabled() const { return debuggingEnabledMode != DebuggingMode::Disabled; }
+ bool isDebuggingEnabled() const { return debuggingEnabledMode != DebuggingMode::disabled; }
void setMetricsEnabled(bool value) {
this->metricsEnabled = value;
}
@@ -82,7 +82,7 @@ class ExecutionEnvironment : public ReferenceTrackedObject<ExecutionEnvironment>
bool subDevicesAsDevices = false;
bool combinedDeviceHierarchy = false;
- DebuggingMode debuggingEnabledMode = DebuggingMode::Disabled;
+ DebuggingMode debuggingEnabledMode = DebuggingMode::disabled;
std::unordered_map<uint32_t, uint32_t> rootDeviceNumCcsMap;
std::mutex initializeDirectSubmissionControllerMutex;
};

@@ -17,26 +17,26 @@ enum class HeapAddressModel : uint32_t {
* Heaps are allocated and owned by each command list.
* Command lists will require to reload SBA command before execution.
*/
- PrivateHeaps = 0,
+ privateHeaps = 0,
/*
* Global stateless - command list can use only stateless addressing.
* Surface state heap is allocated per context for driver's allocations only (debug and scratch)
* Command lists do not require SBA command reload. SBA is dispatched only once for heap addresses.
*/
- GlobalStateless = 1,
+ globalStateless = 1,
/*
* Global bindless - command list can use stateless or bindless addressing.
* Surface and dynamic heaps are allocated in global, 4GB allocator for all allocations and samplers used.
* Command lists do not require SBA command reload. SBA is dispatched only once for heap addresses.
*/
- GlobalBindless = 2,
+ globalBindless = 2,
/*
* Global bindful - command list can use any addressing.
* Surface and dynamic heaps are allocated in global, 4GB allocator for all allocations and samplers used.
* Binding table base address is programed by special heap manager that provides and reuses space for bti
* Command lists might require dynamic binding table base address command reload when binding table heap manager requires to reload base address.
*/
- GlobalBindful = 3
+ globalBindful = 3
};
} // namespace NEO

@@ -53,7 +53,7 @@ KernelHelper::ErrorCode KernelHelper::checkIfThereIsSpaceForScratchOrPrivate(Ker
auto &gfxCoreHelper = device->getRootDeviceEnvironment().getHelper<NEO::GfxCoreHelper>();
uint32_t maxScratchSize = gfxCoreHelper.getMaxScratchSize();
if ((attributes.perThreadScratchSize[0] > maxScratchSize) || (attributes.perThreadScratchSize[1] > maxScratchSize)) {
- return KernelHelper::ErrorCode::INVALID_KERNEL;
+ return KernelHelper::ErrorCode::invalidKernel;
}
auto globalMemorySize = device->getDeviceInfo().globalMemSize;
auto computeUnitsForScratch = device->getDeviceInfo().computeUnitsUsedForScratch;
@@ -80,9 +80,9 @@ KernelHelper::ErrorCode KernelHelper::checkIfThereIsSpaceForScratchOrPrivate(Ker
totalScratchSize > globalMemorySize ||
totalPrivateScratchSize > globalMemorySize) {
- return KernelHelper::ErrorCode::OUT_OF_DEVICE_MEMORY;
+ return KernelHelper::ErrorCode::outOfDeviceMemory;
}
- return KernelHelper::ErrorCode::SUCCESS;
+ return KernelHelper::ErrorCode::success;
}
bool KernelHelper::isAnyArgumentPtrByValue(const KernelDescriptor &kernelDescriptor) {

@@ -17,9 +17,9 @@ class Device;
struct KernelHelper {
enum class ErrorCode {
- SUCCESS = 0,
- OUT_OF_DEVICE_MEMORY = 1,
- INVALID_KERNEL = 2
+ success = 0,
+ outOfDeviceMemory = 1,
+ invalidKernel = 2
};
static uint32_t getMaxWorkGroupCount(uint32_t simd, uint32_t availableThreadCount, uint32_t dssCount, uint32_t availableSlmSize,
uint32_t usedSlmSize, uint32_t maxBarrierCount, uint32_t numberOfBarriers, uint32_t workDim,

@@ -117,7 +117,7 @@ struct AllocationData {
static_assert(sizeof(AllocationData::flags) == sizeof(AllocationData::allFlags), "");
AllocationType type = AllocationType::unknown;
GraphicsAllocation::UsmInitialPlacement usmInitialPlacement = GraphicsAllocation::UsmInitialPlacement::DEFAULT;
- GfxMemoryAllocationMethod allocationMethod = GfxMemoryAllocationMethod::NotDefined;
+ GfxMemoryAllocationMethod allocationMethod = GfxMemoryAllocationMethod::notDefined;
const void *hostPtr = nullptr;
uint64_t gpuAddress = 0;
size_t size = 0;

@@ -62,8 +62,8 @@ enum class AllocationType {
};
enum class GfxMemoryAllocationMethod : uint32_t {
- UseUmdSystemPtr,
- AllocateByKmd,
- NotDefined = std::numeric_limits<uint32_t>::max()
+ useUmdSystemPtr,
+ allocateByKmd,
+ notDefined = std::numeric_limits<uint32_t>::max()
};
} // namespace NEO

@@ -24,19 +24,19 @@ struct ResidencyData;
constexpr int maxFragmentsCount = 3;
enum class FragmentPosition {
- NONE = 0,
- LEADING,
- MIDDLE,
- TRAILING
+ none = 0,
+ leading,
+ middle,
+ trailing
};
enum class RequirementsStatus {
- SUCCESS = 0,
- FATAL
+ success = 0,
+ fatal
};
struct PartialAllocation {
- FragmentPosition fragmentPosition = FragmentPosition::NONE;
+ FragmentPosition fragmentPosition = FragmentPosition::none;
const void *allocationPtr = nullptr;
size_t allocationSize = 0u;
};

@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@@ -53,7 +53,7 @@ AllocationRequirements HostPtrManager::getAllocationRequirements(uint32_t rootDe
if (alignedStartAddress != inputPtr) {
leadingNeeded = true;
requiredAllocations.allocationFragments[allocationCount].allocationPtr = alignedStartAddress;
- requiredAllocations.allocationFragments[allocationCount].fragmentPosition = FragmentPosition::LEADING;
+ requiredAllocations.allocationFragments[allocationCount].fragmentPosition = FragmentPosition::leading;
requiredAllocations.allocationFragments[allocationCount].allocationSize = MemoryConstants::pageSize;
allocationCount++;
}
@@ -69,14 +69,14 @@ AllocationRequirements HostPtrManager::getAllocationRequirements(uint32_t rootDe
auto middleSize = wholeAllocationSize - (trailingNeeded + leadingNeeded) * MemoryConstants::pageSize;
if (middleSize) {
requiredAllocations.allocationFragments[allocationCount].allocationPtr = alignUp(inputPtr, MemoryConstants::pageSize);
- requiredAllocations.allocationFragments[allocationCount].fragmentPosition = FragmentPosition::MIDDLE;
+ requiredAllocations.allocationFragments[allocationCount].fragmentPosition = FragmentPosition::middle;
requiredAllocations.allocationFragments[allocationCount].allocationSize = middleSize;
allocationCount++;
}
if (trailingNeeded) {
requiredAllocations.allocationFragments[allocationCount].allocationPtr = alignedEndAddress;
- requiredAllocations.allocationFragments[allocationCount].fragmentPosition = FragmentPosition::TRAILING;
+ requiredAllocations.allocationFragments[allocationCount].fragmentPosition = FragmentPosition::trailing;
requiredAllocations.allocationFragments[allocationCount].allocationSize = MemoryConstants::pageSize;
allocationCount++;
}
@@ -249,7 +249,7 @@ FragmentStorage *HostPtrManager::getFragmentAndCheckForOverlaps(uint32_t rootDev
OsHandleStorage HostPtrManager::prepareOsStorageForAllocation(MemoryManager &memoryManager, size_t size, const void *ptr, uint32_t rootDeviceIndex) {
std::lock_guard<decltype(allocationsMutex)> lock(allocationsMutex);
auto requirements = HostPtrManager::getAllocationRequirements(rootDeviceIndex, ptr, size);
- UNRECOVERABLE_IF(checkAllocationsForOverlapping(memoryManager, &requirements) == RequirementsStatus::FATAL);
+ UNRECOVERABLE_IF(checkAllocationsForOverlapping(memoryManager, &requirements) == RequirementsStatus::fatal);
auto osStorage = populateAlreadyAllocatedFragments(requirements);
if (osStorage.fragmentCount > 0) {
if (memoryManager.populateOsHandles(osStorage, rootDeviceIndex) != MemoryManager::AllocationStatus::Success) {
@@ -263,7 +263,7 @@ OsHandleStorage HostPtrManager::prepareOsStorageForAllocation(MemoryManager &mem
RequirementsStatus HostPtrManager::checkAllocationsForOverlapping(MemoryManager &memoryManager, AllocationRequirements *requirements) {
UNRECOVERABLE_IF(requirements == nullptr);
- RequirementsStatus status = RequirementsStatus::SUCCESS;
+ RequirementsStatus status = RequirementsStatus::success;
for (unsigned int i = 0; i < requirements->requiredFragmentsCount; i++) {
OverlapStatus overlapStatus = OverlapStatus::FRAGMENT_NOT_CHECKED;
@@ -286,7 +286,7 @@ RequirementsStatus HostPtrManager::checkAllocationsForOverlapping(MemoryManager
getFragmentAndCheckForOverlaps(requirements->rootDeviceIndex, requirements->allocationFragments[i].allocationPtr,
requirements->allocationFragments[i].allocationSize, overlapStatus);
if (overlapStatus == OverlapStatus::FRAGMENT_OVERLAPING_AND_BIGGER_THEN_STORED_FRAGMENT) {
- status = RequirementsStatus::FATAL;
+ status = RequirementsStatus::fatal;
break;
}
}

@@ -536,7 +536,7 @@ bool MemoryManager::getAllocationData(AllocationData &allocationData, const Allo
}
GfxMemoryAllocationMethod MemoryManager::getPreferredAllocationMethod(const AllocationProperties &allocationProperties) const {
- return GfxMemoryAllocationMethod::NotDefined;
+ return GfxMemoryAllocationMethod::notDefined;
}
GraphicsAllocation *MemoryManager::allocatePhysicalGraphicsMemory(const AllocationProperties &properties) {

@@ -99,7 +99,7 @@ int IoctlHelper::createDrmContext(Drm &drm, OsContextLinux &osContext, uint32_t
const auto numberOfCCS = drm.getRootDeviceEnvironment().getHardwareInfo()->gtSystemInfo.CCSInfo.NumberOfCCSEnabled;
const bool debuggableContext = drm.isContextDebugSupported() && drm.getRootDeviceEnvironment().executionEnvironment.isDebuggingEnabled() && !osContext.isInternalEngine();
- const bool debuggableContextCooperative = drm.getRootDeviceEnvironment().executionEnvironment.getDebuggingMode() == DebuggingMode::Offline ? false : (debuggableContext && numberOfCCS > 0);
+ const bool debuggableContextCooperative = drm.getRootDeviceEnvironment().executionEnvironment.getDebuggingMode() == DebuggingMode::offline ? false : (debuggableContext && numberOfCCS > 0);
auto drmContextId = drm.createDrmContext(drmVmId, drm.isVmBindAvailable(), osContext.isCooperativeEngine() || debuggableContextCooperative);
if (drmContextId < 0) {
return drmContextId;

@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@@ -9,10 +9,10 @@
namespace NEO {
- const GfxMemoryAllocationMethod preferredAllocationMethod = GfxMemoryAllocationMethod::AllocateByKmd;
+ const GfxMemoryAllocationMethod preferredAllocationMethod = GfxMemoryAllocationMethod::allocateByKmd;
size_t WddmMemoryManager::getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod allocationMethod) const {
- if (GfxMemoryAllocationMethod::AllocateByKmd == allocationMethod) {
+ if (GfxMemoryAllocationMethod::allocateByKmd == allocationMethod) {
return 4 * MemoryConstants::gigaByte - MemoryConstants::pageSize64k;
} else {
return 31 * MemoryConstants::megaByte;

@@ -9,7 +9,7 @@
namespace NEO {
- const GfxMemoryAllocationMethod preferredAllocationMethod = GfxMemoryAllocationMethod::UseUmdSystemPtr;
+ const GfxMemoryAllocationMethod preferredAllocationMethod = GfxMemoryAllocationMethod::useUmdSystemPtr;
size_t WddmMemoryManager::getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod allocationMethod) const {
return 4 * MemoryConstants::gigaByte - MemoryConstants::pageSize64k;

@@ -134,7 +134,7 @@ GraphicsAllocation *WddmMemoryManager::allocatePhysicalDeviceMemory(const Alloca
}
GraphicsAllocation *WddmMemoryManager::allocateMemoryByKMD(const AllocationData &allocationData) {
- if (allocationData.size > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::AllocateByKmd)) {
+ if (allocationData.size > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::allocateByKmd)) {
return allocateHugeGraphicsMemory(allocationData, false);
}
@@ -191,13 +191,13 @@ GraphicsAllocation *WddmMemoryManager::allocateGraphicsMemoryUsingKmdAndMapItToC
}
size_t sizeAligned = alignUp(allocationData.size, allowLargePages ? MemoryConstants::pageSize64k : MemoryConstants::pageSize);
- if (sizeAligned > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::AllocateByKmd)) {
+ if (sizeAligned > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::allocateByKmd)) {
const bool isBufferHostMemory = allocationData.type == NEO::AllocationType::bufferHostMemory;
return allocateHugeGraphicsMemory(allocationData, isBufferHostMemory);
}
// algin gpu address of device part of usm shared allocation to 64kb for WSL2
- auto alignGpuAddressTo64KB = allocationData.allocationMethod == GfxMemoryAllocationMethod::AllocateByKmd && allocationData.makeGPUVaDifferentThanCPUPtr;
+ auto alignGpuAddressTo64KB = allocationData.allocationMethod == GfxMemoryAllocationMethod::allocateByKmd && allocationData.makeGPUVaDifferentThanCPUPtr;
if (alignGpuAddressTo64KB) {
sizeAligned = sizeAligned + allocationData.alignment;
@@ -287,7 +287,7 @@ GraphicsAllocation *WddmMemoryManager::allocateHugeGraphicsMemory(const Allocati
}
}
- auto chunkSize = getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::UseUmdSystemPtr);
+ auto chunkSize = getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::useUmdSystemPtr);
auto numGmms = (alignedSize + chunkSize - 1) / chunkSize;
auto gmmHelper = getGmmHelper(allocationData.rootDeviceIndex);
auto canonizedAddress = gmmHelper->canonize(castToUint64(const_cast<void *>(hostPtr)));
@@ -357,7 +357,7 @@ GraphicsAllocation *WddmMemoryManager::allocateUSMHostGraphicsMemory(const Alloc
GraphicsAllocation *WddmMemoryManager::allocateGraphicsMemoryWithAlignment(const AllocationData &allocationData) {
auto pageSize = NEO::OSInterface::osEnabled64kbPages ? MemoryConstants::pageSize64k : MemoryConstants::pageSize;
bool requiresNonStandardAlignment = allocationData.alignment > pageSize;
- if ((allocationData.allocationMethod == GfxMemoryAllocationMethod::UseUmdSystemPtr) || (requiresNonStandardAlignment && allocationData.forceKMDAllocation == false)) {
+ if ((allocationData.allocationMethod == GfxMemoryAllocationMethod::useUmdSystemPtr) || (requiresNonStandardAlignment && allocationData.forceKMDAllocation == false)) {
return allocateSystemMemoryAndCreateGraphicsAllocationFromIt(allocationData);
} else {
return allocateGraphicsMemoryUsingKmdAndMapItToCpuVA(allocationData, NEO::OSInterface::osEnabled64kbPages);
@@ -367,7 +367,7 @@ GraphicsAllocation *WddmMemoryManager::allocateGraphicsMemoryWithAlignment(const
GraphicsAllocation *WddmMemoryManager::allocateSystemMemoryAndCreateGraphicsAllocationFromIt(const AllocationData &allocationData) {
size_t newAlignment = allocationData.alignment ? alignUp(allocationData.alignment, MemoryConstants::pageSize) : MemoryConstants::pageSize;
size_t sizeAligned = allocationData.size ? alignUp(allocationData.size, MemoryConstants::pageSize) : MemoryConstants::pageSize;
- if (sizeAligned > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::UseUmdSystemPtr)) {
+ if (sizeAligned > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::useUmdSystemPtr)) {
return allocateHugeGraphicsMemory(allocationData, true);
}
void *pSysMem = allocateSystemMemory(sizeAligned, newAlignment);
@@ -425,7 +425,7 @@ GraphicsAllocation *WddmMemoryManager::allocateSystemMemoryAndCreateGraphicsAllo
GraphicsAllocation *WddmMemoryManager::allocateGraphicsMemoryForNonSvmHostPtr(const AllocationData &allocationData) {
auto alignedSize = alignSizeWholePage(allocationData.hostPtr, allocationData.size);
- if (alignedSize > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::UseUmdSystemPtr)) {
+ if (alignedSize > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::useUmdSystemPtr)) {
return allocateHugeGraphicsMemory(allocationData, false);
}
@@ -461,7 +461,7 @@ GraphicsAllocation *WddmMemoryManager::allocateGraphicsMemoryForNonSvmHostPtr(co
}
GraphicsAllocation *WddmMemoryManager::allocateGraphicsMemoryWithHostPtr(const AllocationData &allocationData) {
- if (allocationData.size > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::UseUmdSystemPtr)) {
+ if (allocationData.size > getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::useUmdSystemPtr)) {
return allocateHugeGraphicsMemory(allocationData, false);
}
@@ -518,7 +518,7 @@ GraphicsAllocation *WddmMemoryManager::allocate32BitGraphicsMemoryImpl(const All
ptrAligned = alignDown(allocationData.hostPtr, MemoryConstants::allocationAlignment);
sizeAligned = alignSizeWholePage(allocationData.hostPtr, sizeAligned);
offset = ptrDiff(allocationData.hostPtr, ptrAligned);
- } else if (allocationData.allocationMethod == GfxMemoryAllocationMethod::UseUmdSystemPtr) {
+ } else if (allocationData.allocationMethod == GfxMemoryAllocationMethod::useUmdSystemPtr) {
sizeAligned = alignUp(sizeAligned, MemoryConstants::allocationAlignment);
pSysMem = allocateSystemMemory(sizeAligned, MemoryConstants::allocationAlignment);
if (pSysMem == nullptr) {
@@ -547,7 +547,7 @@ GraphicsAllocation *WddmMemoryManager::allocate32BitGraphicsMemoryImpl(const All
auto hwInfo = executionEnvironment.rootDeviceEnvironments[allocationData.rootDeviceIndex]->getHardwareInfo();
StorageInfo storageInfo{};
- storageInfo.isLockable = allocationData.allocationMethod != GfxMemoryAllocationMethod::UseUmdSystemPtr;
+ storageInfo.isLockable = allocationData.allocationMethod != GfxMemoryAllocationMethod::useUmdSystemPtr;
GmmRequirements gmmRequirements{};
gmmRequirements.allowLargePages = true;
@@ -1318,7 +1318,7 @@ GraphicsAllocation *WddmMemoryManager::allocatePhysicalLocalDeviceMemory(const A
gmmRequirements);
}
- const auto chunkSize = alignDown(getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::AllocateByKmd), alignment);
+ const auto chunkSize = alignDown(getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::allocateByKmd), alignment);
const size_t numGmms = (static_cast<uint64_t>(sizeAligned) + chunkSize - 1) / chunkSize;
auto wddmAllocation = std::make_unique<WddmAllocation>(allocationData.rootDeviceIndex, singleBankAllocation ? numGmms : numBanks,
@@ -1404,7 +1404,7 @@ GraphicsAllocation *WddmMemoryManager::allocateGraphicsMemoryInDevicePool(const
}
}
- const auto chunkSize = alignDown(getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::AllocateByKmd), alignment);
+ const auto chunkSize = alignDown(getHugeGfxMemoryChunkSize(GfxMemoryAllocationMethod::allocateByKmd), alignment);
const size_t numGmms = (static_cast<uint64_t>(sizeAligned) + chunkSize - 1) / chunkSize;
auto wddmAllocation = std::make_unique<WddmAllocation>(allocationData.rootDeviceIndex, singleBankAllocation ? numGmms : numBanks,

@@ -22,7 +22,7 @@ std::optional<GfxMemoryAllocationMethod> ReleaseHelperHw<release>::getPreferredA
case AllocationType::timestampPacketTagBuffer:
return {};
default:
- return GfxMemoryAllocationMethod::AllocateByKmd;
+ return GfxMemoryAllocationMethod::allocateByKmd;
}
}