Mirror of https://github.com/intel/compute-runtime.git (synced 2026-01-09 22:43:00 +08:00)
refactor: correct naming of enum class constants 9/n
Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
Committed by Compute-Runtime-Automation
parent c612a86d28
commit a6458433dc
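The diff below is part of a series that renames enum class constants from ALL_CAPS to lowerCamelCase while leaving the enum type names in PascalCase. As a quick orientation, here is a minimal C++ sketch of the convention, modeled on the QueuePriority change in the hunks that follow; the isLowPriority helper is only an illustration and is not part of the compute-runtime sources.

// Enum type name stays PascalCase, enumerators move from ALL_CAPS to lowerCamelCase.
enum class QueuePriority {
    low,    // was LOW
    medium, // was MEDIUM
    high    // was HIGH
};

// Hypothetical helper (not in the sources) showing how call sites read after the rename.
constexpr bool isLowPriority(QueuePriority priority) {
    return priority == QueuePriority::low;
}

static_assert(isLowPriority(QueuePriority::low));
static_assert(!isLowPriority(QueuePriority::medium));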
@@ -51,7 +51,7 @@ struct BuiltinOpParams {
     uint32_t dstMipLevel = 0;
     void *userPtrForPostOperationCpuCopy = nullptr;
     bool bcsSplit = false;
-    TransferDirection direction = TransferDirection::LocalToLocal;
+    TransferDirection direction = TransferDirection::localToLocal;
 };
 
 class BuiltinDispatchInfoBuilder {
@@ -272,7 +272,7 @@ CommandStreamReceiver &CommandQueue::selectCsrForBuiltinOperation(const CsrSelec
     bool preferBcs = true;
     aub_stream::EngineType preferredBcsEngineType = aub_stream::EngineType::NUM_ENGINES;
     switch (args.direction) {
-    case TransferDirection::LocalToLocal: {
+    case TransferDirection::localToLocal: {
         const auto &clGfxCoreHelper = device->getRootDeviceEnvironment().getHelper<ClGfxCoreHelper>();
         preferBcs = clGfxCoreHelper.preferBlitterForLocalToLocalTransfers();
         if (auto flag = debugManager.flags.PreferCopyEngineForCopyBufferToBuffer.get(); flag != -1) {
@@ -283,9 +283,9 @@ CommandStreamReceiver &CommandQueue::selectCsrForBuiltinOperation(const CsrSelec
         }
         break;
     }
-    case TransferDirection::HostToHost:
-    case TransferDirection::HostToLocal:
-    case TransferDirection::LocalToHost: {
+    case TransferDirection::hostToHost:
+    case TransferDirection::hostToLocal:
+    case TransferDirection::localToHost: {
         preferBcs = true;
 
         auto preferredBCSType = true;
@@ -47,9 +47,9 @@ struct MultiDispatchInfo;
 struct TimestampPacketDependencies;
 
 enum class QueuePriority {
-    LOW,
-    MEDIUM,
-    HIGH
+    low,
+    medium,
+    high
 };
 
 template <>
@@ -443,7 +443,7 @@ class CommandQueue : public BaseObject<_cl_command_queue> {
     cl_uint queueIndexWithinFamily = 0;
     bool queueFamilySelected = false;
 
-    QueuePriority priority = QueuePriority::MEDIUM;
+    QueuePriority priority = QueuePriority::medium;
     QueueThrottle throttle = QueueThrottle::MEDIUM;
     EnqueueProperties::Operation latestSentEnqueueType = EnqueueProperties::Operation::none;
     uint64_t sliceCount = QueueSliceCount::defaultSliceCount;
@@ -46,12 +46,12 @@ class CommandQueueHw : public CommandQueue {
         auto clPriority = getCmdQueueProperties<cl_queue_priority_khr>(properties, CL_QUEUE_PRIORITY_KHR);
 
         if (clPriority & static_cast<cl_queue_priority_khr>(CL_QUEUE_PRIORITY_LOW_KHR)) {
-            priority = QueuePriority::LOW;
+            priority = QueuePriority::low;
             this->gpgpuEngine = &device->getNearestGenericSubDevice(0)->getEngine(getChosenEngineType(device->getHardwareInfo()), EngineUsage::lowPriority);
         } else if (clPriority & static_cast<cl_queue_priority_khr>(CL_QUEUE_PRIORITY_MED_KHR)) {
-            priority = QueuePriority::MEDIUM;
+            priority = QueuePriority::medium;
         } else if (clPriority & static_cast<cl_queue_priority_khr>(CL_QUEUE_PRIORITY_HIGH_KHR)) {
-            priority = QueuePriority::HIGH;
+            priority = QueuePriority::high;
         }
 
         auto clThrottle = getCmdQueueProperties<cl_queue_throttle_khr>(properties, CL_QUEUE_THROTTLE_KHR);
@@ -33,7 +33,7 @@ struct CsrSelectionArgs {
     CsrSelectionArgs(cl_command_type cmdType, const size_t *size)
         : cmdType(cmdType),
           size(size),
-          direction(TransferDirection::HostToHost) {}
+          direction(TransferDirection::hostToHost) {}
 
     template <typename ResourceType>
     CsrSelectionArgs(cl_command_type cmdType, ResourceType *src, ResourceType *dst, uint32_t rootDeviceIndex, const size_t *size)
@@ -890,7 +890,7 @@ CompletionStamp CommandQueueHw<GfxFamily>::enqueueNonBlocked(
         multiDispatchInfo.usesSlm(), // useSLM
         !getGpgpuCommandStreamReceiver().isUpdateTagFromWaitEnabled() || commandType == CL_COMMAND_FILL_BUFFER, // guardCommandBufferWithPipeControl
         commandType == CL_COMMAND_NDRANGE_KERNEL, // GSBA32BitRequired
-        (QueuePriority::LOW == priority), // lowPriority
+        (QueuePriority::low == priority), // lowPriority
         implicitFlush, // implicitFlush
         !eventBuilder.getEvent() || getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), // outOfOrderExecutionAllowed
         false, // epilogueRequired
@@ -1236,7 +1236,7 @@ bool CommandQueueHw<GfxFamily>::isSplitEnqueueBlitNeeded(TransferDirection trans
     auto bcsSplit = getDevice().isBcsSplitSupported() &&
                     csr.getOsContext().getEngineType() == aub_stream::EngineType::ENGINE_BCS &&
                     transferSize >= minimalSizeForBcsSplit &&
-                    transferDirection != TransferDirection::LocalToLocal;
+                    transferDirection != TransferDirection::localToLocal;
 
     if (bcsSplit) {
         this->constructBcsEnginesForSplit();
@@ -1263,9 +1263,9 @@ cl_int CommandQueueHw<GfxFamily>::enqueueBlitSplit(MultiDispatchInfo &dispatchIn
     StackVec<CommandStreamReceiver *, 2u> copyEngines;
 
     auto splitEngines = this->splitEngines;
-    if (dispatchInfo.peekBuiltinOpParams().direction == NEO::TransferDirection::HostToLocal) {
+    if (dispatchInfo.peekBuiltinOpParams().direction == NEO::TransferDirection::hostToLocal) {
         splitEngines = this->h2dEngines;
-    } else if (dispatchInfo.peekBuiltinOpParams().direction == NEO::TransferDirection::LocalToHost) {
+    } else if (dispatchInfo.peekBuiltinOpParams().direction == NEO::TransferDirection::localToHost) {
         splitEngines = this->d2hEngines;
     }
 
@@ -64,22 +64,22 @@ void GpgpuWalkerHelper<GfxFamily>::addAluReadModifyWriteRegister(
 
     // Setup first operand of MI_MATH - load CS_GPR_R0 into register A
     cmdAluParam.DW0.BitField.ALUOpcode =
-        static_cast<uint32_t>(AluRegisters::OPCODE_LOAD);
+        static_cast<uint32_t>(AluRegisters::opcodeLoad);
     cmdAluParam.DW0.BitField.Operand1 =
-        static_cast<uint32_t>(AluRegisters::R_SRCA);
+        static_cast<uint32_t>(AluRegisters::srca);
     cmdAluParam.DW0.BitField.Operand2 =
-        static_cast<uint32_t>(AluRegisters::R_0);
+        static_cast<uint32_t>(AluRegisters::gpr0);
     *pAluParam = cmdAluParam;
     pAluParam++;
 
     cmdAluParam.DW0.Value = 0x0;
     // Setup second operand of MI_MATH - load CS_GPR_R1 into register B
     cmdAluParam.DW0.BitField.ALUOpcode =
-        static_cast<uint32_t>(AluRegisters::OPCODE_LOAD);
+        static_cast<uint32_t>(AluRegisters::opcodeLoad);
     cmdAluParam.DW0.BitField.Operand1 =
-        static_cast<uint32_t>(AluRegisters::R_SRCB);
+        static_cast<uint32_t>(AluRegisters::srcb);
     cmdAluParam.DW0.BitField.Operand2 =
-        static_cast<uint32_t>(AluRegisters::R_1);
+        static_cast<uint32_t>(AluRegisters::gpr1);
     *pAluParam = cmdAluParam;
     pAluParam++;
 
@@ -94,11 +94,11 @@ void GpgpuWalkerHelper<GfxFamily>::addAluReadModifyWriteRegister(
     cmdAluParam.DW0.Value = 0x0;
     // Setup fourth operand of MI_MATH - store result into CS_GPR_R0
     cmdAluParam.DW0.BitField.ALUOpcode =
-        static_cast<uint32_t>(AluRegisters::OPCODE_STORE);
+        static_cast<uint32_t>(AluRegisters::opcodeStore);
     cmdAluParam.DW0.BitField.Operand1 =
-        static_cast<uint32_t>(AluRegisters::R_0);
+        static_cast<uint32_t>(AluRegisters::gpr0);
     cmdAluParam.DW0.BitField.Operand2 =
-        static_cast<uint32_t>(AluRegisters::R_ACCU);
+        static_cast<uint32_t>(AluRegisters::accu);
     *pAluParam = cmdAluParam;
 
     // LOAD value of CS_GPR_R0 into "Register"
@@ -20,7 +20,7 @@ void GpgpuWalkerHelper<Family>::applyWADisableLSQCROPERFforOCL(NEO::LinearStream
     if (disablePerfMode) {
         if (kernel.getKernelInfo().kernelDescriptor.kernelAttributes.flags.usesFencesForReadWriteImages) {
             // Set bit L3SQC_BIT_LQSC_RO_PERF_DIS in L3SQC_REG4
-            GpgpuWalkerHelper<Family>::addAluReadModifyWriteRegister(pCommandStream, RegisterOffsets::l3sqcReg4, AluRegisters::OPCODE_OR, RegisterConstants::l3SqcBitLqscR0PerfDis);
+            GpgpuWalkerHelper<Family>::addAluReadModifyWriteRegister(pCommandStream, RegisterOffsets::l3sqcReg4, AluRegisters::opcodeOr, RegisterConstants::l3SqcBitLqscR0PerfDis);
         }
     } else {
         if (kernel.getKernelInfo().kernelDescriptor.kernelAttributes.flags.usesFencesForReadWriteImages) {
@@ -31,7 +31,7 @@ void GpgpuWalkerHelper<Family>::applyWADisableLSQCROPERFforOCL(NEO::LinearStream
             pipeControl.setCommandStreamerStallEnable(true);
             *pipeControlSpace = pipeControl;
             // Clear bit L3SQC_BIT_LQSC_RO_PERF_DIS in L3SQC_REG4
-            GpgpuWalkerHelper<Family>::addAluReadModifyWriteRegister(pCommandStream, RegisterOffsets::l3sqcReg4, AluRegisters::OPCODE_AND, ~RegisterConstants::l3SqcBitLqscR0PerfDis);
+            GpgpuWalkerHelper<Family>::addAluReadModifyWriteRegister(pCommandStream, RegisterOffsets::l3sqcReg4, AluRegisters::opcodeAnd, ~RegisterConstants::l3SqcBitLqscR0PerfDis);
         }
     }
 }
@@ -19,7 +19,7 @@ void GpgpuWalkerHelper<Family>::applyWADisableLSQCROPERFforOCL(NEO::LinearStream
     if (disablePerfMode) {
        if (kernel.getKernelInfo().kernelDescriptor.kernelAttributes.flags.usesFencesForReadWriteImages) {
            // Set bit L3SQC_BIT_LQSC_RO_PERF_DIS in L3SQC_REG4
-           GpgpuWalkerHelper<Family>::addAluReadModifyWriteRegister(pCommandStream, RegisterOffsets::l3sqcReg4, AluRegisters::OPCODE_OR, RegisterConstants::l3SqcBitLqscR0PerfDis);
+           GpgpuWalkerHelper<Family>::addAluReadModifyWriteRegister(pCommandStream, RegisterOffsets::l3sqcReg4, AluRegisters::opcodeOr, RegisterConstants::l3SqcBitLqscR0PerfDis);
        }
    } else {
        if (kernel.getKernelInfo().kernelDescriptor.kernelAttributes.flags.usesFencesForReadWriteImages) {
@@ -31,7 +31,7 @@ void GpgpuWalkerHelper<Family>::applyWADisableLSQCROPERFforOCL(NEO::LinearStream
            *pipeControlSpace = pipeControl;
 
            // Clear bit L3SQC_BIT_LQSC_RO_PERF_DIS in L3SQC_REG4
-           GpgpuWalkerHelper<Family>::addAluReadModifyWriteRegister(pCommandStream, RegisterOffsets::l3sqcReg4, AluRegisters::OPCODE_AND, ~RegisterConstants::l3SqcBitLqscR0PerfDis);
+           GpgpuWalkerHelper<Family>::addAluReadModifyWriteRegister(pCommandStream, RegisterOffsets::l3sqcReg4, AluRegisters::opcodeAnd, ~RegisterConstants::l3SqcBitLqscR0PerfDis);
        }
    }
 }
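For readers unfamiliar with the workaround touched in the four hunks above: the two call sites program an ALU read-modify-write that first ORs the L3SQC_BIT_LQSC_RO_PERF_DIS bit into L3SQC_REG4 and later ANDs it back out. The sketch below restates that set/clear logic on the CPU with the renamed opcode constants; the enum, the namespace, and the bit value are simplified stand-ins with assumed values, not the actual compute-runtime declarations.

#include <cstdint>

// Simplified stand-ins; names mirror the diff, values are illustrative only.
enum class AluRegisters : uint32_t { opcodeAnd, opcodeOr };

namespace RegisterConstants {
constexpr uint32_t l3SqcBitLqscR0PerfDis = 1u << 27; // assumed bit position
}

// Plain-CPU model of the read-modify-write the command stream performs:
// opcodeOr sets the workaround bit, opcodeAnd with the inverted mask clears it.
constexpr uint32_t aluReadModifyWrite(uint32_t regValue, AluRegisters aluOp, uint32_t mask) {
    return aluOp == AluRegisters::opcodeOr ? (regValue | mask) : (regValue & mask);
}

constexpr uint32_t setPerfDis(uint32_t reg) {
    return aluReadModifyWrite(reg, AluRegisters::opcodeOr, RegisterConstants::l3SqcBitLqscR0PerfDis);
}
constexpr uint32_t clearPerfDis(uint32_t reg) {
    return aluReadModifyWrite(reg, AluRegisters::opcodeAnd, ~RegisterConstants::l3SqcBitLqscR0PerfDis);
}

static_assert(clearPerfDis(setPerfDis(0u)) == 0u);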
@@ -70,7 +70,7 @@ CompletionStamp &CommandMapUnmap::submit(TaskCountType taskLevel, bool terminate
         false, // useSLM
         !commandQueue.getGpgpuCommandStreamReceiver().isUpdateTagFromWaitEnabled(), // guardCommandBufferWithPipeControl
         false, // GSBA32BitRequired
-        commandQueue.getPriority() == QueuePriority::LOW, // lowPriority
+        commandQueue.getPriority() == QueuePriority::low, // lowPriority
         false, // implicitFlush
         commandQueue.getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), // outOfOrderExecutionAllowed
         false, // epilogueRequired
@@ -206,7 +206,7 @@ CompletionStamp &CommandComputeKernel::submit(TaskCountType taskLevel, bool term
         slmUsed, // useSLM
         !commandQueue.getGpgpuCommandStreamReceiver().isUpdateTagFromWaitEnabled(), // guardCommandBufferWithPipeControl
         commandType == CL_COMMAND_NDRANGE_KERNEL, // GSBA32BitRequired
-        commandQueue.getPriority() == QueuePriority::LOW, // lowPriority
+        commandQueue.getPriority() == QueuePriority::low, // lowPriority
         false, // implicitFlush
         commandQueue.getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), // outOfOrderExecutionAllowed
         false, // epilogueRequired
@@ -382,7 +382,7 @@ CompletionStamp &CommandWithoutKernel::submit(TaskCountType taskLevel, bool term
         false, // useSLM
         !commandStreamReceiver.isUpdateTagFromWaitEnabled(), // guardCommandBufferWithPipeControl
         false, // GSBA32BitRequired
-        commandQueue.getPriority() == QueuePriority::LOW, // lowPriority
+        commandQueue.getPriority() == QueuePriority::low, // lowPriority
         false, // implicitFlush
         commandStreamReceiver.isNTo1SubmissionModelEnabled(), // outOfOrderExecutionAllowed
         false, // epilogueRequired
@@ -1199,7 +1199,7 @@ void Kernel::performKernelTuning(CommandStreamReceiver &commandStreamReceiver, c
         KernelSubmissionData submissionData;
         submissionData.kernelStandardTimestamps = std::make_unique<TimestampPacketContainer>();
         submissionData.kernelSubdeviceTimestamps = std::make_unique<TimestampPacketContainer>();
-        submissionData.status = TunningStatus::STANDARD_TUNNING_IN_PROGRESS;
+        submissionData.status = TunningStatus::standardTunningInProgress;
         submissionData.kernelStandardTimestamps->assignAndIncrementNodesRefCounts(*timestampContainer);
         this->kernelSubmissionMap[config] = std::move(submissionData);
         this->singleSubdevicePreferredInCurrentEnqueue = false;
@@ -1208,13 +1208,13 @@ void Kernel::performKernelTuning(CommandStreamReceiver &commandStreamReceiver, c
 
     auto &submissionData = submissionDataIt->second;
 
-    if (submissionData.status == TunningStatus::TUNNING_DONE) {
+    if (submissionData.status == TunningStatus::tunningDone) {
         this->singleSubdevicePreferredInCurrentEnqueue = submissionData.singleSubdevicePreferred;
     }
 
-    if (submissionData.status == TunningStatus::SUBDEVICE_TUNNING_IN_PROGRESS) {
+    if (submissionData.status == TunningStatus::subdeviceTunningInProgress) {
         if (this->hasTunningFinished(submissionData)) {
-            submissionData.status = TunningStatus::TUNNING_DONE;
+            submissionData.status = TunningStatus::tunningDone;
             submissionData.kernelStandardTimestamps.reset();
             submissionData.kernelSubdeviceTimestamps.reset();
             this->singleSubdevicePreferredInCurrentEnqueue = submissionData.singleSubdevicePreferred;
@@ -1223,8 +1223,8 @@ void Kernel::performKernelTuning(CommandStreamReceiver &commandStreamReceiver, c
         }
     }
 
-    if (submissionData.status == TunningStatus::STANDARD_TUNNING_IN_PROGRESS) {
-        submissionData.status = TunningStatus::SUBDEVICE_TUNNING_IN_PROGRESS;
+    if (submissionData.status == TunningStatus::standardTunningInProgress) {
+        submissionData.status = TunningStatus::subdeviceTunningInProgress;
         submissionData.kernelSubdeviceTimestamps->assignAndIncrementNodesRefCounts(*timestampContainer);
         this->singleSubdevicePreferredInCurrentEnqueue = true;
     }
@@ -75,9 +75,9 @@ class Kernel : public ReferenceTrackedObject<Kernel> {
     };
 
     enum class TunningStatus {
-        STANDARD_TUNNING_IN_PROGRESS,
-        SUBDEVICE_TUNNING_IN_PROGRESS,
-        TUNNING_DONE
+        standardTunningInProgress,
+        subdeviceTunningInProgress,
+        tunningDone
    };
 
    enum class TunningType {
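The kernel-tuning hunks above move a small state machine onto the new enumerator names: tuning starts in standardTunningInProgress, switches to subdeviceTunningInProgress once the standard run is submitted, and ends in tunningDone when the sub-device timestamps are resolved. Below is a hedged sketch of that progression; the advance helper is illustrative only and not part of the compute-runtime sources.

enum class TunningStatus {
    standardTunningInProgress,
    subdeviceTunningInProgress,
    tunningDone
};

// Illustrative-only helper mirroring the transitions driven by Kernel::performKernelTuning above.
constexpr TunningStatus advance(TunningStatus status) {
    switch (status) {
    case TunningStatus::standardTunningInProgress:
        return TunningStatus::subdeviceTunningInProgress;
    case TunningStatus::subdeviceTunningInProgress:
        return TunningStatus::tunningDone;
    default:
        return TunningStatus::tunningDone; // already done; stays done
    }
}

static_assert(advance(TunningStatus::standardTunningInProgress) == TunningStatus::subdeviceTunningInProgress);
static_assert(advance(advance(TunningStatus::standardTunningInProgress)) == TunningStatus::tunningDone);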
@@ -219,13 +219,13 @@ bool inline copyHostPointer(Buffer *buffer,
             memory->setTbxWritable(true, GraphicsAllocation::defaultBank);
             return true;
         } else {
-            auto blitMemoryToAllocationResult = BlitOperationResult::Unsupported;
+            auto blitMemoryToAllocationResult = BlitOperationResult::unsupported;
 
             if (productHelper.isBlitterFullySupported(hwInfo) && isLocalMemory) {
                 blitMemoryToAllocationResult = BlitHelperFunctions::blitMemoryToAllocation(device, memory, buffer->getOffset(), hostPtr, {size, 1, 1});
             }
 
-            if (blitMemoryToAllocationResult != BlitOperationResult::Success) {
+            if (blitMemoryToAllocationResult != BlitOperationResult::success) {
                 auto context = buffer->getContext();
                 auto cmdQ = context->getSpecialQueue(rootDeviceIndex);
                 auto mapAllocation = buffer->getMapAllocation(rootDeviceIndex);
@@ -126,7 +126,7 @@ cl_int Program::linkBinary(Device *pDevice, const void *constantsInitData, size_
     }
 
     Linker::UnresolvedExternals unresolvedExternalsInfo;
-    bool linkSuccess = LinkingStatus::LinkedFully == linker.link(globals, constants, exportedFunctions, strings,
+    bool linkSuccess = LinkingStatus::linkedFully == linker.link(globals, constants, exportedFunctions, strings,
                                                                  globalsForPatching, constantsForPatching,
                                                                  isaSegmentsForPatching, unresolvedExternalsInfo,
                                                                  pDevice, constantsInitData, constantsInitDataSize,
@@ -262,7 +262,7 @@ cl_int Program::setProgramSpecializationConstant(cl_uint specId, size_t specSize
     SpecConstantInfo specConstInfo;
     auto retVal = pCompilerInterface->getSpecConstantsInfo(device, ArrayRef<const char>(irBinary.get(), irBinarySize), specConstInfo);
 
-    if (retVal != TranslationOutput::ErrorCode::Success) {
+    if (retVal != TranslationOutput::ErrorCode::success) {
         return CL_INVALID_VALUE;
     }
 
@@ -50,15 +50,15 @@ constexpr cl_int asClError(TranslationOutput::ErrorCode err) {
     switch (err) {
     default:
         return CL_OUT_OF_HOST_MEMORY;
-    case TranslationOutput::ErrorCode::Success:
+    case TranslationOutput::ErrorCode::success:
         return CL_SUCCESS;
-    case TranslationOutput::ErrorCode::CompilerNotAvailable:
+    case TranslationOutput::ErrorCode::compilerNotAvailable:
         return CL_COMPILER_NOT_AVAILABLE;
-    case TranslationOutput::ErrorCode::CompilationFailure:
+    case TranslationOutput::ErrorCode::compilationFailure:
         return CL_COMPILE_PROGRAM_FAILURE;
-    case TranslationOutput::ErrorCode::BuildFailure:
+    case TranslationOutput::ErrorCode::buildFailure:
         return CL_BUILD_PROGRAM_FAILURE;
-    case TranslationOutput::ErrorCode::LinkFailure:
+    case TranslationOutput::ErrorCode::linkFailure:
         return CL_LINK_PROGRAM_FAILURE;
     }
 }