Mirror of https://github.com/intel/compute-runtime.git (synced 2026-01-03 06:49:52 +08:00)
refactor: correct variable naming
Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
Committed by: Compute-Runtime-Automation
Parent: f8bb71e1ba
Commit: c3ac7b78bd
@@ -3546,7 +3546,7 @@ cl_int CL_API_CALL clEnqueueNDRangeKernel(cl_command_queue commandQueue,
         return retVal;
     }
 
-    if ((pKernel->getExecutionType() != KernelExecutionType::Default) ||
+    if ((pKernel->getExecutionType() != KernelExecutionType::defaultType) ||
         pKernel->usesSyncBuffer()) {
         retVal = CL_INVALID_KERNEL;
         TRACING_EXIT(ClEnqueueNdRangeKernel, &retVal);
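For reference, a minimal sketch of what the renamed execution-type enumeration presumably looks like after this change; the enum-class form and underlying type are assumptions, only the enumerator names come from this diff. The default case becomes defaultType rather than default because default is a C++ keyword.

#include <cstdint>

// Assumed shape of the enum after the rename (declaration details are illustrative).
enum class KernelExecutionType : uint32_t {
    defaultType,  // was Default
    concurrent,   // was Concurrent
    notApplicable // was NotApplicable
};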
@@ -6076,7 +6076,7 @@ cl_int CL_API_CALL clEnqueueNDCountKernelINTEL(cl_command_queue commandQueue,
     }
 
     if (pKernel->usesSyncBuffer()) {
-        if (pKernel->getExecutionType() != KernelExecutionType::Concurrent) {
+        if (pKernel->getExecutionType() != KernelExecutionType::concurrent) {
            retVal = CL_INVALID_KERNEL;
            return retVal;
        }
@@ -6091,7 +6091,7 @@ cl_int CL_API_CALL clEnqueueNDCountKernelINTEL(cl_command_queue commandQueue,
         }
     }
 
-    if (pKernel->getExecutionType() == KernelExecutionType::Concurrent) {
+    if (pKernel->getExecutionType() == KernelExecutionType::concurrent) {
         size_t requestedNumberOfWorkgroups = 1;
         for (size_t i = 0; i < workDim; i++) {
             requestedNumberOfWorkgroups *= workgroupCount[i];
@@ -812,7 +812,7 @@ CompletionStamp CommandQueueHw<GfxFamily>::enqueueNonBlocked(
     }
 
     auto mediaSamplerRequired = false;
-    uint32_t numGrfRequired = GrfConfig::DefaultGrfNumber;
+    uint32_t numGrfRequired = GrfConfig::defaultGrfNumber;
     auto systolicPipelineSelectMode = false;
     Kernel *kernel = nullptr;
     bool auxTranslationRequired = false;
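A hedged sketch of the GRF configuration constants after the rename; the concrete register counts are assumptions for illustration, only the identifiers defaultGrfNumber, largeGrfNumber, and notApplicable appear in this diff.

#include <cstdint>

// Assumed shape of GrfConfig after the rename; the numeric values are illustrative only.
struct GrfConfig {
    static constexpr uint32_t defaultGrfNumber = 128u; // was DefaultGrfNumber
    static constexpr uint32_t largeGrfNumber = 256u;   // was LargeGrfNumber
    static constexpr uint32_t notApplicable = 0u;      // was NotApplicable
};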
@@ -1135,12 +1135,12 @@ CompletionStamp CommandQueueHw<GfxFamily>::enqueueCommandWithoutKernel(
         flushStamp->getStampReference(),         // flushStampReference
         getThrottle(),                           // throttle
         device->getPreemptionMode(),             // preemptionMode
-        GrfConfig::NotApplicable,                // numGrfRequired
-        L3CachingSettings::NotApplicable,        // l3CacheSettings
+        GrfConfig::notApplicable,                // numGrfRequired
+        L3CachingSettings::notApplicable,        // l3CacheSettings
         ThreadArbitrationPolicy::NotPresent,     // threadArbitrationPolicy
-        AdditionalKernelExecInfo::NotApplicable, // additionalKernelExecInfo
-        KernelExecutionType::NotApplicable,      // kernelExecutionType
-        MemoryCompressionState::NotApplicable,   // memoryCompressionState
+        AdditionalKernelExecInfo::notApplicable, // additionalKernelExecInfo
+        KernelExecutionType::notApplicable,      // kernelExecutionType
+        MemoryCompressionState::notApplicable,   // memoryCompressionState
         getSliceCount(),                         // sliceCount
         blocking,                                // blocking
         false,                                   // dcFlush
@@ -58,12 +58,12 @@ CompletionStamp &CommandMapUnmap::submit(TaskCountType taskLevel, bool terminate
         commandQueue.flushStamp->getStampReference(),                  // flushStampReference
         commandQueue.getThrottle(),                                    // throttle
         ClPreemptionHelper::taskPreemptionMode(device, multiDispatch), // preemptionMode
-        GrfConfig::NotApplicable,                                      // numGrfRequired
-        L3CachingSettings::NotApplicable,                              // l3CacheSettings
+        GrfConfig::notApplicable,                                      // numGrfRequired
+        L3CachingSettings::notApplicable,                              // l3CacheSettings
         ThreadArbitrationPolicy::NotPresent,                           // threadArbitrationPolicy
-        AdditionalKernelExecInfo::NotApplicable,                       // additionalKernelExecInfo
-        KernelExecutionType::NotApplicable,                            // kernelExecutionType
-        MemoryCompressionState::NotApplicable,                         // memoryCompressionState
+        AdditionalKernelExecInfo::notApplicable,                       // additionalKernelExecInfo
+        KernelExecutionType::notApplicable,                            // kernelExecutionType
+        MemoryCompressionState::notApplicable,                         // memoryCompressionState
         commandQueue.getSliceCount(),                                  // sliceCount
         true,                                                          // blocking
         true,                                                          // dcFlush
@@ -370,12 +370,12 @@ CompletionStamp &CommandWithoutKernel::submit(TaskCountType taskLevel, bool term
         commandQueue.flushStamp->getStampReference(),  // flushStampReference
         commandQueue.getThrottle(),                    // throttle
         commandQueue.getDevice().getPreemptionMode(),  // preemptionMode
-        GrfConfig::NotApplicable,                      // numGrfRequired
-        L3CachingSettings::NotApplicable,              // l3CacheSettings
+        GrfConfig::notApplicable,                      // numGrfRequired
+        L3CachingSettings::notApplicable,              // l3CacheSettings
         ThreadArbitrationPolicy::NotPresent,           // threadArbitrationPolicy
-        AdditionalKernelExecInfo::NotApplicable,       // additionalKernelExecInfo
-        KernelExecutionType::NotApplicable,            // kernelExecutionType
-        MemoryCompressionState::NotApplicable,         // memoryCompressionState
+        AdditionalKernelExecInfo::notApplicable,       // additionalKernelExecInfo
+        KernelExecutionType::notApplicable,            // kernelExecutionType
+        MemoryCompressionState::notApplicable,         // memoryCompressionState
         commandQueue.getSliceCount(),                  // sliceCount
         true,                                          // blocking
         false,                                         // dcFlush
@@ -1063,10 +1063,10 @@ void Kernel::clearUnifiedMemoryExecInfo() {
 cl_int Kernel::setKernelExecutionType(cl_execution_info_kernel_type_intel executionType) {
     switch (executionType) {
     case CL_KERNEL_EXEC_INFO_DEFAULT_TYPE_INTEL:
-        this->executionType = KernelExecutionType::Default;
+        this->executionType = KernelExecutionType::defaultType;
         break;
     case CL_KERNEL_EXEC_INFO_CONCURRENT_TYPE_INTEL:
-        this->executionType = KernelExecutionType::Concurrent;
+        this->executionType = KernelExecutionType::concurrent;
         break;
     default: {
         return CL_INVALID_VALUE;
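A hedged usage sketch of the setter above; pKernel is a placeholder and the surrounding call path is an assumption, only the extension constants and enumerator names come from this diff.

// Requesting the concurrent execution type stores KernelExecutionType::concurrent.
cl_int err = pKernel->setKernelExecutionType(CL_KERNEL_EXEC_INFO_CONCURRENT_TYPE_INTEL);
// Any value other than the default/concurrent constants falls through to CL_INVALID_VALUE.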
@@ -2194,7 +2194,7 @@ bool Kernel::areMultipleSubDevicesInContext() const {
 
 void Kernel::reconfigureKernel() {
     const auto &kernelDescriptor = kernelInfo.kernelDescriptor;
-    if (kernelDescriptor.kernelAttributes.numGrfRequired == GrfConfig::LargeGrfNumber &&
+    if (kernelDescriptor.kernelAttributes.numGrfRequired == GrfConfig::largeGrfNumber &&
         kernelDescriptor.kernelAttributes.simdSize != 32) {
         this->maxKernelWorkGroupSize >>= 1;
     }
 
@@ -90,12 +90,12 @@ class Kernel : public ReferenceTrackedObject<Kernel> {
                       size_t argSize,
                       const void *argVal);
 
-    template <typename kernel_t = Kernel, typename program_t = Program>
-    static kernel_t *create(program_t *program, const KernelInfo &kernelInfo, ClDevice &clDevice, cl_int &errcodeRet) {
+    template <typename KernelType = Kernel, typename ProgramType = Program>
+    static KernelType *create(ProgramType *program, const KernelInfo &kernelInfo, ClDevice &clDevice, cl_int &errcodeRet) {
         cl_int retVal;
-        kernel_t *pKernel = nullptr;
+        KernelType *pKernel = nullptr;
 
-        pKernel = new kernel_t(program, kernelInfo, clDevice);
+        pKernel = new KernelType(program, kernelInfo, clDevice);
         retVal = pKernel->initialize();
 
         if (retVal != CL_SUCCESS) {
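Because only the template-parameter names changed (kernel_t to KernelType, program_t to ProgramType), call sites are unaffected whether they rely on the defaults or pass explicit types. A hedged sketch; pProgram, kernelInfo, clDevice, and MockKernel are hypothetical placeholders, not taken from this diff.

cl_int retVal = CL_SUCCESS;
auto *pKernel = Kernel::create(pProgram, kernelInfo, clDevice, retVal);                 // defaults: Kernel, Program
auto *pMockKernel = Kernel::create<MockKernel>(pProgram, kernelInfo, clDevice, retVal); // explicit kernel type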
@@ -513,12 +513,12 @@ class Kernel : public ReferenceTrackedObject<Kernel> {
     char *crossThreadData = nullptr;
 
     AuxTranslationDirection auxTranslationDirection = AuxTranslationDirection::None;
-    KernelExecutionType executionType = KernelExecutionType::Default;
+    KernelExecutionType executionType = KernelExecutionType::defaultType;
 
     uint32_t patchedArgumentsNum = 0;
     uint32_t startOffset = 0;
     uint32_t statelessUncacheableArgsCount = 0;
-    uint32_t additionalKernelExecInfo = AdditionalKernelExecInfo::DisableOverdispatch;
+    uint32_t additionalKernelExecInfo = AdditionalKernelExecInfo::disableOverdispatch;
     uint32_t maxKernelWorkGroupSize = 0;
     uint32_t slmTotalSize = 0u;
     uint32_t sshLocalSize = 0u;
@@ -34,8 +34,8 @@ class MultiDeviceKernel : public BaseObject<_cl_kernel> {
     Kernel *getKernel(uint32_t rootDeviceIndex) const { return kernels[rootDeviceIndex]; }
     Kernel *getDefaultKernel() const { return defaultKernel; }
 
-    template <typename kernel_t = Kernel, typename program_t = Program, typename multi_device_kernel_t = MultiDeviceKernel>
-    static multi_device_kernel_t *create(program_t *program, const KernelInfoContainer &kernelInfos, cl_int &errcodeRet) {
+    template <typename KernelType = Kernel, typename ProgramType = Program, typename MultiDeviceKernelType = MultiDeviceKernel>
+    static MultiDeviceKernelType *create(ProgramType *program, const KernelInfoContainer &kernelInfos, cl_int &errcodeRet) {
         KernelVectorType kernels{};
         kernels.resize(program->getMaxRootDeviceIndex() + 1);
 
@@ -44,12 +44,12 @@ class MultiDeviceKernel : public BaseObject<_cl_kernel> {
             if (kernels[rootDeviceIndex]) {
                 continue;
             }
-            kernels[rootDeviceIndex] = Kernel::create<kernel_t, program_t>(program, *kernelInfos[rootDeviceIndex], *pDevice, errcodeRet);
+            kernels[rootDeviceIndex] = Kernel::create<KernelType, ProgramType>(program, *kernelInfos[rootDeviceIndex], *pDevice, errcodeRet);
             if (!kernels[rootDeviceIndex]) {
                 return nullptr;
             }
         }
-        auto pMultiDeviceKernel = new multi_device_kernel_t(std::move(kernels), kernelInfos);
+        auto pMultiDeviceKernel = new MultiDeviceKernelType(std::move(kernels), kernelInfos);
 
         return pMultiDeviceKernel;
     }
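Similarly, a hedged sketch of how the renamed MultiDeviceKernel::create factory is presumably invoked; pProgram, kernelInfos, and retVal are hypothetical placeholders.

cl_int retVal = CL_SUCCESS;
auto *pMultiDeviceKernel = MultiDeviceKernel::create(pProgram, kernelInfos, retVal); // default KernelType/ProgramType/MultiDeviceKernelType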