commit 0c066b4266 (parent 43f524d019), committed by Compute-Runtime-Automation

refactor: remove C-style casts 1/n

Related-To: NEO-15174
Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -14,7 +14,7 @@ cl_int VmeAccelerator::validateVmeArgs(Context *context,
                                        size_t descriptorSize,
                                        const void *descriptor) {
     const cl_motion_estimation_desc_intel *descObj =
-        (const cl_motion_estimation_desc_intel *)descriptor;
+        reinterpret_cast<const cl_motion_estimation_desc_intel *>(descriptor);

     DEBUG_BREAK_IF(!context);
     DEBUG_BREAK_IF(typeId != CL_ACCELERATOR_TYPE_MOTION_ESTIMATION_INTEL);
@@ -574,7 +574,7 @@ cl_command_queue CL_API_CALL clCreateCommandQueue(cl_context context,
                                                   cl_device_id device,
                                                   const cl_command_queue_properties properties,
                                                   cl_int *errcodeRet) {
-    TRACING_ENTER(ClCreateCommandQueue, &context, &device, (cl_command_queue_properties *)&properties, &errcodeRet);
+    TRACING_ENTER(ClCreateCommandQueue, &context, &device, &properties, &errcodeRet);
     cl_command_queue commandQueue = nullptr;
     ErrorCodeHelper err(errcodeRet, CL_SUCCESS);
     cl_int retVal = CL_SUCCESS;
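This change works because the tracer's signature (see the tracer and cl_params struct hunks near the end of this diff) now takes a const pointer, so the call site no longer has to cast const away. A minimal sketch of the same idea, with hypothetical tracer functions standing in for the real NEO tracing API:

#include <cstdint>

using cl_command_queue_properties = uint64_t;

// Hypothetical stand-ins for the tracing hooks; the real NEO API differs.
inline void traceEnterOld(cl_command_queue_properties *props) { (void)props; }
inline void traceEnterNew(const cl_command_queue_properties *props) { (void)props; }

void createQueue(const cl_command_queue_properties properties) {
    // Old: the C-style cast silently strips const from &properties; any write
    // through that pointer inside the tracer would be undefined behavior.
    traceEnterOld((cl_command_queue_properties *)&properties);

    // New: fix the callee's signature instead of casting at the call site.
    traceEnterNew(&properties);
}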
@@ -1292,7 +1292,7 @@ cl_int CL_API_CALL clSetMemObjectDestructorCallback(cl_mem memobj,
     cl_int retVal = CL_SUCCESS;
     API_ENTER(&retVal);
     DBG_LOG_INPUTS("memobj", memobj, "funcNotify", reinterpret_cast<void *>(funcNotify), "userData", userData);
-    retVal = validateObjects(memobj, (void *)funcNotify);
+    retVal = validateObjects(memobj, reinterpret_cast<void *>(funcNotify));

     if (CL_SUCCESS != retVal) {
         TRACING_EXIT(ClSetMemObjectDestructorCallback, &retVal);
@@ -1717,11 +1717,10 @@ cl_int CL_API_CALL clGetProgramInfo(cl_program program,
                    "paramValueSize", paramValueSize,
                    "paramValue", NEO::fileLoggerInstance().infoPointerToString(paramValue, paramValueSize),
                    "paramValueSizeRet", paramValueSizeRet);
-    retVal = validateObjects(program);
+    Program *pProgram = nullptr;
+    retVal = validateObjects(withCastToInternal(program, &pProgram));

     if (CL_SUCCESS == retVal) {
-        Program *pProgram = (Program *)(program);
-
         retVal = pProgram->getInfo(
             paramName,
             paramValueSize,
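Beyond removing the C-style cast, this hunk switches to the withCastToInternal idiom, which validates the handle and downcasts in one step instead of casting an unvalidated handle. A simplified sketch of that idiom, under the assumption that NEO's real validateObjects/castToObject helpers behave roughly like this:

// Simplified sketch of the validate-and-cast idiom; names below are
// illustrative, not the real NEO helpers.
struct _cl_program {};          // opaque API handle type
struct Program : _cl_program {  // internal object type
    int getInfoDummy() const { return 0; }
};

template <typename ApiT, typename InternalT>
int validateWithCast(ApiT *handle, InternalT **out) {
    // NEO's castToObject performs a checked downcast; a plain static_cast
    // stands in for it here.
    *out = static_cast<InternalT *>(handle);
    return (*out != nullptr) ? 0 /*CL_SUCCESS*/ : -30 /*CL_INVALID_VALUE*/;
}

int queryProgram(_cl_program *program) {
    Program *pProgram = nullptr;
    int retVal = validateWithCast(program, &pProgram);
    if (retVal == 0) {
        retVal = pProgram->getInfoDummy();
    }
    return retVal;
}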
@@ -2695,7 +2694,7 @@ cl_int CL_API_CALL clEnqueueFillBuffer(cl_command_queue commandQueue,
         withCastToInternal(commandQueue, &pCommandQueue),
         withCastToInternal(buffer, &pBuffer),
         pattern,
-        (PatternSize)patternSize,
+        static_cast<PatternSize>(patternSize),
         EventWaitList(numEventsInWaitList, eventWaitList));

     if (CL_SUCCESS == retVal) {
@@ -3166,7 +3165,7 @@ cl_int CL_API_CALL clEnqueueCopyImageToBuffer(cl_command_queue commandQueue,
                                               cl_uint numEventsInWaitList,
                                               const cl_event *eventWaitList,
                                               cl_event *event) {
-    TRACING_ENTER(ClEnqueueCopyImageToBuffer, &commandQueue, &srcImage, &dstBuffer, &srcOrigin, &region, (size_t *)&dstOffset, &numEventsInWaitList, &eventWaitList, &event);
+    TRACING_ENTER(ClEnqueueCopyImageToBuffer, &commandQueue, &srcImage, &dstBuffer, &srcOrigin, &region, &dstOffset, &numEventsInWaitList, &eventWaitList, &event);
     cl_int retVal = CL_SUCCESS;
     API_ENTER(&retVal);

@@ -4651,13 +4650,13 @@ cl_int CL_API_CALL clGetKernelSuggestedLocalWorkSizeKHR(cl_command_queue command
     return retVal;
 }

-#define RETURN_FUNC_PTR_IF_EXIST(name)                                  \
-    {                                                                   \
-        if (!strcmp(funcName, #name)) {                                 \
-            void *ret = ((void *)(name));                               \
-            TRACING_EXIT(ClGetExtensionFunctionAddress, (void **)&ret); \
-            return ret;                                                 \
-        }                                                               \
+#define RETURN_FUNC_PTR_IF_EXIST(name)                                                    \
+    {                                                                                     \
+        if (!strcmp(funcName, #name)) {                                                   \
+            void *ret = reinterpret_cast<void *>(name);                                   \
+            TRACING_EXIT(ClGetExtensionFunctionAddress, reinterpret_cast<void **>(&ret)); \
+            return ret;                                                                   \
+        }                                                                                 \
     }
 void *CL_API_CALL clGetExtensionFunctionAddress(const char *funcName) {
     TRACING_ENTER(ClGetExtensionFunctionAddress, &funcName);
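Converting a function pointer to void * is only conditionally supported by the C++ standard, but it is required by the OpenCL extension-query contract and works on the platforms NEO targets; the named reinterpret_cast expresses the same conversion as the old C-style cast while staying greppable. A minimal sketch of the lookup pattern, with a hypothetical extension entry point:

#include <cstring>

// Hypothetical extension entry point, for illustration only.
void clSomeExtensionINTEL() {}

#define RETURN_FUNC_PTR_IF_EXIST(name)             \
    {                                              \
        if (!strcmp(funcName, #name)) {            \
            return reinterpret_cast<void *>(name); \
        }                                          \
    }

void *getExtensionFunctionAddress(const char *funcName) {
    RETURN_FUNC_PTR_IF_EXIST(clSomeExtensionINTEL);
    return nullptr;
}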
@@ -5247,7 +5246,7 @@ cl_int CL_API_CALL clSetKernelExecInfo(cl_kernel kernel,
         return retVal;
     }
     size_t numPointers = paramValueSize / sizeof(void *);
-    size_t *pSvmPtrList = (size_t *)paramValue;
+    auto pSvmPtrList = reinterpret_cast<void **>(const_cast<void *>(paramValue));

     if (paramName == CL_KERNEL_EXEC_INFO_SVM_PTRS) {
         pMultiDeviceKernel->clearSvmKernelExecInfo();
@@ -5256,7 +5255,7 @@
     }

     for (uint32_t i = 0; i < numPointers; i++) {
-        auto svmData = pMultiDeviceKernel->getContext().getSVMAllocsManager()->getSVMAlloc((const void *)pSvmPtrList[i]);
+        auto svmData = pMultiDeviceKernel->getContext().getSVMAllocsManager()->getSVMAlloc(pSvmPtrList[i]);
         if (svmData == nullptr) {
             retVal = CL_INVALID_VALUE;
             TRACING_EXIT(ClSetKernelExecInfo, &retVal);
@@ -5802,7 +5801,7 @@ cl_int CL_API_CALL clEnqueueSVMMigrateMem(cl_command_queue commandQueue,
         return retVal;
     }
     if (sizes != nullptr && sizes[i] != 0) {
-        svmData = pSvmAllocMgr->getSVMAlloc(reinterpret_cast<void *>((size_t)svmPointers[i] + sizes[i] - 1));
+        svmData = pSvmAllocMgr->getSVMAlloc(ptrOffset(svmPointers[i], sizes[i] - 1));
         if (svmData == nullptr) {
             retVal = CL_INVALID_VALUE;
             TRACING_EXIT(ClEnqueueSvmMigrateMem, &retVal);

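ptrOffset is NEO's helper for byte-wise pointer arithmetic; using it replaces the round-trip through size_t and back with a single named operation. A minimal sketch of such a helper, assuming the real one behaves like this simplified version:

#include <cstddef>
#include <cstdint>

// Simplified sketch of a ptrOffset-style helper; NEO's real implementation
// also provides overloads for integer address types.
template <typename PtrT>
PtrT ptrOffset(PtrT base, size_t offset) {
    return reinterpret_cast<PtrT>(reinterpret_cast<uintptr_t>(base) + offset);
}

// Usage: address of the last byte of a [svmPtr, svmPtr + size) range,
// as in the clEnqueueSVMMigrateMem hunk above.
void *lastByte(void *svmPtr, size_t size) {
    return ptrOffset(svmPtr, size - 1);
}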
@@ -584,7 +584,7 @@ TaskCountType CommandQueue::getTaskLevelFromWaitList(TaskCountType taskLevel,
                                                      cl_uint numEventsInWaitList,
                                                      const cl_event *eventWaitList) {
     for (auto iEvent = 0u; iEvent < numEventsInWaitList; ++iEvent) {
-        auto pEvent = (Event *)(eventWaitList[iEvent]);
+        auto pEvent = static_cast<const Event *>(eventWaitList[iEvent]);
         TaskCountType eventTaskLevel = pEvent->peekTaskLevel();
         taskLevel = std::max(taskLevel, eventTaskLevel);
     }

@@ -1071,7 +1071,7 @@ void CommandQueueHw<GfxFamily>::enqueueBlocked(
             std::move(printfHandler),
             preemptionMode,
             multiDispatchInfo.peekMainKernel(),
-            (uint32_t)multiDispatchInfo.size(),
+            static_cast<uint32_t>(multiDispatchInfo.size()),
             multiRootDeviceSyncNode);
     }
     if (storeTimestampPackets) {

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -41,10 +41,12 @@ cl_int CommandQueueHw<GfxFamily>::enqueueFillBuffer(
     }

     if (patternSize == 1) {
-        int patternInt = (uint32_t)((*(uint8_t *)pattern << 24) | (*(uint8_t *)pattern << 16) | (*(uint8_t *)pattern << 8) | *(uint8_t *)pattern);
+        auto patternValue = *reinterpret_cast<const uint8_t *>(pattern);
+        int patternInt = static_cast<uint32_t>((patternValue << 24) | (patternValue << 16) | (patternValue << 8) | patternValue);
         memcpy_s(patternAllocation->getUnderlyingBuffer(), sizeof(uint32_t), &patternInt, sizeof(uint32_t));
     } else if (patternSize == 2) {
-        int patternInt = (uint32_t)((*(uint16_t *)pattern << 16) | *(uint16_t *)pattern);
+        auto patternValue = *reinterpret_cast<const uint16_t *>(pattern);
+        int patternInt = static_cast<uint32_t>((patternValue << 16) | patternValue);
         memcpy_s(patternAllocation->getUnderlyingBuffer(), sizeof(uint32_t), &patternInt, sizeof(uint32_t));
     } else {
         memcpy_s(patternAllocation->getUnderlyingBuffer(), patternSize, pattern, patternSize);

@@ -510,10 +510,12 @@ cl_int CommandQueueHw<GfxFamily>::enqueueSVMMemFill(void *svmPtr,
     }

     if (patternSize == 1) {
-        int patternInt = (uint32_t)((*(uint8_t *)pattern << 24) | (*(uint8_t *)pattern << 16) | (*(uint8_t *)pattern << 8) | *(uint8_t *)pattern);
+        auto patternValue = *reinterpret_cast<const uint8_t *>(pattern);
+        int patternInt = static_cast<uint32_t>((patternValue << 24) | (patternValue << 16) | (patternValue << 8) | patternValue);
         memcpy_s(patternAllocation->getUnderlyingBuffer(), sizeof(uint32_t), &patternInt, sizeof(uint32_t));
     } else if (patternSize == 2) {
-        int patternInt = (uint32_t)((*(uint16_t *)pattern << 16) | *(uint16_t *)pattern);
+        auto patternValue = *reinterpret_cast<const uint16_t *>(pattern);
+        int patternInt = static_cast<uint32_t>((patternValue << 16) | patternValue);
         memcpy_s(patternAllocation->getUnderlyingBuffer(), sizeof(uint32_t), &patternInt, sizeof(uint32_t));
     } else {
         memcpy_s(patternAllocation->getUnderlyingBuffer(), patternSize, pattern, patternSize);

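Both fill paths widen a 1- or 2-byte fill pattern to a full 32-bit word by replicating it; pulling the dereference into a named patternValue removes four casts per line and avoids re-reading the pattern memory. A standalone sketch of the replication step (simplified to write into a caller-provided buffer):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Replicate a 1- or 2-byte pattern across a 32-bit word, mirroring the
// enqueueFillBuffer / enqueueSVMMemFill logic above.
void widenPattern(void *dst, const void *pattern, size_t patternSize) {
    uint32_t word = 0;
    if (patternSize == 1) {
        auto v = *reinterpret_cast<const uint8_t *>(pattern);
        word = static_cast<uint32_t>((v << 24) | (v << 16) | (v << 8) | v);
    } else if (patternSize == 2) {
        auto v = *reinterpret_cast<const uint16_t *>(pattern);
        word = static_cast<uint32_t>((v << 16) | v);
    }
    std::memcpy(dst, &word, sizeof(word)); // the NEO sources use memcpy_s
}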
@@ -55,7 +55,7 @@ Context::Context(
 }

 Context::~Context() {
-    gtpinNotifyContextDestroy((cl_context)this);
+    gtpinNotifyContextDestroy(static_cast<cl_context>(this));

     if (multiRootDeviceTimestampPacketAllocator.get() != nullptr) {
         multiRootDeviceTimestampPacketAllocator.reset();
@@ -330,7 +330,7 @@ cl_int Context::getInfo(cl_context_info paramName, size_t paramValueSize,
         break;

     case CL_CONTEXT_NUM_DEVICES:
-        numDevices = (cl_uint)(devices.size());
+        numDevices = static_cast<cl_uint>(devices.size());
         valueSize = sizeof(numDevices);
         pValue = &numDevices;
         break;
@@ -375,7 +375,7 @@ bool Context::containsMultipleSubDevices(uint32_t rootDeviceIndex) const {
 }

 ClDevice *Context::getDevice(size_t deviceOrdinal) const {
-    return (ClDevice *)devices[deviceOrdinal];
+    return static_cast<ClDevice *>(devices[deviceOrdinal]);
 }

 cl_int Context::getSupportedImageFormats(

@@ -911,8 +911,8 @@ void Event::addCallback(Callback::ClbFuncT fn, cl_int type, void *data) {
     // All enqueued callbacks shall be called before the event object is destroyed."
     // That's why each registered calback increments the internal refcount
     incRefInternal();
-    DBG_LOG(EventsDebugEnable, "event", this, "addCallback", "ECallbackTarget", (uint32_t)type);
-    callbacks[(uint32_t)target].pushFrontOne(*new Callback(this, fn, type, data));
+    DBG_LOG(EventsDebugEnable, "event", this, "addCallback", "ECallbackTarget", static_cast<uint32_t>(type));
+    callbacks[static_cast<uint32_t>(target)].pushFrontOne(*new Callback(this, fn, type, data));

     // Callback added after event reached its "completed" state
     if (updateStatusAndCheckCompletion()) {
@@ -943,7 +943,7 @@ void Event::executeCallbacks(int32_t executionStatusIn) {
     }

     // run through all needed callback targets and execute callbacks
-    for (uint32_t i = 0; i <= (uint32_t)target; ++i) {
+    for (uint32_t i = 0; i <= static_cast<uint32_t>(target); ++i) {
         auto cb = callbacks[i].detachNodes();
         auto curr = cb;
         while (curr != nullptr) {
@@ -951,7 +951,7 @@
             if (terminated) {
                 curr->overrideCallbackExecutionStatusTarget(execStatus);
             }
-            DBG_LOG(EventsDebugEnable, "event", this, "executing callback", "ECallbackTarget", (uint32_t)target);
+            DBG_LOG(EventsDebugEnable, "event", this, "executing callback", "ECallbackTarget", static_cast<uint32_t>(target));
             curr->execute();
             decRefInternal();
             delete curr;

@@ -168,12 +168,12 @@ class Event : public BaseObject<_cl_event>, public IDNode<Event> {
             DEBUG_BREAK_IF(true);
             return false;
         }
-        return (callbacks[(uint32_t)target].peekHead() != nullptr);
+        return (callbacks[static_cast<uint32_t>(target)].peekHead() != nullptr);
     }

     bool peekHasCallbacks() {
-        for (uint32_t i = 0; i < (uint32_t)ECallbackTarget::max; ++i) {
-            if (peekHasCallbacks((ECallbackTarget)i)) {
+        for (uint32_t i = 0; i < static_cast<uint32_t>(ECallbackTarget::max); ++i) {
+            if (peekHasCallbacks(static_cast<ECallbackTarget>(i))) {
                 return true;
             }
         }
@@ -373,7 +373,7 @@ class Event : public BaseObject<_cl_event>, public IDNode<Event> {
     cl_command_type cmdType{};

     // callbacks to be executed when this event changes its execution state
-    IFList<Callback, true, true> callbacks[(uint32_t)ECallbackTarget::max];
+    IFList<Callback, true, true> callbacks[static_cast<uint32_t>(ECallbackTarget::max)];

     // can be accessed only with transitionExecutionState
     // this is to ensure state consitency event when doning lock-free multithreading

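Scoped enums do not convert implicitly to integers, so sizing and indexing an array by an enum value always needs an explicit cast, and static_cast documents that this is a value conversion rather than a reinterpretation. A small self-contained sketch of the callback-slot pattern, with a hypothetical ECallbackTarget standing in for the one in the Event header:

#include <cstdint>
#include <vector>

// Hypothetical scoped enum mirroring the Event callback slots above.
enum class ECallbackTarget : uint32_t {
    queued,
    submitted,
    completed,
    max, // one-past-last sentinel, usable as an array bound
};

struct Callback {};

// Array sized by the enum's sentinel; both the bound and every index
// need an explicit static_cast because enum class never converts implicitly.
std::vector<Callback> callbacks[static_cast<uint32_t>(ECallbackTarget::max)];

bool hasCallbacks(ECallbackTarget target) {
    return !callbacks[static_cast<uint32_t>(target)].empty();
}

bool hasAnyCallbacks() {
    for (uint32_t i = 0; i < static_cast<uint32_t>(ECallbackTarget::max); ++i) {
        if (hasCallbacks(static_cast<ECallbackTarget>(i))) {
            return true;
        }
    }
    return false;
}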
@@ -24,7 +24,7 @@ InterfaceDescriptorType *HardwareCommandsHelper<GfxFamily>::getInterfaceDescript
     const IndirectHeap &indirectHeap,
     uint64_t offsetInterfaceDescriptor,
     InterfaceDescriptorType *inlineInterfaceDescriptor) {
-    return static_cast<InterfaceDescriptorType *>(ptrOffset(indirectHeap.getCpuBase(), (size_t)offsetInterfaceDescriptor));
+    return static_cast<InterfaceDescriptorType *>(ptrOffset(indirectHeap.getCpuBase(), static_cast<size_t>(offsetInterfaceDescriptor)));
 }

 template <typename GfxFamily>

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -51,15 +51,15 @@ void gtpinNotifyContextCreate(cl_context context) {
         auto pDevice = pContext->getDevice(0);
         UNRECOVERABLE_IF(pDevice == nullptr);
         auto &gtpinHelper = pDevice->getGTPinGfxCoreHelper();
-        gtpinPlatformInfo.gen_version = (gtpin::GTPIN_GEN_VERSION)gtpinHelper.getGenVersion();
+        gtpinPlatformInfo.gen_version = static_cast<gtpin::GTPIN_GEN_VERSION>(gtpinHelper.getGenVersion());
         gtpinPlatformInfo.device_id = static_cast<uint32_t>(pDevice->getHardwareInfo().platform.usDeviceID);
-        (*gtpinCallbacks.onContextCreate)((context_handle_t)context, &gtpinPlatformInfo, &pIgcInit);
+        (*gtpinCallbacks.onContextCreate)(reinterpret_cast<context_handle_t>(context), &gtpinPlatformInfo, &pIgcInit);
     }
 }

 void gtpinNotifyContextDestroy(cl_context context) {
     if (isGTPinInitialized) {
-        (*gtpinCallbacks.onContextDestroy)((context_handle_t)context);
+        (*gtpinCallbacks.onContextDestroy)(reinterpret_cast<context_handle_t>(context));
     }
 }

@@ -87,13 +87,13 @@ void gtpinNotifyKernelCreate(cl_kernel kernel) {
         instrument_params_in_t paramsIn = {};

         paramsIn.kernel_type = GTPIN_KERNEL_TYPE_CS;
-        paramsIn.simd = (GTPIN_SIMD_WIDTH)kernelInfo.getMaxSimdSize();
-        paramsIn.orig_kernel_binary = (uint8_t *)pKernel->getKernelHeap();
+        paramsIn.simd = static_cast<GTPIN_SIMD_WIDTH>(kernelInfo.getMaxSimdSize());
+        paramsIn.orig_kernel_binary = reinterpret_cast<const uint8_t *>(pKernel->getKernelHeap());
         paramsIn.orig_kernel_size = static_cast<uint32_t>(pKernel->getKernelHeapSize());
         paramsIn.buffer_type = GTPIN_BUFFER_BINDFULL;
         paramsIn.buffer_desc.BTI = static_cast<uint32_t>(gtpinBTI);
         paramsIn.igc_hash_id = kernelInfo.shaderHashCode;
-        paramsIn.kernel_name = (char *)kernelInfo.kernelDescriptor.kernelMetadata.kernelName.c_str();
+        paramsIn.kernel_name = const_cast<char *>(kernelInfo.kernelDescriptor.kernelMetadata.kernelName.c_str());
         paramsIn.igc_info = kernelInfo.igcInfoForGtpin;
         if (kernelInfo.debugData.vIsa != nullptr) {
             paramsIn.debug_data = kernelInfo.debugData.vIsa;
@@ -105,7 +105,7 @@ void gtpinNotifyKernelCreate(cl_kernel kernel) {
             paramsIn.debug_data_size = static_cast<uint32_t>(pMultiDeviceKernel->getProgram()->getDebugDataSize(rootDeviceIndex));
         }
         instrument_params_out_t paramsOut = {0};
-        (*gtpinCallbacks.onKernelCreate)((context_handle_t)(cl_context)context, &paramsIn, &paramsOut);
+        (*gtpinCallbacks.onKernelCreate)(reinterpret_cast<context_handle_t>(context), &paramsIn, &paramsOut);
         // Substitute ISA of created kernel with instrumented code
         pKernel->substituteKernelHeap(paramsOut.inst_kernel_binary, paramsOut.inst_kernel_size);
         pKernel->setKernelId(paramsOut.kernel_id);
@@ -120,22 +120,22 @@ void gtpinNotifyKernelSubmit(cl_kernel kernel, void *pCmdQueue) {
         auto pMultiDeviceKernel = castToObjectOrAbort<MultiDeviceKernel>(kernel);
         auto pKernel = pMultiDeviceKernel->getKernel(rootDeviceIndex);
         Context *pContext = &(pKernel->getContext());
-        cl_context context = (cl_context)pContext;
+        auto context = static_cast<cl_context>(pContext);
         uint64_t kernelId = pKernel->getKernelId();
-        command_buffer_handle_t commandBuffer = (command_buffer_handle_t)((uintptr_t)(sequenceCount++));
+        auto commandBuffer = reinterpret_cast<command_buffer_handle_t>(static_cast<uintptr_t>(sequenceCount++));
         uint32_t kernelOffset = 0;
         resource_handle_t resource = 0;
         // Notify GT-Pin that abstract "command buffer" was created
-        (*gtpinCallbacks.onCommandBufferCreate)((context_handle_t)context, commandBuffer);
+        (*gtpinCallbacks.onCommandBufferCreate)(reinterpret_cast<context_handle_t>(context), commandBuffer);
         // Notify GT-Pin that kernel was submited for execution
         (*gtpinCallbacks.onKernelSubmit)(commandBuffer, kernelId, &kernelOffset, &resource);
         // Create new record in Kernel Execution Queue describing submited kernel
         pKernel->setStartOffset(kernelOffset);
         gtpinkexec_t kExec;
         kExec.pKernel = pKernel;
-        kExec.gtpinResource = (cl_mem)resource;
+        kExec.gtpinResource = reinterpret_cast<cl_mem>(resource);
         kExec.commandBuffer = commandBuffer;
-        kExec.pCommandQueue = (CommandQueue *)pCmdQueue;
+        kExec.pCommandQueue = reinterpret_cast<CommandQueue *>(pCmdQueue);
         std::unique_lock<GTPinLockType> lock{kernelExecQueueLock};
         kernelExecQueue.push_back(kExec);
         lock.unlock();
@@ -158,7 +158,7 @@ void gtpinNotifyKernelSubmit(cl_kernel kernel, void *pCmdQueue) {
                 device.getMemoryManager()->getPageFaultManager()->moveAllocationToGpuDomain(reinterpret_cast<void *>(gpuAllocation->getGpuAddress()));
             }
         } else {
-            cl_mem buffer = (cl_mem)resource;
+            cl_mem buffer = reinterpret_cast<cl_mem>(resource);
             auto pBuffer = castToObjectOrAbort<Buffer>(buffer);
             pBuffer->setArgStateful(pSurfaceState, false, false, false, false, device,
                                     pContext->getNumDevices());
@@ -168,7 +168,7 @@ void gtpinNotifyKernelSubmit(cl_kernel kernel, void *pCmdQueue) {

 void gtpinNotifyPreFlushTask(void *pCmdQueue) {
     if (isGTPinInitialized) {
-        pCmdQueueForFlushTask = (CommandQueue *)pCmdQueue;
+        pCmdQueueForFlushTask = reinterpret_cast<CommandQueue *>(pCmdQueue);
     }
 }

@@ -241,7 +241,7 @@ void gtpinNotifyUpdateResidencyList(void *pKernel, void *pResVec) {
     for (size_t n = 0; n < numElems; n++) {
         if ((kernelExecQueue[n].pKernel == pKernel) && !kernelExecQueue[n].isResourceResident && kernelExecQueue[n].gtpinResource) {
             // It's time for kernel to update its residency list with its GT-Pin resource
-            std::vector<Surface *> *pResidencyVector = (std::vector<Surface *> *)pResVec;
+            std::vector<Surface *> *pResidencyVector = reinterpret_cast<std::vector<Surface *> *>(pResVec);
             cl_mem gtpinBuffer = kernelExecQueue[n].gtpinResource;
             auto pBuffer = castToObjectOrAbort<Buffer>(gtpinBuffer);
             auto rootDeviceIndex = kernelExecQueue[n].pCommandQueue->getDevice().getRootDeviceIndex();

@@ -45,7 +45,7 @@ void GTPinGfxCoreHelperHw<GfxFamily>::addSurfaceState(Kernel *pKernel) const {
         using BINDING_TABLE_STATE = typename GfxFamily::BINDING_TABLE_STATE;
         BINDING_TABLE_STATE *pNewBTS = reinterpret_cast<BINDING_TABLE_STATE *>(pNewSsh + newSurfaceStateSize + currBTCount * btsSize);
         *pNewBTS = GfxFamily::cmdInitBindingTableState;
-        pNewBTS->setSurfaceStatePointer((uint64_t)currBTOffset);
+        pNewBTS->setSurfaceStatePointer(currBTOffset);
     }
     pKernel->resizeSurfaceStateHeap(pNewSsh, sshSize + sizeToEnlarge, currBTCount + 1, newSurfaceStateSize);
 }

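The GT-Pin hunks cross between driver objects and GT-Pin's opaque handle types (context_handle_t, command_buffer_handle_t), which is inherently a bit-level conversion; reinterpret_cast makes each crossing explicit and searchable, while static_cast stays reserved for plain value conversions. A minimal sketch, with hypothetical handle types standing in for the GT-Pin API:

#include <cstdint>

// Hypothetical opaque handle types, mirroring the GT-Pin pattern above.
struct context_handle_s;
using context_handle_t = context_handle_s *;
struct command_buffer_handle_s;
using command_buffer_handle_t = command_buffer_handle_s *;

struct FakeContext {} ctx; // stands in for a driver-side context object

void notify(uint64_t &sequenceCount) {
    // Pointer -> opaque handle: unrelated pointer types, so reinterpret_cast.
    auto hContext = reinterpret_cast<context_handle_t>(&ctx);
    // Integer -> opaque handle: widen with static_cast (a value conversion),
    // then reinterpret the pointer-sized integer as a handle.
    auto hCmdBuf = reinterpret_cast<command_buffer_handle_t>(static_cast<uintptr_t>(sequenceCount++));
    (void)hContext;
    (void)hCmdBuf;
}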
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -121,7 +121,7 @@ cl_int validateObject(const NonZeroBufferSize &nzbs) {
 }

 cl_int validateObject(const PatternSize &ps) {
-    switch ((cl_int)ps) {
+    switch (static_cast<cl_int>(ps)) {
     case 128:
     case 64:
     case 32:

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,8 +37,8 @@ inline void convertFillColor(const void *fillColor,
     float fFillColor[4] = {0.0f};

     for (auto i = 0; i < 4; i++) {
-        iFillColor[i] = *((int32_t *)fillColor + i);
-        fFillColor[i] = *((float *)fillColor + i);
+        iFillColor[i] = reinterpret_cast<const int32_t *>(fillColor)[i];
+        fFillColor[i] = reinterpret_cast<const float *>(fillColor)[i];
     }

     if (oldImageFormat.image_channel_order == CL_A) {

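Besides being C-style, the old form also cast away the const of fillColor; the reinterpret_cast version keeps the pointee const while reading the same four channels. A standalone sketch of viewing one untyped color payload as both integer and float lanes, as convertFillColor does:

#include <cstdint>
#include <cstdio>

// View an untyped 4-channel fill color as both int32 and float lanes;
// const is preserved through the casts.
void dumpFillColor(const void *fillColor) {
    int32_t iFillColor[4] = {0};
    float fFillColor[4] = {0.0f};
    for (int i = 0; i < 4; i++) {
        iFillColor[i] = reinterpret_cast<const int32_t *>(fillColor)[i];
        fFillColor[i] = reinterpret_cast<const float *>(fillColor)[i];
    }
    for (int i = 0; i < 4; i++) {
        std::printf("channel %d: %d / %f\n", i, iFillColor[i], fFillColor[i]);
    }
}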
@@ -201,7 +201,7 @@ size_t HardwareCommandsHelper<GfxFamily>::sendInterfaceDescriptorData(
         kernelDescriptor.kernelAttributes.numGrfRequired, threadsPerThreadGroup, *walkerCmd);

     *pInterfaceDescriptor = interfaceDescriptor;
-    return (size_t)offsetInterfaceDescriptor;
+    return static_cast<size_t>(offsetInterfaceDescriptor);
 }

 template <typename GfxFamily>

@@ -436,7 +436,7 @@ cl_int Kernel::cloneKernel(Kernel *pSourceKernel) {
             break;
         case SVM_ALLOC_OBJ:
             setArgSvmAlloc(i, const_cast<void *>(pSourceKernel->getKernelArgInfo(i).value),
-                           (GraphicsAllocation *)pSourceKernel->getKernelArgInfo(i).object,
+                           reinterpret_cast<GraphicsAllocation *>(pSourceKernel->getKernelArgInfo(i).object),
                            pSourceKernel->getKernelArgInfo(i).allocId);
             break;
         case BUFFER_OBJ:
@@ -732,13 +732,13 @@ cl_int Kernel::getSubGroupInfo(cl_kernel_sub_group_info paramName,
     }
     case CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE_KHR: {
         for (size_t i = 0; i < numDimensions; i++) {
-            wgs *= ((size_t *)inputValue)[i];
+            wgs *= reinterpret_cast<const size_t *>(inputValue)[i];
         }
         return changeGetInfoStatusToCLResultType(
             info.set<size_t>((wgs / maxSimdSize) + std::min(static_cast<size_t>(1), wgs % maxSimdSize))); // add 1 if WGS % maxSimdSize != 0
     }
     case CL_KERNEL_LOCAL_SIZE_FOR_SUB_GROUP_COUNT: {
-        auto subGroupsNum = *(size_t *)inputValue;
+        auto subGroupsNum = *reinterpret_cast<const size_t *>(inputValue);
         auto workGroupSize = subGroupsNum * largestCompiledSIMDSize;
         // return workgroup size in first dimension, the rest shall be 1 in positive case
         if (workGroupSize > maxRequiredWorkGroupSize) {
@@ -1208,7 +1208,7 @@ inline void Kernel::makeArgsResident(CommandStreamReceiver &commandStreamReceive
     for (decltype(numArgs) argIndex = 0; argIndex < numArgs; argIndex++) {
         if (kernelArguments[argIndex].object) {
             if (kernelArguments[argIndex].type == SVM_ALLOC_OBJ) {
-                auto pSVMAlloc = (GraphicsAllocation *)kernelArguments[argIndex].object;
+                auto pSVMAlloc = reinterpret_cast<GraphicsAllocation *>(kernelArguments[argIndex].object);
                 auto pageFaultManager = executionEnvironment.memoryManager->getPageFaultManager();
                 if (pageFaultManager &&
                     this->isUnifiedMemorySyncRequired) {
@@ -1375,7 +1375,7 @@ void Kernel::getResidency(std::vector<Surface *> &dst) {
                 this->isUnifiedMemorySyncRequired) {
                 needsMigration = true;
             }
-            auto pSVMAlloc = (GraphicsAllocation *)kernelArguments[argIndex].object;
+            auto pSVMAlloc = reinterpret_cast<GraphicsAllocation *>(kernelArguments[argIndex].object);
             dst.push_back(new GeneralSurface(pSVMAlloc, needsMigration));
         } else if (Kernel::isMemObj(kernelArguments[argIndex].type)) {
             auto clMem = const_cast<cl_mem>(static_cast<const _cl_mem *>(kernelArguments[argIndex].object));
@@ -1799,10 +1799,10 @@ bool Kernel::hasPrintfOutput() const {

 void Kernel::resetSharedObjectsPatchAddresses() {
     for (size_t i = 0; i < getKernelArgsNumber(); i++) {
-        auto clMem = (cl_mem)kernelArguments[i].object;
+        auto clMem = kernelArguments[i].object;
         auto memObj = castToObject<MemObj>(clMem);
         if (memObj && memObj->peekSharingHandler()) {
-            setArg((uint32_t)i, sizeof(cl_mem), &clMem);
+            setArg(static_cast<uint32_t>(i), sizeof(cl_mem), &clMem);
         }
     }
 }

@@ -795,7 +795,7 @@ cl_int Image::getImageInfo(cl_image_info paramName,
         retParam = imageDesc.image_width;
         if (this->baseMipLevel) {
             retParam = imageDesc.image_width >> this->baseMipLevel;
-            retParam = std::max(retParam, (size_t)1);
+            retParam = std::max(retParam, static_cast<size_t>(1));
         }
         srcParam = &retParam;
         break;
@@ -805,7 +805,7 @@
         retParam = imageDesc.image_height * !((imageDesc.image_type == CL_MEM_OBJECT_IMAGE1D) || (imageDesc.image_type == CL_MEM_OBJECT_IMAGE1D_ARRAY) || (imageDesc.image_type == CL_MEM_OBJECT_IMAGE1D_BUFFER));
         if ((retParam != 0) && (this->baseMipLevel > 0)) {
             retParam = retParam >> this->baseMipLevel;
-            retParam = std::max(retParam, (size_t)1);
+            retParam = std::max(retParam, static_cast<size_t>(1));
         }
         srcParam = &retParam;
         break;
@@ -815,7 +815,7 @@
         retParam = imageDesc.image_depth * (imageDesc.image_type == CL_MEM_OBJECT_IMAGE3D);
         if ((retParam != 0) && (this->baseMipLevel > 0)) {
             retParam = retParam >> this->baseMipLevel;
-            retParam = std::max(retParam, (size_t)1);
+            retParam = std::max(retParam, static_cast<size_t>(1));
         }
         srcParam = &retParam;
         break;

@@ -89,7 +89,7 @@ void ImageHw<GfxFamily>::setImageArg(void *memory, bool setAsMediaBlockImage, ui
         surfaceState->setShaderChannelSelectAlpha(RENDER_SURFACE_STATE::SHADER_CHANNEL_SELECT_ONE);
     }

-    surfaceState->setNumberOfMultisamples((typename RENDER_SURFACE_STATE::NUMBER_OF_MULTISAMPLES)mcsSurfaceInfo.multisampleCount);
+    surfaceState->setNumberOfMultisamples(static_cast<typename RENDER_SURFACE_STATE::NUMBER_OF_MULTISAMPLES>(mcsSurfaceInfo.multisampleCount));

     if (imageDesc.num_samples > 1) {
         setAuxParamsForMultisamples(surfaceState, rootDeviceIndex);
@@ -129,7 +129,7 @@ void ImageHw<GfxFamily>::setAuxParamsForMultisamples(RENDER_SURFACE_STATE *surfa
     } else if (mcsGmm->unifiedAuxTranslationCapable()) {
         EncodeSurfaceState<GfxFamily>::setImageAuxParamsForCCS(surfaceState, mcsGmm);
     } else {
-        surfaceState->setAuxiliarySurfaceMode((typename RENDER_SURFACE_STATE::AUXILIARY_SURFACE_MODE)1);
+        surfaceState->setAuxiliarySurfaceMode(static_cast<typename RENDER_SURFACE_STATE::AUXILIARY_SURFACE_MODE>(1));
         surfaceState->setAuxiliarySurfacePitch(mcsSurfaceInfo.pitch);
         surfaceState->setAuxiliarySurfaceQPitch(mcsSurfaceInfo.qPitch);
         surfaceState->setAuxiliarySurfaceBaseAddress(mcsAllocation->getGpuAddress());

@@ -60,7 +60,7 @@ void PrintfHandler::prepareDispatch(const MultiDispatchInfo &multiDispatchInfo)

     const auto &printfSurfaceArg = kernel->getKernelInfo().kernelDescriptor.payloadMappings.implicitArgs.printfSurfaceAddress;
     auto printfPatchAddress = ptrOffset(reinterpret_cast<uintptr_t *>(kernel->getCrossThreadData()), printfSurfaceArg.stateless);
-    patchWithRequiredSize(printfPatchAddress, printfSurfaceArg.pointerSize, (uintptr_t)printfSurface->getGpuAddressToPatch());
+    patchWithRequiredSize(printfPatchAddress, printfSurfaceArg.pointerSize, printfSurface->getGpuAddressToPatch());
     if (isValidOffset(printfSurfaceArg.bindful)) {
         auto surfaceState = ptrOffset(reinterpret_cast<uintptr_t *>(kernel->getSurfaceStateHeap()), printfSurfaceArg.bindful);
         void *addressToPatch = printfSurface->getUnderlyingBuffer();

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -189,7 +189,7 @@ cl_int CL_API_CALL clGetGLObjectInfo(cl_mem memobj, cl_gl_object_type *glObjectT
     retValue = validateObjects(memobj);
     if (retValue == CL_SUCCESS) {
         auto pMemObj = castToObject<MemObj>(memobj);
-        auto handler = (GlSharing *)pMemObj->peekSharingHandler();
+        auto handler = static_cast<GlSharing *>(pMemObj->peekSharingHandler());
         if (handler != nullptr) {
             handler->getGlObjectInfo(glObjectType, glObjectName);
         } else {
@@ -213,7 +213,7 @@ cl_int CL_API_CALL clGetGLTextureInfo(cl_mem memobj, cl_gl_texture_info paramNam
     retValue = validateObjects(memobj);
     if (retValue == CL_SUCCESS) {
         auto pMemObj = castToObject<MemObj>(memobj);
-        auto glTexture = (GlTexture *)pMemObj->peekSharingHandler();
+        auto glTexture = static_cast<GlTexture *>(pMemObj->peekSharingHandler());
         retValue = glTexture->getGlTextureInfo(paramName, paramValueSize, paramValue, paramValueSizeRet);
     }
     TRACING_EXIT(ClGetGlTextureInfo, &retValue);

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -14,7 +14,7 @@
 namespace NEO {
 class Context;
 class Image;
-class GlTexture : GlSharing {
+class GlTexture : public GlSharing {
   public:
     static Image *createSharedGlTexture(Context *context, cl_mem_flags flags, cl_GLenum target, cl_GLint miplevel, cl_GLuint texture,
                                         cl_int *errcodeRet);

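This hunk fixes more than style: for class (unlike struct), a base listed without an access specifier is inherited privately, so `class GlTexture : GlSharing` hid the GlSharing interface from outside code and blocked implicit derived-to-base conversions such as the static_cast<GlTexture *> on the sharing handler above. A minimal illustration:

class Base {
  public:
    void hello() {}
};

class PrivateDerived : Base {};       // class defaults to private inheritance
class PublicDerived : public Base {}; // explicit public inheritance

void demo() {
    PublicDerived pub;
    pub.hello();     // OK: Base's interface is part of the public API
    Base *pb = &pub; // OK: implicit derived-to-base conversion
    (void)pb;

    PrivateDerived priv;
    // priv.hello();      // error: Base is a private base
    // Base *pb2 = &priv; // error: conversion to a private base is inaccessible
    (void)priv;
}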
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -100,7 +100,7 @@ std::string GlSharingBuilderFactory::getExtensions(DriverInfo *driverInfo) {
 void *GlSharingBuilderFactory::getExtensionFunctionAddress(const std::string &functionName) {
     if (debugManager.flags.EnableFormatQuery.get() &&
         functionName == "clGetSupportedGLTextureFormatsINTEL") {
-        return ((void *)(clGetSupportedGLTextureFormatsINTEL));
+        return reinterpret_cast<void *>(clGetSupportedGLTextureFormatsINTEL);
     }

     return nullptr;

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,7 +30,7 @@ bool VaSharingContextBuilder::processProperties(cl_context_properties &propertyT

     switch (propertyType) {
     case CL_CONTEXT_VA_API_DISPLAY_INTEL:
-        contextData->vaDisplay = (VADisplay)propertyValue;
+        contextData->vaDisplay = reinterpret_cast<VADisplay>(propertyValue);
         return true;
     }
     return false;
@@ -71,11 +71,11 @@ void VaSharingBuilderFactory::fillGlobalDispatchTable() {
     crtGlobalDispatchTable.clEnqueueAcquireVA_APIMediaSurfacesINTEL = clEnqueueAcquireVA_APIMediaSurfacesINTEL;
 }

-#define RETURN_FUNC_PTR_IF_EXIST(name)  \
-    {                                   \
-        if (functionName == #name) {    \
-            return ((void *)(name));    \
-        }                               \
+#define RETURN_FUNC_PTR_IF_EXIST(name)             \
+    {                                              \
+        if (functionName == #name) {               \
+            return reinterpret_cast<void *>(name); \
+        }                                          \
     }
 void *VaSharingBuilderFactory::getExtensionFunctionAddress(const std::string &functionName) {
     RETURN_FUNC_PTR_IF_EXIST(clCreateFromVA_APIMediaSurfaceINTEL);

@@ -396,7 +396,7 @@ class ClCreateCommandQueueTracer : NEO::NonCopyableAndNonMovableClass {

     void enter(cl_context *context,
                cl_device_id *device,
-               cl_command_queue_properties *properties,
+               const cl_command_queue_properties *properties,
                cl_int **errcodeRet) {
         DEBUG_BREAK_IF(state != TRACING_NOTIFY_STATE_NOTHING_CALLED);

@@ -4928,7 +4928,7 @@ class ClEnqueueCopyImageToBufferTracer : NEO::NonCopyableAndNonMovableClass {
                cl_mem *dstBuffer,
                const size_t **srcOrigin,
                const size_t **region,
-               size_t *dstOffset,
+               const size_t *dstOffset,
                cl_uint *numEventsInWaitList,
                const cl_event **eventWaitList,
                cl_event **event) {

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2024 Intel Corporation
+ * Copyright (C) 2019-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -257,7 +257,7 @@ typedef struct _cl_params_clCreateBuffer {
 typedef struct _cl_params_clCreateCommandQueue {
     cl_context *context;
     cl_device_id *device;
-    cl_command_queue_properties *properties;
+    const cl_command_queue_properties *properties;
     cl_int **errcodeRet;
 } cl_params_clCreateCommandQueue;

@@ -852,7 +852,7 @@ typedef struct _cl_params_clEnqueueCopyImageToBuffer {
     cl_mem *dstBuffer;
     const size_t **srcOrigin;
     const size_t **region;
-    size_t *dstOffset;
+    const size_t *dstOffset;
     cl_uint *numEventsInWaitList;
     const cl_event **eventWaitList;
     cl_event **event;

@@ -110,7 +110,7 @@ const std::string ClFileLogger<debugLevel>::getEvents(const uintptr_t *input, ui
     for (uint32_t i = 0; i < numOfEvents; i++) {
         if (input != nullptr) {
             cl_event event = (reinterpret_cast<const cl_event *>(input))[i];
-            os << "cl_event " << event << ", Event " << (Event *)event << ", ";
+            os << "cl_event " << event << ", Event " << static_cast<Event *>(event) << ", ";
         }
     }
     return os.str();

@@ -150,7 +150,7 @@ TEST(ZebinManipulatorTests, GivenValidZebinWhenItIsDisassembledAndAssembledBackT
     char **asmNameOutputs;

     retVal = oclocInvoke(static_cast<uint32_t>(asmArgs.size()), asmArgs.data(),
-                         disasmNumOutputs, const_cast<const uint8_t **>(disasmDataOutputs), disasmLenOutputs, (const char **)disasmNameOutputs,
+                         disasmNumOutputs, const_cast<const uint8_t **>(disasmDataOutputs), disasmLenOutputs, const_cast<const char **>(disasmNameOutputs),
                          0, nullptr, nullptr, nullptr,
                          &asmNumOutputs, &asmDataOutputs, &asmLenOutputs, &asmNameOutputs);
     EXPECT_EQ(OCLOC_SUCCESS, retVal);

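Perhaps counterintuitively, const_cast is also the right named cast for adding const at a deeper pointer level: char ** does not implicitly convert to const char **, because allowing it would let a non-const pointer be smuggled out through the const view. A short illustration:

// char ** does not implicitly convert to const char **; the language
// forbids it because the conversion could launder away const.
void consume(const char **names) { (void)names; }

void caller(char **names) {
    // consume(names);                         // error: no implicit conversion
    consume(const_cast<const char **>(names)); // OK: const_cast adds const
}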
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2022-2024 Intel Corporation
+ * Copyright (C) 2022-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,7 +22,7 @@ char *mockGetenv(const char *name) noexcept {
 using getenvMockFuncPtr = char *(*)(const char *);

 TEST(CompilerCache, GivenDefaultCacheConfigThenValuesAreProperlyPopulated) {
-    VariableBackup<getenvMockFuncPtr> getenvBkp((getenvMockFuncPtr *)(&NEO::IoFunctions::getenvPtr), &mockGetenv);
+    VariableBackup<getenvMockFuncPtr> getenvBkp(reinterpret_cast<getenvMockFuncPtr *>(&NEO::IoFunctions::getenvPtr), &mockGetenv);

     auto cacheConfig = NEO::getDefaultCompilerCacheConfig();
     EXPECT_STREQ("ocloc_cache", cacheConfig.cacheDir.c_str());
@@ -38,7 +38,7 @@ TEST(CompilerCache, GivenEnvVariableWhenDefaultConfigIsCreatedThenValuesArePrope
     mockableEnvs["NEO_CACHE_DIR"] = "ult/directory/";

     VariableBackup<decltype(mockableEnvValues)> mockableEnvValuesBackup(&mockableEnvValues, mockableEnvs);
-    VariableBackup<getenvMockFuncPtr> getenvBkp((getenvMockFuncPtr *)(&NEO::IoFunctions::getenvPtr), &mockGetenv);
+    VariableBackup<getenvMockFuncPtr> getenvBkp(reinterpret_cast<getenvMockFuncPtr *>(&NEO::IoFunctions::getenvPtr), &mockGetenv);

     auto cacheConfig = NEO::getDefaultCompilerCacheConfig();
     EXPECT_STREQ("ult/directory/", cacheConfig.cacheDir.c_str());