fix code issues reported by clang 14
Signed-off-by: Artur Harasimiuk <artur.harasimiuk@intel.com>

commit a6490062a9 (parent 96dd1de20c)
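
The hunks below fall into two groups, matching the diagnostics clang 14 raises for this code: -Wunused-but-set-variable, where a local is written but its value is never read, and -Wbitwise-instead-of-logical, where bitwise | or & is applied to bool operands and || or && was meant. A minimal sketch of both patterns follows; the names, the translation unit, and the compiler flags are illustrative assumptions, not taken from this diff.

    // sketch.cpp (hypothetical), e.g.: clang++-14 -std=c++17 -Wall -Wextra -c sketch.cpp
    #include <cstdint>

    bool isLowerCase(char c) { return (c >= 'a') && (c <= 'z'); }
    bool isUpperCase(char c) { return (c >= 'A') && (c <= 'Z'); }

    void unusedButSet(uint64_t timeout) {
        int64_t timeoutMicroseconds = 0;                     // "variable 'timeoutMicroseconds' set but not used"
        timeoutMicroseconds = static_cast<int64_t>(timeout); // every write is dead: nothing reads the value
    }                                                        // the fix in this commit: delete the variable (or assert on it)

    bool isLetterLike(char c) {
        // "use of bitwise '|' with boolean operands": both calls are always evaluated
        return isLowerCase(c) | isUpperCase(c);
        // the fix in this commit: spell it isLowerCase(c) || isUpperCase(c)
    }
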
@@ -872,7 +872,6 @@ HWTEST_F(CommandListArbitrationPolicyTest, whenCommandListIsResetThenOriginalThr
     EXPECT_NE(nullptr, commandList);
     EXPECT_NE(nullptr, commandList->commandContainer.getCommandStream());
 
-    bool found;
     uint64_t originalThreadArbitrationPolicy = std::numeric_limits<uint64_t>::max();
     {
         GenCmdList parsedCommandList;
@@ -890,7 +889,6 @@ HWTEST_F(CommandListArbitrationPolicyTest, whenCommandListIsResetThenOriginalThr
             EXPECT_EQ(NEO::DebugControlReg2::getRegData(NEO::ThreadArbitrationPolicy::RoundRobin),
                       cmd->getDataDword());
             originalThreadArbitrationPolicy = cmd->getDataDword();
-            found = false;
         }
     }
 }
@@ -1311,8 +1311,7 @@ TEST_F(DeviceCreateCommandQueueTest, givenLowPriorityDescAndWithoutLowPriorityCs
 
     ze_command_queue_handle_t commandQueueHandle = {};
 
-    ze_result_t res{};
-    EXPECT_THROW(res = device->createCommandQueue(&desc, &commandQueueHandle), std::exception);
+    EXPECT_THROW(device->createCommandQueue(&desc, &commandQueueHandle), std::exception);
 }
 
 using MultiDeviceCreateCommandQueueTest = Test<MultiDeviceFixture>;
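
About the hunk just above (an observation, with a hedged, self-contained sketch using a hypothetical createQueueOrThrow in place of device->createCommandQueue): EXPECT_THROW only verifies that the statement exits by throwing the named exception type, so the value the expression would have produced is never inspected; capturing it into res was a dead store, which is exactly what -Wunused-but-set-variable reports.

    #include <gtest/gtest.h>
    #include <stdexcept>

    // Hypothetical stand-in for the throwing driver call in the test above.
    int createQueueOrThrow() { throw std::runtime_error("low-priority engine unavailable"); }

    TEST(ExpectThrowSketch, ReturnValueOfThrowingCallIsNeverRead) {
        // No local is needed: the macro's contract is "this statement throws std::exception",
        // not "this statement returns a particular value".
        EXPECT_THROW(createQueueOrThrow(), std::exception);
    }
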
@@ -184,7 +184,6 @@ HWTEST_F(CommandQueueSynchronizeTest, givenCallToSynchronizeThenCorrectEnableTim
     queue->csr = csr.get();
 
     uint64_t timeout = 10;
-    int64_t timeoutMicrosecondsExpected = timeout;
 
     queue->synchronize(timeout);
 
@@ -193,7 +192,6 @@ HWTEST_F(CommandQueueSynchronizeTest, givenCallToSynchronizeThenCorrectEnableTim
     EXPECT_TRUE(csr->enableTimeoutSet);
 
     timeout = std::numeric_limits<uint64_t>::max();
-    timeoutMicrosecondsExpected = NEO::TimeoutControls::maxTimeout;
 
     queue->synchronize(timeout);
 
@@ -276,8 +274,6 @@ HWTEST_F(CommandQueueSynchronizeTest, givenDebugOverrideEnabledWhenCallToSynchro
     queue->csr = csr.get();
 
     uint64_t timeout = 10;
-    bool enableTimeoutExpected = true;
-    int64_t timeoutMicrosecondsExpected = timeout;
 
     queue->synchronize(timeout);
 
@@ -286,8 +282,6 @@ HWTEST_F(CommandQueueSynchronizeTest, givenDebugOverrideEnabledWhenCallToSynchro
     EXPECT_TRUE(csr->enableTimeoutSet);
 
     timeout = std::numeric_limits<uint64_t>::max();
-    enableTimeoutExpected = false;
-    timeoutMicrosecondsExpected = NEO::TimeoutControls::maxTimeout;
 
     queue->synchronize(timeout);
 
@@ -2027,8 +2027,6 @@ HWTEST2_F(SetKernelArg, givenImageAndBindfulKernelWhenSetArgImageThenCopySurface
     createKernel();
 
     auto &imageArg = const_cast<NEO::ArgDescImage &>(kernel->kernelImmData->getDescriptor().payloadMappings.explicitArgs[3].template as<NEO::ArgDescImage>());
-    auto addressingMode = const_cast<NEO::KernelDescriptor::AddressingMode &>(kernel->kernelImmData->getDescriptor().kernelAttributes.imageAddressingMode);
-    addressingMode = NEO::KernelDescriptor::Bindful;
     imageArg.bindless = undefined<CrossThreadDataOffset>;
     imageArg.bindful = 0x40;
     ze_image_desc_t desc = {};
@@ -356,7 +356,6 @@ cl_int CommandQueueHw<GfxFamily>::enqueueSVMMemcpy(cl_bool blockingCopy,
     MultiDispatchInfo dispatchInfo;
     BuiltinOpParams operationParams;
     Surface *surfaces[2];
-    cl_command_type cmdType;
     cl_int dispatchResult = CL_SUCCESS;
 
     if (copyType == SvmToHost) {
@@ -385,7 +384,6 @@ cl_int CommandQueueHw<GfxFamily>::enqueueSVMMemcpy(cl_bool blockingCopy,
 
     HostPtrSurface srcHostPtrSurf(const_cast<void *>(srcGpuPtr), size, true);
     GeneralSurface dstSvmSurf(dstAllocation);
-    cmdType = CL_COMMAND_WRITE_BUFFER;
     if (size != 0) {
         bool status = csr.createAllocationForHostSurface(srcHostPtrSurf, false);
         if (!status) {
@@ -417,7 +415,6 @@ cl_int CommandQueueHw<GfxFamily>::enqueueSVMMemcpy(cl_bool blockingCopy,
 
     HostPtrSurface srcHostPtrSurf(const_cast<void *>(srcGpuPtr), size);
     HostPtrSurface dstHostPtrSurf(dstGpuPtr, size);
-    cmdType = CL_COMMAND_WRITE_BUFFER;
     if (size != 0) {
         bool status = csr.createAllocationForHostSurface(srcHostPtrSurf, false);
         status &= csr.createAllocationForHostSurface(dstHostPtrSurf, true);
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -107,23 +107,19 @@ HWTEST_P(AUBCopyBufferRect, WhenCopyingThenExpectationsMet) {
     uint8_t src[rowPitch * slicePitch];
     memset(src, 0, sizeof(src));
 
-    auto tDst = pDestMemory;
     auto tSrc = ptrOffset(pSrcMemory, srcOrigin[0] + srcOrigin[1] * rowPitch + srcOrigin[2] * slicePitch);
     auto tRef = ptrOffset(src, dstOrigin[0] + dstOrigin[1] * rowPitch + dstOrigin[2] * slicePitch);
 
     for (unsigned int z = 0; z < regionZ; z++) {
-        auto pDst = tDst;
         auto pSrc = tSrc;
         auto pRef = tRef;
 
         for (unsigned int y = 0; y < regionY; y++) {
             memcpy(pRef, pSrc, region[0]);
 
-            pDst += rowPitch;
             pSrc += rowPitch;
             pRef += rowPitch;
         }
-        tDst += slicePitch;
         tSrc += slicePitch;
         tRef += slicePitch;
     }
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -65,9 +65,6 @@ HWTEST_F(UnifiedMemoryAubTest, givenSharedMemoryAllocWhenWriteIntoGPUPartThenVal
     expectNotEqualMemory<FamilyType>(unifiedMemoryPtr, unifiedMemoryPtr, dataSize);
     expectMemory<FamilyType>(unifiedMemoryPtr, input.data(), dataSize);
 
-    auto mockRead = reinterpret_cast<char *>(unifiedMemoryPtr)[0];
-    mockRead = 0;
-
     expectMemory<FamilyType>(unifiedMemoryPtr, unifiedMemoryPtr, dataSize);
 
     freeUSM(unifiedMemoryPtr, unifiedMemoryType);
@@ -1727,11 +1727,9 @@ TEST_F(VmeBuiltInTests, WhenValidatingImagesThenCorrectResponses) {
     std::unique_ptr<Image> image1(ImageHelper<ImageVmeValidFormat>::create(pContext));
 
     cl_mem srcImgMem = 0;
-    cl_mem refImgMem = 0;
     EXPECT_EQ(CL_INVALID_KERNEL_ARGS, vmeBuilder.validateImages(Vec3<size_t>{3, 3, 0}, Vec3<size_t>{0, 0, 0}));
 
     srcImgMem = image1.get();
-    refImgMem = 0;
     vmeBuilder.setExplicitArg(srcImgArgNum, sizeof(srcImgMem), &srcImgMem, err);
     EXPECT_EQ(CL_INVALID_KERNEL_ARGS, vmeBuilder.validateImages(Vec3<size_t>{3, 3, 0}, Vec3<size_t>{0, 0, 0}));
 }
@@ -1081,15 +1081,18 @@ HWTEST_F(EnqueueKernelTest, givenCsrInBatchingModeWhenCommandIsFlushedThenFlushS
 
     auto status = clWaitForEvents(1, &event);
 
+    EXPECT_EQ(CL_SUCCESS, status);
     EXPECT_EQ(1, neoEvent->getRefInternalCount());
     EXPECT_EQ(1u, mockCsr->flushStamp->peekStamp());
     EXPECT_EQ(1u, neoEvent->flushStamp->peekStamp());
     EXPECT_EQ(1u, pCmdQ->flushStamp->peekStamp());
 
     status = clFinish(pCmdQ);
+    EXPECT_EQ(CL_SUCCESS, status);
     EXPECT_EQ(1u, pCmdQ->flushStamp->peekStamp());
 
     status = clReleaseEvent(event);
+    EXPECT_EQ(CL_SUCCESS, status);
 }
 
 HWTEST_F(EnqueueKernelTest, givenCsrInBatchingModeWhenNonBlockingMapFollowsNdrCallThenFlushStampIsUpdatedProperly) {
@@ -1133,15 +1136,18 @@ HWTEST_F(EnqueueKernelTest, givenCsrInBatchingModeWhenCommandWithEventIsFollowed
 
     auto status = clWaitForEvents(1, &event);
 
+    EXPECT_EQ(CL_SUCCESS, status);
     EXPECT_EQ(1, neoEvent->getRefInternalCount());
     EXPECT_EQ(1u, mockCsr->flushStamp->peekStamp());
     EXPECT_EQ(1u, neoEvent->flushStamp->peekStamp());
     EXPECT_EQ(1u, pCmdQ->flushStamp->peekStamp());
 
     status = clFinish(pCmdQ);
+    EXPECT_EQ(CL_SUCCESS, status);
     EXPECT_EQ(1u, pCmdQ->flushStamp->peekStamp());
 
     status = clReleaseEvent(event);
+    EXPECT_EQ(CL_SUCCESS, status);
 }
 
 HWTEST_F(EnqueueKernelTest, givenCsrInBatchingModeWhenClFlushIsCalledThenQueueFlushStampIsUpdated) {
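
Unlike the other hunks in this commit, the two tests above grow: the status captured from clWaitForEvents, clFinish, and clReleaseEvent was being overwritten on each call without ever being read, and the added EXPECT_EQ(CL_SUCCESS, status) lines turn those captures into assertions instead of deleting them. A self-contained sketch of the same trade-off, with hypothetical helpers standing in for the OpenCL calls:

    #include <gtest/gtest.h>

    constexpr int SKETCH_SUCCESS = 0;                    // stand-in for CL_SUCCESS
    int finishQueueSketch() { return SKETCH_SUCCESS; }   // stand-in for clFinish()
    int releaseEventSketch() { return SKETCH_SUCCESS; }  // stand-in for clReleaseEvent()

    TEST(CapturedStatusSketch, AssertRatherThanDiscard) {
        // Before: 'status' was assigned on every call and never read -> -Wunused-but-set-variable.
        // After: every captured status feeds an assertion, which is the stronger fix.
        int status = finishQueueSketch();
        EXPECT_EQ(SKETCH_SUCCESS, status);

        status = releaseEventSketch();
        EXPECT_EQ(SKETCH_SUCCESS, status);
    }
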
@@ -425,10 +425,6 @@ HWTEST_F(DispatchFlagsTests, givenCommandComputeKernelWhenSubmitThenPassCorrectD
     bool flushDC = false;
     bool slmUsed = false;
     bool ndRangeKernel = false;
-    bool requiresCoherency = false;
-    for (auto &surface : surfaces) {
-        requiresCoherency |= surface->IsCoherent;
-    }
     std::unique_ptr<Command> command(new CommandComputeKernel(*mockCmdQ, kernelOperation, surfaces, flushDC, slmUsed, ndRangeKernel, nullptr, preemptionMode, kernel, 1));
     command->submit(20, false);
 
@@ -58,7 +58,6 @@ void EncodeDispatchKernel<Family>::encode(CommandContainer &container,
     const HardwareInfo &hwInfo = args.device->getHardwareInfo();
 
     LinearStream *listCmdBufferStream = container.getCommandStream();
-    size_t sshOffset = 0;
 
     auto threadDims = static_cast<const uint32_t *>(args.pThreadGroupDimensions);
     const Vec3<size_t> threadStartVec{0, 0, 0};
@@ -94,7 +93,6 @@ void EncodeDispatchKernel<Family>::encode(CommandContainer &container,
     container.prepareBindfulSsh();
     if (bindingTableStateCount > 0u) {
         auto ssh = container.getHeapWithRequiredSizeAndAlignment(HeapType::SURFACE_STATE, args.dispatchInterface->getSurfaceStateHeapDataSize(), BINDING_TABLE_STATE::SURFACESTATEPOINTER_ALIGN_SIZE);
-        sshOffset = ssh->getUsed();
         bindingTablePointer = static_cast<uint32_t>(EncodeSurfaceState<Family>::pushBindingTableAndSurfaceStates(
             *ssh, bindingTableStateCount,
             args.dispatchInterface->getSurfaceStateHeapData(),
@@ -60,7 +60,6 @@ void EncodeDispatchKernel<Family>::encode(CommandContainer &container,
     auto pImplicitArgs = args.dispatchInterface->getImplicitArgs();
 
     LinearStream *listCmdBufferStream = container.getCommandStream();
-    size_t sshOffset = 0;
 
     auto threadDims = static_cast<const uint32_t *>(args.pThreadGroupDimensions);
     const Vec3<size_t> threadStartVec{0, 0, 0};
@@ -114,7 +113,6 @@ void EncodeDispatchKernel<Family>::encode(CommandContainer &container,
     container.prepareBindfulSsh();
     if (bindingTableStateCount > 0u) {
         auto ssh = container.getHeapWithRequiredSizeAndAlignment(HeapType::SURFACE_STATE, args.dispatchInterface->getSurfaceStateHeapDataSize(), BINDING_TABLE_STATE::SURFACESTATEPOINTER_ALIGN_SIZE);
-        sshOffset = ssh->getUsed();
         bindingTablePointer = static_cast<uint32_t>(EncodeSurfaceState<Family>::pushBindingTableAndSurfaceStates(
             *ssh, bindingTableStateCount,
             args.dispatchInterface->getSurfaceStateHeapData(),
@@ -32,7 +32,7 @@ constexpr bool isWhitespace(char c) {
 }
 
 constexpr bool isLetter(char c) {
-    return ((c >= 'a') & (c <= 'z')) | ((c >= 'A') & (c <= 'Z'));
+    return ((c >= 'a') & (c <= 'z')) || ((c >= 'A') & (c <= 'Z'));
 }
 
 constexpr bool isNumber(char c) {
@@ -40,19 +40,19 @@ constexpr bool isNumber(char c) {
 }
 
 constexpr bool isAlphaNumeric(char c) {
-    return isLetter(c) | isNumber(c);
+    return isLetter(c) || isNumber(c);
 }
 
 constexpr bool isNameIdentifierCharacter(char c) {
-    return isAlphaNumeric(c) | ('_' == c);
+    return isAlphaNumeric(c) || ('_' == c);
 }
 
 constexpr bool isNameIdentifierBeginningCharacter(char c) {
-    return isLetter(c) | ('_' == c);
+    return isLetter(c) || ('_' == c);
 }
 
 constexpr bool isSign(char c) {
-    return ('+' == c) | ('-' == c);
+    return ('+' == c) || ('-' == c);
 }
 
 inline bool isSpecificNameIdentifier(ConstStringRef wholeText, const char *parsePos, ConstStringRef pattern) {
@@ -60,7 +60,7 @@ inline bool isSpecificNameIdentifier(ConstStringRef wholeText, const char *parse
     bool hasEnoughText = (reinterpret_cast<uintptr_t>(parsePos) + pattern.size() <= reinterpret_cast<uintptr_t>(wholeText.end()));
     bool isEnd = (reinterpret_cast<uintptr_t>(parsePos) + pattern.size() == reinterpret_cast<uintptr_t>(wholeText.end()));
     bool matched = hasEnoughText &&
-                   ((pattern == ConstStringRef(parsePos, pattern.size())) & (isEnd || (false == isNameIdentifierCharacter(parsePos[pattern.size()]))));
+                   ((pattern == ConstStringRef(parsePos, pattern.size())) && (isEnd || (false == isNameIdentifierCharacter(parsePos[pattern.size()]))));
     return matched;
 }
 
@@ -77,7 +77,7 @@ constexpr const char *consumeNumberOrSign(ConstStringRef wholeText, const char *
     if (isNumber(*parsePos)) {
         auto it = parsePos + 1;
         while (it < parseEnd) {
-            if (false == (isNumber(*it) | ('.' == *it))) {
+            if (false == (isNumber(*it) || ('.' == *it))) {
                 break;
             }
             ++it;
@@ -644,7 +644,7 @@ inline bool YamlParser::readValueChecked<bool>(const Node &node, bool &outValue)
     case 1:
         return true;
     case 2:
-        return ((token.cstrref()[1] == 'o') | (token.cstrref()[1] == 'O'));
+        return ((token.cstrref()[1] == 'o') || (token.cstrref()[1] == 'O'));
     }
     break;
 }
@@ -671,7 +671,7 @@ inline bool YamlParser::readValueChecked<bool>(const Node &node, bool &outValue)
         return false;
     case 2:
         outValue = true;
-        return ((token.cstrref()[1] == 'n') | (token.cstrref()[1] == 'N'));
+        return ((token.cstrref()[1] == 'n') || (token.cstrref()[1] == 'N'));
     case 3:
         outValue = false;
         return equalsCaseInsensitive(ConstStringRef("ff"), ConstStringRef(token.cstrref().begin() + 1, 2));
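
All of the parser hunks above are the same transformation: on bool operands, | and || (and & and &&) compute the same value, so the only behavioural difference is that the logical forms short-circuit, which is what clang 14's -Wbitwise-instead-of-logical asks for, and constexpr evaluation is unaffected. A small self-contained sketch with hypothetical names (not the project's yaml parser):

    #include <cassert>

    constexpr bool isSignSketch(char c) {
        // '||' skips the second comparison once the first one is true; with '|' both
        // comparisons were always evaluated, and the result was identical.
        return ('+' == c) || ('-' == c);
    }

    constexpr bool isDigitSketch(char c) {
        return (c >= '0') && (c <= '9');
    }

    constexpr bool isSignOrDigitSketch(char c) {
        return isSignSketch(c) || isDigitSketch(c);
    }

    int main() {
        static_assert(isSignOrDigitSketch('-') && isSignOrDigitSketch('7'), "still usable in constant expressions");
        assert(!isSignOrDigitSketch('x'));
        return 0;
    }
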
@@ -435,12 +435,10 @@ bool DirectSubmissionHw<GfxFamily, Dispatcher>::dispatchCommandBuffer(BatchBuffe
     size_t cycleSize = getSizeSwitchRingBufferSection();
     size_t requiredMinimalSize = dispatchSize + cycleSize + getSizeEnd();
 
-    bool buffersSwitched = false;
     getCommandBufferPositionGpuAddress(ringCommandStream.getSpace(0));
 
     if (ringCommandStream.getAvailableSpace() < requiredMinimalSize) {
         switchRingBuffers();
-        buffersSwitched = true;
     }
 
     handleNewResourcesSubmission();
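
Here, as with sshOffset and cmdType earlier, the write-only variable is simply removed because nothing is left to read it. When such a value has to stay, for example because it is only consumed in a debug-only path, the usual opt-out is the standard [[maybe_unused]] attribute; the sketch below is an illustration under that assumption, not code from this repository:

    #include <cstddef>

    std::size_t currentOffsetSketch() { return 64u; } // hypothetical helper

    void keepWriteOnlyValueSketch(bool debugLogging) {
        // [[maybe_unused]] tells the compiler the variable may legitimately go unread in
        // some configurations, so -Wunused-but-set-variable stays quiet without deleting it.
        [[maybe_unused]] std::size_t lastOffset = 0;
        lastOffset = currentOffsetSketch();
        if (debugLogging) {
            // a debug-only sink (e.g. a trace macro) would read lastOffset here
        }
    }
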
@@ -113,7 +113,7 @@ void populateKernelDescriptor(KernelDescriptor &dst, const SPatchKernelAttribute
     if (it != std::string::npos) {
         it += attributeReqdSubGroupSizeBeg.size();
         dst.kernelMetadata.requiredSubGroupSize = 0U;
-        while ((attributes[it] >= '0') & (attributes[it] <= '9')) {
+        while ((attributes[it] >= '0') && (attributes[it] <= '9')) {
            dst.kernelMetadata.requiredSubGroupSize *= 10;
            dst.kernelMetadata.requiredSubGroupSize += attributes[it] - '0';
            ++it;
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -235,7 +235,7 @@ constexpr bool equalsCaseInsensitive(const ConstStringRef &lhs, const ConstStrin
     constexpr auto caseDiff = 'a' - 'A';
     for (size_t i = 0, e = lhs.size(); i < e; ++i) {
 
-        if ((lhs[i] != rhs[i]) & (lhs[i] + caseDiff != rhs[i]) & (lhs[i] != rhs[i] + caseDiff)) {
+        if ((lhs[i] != rhs[i]) && (lhs[i] + caseDiff != rhs[i]) && (lhs[i] != rhs[i] + caseDiff)) {
             return false;
         }
     }
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -205,7 +205,7 @@ TEST(YamlConsumeNumberOrSign, GivenInvalidCharacterThenReturnCurrentParsePositio
     EXPECT_EQ(plusPlusSeven.begin() + 1, NEO::Yaml::consumeNumberOrSign(plusPlusSeven, plusPlusSeven.begin()));
 
     for (int c = std::numeric_limits<char>::min(); c <= std::numeric_limits<char>::max(); ++c) {
-        bool isSignOrNumber = NEO::Yaml::isSign(static_cast<char>(c)) | NEO::Yaml::isNumber(static_cast<char>(c));
+        bool isSignOrNumber = NEO::Yaml::isSign(static_cast<char>(c)) || NEO::Yaml::isNumber(static_cast<char>(c));
         char numberStr[] = {static_cast<char>(c), '\0'};
         auto expected = numberStr + (isSignOrNumber ? 1 : 0);
         EXPECT_EQ(expected, NEO::Yaml::consumeNumberOrSign(ConstStringRef::fromArray(numberStr), numberStr)) << c;