refactor: correct variable naming

commit b92b5892b8
parent d25b3ebcdc
Author:    Mateusz Jablonski <mateusz.jablonski@intel.com>
Committer: Compute-Runtime-Automation
Date:      2023-11-30 13:33:54 +00:00
Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>

17 changed files with 150 additions and 152 deletions
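
Note: the rename applies lowerCamelCase to constexpr constants and to non-type template parameters throughout the test and core code, while type template parameters move to PascalCase (gfxProduct -> GfxFamily). A minimal sketch of the convention; the struct names below are illustrative only, not from the repository:

#include <cstdint>

// Non-type constants and non-type template parameters: lowerCamelCase.
constexpr uint32_t vF0HbmLRead = 16; // was VF0HbmLRead

template <bool blockOnFence> // was BlockOnFence
struct ExampleFixture {      // illustrative name
    bool blockOnFenceMode = blockOnFence;
};

// Type template parameters: PascalCase.
template <typename GfxFamily> // was gfxProduct
struct ExampleHelper {};      // illustrative name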

View File

@@ -43,14 +43,14 @@ constexpr uint16_t vF1Hbm2ReadIndex = 344;
constexpr uint16_t vF1Hbm2WriteIndex = 348;
constexpr uint16_t vF1Hbm3ReadIndex = 360;
constexpr uint16_t vF1Hbm3WriteIndex = 364;
constexpr uint32_t VF0HbmLRead = 16;
constexpr uint32_t VF0HbmHRead = 2;
constexpr uint32_t VF0HbmLWrite = 8;
constexpr uint32_t VF0HbmHWrite = 2;
constexpr uint32_t VF1HbmLRead = 16;
constexpr uint32_t VF1HbmHRead = 2;
constexpr uint32_t VF1HbmLWrite = 8;
constexpr uint32_t VF1HbmHWrite = 2;
constexpr uint32_t vF0HbmLRead = 16;
constexpr uint32_t vF0HbmHRead = 2;
constexpr uint32_t vF0HbmLWrite = 8;
constexpr uint32_t vF0HbmHWrite = 2;
constexpr uint32_t vF1HbmLRead = 16;
constexpr uint32_t vF1HbmHRead = 2;
constexpr uint32_t vF1HbmLWrite = 8;
constexpr uint32_t vF1HbmHWrite = 2;
constexpr uint8_t vF0VfidValue = 1;
constexpr uint8_t vF0Hbm0ReadValue = 92;
@@ -317,13 +317,13 @@ struct MockMemoryPmt : public L0::Sysman::PlatformMonitoringTech {
} else if (key.compare("VF0_HBM3_WRITE") == 0) {
val = vF0Hbm3WriteValue;
} else if (key.compare("VF0_HBM_READ_L") == 0) {
val = VF0HbmLRead;
val = vF0HbmLRead;
} else if (key.compare("VF0_HBM_READ_H") == 0) {
val = VF0HbmHRead;
val = vF0HbmHRead;
} else if (key.compare("VF0_HBM_WRITE_L") == 0) {
val = VF0HbmLWrite;
val = vF0HbmLWrite;
} else if (key.compare("VF0_HBM_WRITE_H") == 0) {
val = VF0HbmHWrite;
val = vF0HbmHWrite;
} else {
return ZE_RESULT_ERROR_NOT_AVAILABLE;
}
@@ -356,13 +356,13 @@ struct MockMemoryPmt : public L0::Sysman::PlatformMonitoringTech {
} else if (key.compare("VF1_HBM3_WRITE") == 0) {
val = vF1Hbm3WriteValue;
} else if (key.compare("VF1_HBM_READ_L") == 0) {
val = VF1HbmLRead;
val = vF1HbmLRead;
} else if (key.compare("VF1_HBM_READ_H") == 0) {
val = VF1HbmHRead;
val = vF1HbmHRead;
} else if (key.compare("VF1_HBM_WRITE_L") == 0) {
val = VF1HbmLWrite;
val = vF1HbmLWrite;
} else if (key.compare("VF1_HBM_WRITE_H") == 0) {
val = VF1HbmHWrite;
val = vF1HbmHWrite;
} else {
return ZE_RESULT_ERROR_NOT_AVAILABLE;
}

View File

@@ -489,12 +489,12 @@ HWTEST2_F(SysmanDeviceMemoryFixture, GivenValidMemoryHandleWhenCallingzesSysmanM
pSysfsAccess->mockReadReturnStatus.push_back(ZE_RESULT_SUCCESS);
EXPECT_EQ(zesMemoryGetBandwidth(handle, &bandwidth), ZE_RESULT_SUCCESS);
expectedReadCounters |= VF0HbmHRead;
expectedReadCounters = (expectedReadCounters << 32) | VF0HbmLRead;
expectedReadCounters |= vF0HbmHRead;
expectedReadCounters = (expectedReadCounters << 32) | vF0HbmLRead;
expectedReadCounters = expectedReadCounters * transactionSize;
EXPECT_EQ(bandwidth.readCounter, expectedReadCounters);
expectedWriteCounters |= VF0HbmHWrite;
expectedWriteCounters = (expectedWriteCounters << 32) | VF0HbmLWrite;
expectedWriteCounters |= vF0HbmHWrite;
expectedWriteCounters = (expectedWriteCounters << 32) | vF0HbmLWrite;
expectedWriteCounters = expectedWriteCounters * transactionSize;
EXPECT_EQ(bandwidth.writeCounter, expectedWriteCounters);
expectedBandwidth = 128 * hbmRP0Frequency * 1000 * 1000 * 4;
@@ -525,12 +525,12 @@ HWTEST2_F(SysmanDeviceMemoryFixture, GivenValidMemoryHandleWhenCallingzesSysmanM
pSysfsAccess->mockReadReturnStatus.push_back(ZE_RESULT_SUCCESS);
EXPECT_EQ(zesMemoryGetBandwidth(handle, &bandwidth), ZE_RESULT_SUCCESS);
expectedReadCounters |= VF0HbmHRead;
expectedReadCounters = (expectedReadCounters << 32) | VF0HbmLRead;
expectedReadCounters |= vF0HbmHRead;
expectedReadCounters = (expectedReadCounters << 32) | vF0HbmLRead;
expectedReadCounters = expectedReadCounters * transactionSize;
EXPECT_EQ(bandwidth.readCounter, expectedReadCounters);
expectedWriteCounters |= VF0HbmHWrite;
expectedWriteCounters = (expectedWriteCounters << 32) | VF0HbmLWrite;
expectedWriteCounters |= vF0HbmHWrite;
expectedWriteCounters = (expectedWriteCounters << 32) | vF0HbmLWrite;
expectedWriteCounters = expectedWriteCounters * transactionSize;
EXPECT_EQ(bandwidth.writeCounter, expectedWriteCounters);
expectedBandwidth = 128 * hbmRP0Frequency * 1000 * 1000 * 4;
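
Note: the expected counters above are assembled from the 32-bit high and low PMT words and then scaled by the transaction size. A worked sketch of that arithmetic, using the mock values vF0HbmHRead = 2 and vF0HbmLRead = 16 and a hypothetical transactionSize of 32:

#include <cstdint>
#include <cassert>

int main() {
    constexpr uint32_t vF0HbmHRead = 2;      // high 32 bits from PMT
    constexpr uint32_t vF0HbmLRead = 16;     // low 32 bits from PMT
    constexpr uint64_t transactionSize = 32; // hypothetical value

    uint64_t expectedReadCounters = 0;
    expectedReadCounters |= vF0HbmHRead;                               // 0x2
    expectedReadCounters = (expectedReadCounters << 32) | vF0HbmLRead; // 0x200000010
    expectedReadCounters = expectedReadCounters * transactionSize;     // (2 * 2^32 + 16) * 32

    assert(expectedReadCounters == ((uint64_t(2) << 32) | 16) * 32);
    return 0;
}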

View File

@@ -55,7 +55,6 @@ constexpr uint64_t socFatalMdfiEastCount = 3u;
constexpr uint64_t socFatalMdfiWestCountTile1 = 0u;
constexpr uint64_t socFatalPunitTile1 = 3u;
constexpr uint64_t fatalFpuTile0 = 1u;
constexpr uint64_t FatalL3FabricTile0 = 4u;
constexpr uint64_t euAttention = 10u;
constexpr uint64_t euAttentionTile0 = 5u;
constexpr uint64_t euAttentionTile1 = 2u;

View File

@@ -86,8 +86,8 @@ struct DebugSessionImp : DebugSession {
virtual ze_result_t readGpuMemory(uint64_t memoryHandle, char *output, size_t size, uint64_t gpuVa) = 0;
virtual ze_result_t writeGpuMemory(uint64_t memoryHandle, const char *input, size_t size, uint64_t gpuVa) = 0;
template <class bufferType, bool write>
ze_result_t slmMemoryAccess(EuThread::ThreadId threadId, const zet_debug_memory_space_desc_t *desc, size_t size, bufferType buffer);
template <class BufferType, bool write>
ze_result_t slmMemoryAccess(EuThread::ThreadId threadId, const zet_debug_memory_space_desc_t *desc, size_t size, BufferType buffer);
ze_result_t validateThreadAndDescForMemoryAccess(ze_device_thread_t thread, const zet_debug_memory_space_desc_t *desc);
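
Note: slmMemoryAccess selects the transfer direction at compile time through the write non-type parameter. The following is a minimal sketch of that pattern only, not the actual SLM access logic; the backing store and buffers are hypothetical stand-ins:

#include <cstddef>
#include <cstring>

static unsigned char fakeSlm[64]; // hypothetical stand-in for shared local memory

template <class BufferType, bool write>
void slmMemoryAccessSketch(BufferType buffer, size_t size) {
    if constexpr (write) {
        std::memcpy(fakeSlm, buffer, size); // write path: buffer is the (const) input
    } else {
        std::memcpy(buffer, fakeSlm, size); // read path: buffer is the writable output
    }
}

int main() {
    const char in[16] = "slm data";
    char out[16] = {};
    slmMemoryAccessSketch<const char *, true>(in, sizeof(in)); // write
    slmMemoryAccessSketch<char *, false>(out, sizeof(out));    // read
    return 0;
}
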
@@ -186,8 +186,8 @@ struct DebugSessionImp : DebugSession {
uint32_t maxUnitsPerLoop = EXCHANGE_BUFFER_SIZE / slmSendBytesSize;
};
template <class bufferType, bool write>
ze_result_t DebugSessionImp::slmMemoryAccess(EuThread::ThreadId threadId, const zet_debug_memory_space_desc_t *desc, size_t size, bufferType buffer) {
template <class BufferType, bool write>
ze_result_t DebugSessionImp::slmMemoryAccess(EuThread::ThreadId threadId, const zet_debug_memory_space_desc_t *desc, size_t size, BufferType buffer) {
ze_result_t status;
if (!sipSupportsSlm) {

View File

@@ -7881,7 +7881,7 @@ TEST_F(DebugApiLinuxMultitileTest, givenApiThreadAndMultipleTilesWhenGettingDevi
EXPECT_EQ(1u, deviceIndex);
}
template <bool BlockOnFence = false>
template <bool blockOnFence = false>
struct DebugApiLinuxMultiDeviceVmBindFixture : public DebugApiLinuxMultiDeviceFixture, public MockDebugSessionLinuxi915Helper {
void setUp() {
DebugApiLinuxMultiDeviceFixture::setUp();
@@ -7896,7 +7896,7 @@ struct DebugApiLinuxMultiDeviceVmBindFixture : public DebugApiLinuxMultiDeviceFi
handler = new MockIoctlHandler;
session->ioctlHandler.reset(handler);
session->blockOnFenceMode = BlockOnFence;
session->blockOnFenceMode = blockOnFence;
setupSessionClassHandlesAndUuidMap(session.get());
setupVmToTile(session.get());
}

View File

@@ -20,14 +20,14 @@
using namespace NEO;
constexpr uint32_t VF0HbmLRead = 16;
constexpr uint32_t VF0HbmHRead = 2;
constexpr uint32_t VF0HbmLWrite = 8;
constexpr uint32_t VF0HbmHWrite = 2;
constexpr uint32_t VF1HbmLRead = 16;
constexpr uint32_t VF1HbmHRead = 2;
constexpr uint32_t VF1HbmLWrite = 8;
constexpr uint32_t VF1HbmHWrite = 2;
constexpr uint32_t vF0HbmLRead = 16;
constexpr uint32_t vF0HbmHRead = 2;
constexpr uint32_t vF0HbmLWrite = 8;
constexpr uint32_t vF0HbmHWrite = 2;
constexpr uint32_t vF1HbmLRead = 16;
constexpr uint32_t vF1HbmHRead = 2;
constexpr uint32_t vF1HbmLWrite = 8;
constexpr uint32_t vF1HbmHWrite = 2;
constexpr uint16_t vF0VfidIndex = 88;
constexpr uint16_t vF0Hbm0ReadIndex = 92;
constexpr uint16_t vF0Hbm0WriteIndex = 96;
@@ -313,13 +313,13 @@ struct MockMemoryPmt : public PlatformMonitoringTech {
} else if (key.compare("VF0_HBM3_WRITE") == 0) {
val = vF0Hbm3WriteValue;
} else if (key.compare("VF0_HBM_READ_L") == 0) {
val = VF0HbmLRead;
val = vF0HbmLRead;
} else if (key.compare("VF0_HBM_READ_H") == 0) {
val = VF0HbmHRead;
val = vF0HbmHRead;
} else if (key.compare("VF0_HBM_WRITE_L") == 0) {
val = VF0HbmLWrite;
val = vF0HbmLWrite;
} else if (key.compare("VF0_HBM_WRITE_H") == 0) {
val = VF0HbmHWrite;
val = vF0HbmHWrite;
} else {
return ZE_RESULT_ERROR_NOT_AVAILABLE;
}
@@ -352,13 +352,13 @@ struct MockMemoryPmt : public PlatformMonitoringTech {
} else if (key.compare("VF1_HBM3_WRITE") == 0) {
val = vF1Hbm3WriteValue;
} else if (key.compare("VF1_HBM_READ_L") == 0) {
val = VF1HbmLRead;
val = vF1HbmLRead;
} else if (key.compare("VF1_HBM_READ_H") == 0) {
val = VF1HbmHRead;
val = vF1HbmHRead;
} else if (key.compare("VF1_HBM_WRITE_L") == 0) {
val = VF1HbmLWrite;
val = vF1HbmLWrite;
} else if (key.compare("VF1_HBM_WRITE_H") == 0) {
val = VF1HbmHWrite;
val = vF1HbmHWrite;
} else {
return ZE_RESULT_ERROR_NOT_AVAILABLE;
}

View File

@@ -525,12 +525,12 @@ HWTEST2_F(SysmanDeviceMemoryFixture, GivenValidMemoryHandleWhenCallingzesSysmanM
pSysfsAccess->mockReadReturnStatus.push_back(ZE_RESULT_SUCCESS);
EXPECT_EQ(zesMemoryGetBandwidth(handle, &bandwidth), ZE_RESULT_SUCCESS);
expectedReadCounters |= VF0HbmHRead;
expectedReadCounters = (expectedReadCounters << 32) | VF0HbmLRead;
expectedReadCounters |= vF0HbmHRead;
expectedReadCounters = (expectedReadCounters << 32) | vF0HbmLRead;
expectedReadCounters = expectedReadCounters * transactionSize;
EXPECT_EQ(bandwidth.readCounter, expectedReadCounters);
expectedWriteCounters |= VF0HbmHWrite;
expectedWriteCounters = (expectedWriteCounters << 32) | VF0HbmLWrite;
expectedWriteCounters |= vF0HbmHWrite;
expectedWriteCounters = (expectedWriteCounters << 32) | vF0HbmLWrite;
expectedWriteCounters = expectedWriteCounters * transactionSize;
EXPECT_EQ(bandwidth.writeCounter, expectedWriteCounters);
expectedBandwidth = 128 * hbmRP0Frequency * 1000 * 1000 * 4;
@@ -560,12 +560,12 @@ HWTEST2_F(SysmanDeviceMemoryFixture, GivenValidMemoryHandleWhenCallingzesSysmanM
pSysfsAccess->mockReadReturnStatus.push_back(ZE_RESULT_SUCCESS);
EXPECT_EQ(zesMemoryGetBandwidth(handle, &bandwidth), ZE_RESULT_SUCCESS);
expectedReadCounters |= VF0HbmHRead;
expectedReadCounters = (expectedReadCounters << 32) | VF0HbmLRead;
expectedReadCounters |= vF0HbmHRead;
expectedReadCounters = (expectedReadCounters << 32) | vF0HbmLRead;
expectedReadCounters = expectedReadCounters * transactionSize;
EXPECT_EQ(bandwidth.readCounter, expectedReadCounters);
expectedWriteCounters |= VF0HbmHWrite;
expectedWriteCounters = (expectedWriteCounters << 32) | VF0HbmLWrite;
expectedWriteCounters |= vF0HbmHWrite;
expectedWriteCounters = (expectedWriteCounters << 32) | vF0HbmLWrite;
expectedWriteCounters = expectedWriteCounters * transactionSize;
EXPECT_EQ(bandwidth.writeCounter, expectedWriteCounters);
expectedBandwidth = 128 * hbmRP0Frequency * 1000 * 1000 * 4;

View File

@@ -52,7 +52,6 @@ constexpr uint64_t socFatalMdfiEastCount = 3u;
constexpr uint64_t socFatalMdfiWestCountTile1 = 0u;
constexpr uint64_t socFatalPunitTile1 = 3u;
constexpr uint64_t fatalFpuTile0 = 1u;
constexpr uint64_t FatalL3FabricTile0 = 4u;
constexpr uint64_t euAttention = 10u;
constexpr uint64_t euAttentionTile0 = 5u;
constexpr uint64_t euAttentionTile1 = 2u;

View File

@@ -60,7 +60,7 @@ static constexpr uint32_t powConst(uint32_t base, uint32_t currExp) {
return (currExp == 1) ? base : base * powConst(base, currExp - 1);
}
template <SplitDispatch::Dim Dim, SplitDispatch::SplitMode Mode>
template <SplitDispatch::Dim dim, SplitDispatch::SplitMode mode>
class DispatchInfoBuilder {
public:
DispatchInfoBuilder(ClDevice &clDevice) {
@@ -100,19 +100,19 @@ class DispatchInfoBuilder {
}
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d1D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d1D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setArgSvm(SplitDispatch::RegionCoordX x, ArgsT &&...args) {
dispatchInfos[getDispatchId(x)].getKernel()->setArgSvm(std::forward<ArgsT>(args)...);
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d2D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d2D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setArgSvm(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y, ArgsT &&...args) {
dispatchInfos[getDispatchId(x, y)].getKernel()->setArgSvm(std::forward<ArgsT>(args)...);
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d3D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d3D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setArgSvm(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y, SplitDispatch::RegionCoordZ z, ArgsT &&...args) {
dispatchInfos[getDispatchId(x, y, z)].getKernel()->setArgSvm(std::forward<ArgsT>(args)...);
}
@@ -131,46 +131,46 @@ class DispatchInfoBuilder {
return result;
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d1D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d1D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setArg(SplitDispatch::RegionCoordX x, ArgsT &&...args) {
dispatchInfos[getDispatchId(x)].getKernel()->setArg(std::forward<ArgsT>(args)...);
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d2D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d2D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setArg(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y, ArgsT &&...args) {
dispatchInfos[getDispatchId(x, y)].getKernel()->setArg(std::forward<ArgsT>(args)...);
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d3D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d3D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setArg(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y, SplitDispatch::RegionCoordZ z, ArgsT &&...args) {
dispatchInfos[getDispatchId(x, y, z)].getKernel()->setArg(std::forward<ArgsT>(args)...);
}
template <SplitDispatch::Dim D = Dim>
typename std::enable_if<(D == SplitDispatch::Dim::d1D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim>
typename std::enable_if<(d == SplitDispatch::Dim::d1D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setKernel(SplitDispatch::RegionCoordX x, Kernel *kern) {
dispatchInfos[getDispatchId(x)].setKernel(kern);
}
template <SplitDispatch::Dim D = Dim>
typename std::enable_if<(D == SplitDispatch::Dim::d2D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim>
typename std::enable_if<(d == SplitDispatch::Dim::d2D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setKernel(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y, Kernel *kern) {
dispatchInfos[getDispatchId(x, y)].setKernel(kern);
}
template <SplitDispatch::Dim D = Dim>
typename std::enable_if<(D == SplitDispatch::Dim::d3D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim>
typename std::enable_if<(d == SplitDispatch::Dim::d3D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setKernel(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y, SplitDispatch::RegionCoordZ z, Kernel *kern) {
dispatchInfos[getDispatchId(x, y, z)].setKernel(kern);
}
template <SplitDispatch::SplitMode M = Mode>
typename std::enable_if<(M == SplitDispatch::SplitMode::NoSplit) || (M == SplitDispatch::SplitMode::WalkerSplit), void>::type
setDispatchGeometry(const uint32_t dim, const Vec3<size_t> &gws, const Vec3<size_t> &elws, const Vec3<size_t> &offset, const Vec3<size_t> &agws = {0, 0, 0}, const Vec3<size_t> &lws = {0, 0, 0}, const Vec3<size_t> &twgs = {0, 0, 0}, const Vec3<size_t> &nwgs = {0, 0, 0}, const Vec3<size_t> &swgs = {0, 0, 0}) {
template <SplitDispatch::SplitMode m = mode>
typename std::enable_if<(m == SplitDispatch::SplitMode::NoSplit) || (m == SplitDispatch::SplitMode::WalkerSplit), void>::type
setDispatchGeometry(const uint32_t inputDim, const Vec3<size_t> &gws, const Vec3<size_t> &elws, const Vec3<size_t> &offset, const Vec3<size_t> &agws = {0, 0, 0}, const Vec3<size_t> &lws = {0, 0, 0}, const Vec3<size_t> &twgs = {0, 0, 0}, const Vec3<size_t> &nwgs = {0, 0, 0}, const Vec3<size_t> &swgs = {0, 0, 0}) {
auto &dispatchInfo = dispatchInfos[0];
DEBUG_BREAK_IF(dim > static_cast<uint32_t>(Dim) + 1);
dispatchInfo.setDim(dim);
DEBUG_BREAK_IF(inputDim > static_cast<uint32_t>(dim) + 1);
dispatchInfo.setDim(inputDim);
dispatchInfo.setGWS(gws);
dispatchInfo.setEnqueuedWorkgroupSize(elws);
dispatchInfo.setOffsets(offset);
@@ -181,11 +181,11 @@ class DispatchInfoBuilder {
dispatchInfo.setStartOfWorkgroups(swgs);
}
template <SplitDispatch::SplitMode M = Mode>
typename std::enable_if<(M == SplitDispatch::SplitMode::NoSplit) || (M == SplitDispatch::SplitMode::WalkerSplit), void>::type
template <SplitDispatch::SplitMode m = mode>
typename std::enable_if<(m == SplitDispatch::SplitMode::NoSplit) || (m == SplitDispatch::SplitMode::WalkerSplit), void>::type
setDispatchGeometry(const Vec3<size_t> &gws, const Vec3<size_t> &elws, const Vec3<size_t> &offset, const Vec3<size_t> &agws = {0, 0, 0}, const Vec3<size_t> &lws = {0, 0, 0}, const Vec3<size_t> &twgs = {0, 0, 0}, const Vec3<size_t> &nwgs = {0, 0, 0}, const Vec3<size_t> &swgs = {0, 0, 0}) {
auto &dispatchInfo = dispatchInfos[0];
dispatchInfo.setDim(static_cast<uint32_t>(Dim) + 1);
dispatchInfo.setDim(static_cast<uint32_t>(dim) + 1);
dispatchInfo.setGWS(gws);
dispatchInfo.setEnqueuedWorkgroupSize(elws);
dispatchInfo.setOffsets(offset);
@@ -196,12 +196,12 @@ class DispatchInfoBuilder {
dispatchInfo.setStartOfWorkgroups(swgs);
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d1D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d1D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setDispatchGeometry(SplitDispatch::RegionCoordX x,
const Vec3<size_t> &gws, const Vec3<size_t> &elws, const Vec3<size_t> &offset, const Vec3<size_t> &agws = {0, 0, 0}, const Vec3<size_t> &lws = {0, 0, 0}, const Vec3<size_t> &twgs = {0, 0, 0}, const Vec3<size_t> &nwgs = {0, 0, 0}, const Vec3<size_t> &swgs = {0, 0, 0}) {
auto &dispatchInfo = dispatchInfos[getDispatchId(x)];
dispatchInfo.setDim(static_cast<uint32_t>(Dim) + 1);
dispatchInfo.setDim(static_cast<uint32_t>(dim) + 1);
dispatchInfo.setGWS(gws);
dispatchInfo.setEnqueuedWorkgroupSize(elws);
dispatchInfo.setOffsets(offset);
@@ -212,12 +212,12 @@ class DispatchInfoBuilder {
dispatchInfo.setStartOfWorkgroups(swgs);
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d2D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d2D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setDispatchGeometry(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y,
const Vec3<size_t> &gws, const Vec3<size_t> &elws, const Vec3<size_t> &offset, const Vec3<size_t> &agws = {0, 0, 0}, const Vec3<size_t> lws = {0, 0, 0}, const Vec3<size_t> &twgs = {0, 0, 0}, const Vec3<size_t> &nwgs = {0, 0, 0}, const Vec3<size_t> &swgs = {0, 0, 0}) {
auto &dispatchInfo = dispatchInfos[getDispatchId(x, y)];
dispatchInfo.setDim(static_cast<uint32_t>(Dim) + 1);
dispatchInfo.setDim(static_cast<uint32_t>(dim) + 1);
dispatchInfo.setGWS(gws);
dispatchInfo.setEnqueuedWorkgroupSize(elws);
dispatchInfo.setOffsets(offset);
@@ -228,12 +228,12 @@ class DispatchInfoBuilder {
dispatchInfo.setStartOfWorkgroups(swgs);
}
template <SplitDispatch::Dim D = Dim, typename... ArgsT>
typename std::enable_if<(D == SplitDispatch::Dim::d3D) && (Mode != SplitDispatch::SplitMode::NoSplit), void>::type
template <SplitDispatch::Dim d = dim, typename... ArgsT>
typename std::enable_if<(d == SplitDispatch::Dim::d3D) && (mode != SplitDispatch::SplitMode::NoSplit), void>::type
setDispatchGeometry(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y, SplitDispatch::RegionCoordZ z,
const Vec3<size_t> &gws, const Vec3<size_t> &elws, const Vec3<size_t> &offset, const Vec3<size_t> &agws = {0, 0, 0}, const Vec3<size_t> &lws = {0, 0, 0}, const Vec3<size_t> &twgs = {0, 0, 0}, const Vec3<size_t> &nwgs = {0, 0, 0}, const Vec3<size_t> &swgs = {0, 0, 0}) {
auto &dispatchInfo = dispatchInfos[getDispatchId(x, y, z)];
dispatchInfo.setDim(static_cast<uint32_t>(Dim) + 1);
dispatchInfo.setDim(static_cast<uint32_t>(dim) + 1);
dispatchInfo.setGWS(gws);
dispatchInfo.setEnqueuedWorkgroupSize(elws);
dispatchInfo.setOffsets(offset);
@@ -304,7 +304,7 @@ class DispatchInfoBuilder {
protected:
static bool supportsSplit() {
return (Mode == SplitDispatch::SplitMode::WalkerSplit);
return (mode == SplitDispatch::SplitMode::WalkerSplit);
}
static bool needsSplit(const DispatchInfo &dispatchInfo) {
@@ -432,19 +432,19 @@ class DispatchInfoBuilder {
}
static constexpr uint32_t getDispatchId(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y, SplitDispatch::RegionCoordZ z) {
return static_cast<uint32_t>(x) + static_cast<uint32_t>(y) * (static_cast<uint32_t>(Mode) + 1) + static_cast<uint32_t>(z) * (static_cast<uint32_t>(Mode) + 1) * (static_cast<uint32_t>(Mode) + 1);
return static_cast<uint32_t>(x) + static_cast<uint32_t>(y) * (static_cast<uint32_t>(mode) + 1) + static_cast<uint32_t>(z) * (static_cast<uint32_t>(mode) + 1) * (static_cast<uint32_t>(mode) + 1);
}
static constexpr uint32_t getDispatchId(SplitDispatch::RegionCoordX x, SplitDispatch::RegionCoordY y) {
return static_cast<uint32_t>(x) + static_cast<uint32_t>(y) * (static_cast<uint32_t>(Mode) + 1);
return static_cast<uint32_t>(x) + static_cast<uint32_t>(y) * (static_cast<uint32_t>(mode) + 1);
}
static constexpr uint32_t getDispatchId(SplitDispatch::RegionCoordX x) {
return static_cast<uint32_t>(x);
}
static const size_t numDispatches = (Mode == SplitDispatch::SplitMode::WalkerSplit) ? 1 : powConst((static_cast<uint32_t>(Mode) + 1), // 1 (middle) 2 (middle + right/bottom) or 3 (lef/top + middle + right/mottom)
(static_cast<uint32_t>(Dim) + 1)); // 1, 2 or 3
static const size_t numDispatches = (mode == SplitDispatch::SplitMode::WalkerSplit) ? 1 : powConst((static_cast<uint32_t>(mode) + 1), // 1 (middle) 2 (middle + right/bottom) or 3 (lef/top + middle + right/mottom)
(static_cast<uint32_t>(dim) + 1)); // 1, 2 or 3
DispatchInfo dispatchInfos[numDispatches];
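
Note: numDispatches is the per-dimension split count raised to the number of dimensions, evaluated at compile time with the recursive powConst shown earlier (WalkerSplit collapses to a single dispatch). A worked sketch, assuming the enum values are 0-based as the inline comments suggest; the enum names here are local stand-ins:

#include <cstddef>
#include <cstdint>

constexpr uint32_t powConst(uint32_t base, uint32_t currExp) {
    return (currExp == 1) ? base : base * powConst(base, currExp - 1);
}

enum class Dim : uint32_t { d1D = 0, d2D = 1, d3D = 2 };                 // stand-in
enum class SplitMode : uint32_t { noSplit = 0, split2 = 1, split3 = 2 }; // stand-in

constexpr size_t numDispatches(Dim dim, SplitMode mode) {
    return powConst(static_cast<uint32_t>(mode) + 1, static_cast<uint32_t>(dim) + 1);
}

static_assert(numDispatches(Dim::d2D, SplitMode::split3) == 9, "3 x 3 regions in 2D");
static_assert(numDispatches(Dim::d3D, SplitMode::split2) == 8, "2 x 2 x 2 regions in 3D");
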
@@ -457,8 +457,8 @@ class DispatchInfoBuilder {
return x % y ? 1 : 0;
}
static bool isWorkSizeValid(uint32_t dim, const Vec3<size_t> &workSize) {
switch (dim) {
static bool isWorkSizeValid(uint32_t inputDim, const Vec3<size_t> &workSize) {
switch (inputDim) {
case 1:
return workSize.x > 0;
case 2:

View File

@@ -283,7 +283,7 @@ TEST_P(GetPlatformInfoTests, GivenValidParamWhenGettingPlatformInfoStringThenNon
delete[] paramValue;
}
const cl_platform_info PlatformInfoTestValues[] =
const cl_platform_info platformInfoTestValues[] =
{
CL_PLATFORM_PROFILE,
CL_PLATFORM_VERSION,
@@ -295,5 +295,5 @@ const cl_platform_info PlatformInfoTestValues[] =
INSTANTIATE_TEST_CASE_P(api,
GetPlatformInfoTests,
::testing::ValuesIn(PlatformInfoTestValues));
::testing::ValuesIn(platformInfoTestValues));
} // namespace ULT

View File

@@ -49,9 +49,9 @@ TEST(GlArbSyncEvent, whenCreateArbSyncEventNameIsCalledMultipleTimesThenEachCall
NEO::destroyArbSyncEventName(name3);
}
template <bool SignalWaited>
template <bool signalWaited>
inline void glArbSyncObjectWaitServerMock(NEO::OSInterface &osInterface, CL_GL_SYNC_INFO &glSyncInfo) {
glSyncInfo.waitCalled = SignalWaited;
glSyncInfo.waitCalled = signalWaited;
}
struct MockBaseEvent : Event {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2019-2022 Intel Corporation
* Copyright (C) 2019-2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@@ -18,11 +18,11 @@
using FullyEnabledClFileLogger = NEO::ClFileLogger<DebugFunctionalityLevel::Full>;
using FullyDisabledClFileLogger = NEO::ClFileLogger<DebugFunctionalityLevel::None>;
template <bool DebugFunctionality>
class TestLoggerApiEnterWrapper : public NEO::LoggerApiEnterWrapper<DebugFunctionality> {
template <bool debugFunctionality>
class TestLoggerApiEnterWrapper : public NEO::LoggerApiEnterWrapper<debugFunctionality> {
public:
TestLoggerApiEnterWrapper(const char *functionName, int *errCode) : NEO::LoggerApiEnterWrapper<DebugFunctionality>(functionName, errCode) {
if (DebugFunctionality) {
TestLoggerApiEnterWrapper(const char *functionName, int *errCode) : NEO::LoggerApiEnterWrapper<debugFunctionality>(functionName, errCode) {
if (debugFunctionality) {
loggedEnter = true;
}
}

View File

@@ -11,16 +11,16 @@
namespace NEO {
template <PRODUCT_FAMILY Product>
inline void AILConfigurationHw<Product>::modifyKernelIfRequired(std::string &kernel) {
template <PRODUCT_FAMILY product>
inline void AILConfigurationHw<product>::modifyKernelIfRequired(std::string &kernel) {
}
// To avoid a known oneDNN issue in ZEBin handling,
// fall back to legacy (patchtoken) format when dummy kernel used by nGen is detected.
// Only this specific kernel with that exact source code will be affected.
template <PRODUCT_FAMILY Product>
inline bool AILConfigurationHw<Product>::isFallbackToPatchtokensRequired(const std::string &kernelSources) {
template <PRODUCT_FAMILY product>
inline bool AILConfigurationHw<product>::isFallbackToPatchtokensRequired(const std::string &kernelSources) {
std::string_view dummyKernelSource{"kernel void _(){}"};
if (sourcesContain(kernelSources, dummyKernelSource)) {
return true;
@@ -36,17 +36,17 @@ inline bool AILConfigurationHw<Product>::isFallbackToPatchtokensRequired(const s
return false;
}
template <PRODUCT_FAMILY Product>
inline void AILConfigurationHw<Product>::applyExt(RuntimeCapabilityTable &runtimeCapabilityTable) {
template <PRODUCT_FAMILY product>
inline void AILConfigurationHw<product>::applyExt(RuntimeCapabilityTable &runtimeCapabilityTable) {
}
template <PRODUCT_FAMILY Product>
inline bool AILConfigurationHw<Product>::isContextSyncFlagRequired() {
template <PRODUCT_FAMILY product>
inline bool AILConfigurationHw<product>::isContextSyncFlagRequired() {
return false;
}
template <PRODUCT_FAMILY Product>
inline bool AILConfigurationHw<Product>::useLegacyValidationLogic() {
template <PRODUCT_FAMILY product>
inline bool AILConfigurationHw<product>::useLegacyValidationLogic() {
return false;
}
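
Note: the patchtokens fallback is keyed on the exact nGen dummy kernel source. A minimal sketch of that check; sourcesContain here is a hypothetical stand-in built on std::string::find, and the real function also checks further legacy sources not shown in this hunk:

#include <cassert>
#include <string>
#include <string_view>

static bool sourcesContain(const std::string &sources, std::string_view needle) {
    return sources.find(needle) != std::string::npos; // hypothetical implementation
}

static bool isFallbackToPatchtokensRequiredSketch(const std::string &kernelSources) {
    std::string_view dummyKernelSource{"kernel void _(){}"};
    return sourcesContain(kernelSources, dummyKernelSource);
}

int main() {
    assert(isFallbackToPatchtokensRequiredSketch("kernel void _(){}"));
    assert(!isFallbackToPatchtokensRequiredSketch("kernel void add(){}"));
    return 0;
}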

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2020 Intel Corporation
* Copyright (C) 2020-2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@@ -32,10 +32,10 @@ inline bool isAr(const ArrayRef<const uint8_t> binary) {
return NEO::hasSameMagic(arMagic, binary);
}
template <uint32_t MaxLength>
template <uint32_t maxLength>
inline uint64_t readDecimal(const char *decimalAsString) {
uint64_t ret = 0U;
for (uint32_t i = 0; i < MaxLength; ++i) {
for (uint32_t i = 0; i < maxLength; ++i) {
if (('\0' == decimalAsString[i]) || (' ' == decimalAsString[i])) {
break;
}
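
Note: readDecimal parses a fixed-width, space-padded decimal field from a GNU ar header. The hunk above ends before the digit-accumulation step, so the sketch below assumes the usual ret = ret * 10 + digit form:

#include <cassert>
#include <cstdint>

template <uint32_t maxLength>
inline uint64_t readDecimalSketch(const char *decimalAsString) {
    uint64_t ret = 0U;
    for (uint32_t i = 0; i < maxLength; ++i) {
        if (('\0' == decimalAsString[i]) || (' ' == decimalAsString[i])) {
            break; // ar header fields are space padded
        }
        ret = ret * 10 + (decimalAsString[i] - '0'); // assumed accumulation step
    }
    return ret;
}

int main() {
    // A 10-character ar size field such as "1024      " parses as 1024.
    assert(readDecimalSketch<10>("1024      ") == 1024u);
    return 0;
}
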
@@ -57,9 +57,9 @@ inline bool isStringPadding(char character) {
}
}
template <uint32_t MaxLength>
template <uint32_t maxLength>
inline ConstStringRef readUnpaddedString(const char *paddedString) {
uint32_t unpaddedSize = MaxLength - 1;
uint32_t unpaddedSize = maxLength - 1;
for (; unpaddedSize > 0U; --unpaddedSize) {
if (false == isStringPadding(paddedString[unpaddedSize])) {
break;

View File

@@ -649,44 +649,44 @@ uint64_t GfxCoreHelperHw<GfxFamily>::getPatIndex(CacheRegion cacheRegion, CacheP
return -1;
}
template <typename gfxProduct>
bool GfxCoreHelperHw<gfxProduct>::copyThroughLockedPtrEnabled(const HardwareInfo &hwInfo, const ProductHelper &productHelper) const {
template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::copyThroughLockedPtrEnabled(const HardwareInfo &hwInfo, const ProductHelper &productHelper) const {
if (debugManager.flags.ExperimentalCopyThroughLock.get() != -1) {
return debugManager.flags.ExperimentalCopyThroughLock.get() == 1;
}
return false;
}
template <typename gfxProduct>
uint32_t GfxCoreHelperHw<gfxProduct>::getAmountOfAllocationsToFill() const {
template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::getAmountOfAllocationsToFill() const {
if (debugManager.flags.SetAmountOfReusableAllocations.get() != -1) {
return debugManager.flags.SetAmountOfReusableAllocations.get();
}
return 0u;
}
template <typename gfxProduct>
bool GfxCoreHelperHw<gfxProduct>::isChipsetUniqueUUIDSupported() const {
template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isChipsetUniqueUUIDSupported() const {
return false;
}
template <typename gfxProduct>
bool GfxCoreHelperHw<gfxProduct>::largeGrfModeSupported() const {
template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::largeGrfModeSupported() const {
return false;
}
template <typename gfxProduct>
bool GfxCoreHelperHw<gfxProduct>::isTimestampShiftRequired() const {
template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isTimestampShiftRequired() const {
return true;
}
template <typename gfxProduct>
bool GfxCoreHelperHw<gfxProduct>::isRelaxedOrderingSupported() const {
template <typename GfxFamily>
bool GfxCoreHelperHw<GfxFamily>::isRelaxedOrderingSupported() const {
return false;
}
template <typename gfxProduct>
uint32_t GfxCoreHelperHw<gfxProduct>::overrideMaxWorkGroupSize(uint32_t maxWG) const {
template <typename GfxFamily>
uint32_t GfxCoreHelperHw<GfxFamily>::overrideMaxWorkGroupSize(uint32_t maxWG) const {
return std::min(maxWG, 1024u);
}

View File

@@ -14,16 +14,16 @@ namespace NEO {
namespace HwWalkOrderHelper {
// make sure table below matches Hardware Spec
inline constexpr uint32_t walkOrderPossibilties = 6u;
inline constexpr uint8_t X = 0;
inline constexpr uint8_t Y = 1;
inline constexpr uint8_t Z = 2;
inline constexpr std::array<uint8_t, 3> linearWalk = {X, Y, Z};
inline constexpr std::array<uint8_t, 3> yOrderWalk = {Y, X, Z};
inline constexpr uint8_t x = 0;
inline constexpr uint8_t y = 1;
inline constexpr uint8_t z = 2;
inline constexpr std::array<uint8_t, 3> linearWalk = {x, y, z};
inline constexpr std::array<uint8_t, 3> yOrderWalk = {y, x, z};
inline constexpr std::array<uint8_t, 3> compatibleDimensionOrders[walkOrderPossibilties] = {linearWalk, // 0 1 2
{X, Z, Y}, // 0 2 1
{x, z, y}, // 0 2 1
yOrderWalk, // 1 0 2
{Z, X, Y}, // 1 2 0
{Y, Z, X}, // 2 0 1
{Z, Y, X}}; // 2 1 0
{z, x, y}, // 1 2 0
{y, z, x}, // 2 0 1
{z, y, x}}; // 2 1 0
} // namespace HwWalkOrderHelper
} // namespace NEO
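
Note: the table lists the six permutations of the x/y/z dimension indices. A tiny sketch reading one entry as the order in which dimensions are walked; this interpretation is an assumption, since the consuming walker code is not part of this diff:

#include <array>
#include <cstdint>
#include <cstdio>

inline constexpr uint8_t x = 0;
inline constexpr uint8_t y = 1;
inline constexpr uint8_t z = 2;
inline constexpr std::array<uint8_t, 3> yOrderWalk = {y, x, z};

int main() {
    const char *names[] = {"x", "y", "z"};
    // yOrderWalk lists dimension indices in walk order: y first, then x, then z.
    for (auto dimIndex : yOrderWalk) {
        std::printf("%s ", names[dimIndex]);
    }
    std::printf("\n"); // prints: y x z
    return 0;
}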

View File

@@ -44,13 +44,13 @@ struct StorageType {
template <uint8_t numBits>
using StorageTypeT = typename StorageType<numBits>::Type;
template <uint8_t IntegerBits, uint8_t FractionalBits, uint8_t TotalBits = IntegerBits + FractionalBits>
template <uint8_t integerBits, uint8_t fractionalBits, uint8_t totalBits = integerBits + fractionalBits>
struct UnsignedFixedPointValue {
UnsignedFixedPointValue(float v) {
fromFloatingPoint(v);
}
StorageTypeT<TotalBits> &getRawAccess() {
StorageTypeT<totalBits> &getRawAccess() {
return storage;
}
@@ -66,7 +66,7 @@ struct UnsignedFixedPointValue {
template <typename FloatingType>
static constexpr FloatingType getMaxRepresentableFloatingPointValue() {
return static_cast<FloatingType>(
static_cast<FloatingType>(maxNBitValue(IntegerBits)) + (static_cast<FloatingType>(maxNBitValue(FractionalBits)) / (1U << FractionalBits)));
static_cast<FloatingType>(maxNBitValue(integerBits)) + (static_cast<FloatingType>(maxNBitValue(fractionalBits)) / (1U << fractionalBits)));
}
template <typename FloatingType>
@@ -77,15 +77,15 @@ struct UnsignedFixedPointValue {
val = (val > maxFloatVal) ? maxFloatVal : val;
// scale to fixed point representation
this->storage = static_cast<StorageTypeT<TotalBits>>(val * (1U << FractionalBits));
this->storage = static_cast<StorageTypeT<totalBits>>(val * (1U << fractionalBits));
}
template <typename FloatingType>
FloatingType asFloatPointType() {
return static_cast<FloatingType>(storage) / (1U << FractionalBits);
return static_cast<FloatingType>(storage) / (1U << fractionalBits);
}
StorageTypeT<TotalBits> storage = 0;
StorageTypeT<totalBits> storage = 0;
};
using FixedU4D8 = UnsignedFixedPointValue<4, 8>;
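
Note: FixedU4D8 packs 4 integer bits and 8 fractional bits, so encoding scales by 2^8 and the largest representable value is 15 + 255/256. A worked example of that arithmetic; the uint16_t storage type is an assumption, since StorageType<12> is not shown in this diff:

#include <cassert>
#include <cstdint>

int main() {
    constexpr uint32_t integerBits = 4;
    constexpr uint32_t fractionalBits = 8;

    // Clamp limit: 15 + 255/256 = 15.99609375
    constexpr float maxVal = float((1u << integerBits) - 1) +
                             float((1u << fractionalBits) - 1) / (1u << fractionalBits);
    assert(maxVal > 15.99f && maxVal < 16.0f);

    // Encoding 1.5 scales by 2^fractionalBits: 1.5 * 256 = 384 (0x180).
    uint16_t storage = static_cast<uint16_t>(1.5f * (1u << fractionalBits));
    assert(storage == 384);

    // Decoding divides by the same scale.
    float back = static_cast<float>(storage) / (1u << fractionalBits);
    assert(back == 1.5f);
    return 0;
}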