diff --git a/level_zero/core/source/fabric/linux/fabric_device_iaf.cpp b/level_zero/core/source/fabric/linux/fabric_device_iaf.cpp index 5d706ea478..00657a54bd 100644 --- a/level_zero/core/source/fabric/linux/fabric_device_iaf.cpp +++ b/level_zero/core/source/fabric/linux/fabric_device_iaf.cpp @@ -24,7 +24,7 @@ FabricDeviceIaf::FabricDeviceIaf(Device *device) : device(device) { DeviceImp *deviceImp = static_cast(device); if (deviceImp->numSubDevices == 0) { - //Add one sub-device + // Add one sub-device subDeviceIafs.push_back(std::make_unique(device)); } else { for (const auto &subDevice : deviceImp->subDevices) { diff --git a/level_zero/core/source/gen12lp/rkl/cmdqueue_rkl.cpp b/level_zero/core/source/gen12lp/rkl/cmdqueue_rkl.cpp index 2747ab0fb2..c26c813df8 100644 --- a/level_zero/core/source/gen12lp/rkl/cmdqueue_rkl.cpp +++ b/level_zero/core/source/gen12lp/rkl/cmdqueue_rkl.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -11,7 +11,6 @@ #include "level_zero/core/source/cmdqueue/cmdqueue_hw_base.inl" #include "cmdqueue_extended.inl" -//#include "igfxfmid.h" namespace L0 { template struct CommandQueueHw; diff --git a/level_zero/core/test/black_box_tests/zello_events.cpp b/level_zero/core/test/black_box_tests/zello_events.cpp index 29ba6988de..a9a02d683b 100644 --- a/level_zero/core/test/black_box_tests/zello_events.cpp +++ b/level_zero/core/test/black_box_tests/zello_events.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -61,7 +61,7 @@ bool testEventsDeviceSignalDeviceWait(ze_context_handle_t &context, ze_device_ha ZE_EVENT_SCOPE_FLAG_HOST, (ze_event_scope_flag_t)0); - //Initialize memory + // Initialize memory uint8_t dstValue = 0; uint8_t srcValue = 55; SUCCESS_OR_TERMINATE(zeCommandListAppendMemoryFill(cmdList, dstBuffer, reinterpret_cast(&dstValue), diff --git a/level_zero/core/test/black_box_tests/zello_fence.cpp b/level_zero/core/test/black_box_tests/zello_fence.cpp index 5f228e9060..aeec97b3cd 100644 --- a/level_zero/core/test/black_box_tests/zello_fence.cpp +++ b/level_zero/core/test/black_box_tests/zello_fence.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -166,7 +166,7 @@ bool testFence(ze_context_handle_t &context, ze_device_handle_t &device) { if (verbose) std::cout << "zeFenceHostSynchronize success" << std::endl; - //Tear down + // Tear down SUCCESS_OR_TERMINATE(zeFenceReset(fence)); SUCCESS_OR_TERMINATE(zeCommandListReset(cmdList)); SUCCESS_OR_TERMINATE(zeMemFree(context, dstBuffer)); diff --git a/level_zero/core/test/black_box_tests/zello_host_pointer.cpp b/level_zero/core/test/black_box_tests/zello_host_pointer.cpp index 587895d515..1f6c35e837 100644 --- a/level_zero/core/test/black_box_tests/zello_host_pointer.cpp +++ b/level_zero/core/test/black_box_tests/zello_host_pointer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -42,7 +42,7 @@ void executeGpuKernelAndValidate(ze_driver_handle_t &driverHandle, ze_context_ha pFnzexDriverGetHostPointerBaseAddress zexDriverGetHostPointerBaseAddress = nullptr; SUCCESS_OR_TERMINATE(zeDriverGetExtensionFunctionAddress(driverHandle, "zexDriverGetHostPointerBaseAddress", reinterpret_cast(&zexDriverGetHostPointerBaseAddress))); - 
//Import memory + // Import memory SUCCESS_OR_TERMINATE(zexDriverImportExternalPointer(driverHandle, srcBuffer, allocSize)); SUCCESS_OR_TERMINATE(zexDriverImportExternalPointer(driverHandle, dstBuffer, allocSize)); diff --git a/level_zero/core/test/black_box_tests/zello_immediate.cpp b/level_zero/core/test/black_box_tests/zello_immediate.cpp index a5284e5a1b..a12391055a 100644 --- a/level_zero/core/test/black_box_tests/zello_immediate.cpp +++ b/level_zero/core/test/black_box_tests/zello_immediate.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -256,12 +256,12 @@ int main(int argc, char *argv[]) { bool outputValidationSuccessful = true; if (outputValidationSuccessful || aubMode) { - //Sync mode with Compute queue + // Sync mode with Compute queue std::cout << "Test case: Sync mode compute queue with Kernel launch \n"; executeGpuKernelAndValidate(context, device, true, outputValidationSuccessful); } if (outputValidationSuccessful || aubMode) { - //Async mode with Compute queue + // Async mode with Compute queue std::cout << "\nTest case: Async mode compute queue with Kernel launch \n"; executeGpuKernelAndValidate(context, device, false, outputValidationSuccessful); } @@ -318,12 +318,12 @@ int main(int argc, char *argv[]) { std::cout << "No Copy queue group found. Skipping further test runs\n"; } else { if (outputValidationSuccessful || aubMode) { - //Sync mode with Copy queue + // Sync mode with Copy queue std::cout << "\nTest case: Sync mode copy queue for memory copy\n"; testCopyBetweenHostMemAndDeviceMem(context, copyQueueDev, true, copyQueueGroup, outputValidationSuccessful); } if (outputValidationSuccessful || aubMode) { - //Async mode with Copy queue + // Async mode with Copy queue std::cout << "\nTest case: Async mode copy queue for memory copy\n"; testCopyBetweenHostMemAndDeviceMem(context, copyQueueDev, false, copyQueueGroup, outputValidationSuccessful); } diff --git a/level_zero/core/test/unit_tests/gen9/test_cmdqueue_enqueuecommandlist_gen9.cpp b/level_zero/core/test/unit_tests/gen9/test_cmdqueue_enqueuecommandlist_gen9.cpp index 13b5d6daaf..18eac242fc 100644 --- a/level_zero/core/test/unit_tests/gen9/test_cmdqueue_enqueuecommandlist_gen9.cpp +++ b/level_zero/core/test/unit_tests/gen9/test_cmdqueue_enqueuecommandlist_gen9.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -215,11 +215,11 @@ GEN9TEST_F(CommandQueueExecuteCommandListsGen9, GivenCmdListsWithDifferentPreemp uint32_t data = ((1 << 1) | (1 << 2)) << 16; EXPECT_EQ(data, lriCmd->getDataDword()); - //next should be BB_START to 1st Mid-Thread Cmd List + // next should be BB_START to 1st Mid-Thread Cmd List auto itorBBStart = find(itorLri, cmdList.end()); EXPECT_NE(itorBBStart, cmdList.end()); - //next should be PIPE_CONTROL and LRI switching to thread-group + // next should be PIPE_CONTROL and LRI switching to thread-group auto itorPipeControl = find(itorBBStart, cmdList.end()); EXPECT_NE(itorPipeControl, cmdList.end()); @@ -230,11 +230,11 @@ GEN9TEST_F(CommandQueueExecuteCommandListsGen9, GivenCmdListsWithDifferentPreemp EXPECT_EQ(0x2580u, lriCmd->getRegisterOffset()); data = (1 << 1) | (((1 << 1) | (1 << 2)) << 16); EXPECT_EQ(data, lriCmd->getDataDword()); - //start of thread-group command list + // start of thread-group command list itorBBStart = find(itorLri, cmdList.end()); EXPECT_NE(itorBBStart, cmdList.end()); - //next 
should be PIPE_CONTROL and LRI switching to mid-thread again + // next should be PIPE_CONTROL and LRI switching to mid-thread again itorPipeControl = find(itorBBStart, cmdList.end()); EXPECT_NE(itorPipeControl, cmdList.end()); @@ -245,7 +245,7 @@ GEN9TEST_F(CommandQueueExecuteCommandListsGen9, GivenCmdListsWithDifferentPreemp EXPECT_EQ(0x2580u, lriCmd->getRegisterOffset()); data = ((1 << 1) | (1 << 2)) << 16; EXPECT_EQ(data, lriCmd->getDataDword()); - //start of thread-group command list + // start of thread-group command list itorBBStart = find(itorLri, cmdList.end()); EXPECT_NE(itorBBStart, cmdList.end()); diff --git a/level_zero/core/test/unit_tests/main.cpp b/level_zero/core/test/unit_tests/main.cpp index 2f0410aa7a..fa92f57d19 100644 --- a/level_zero/core/test/unit_tests/main.cpp +++ b/level_zero/core/test/unit_tests/main.cpp @@ -129,16 +129,16 @@ void applyWorkarounds() { mockObj.method(2); } - //intialize rand + // intialize rand srand(static_cast(time(nullptr))); - //Create at least on thread to prevent false memory leaks in tests using threads + // Create at least on thread to prevent false memory leaks in tests using threads std::thread t([&]() { }); tempThreadID = t.get_id(); t.join(); - //Create FileLogger to prevent false memory leaks + // Create FileLogger to prevent false memory leaks { NEO::fileLoggerInstance(); } diff --git a/level_zero/core/test/unit_tests/sources/cmdqueue/test_cmdqueue_2.cpp b/level_zero/core/test/unit_tests/sources/cmdqueue/test_cmdqueue_2.cpp index 56f54665ec..2ce582fec3 100644 --- a/level_zero/core/test/unit_tests/sources/cmdqueue/test_cmdqueue_2.cpp +++ b/level_zero/core/test/unit_tests/sources/cmdqueue/test_cmdqueue_2.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -466,7 +466,7 @@ HWTEST_F(CommandQueueSynchronizeTest, givenSynchronousCommandQueueWhenTagUpdateF auto commandList = std::unique_ptr(whiteboxCast(CommandList::create(productFamily, device, NEO::EngineGroupType::RenderCompute, 0u, returnValue))); ASSERT_NE(nullptr, commandList); - //1st execute provides all preamble commands + // 1st execute provides all preamble commands ze_command_list_handle_t cmdListHandle = commandList->toHandle(); returnValue = commandQueue->executeCommandLists(1, &cmdListHandle, nullptr, false); EXPECT_EQ(ZE_RESULT_SUCCESS, returnValue); diff --git a/level_zero/core/test/unit_tests/sources/fabric/linux/test_fabric_iaf.cpp b/level_zero/core/test/unit_tests/sources/fabric/linux/test_fabric_iaf.cpp index 1eb8f03ad7..a5e7d6de20 100644 --- a/level_zero/core/test/unit_tests/sources/fabric/linux/test_fabric_iaf.cpp +++ b/level_zero/core/test/unit_tests/sources/fabric/linux/test_fabric_iaf.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -374,7 +374,7 @@ TEST_F(FabricIafEdgeFixture, GivenMultipleDevicesAndSubDevicesWhenCreatingEdgesT fabricSubDeviceIaf1->connections = connection01To11; fabricSubDeviceIaf1->guid = 0xAB; - //SubVertices + // SubVertices auto &fabricVertex00 = fabricVertex0->subVertices[0]; auto fabricSubDeviceIaf00 = static_cast(fabricVertex00->pFabricDeviceInterfaces[FabricDeviceInterface::Type::Iaf].get()); fabricSubDeviceIaf00->connections.clear(); @@ -401,7 +401,7 @@ TEST_F(FabricIafEdgeFixture, GivenMultipleDevicesAndSubDevicesWhenCreatingEdgesT fabricSubDeviceIaf1->connections = connection11To01; fabricSubDeviceIaf1->guid = 0xABCD; - //SubVertices + // 
SubVertices auto &fabricVertex00 = fabricVertex1->subVertices[0]; auto fabricSubDeviceIaf00 = static_cast(fabricVertex00->pFabricDeviceInterfaces[FabricDeviceInterface::Type::Iaf].get()); fabricSubDeviceIaf00->connections.clear(); @@ -423,7 +423,7 @@ TEST_F(FabricIafEdgeFixture, GivenMultipleDevicesAndSubDevicesWhenCreatingEdgesT constexpr uint32_t root2root = 1; constexpr uint32_t subDevice2root = 4; // 2 root to 2 sub-devices each - constexpr uint32_t subDevice2SubDevice = 4 + 2; //4 MDFI (considering 4 roots with 2 sub-devices); 2 sub-device to sub-device XeLink + constexpr uint32_t subDevice2SubDevice = 4 + 2; // 4 MDFI (considering 4 roots with 2 sub-devices); 2 sub-device to sub-device XeLink EXPECT_EQ(static_cast(driverHandle->fabricEdges.size()), root2root + subDevice2root + subDevice2SubDevice); diff --git a/level_zero/include/zet_intel_gpu_debug.h b/level_zero/include/zet_intel_gpu_debug.h index 6adaef28d5..979081f892 100644 --- a/level_zero/include/zet_intel_gpu_debug.h +++ b/level_zero/include/zet_intel_gpu_debug.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -21,7 +21,7 @@ extern "C" { #ifndef ZET_INTEL_GPU_DEBUG_MINOR #define ZET_INTEL_GPU_DEBUG_MINOR 0 -#endif //!ZET_INTEL_GPU_DEBUG_MINOR +#endif //! ZET_INTEL_GPU_DEBUG_MINOR #if ZET_INTEL_GPU_DEBUG_MAJOR == 1 /////////////////////////////////////////////////////////////////////////////// diff --git a/level_zero/tools/source/debug/linux/prelim/debug_session.cpp b/level_zero/tools/source/debug/linux/prelim/debug_session.cpp index ca7851a9d1..b1c7150720 100644 --- a/level_zero/tools/source/debug/linux/prelim/debug_session.cpp +++ b/level_zero/tools/source/debug/linux/prelim/debug_session.cpp @@ -1798,7 +1798,7 @@ bool DebugSessionLinux::ackIsaEvents(uint32_t deviceIndex, uint64_t isaVa) { if (isa != connection->isaMap[deviceIndex].end()) { - //zebin modules do not store ackEvents per ISA + // zebin modules do not store ackEvents per ISA UNRECOVERABLE_IF(isa->second->ackEvents.size() > 0 && isa->second->perKernelModule == false); for (auto &event : isa->second->ackEvents) { @@ -1849,7 +1849,7 @@ void DebugSessionLinux::cleanRootSessionAfterDetach(uint32_t deviceIndex) { for (const auto &isa : connection->isaMap[deviceIndex]) { - //zebin modules do not store ackEvents per ISA + // zebin modules do not store ackEvents per ISA UNRECOVERABLE_IF(isa.second->ackEvents.size() > 0 && isa.second->perKernelModule == false); for (auto &event : isa.second->ackEvents) { diff --git a/level_zero/tools/source/metrics/metric_oa_source.cpp b/level_zero/tools/source/metrics/metric_oa_source.cpp index d2126d1308..e233c1af56 100644 --- a/level_zero/tools/source/metrics/metric_oa_source.cpp +++ b/level_zero/tools/source/metrics/metric_oa_source.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -24,8 +24,8 @@ std::unique_ptr OaMetricSourceImp::create(const MetricDeviceC } OaMetricSourceImp::OaMetricSourceImp(const MetricDeviceContext &metricDeviceContext) : metricDeviceContext(metricDeviceContext), - metricEnumeration(std::unique_ptr(new (std::nothrow) MetricEnumeration(*this))), - metricsLibrary(std::unique_ptr(new (std::nothrow) MetricsLibrary(*this))) { + metricEnumeration(std::unique_ptr(new(std::nothrow) MetricEnumeration(*this))), + metricsLibrary(std::unique_ptr(new(std::nothrow) MetricsLibrary(*this))) { } 
OaMetricSourceImp::~OaMetricSourceImp() = default; diff --git a/level_zero/tools/source/sysman/diagnostics/linux/os_diagnostics_imp.cpp b/level_zero/tools/source/sysman/diagnostics/linux/os_diagnostics_imp.cpp index 4827084888..c7007b5142 100644 --- a/level_zero/tools/source/sysman/diagnostics/linux/os_diagnostics_imp.cpp +++ b/level_zero/tools/source/sysman/diagnostics/linux/os_diagnostics_imp.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -18,7 +18,7 @@ namespace L0 { const std::string LinuxDiagnosticsImp::deviceDir("device"); -//the sysfs node will be at /sys/class/drm/card/invalidate_lmem_mmaps +// the sysfs node will be at /sys/class/drm/card/invalidate_lmem_mmaps const std::string LinuxDiagnosticsImp::invalidateLmemFile("invalidate_lmem_mmaps"); // the sysfs node will be at /sys/class/drm/card/quiesce_gpu const std::string LinuxDiagnosticsImp::quiescentGpuFile("quiesce_gpu"); @@ -34,14 +34,14 @@ void OsDiagnostics::getSupportedDiagTestsFromFW(void *pOsSysman, std::vector/quiesce_gpu will signal KMD -//to close and clear all allocations, -//ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE will be sent till the kworker confirms that -//all allocations are closed and GPU is be wedged. +// to close and clear all allocations, +// ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE will be sent till the kworker confirms that +// all allocations are closed and GPU is be wedged. // GPU will only be unwedged after warm/cold reset -//writing 1 to /sys/class/drm/card/invalidate_lmem_mmaps clears +// writing 1 to /sys/class/drm/card/invalidate_lmem_mmaps clears // all memory mappings where LMEMBAR is being referenced are invalidated. -//Also prevents new ones from being created. -//It will invalidate LMEM memory mappings only when sysfs entry quiesce_gpu is set. +// Also prevents new ones from being created. +// It will invalidate LMEM memory mappings only when sysfs entry quiesce_gpu is set. 
ze_result_t LinuxDiagnosticsImp::waitForQuiescentCompletion() { uint32_t count = 0; const int intVal = 1; @@ -60,7 +60,7 @@ ze_result_t LinuxDiagnosticsImp::waitForQuiescentCompletion() { } else { return result; } - } while (count < 10); //limiting to 10 retries as we can endup going into a infinite loop if the cleanup and a process start are out of sync + } while (count < 10); // limiting to 10 retries as we can endup going into a infinite loop if the cleanup and a process start are out of sync result = pSysfsAccess->write(invalidateLmemFile, intVal); if (ZE_RESULT_SUCCESS != result) { return result; diff --git a/level_zero/tools/source/sysman/engine/engine.cpp b/level_zero/tools/source/sysman/engine/engine.cpp index c6d4b20321..f55a7f9aff 100644 --- a/level_zero/tools/source/sysman/engine/engine.cpp +++ b/level_zero/tools/source/sysman/engine/engine.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -33,7 +33,7 @@ void EngineHandleContext::createHandle(zes_engine_group_t engineType, uint32_t e } void EngineHandleContext::init(std::vector &deviceHandles) { - std::set> engineGroupInstance = {}; //set contains pair of engine group and struct containing engine instance and subdeviceId + std::set> engineGroupInstance = {}; // set contains pair of engine group and struct containing engine instance and subdeviceId OsEngine::getNumEngineTypeAndInstances(engineGroupInstance, pOsSysman); for (auto itr = engineGroupInstance.begin(); itr != engineGroupInstance.end(); ++itr) { for (const auto &deviceHandle : deviceHandles) { diff --git a/level_zero/tools/source/sysman/events/linux/os_events_imp.cpp b/level_zero/tools/source/sysman/events/linux/os_events_imp.cpp index 0a20478318..22db6fd80d 100644 --- a/level_zero/tools/source/sysman/events/linux/os_events_imp.cpp +++ b/level_zero/tools/source/sysman/events/linux/os_events_imp.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2021 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -73,7 +73,7 @@ bool LinuxEventsImp::checkIfMemHealthChanged(zes_event_type_flags_t &pEvent) { bool LinuxEventsImp::eventListen(zes_event_type_flags_t &pEvent, uint64_t timeout) { if (registeredEvents & ZES_EVENT_TYPE_FLAG_DEVICE_RESET_REQUIRED) { if (isResetRequired(pEvent)) { - registeredEvents &= ~(ZES_EVENT_TYPE_FLAG_DEVICE_RESET_REQUIRED); //After receiving event unregister it + registeredEvents &= ~(ZES_EVENT_TYPE_FLAG_DEVICE_RESET_REQUIRED); // After receiving event unregister it return true; } } diff --git a/level_zero/tools/source/sysman/fabric_port/linux/fabric_device_access_imp.cpp b/level_zero/tools/source/sysman/fabric_port/linux/fabric_device_access_imp.cpp index 8101b6f623..eddaed01fb 100644 --- a/level_zero/tools/source/sysman/fabric_port/linux/fabric_device_access_imp.cpp +++ b/level_zero/tools/source/sysman/fabric_port/linux/fabric_device_access_imp.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -161,7 +161,7 @@ ze_result_t FabricDeviceAccessNl::getPorts(std::vector &po return result; } - //Update fabricPorts + // Update fabricPorts for (const auto &iafPort : iafPorts) { Port port = {}; readIafPort(port, iafPort); diff --git a/level_zero/tools/source/sysman/firmware/linux/os_firmware_imp.cpp b/level_zero/tools/source/sysman/firmware/linux/os_firmware_imp.cpp index 14a75e80f0..bff4df9414 100644 --- 
a/level_zero/tools/source/sysman/firmware/linux/os_firmware_imp.cpp +++ b/level_zero/tools/source/sysman/firmware/linux/os_firmware_imp.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -45,7 +45,7 @@ void LinuxFirmwareImp::osGetFwProperties(zes_firmware_properties_t *pProperties) if (ZE_RESULT_SUCCESS != getFirmwareVersion(osFwType, pProperties)) { strncpy_s(static_cast(pProperties->version), ZES_STRING_PROPERTY_SIZE, unknown.c_str(), ZES_STRING_PROPERTY_SIZE - 1); } - pProperties->canControl = true; //Assuming that user has permission to flash the firmware + pProperties->canControl = true; // Assuming that user has permission to flash the firmware } ze_result_t LinuxFirmwareImp::osFirmwareFlash(void *pImage, uint32_t size) { diff --git a/level_zero/tools/test/black_box_tests/zello_metrics/zello_metrics.cpp b/level_zero/tools/test/black_box_tests/zello_metrics/zello_metrics.cpp index c954160b9c..72565fe5fe 100644 --- a/level_zero/tools/test/black_box_tests/zello_metrics/zello_metrics.cpp +++ b/level_zero/tools/test/black_box_tests/zello_metrics/zello_metrics.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -142,7 +142,7 @@ bool queryTest() { status &= queryRun(deviceId, -1, testSettings->metricGroupName.get()); } } else { - //Run for specific device + // Run for specific device status &= queryRun(testSettings->deviceId.get(), testSettings->subDeviceId.get(), testSettings->metricGroupName.get()); } @@ -215,7 +215,7 @@ bool streamTest() { status &= streamRun(deviceId, -1, testSettings->metricGroupName.get()); } } else { - //Run for specific device + // Run for specific device status &= streamRun(testSettings->deviceId.get(), testSettings->subDeviceId.get(), testSettings->metricGroupName.get()); } @@ -276,7 +276,7 @@ bool streamMultiMetricDomainTest() { status &= streamMultiMetricDomainRun(deviceId, -1); } } else { - //Run for specific device + // Run for specific device status &= streamMultiMetricDomainRun(testSettings->deviceId.get(), testSettings->subDeviceId.get()); } @@ -427,7 +427,7 @@ bool streamMtCollectionWorkloadDifferentThreads() { status &= streamMt(deviceId, -1, testSettings->metricGroupName.get()); } } else { - //Run for specific device + // Run for specific device status &= streamMt(testSettings->deviceId.get(), testSettings->subDeviceId.get(), testSettings->metricGroupName.get()); } @@ -592,7 +592,7 @@ bool streamMpCollectionWorkloadDifferentProcess() { if (testSettings->deviceId.get() == -1) { status &= streamMp(0, 0, testSettings->metricGroupName.get()); } else { - //Run for specific device + // Run for specific device status &= streamMp(testSettings->deviceId.get(), testSettings->subDeviceId.get(), testSettings->metricGroupName.get()); } @@ -822,7 +822,7 @@ bool queryImmediateCommandListTest() { status &= queryRun(deviceId, -1, testSettings->metricGroupName.get()); } } else { - //Run for specific device + // Run for specific device status &= queryRun(testSettings->deviceId.get(), testSettings->subDeviceId.get(), testSettings->metricGroupName.get()); } @@ -882,7 +882,7 @@ bool collectIndefinitely() { if (testSettings->deviceId.get() == -1) { testSettings->deviceId.set(0); } - //Run for specific device + // Run for specific device status &= collectStart(testSettings->deviceId.get(), testSettings->subDeviceId.get(), testSettings->metricGroupName.get()); return status; diff 
--git a/level_zero/tools/test/black_box_tests/zello_metrics/zello_metrics_collector.cpp b/level_zero/tools/test/black_box_tests/zello_metrics/zello_metrics_collector.cpp index ddd3e0696d..b95ecd3714 100644 --- a/level_zero/tools/test/black_box_tests/zello_metrics/zello_metrics_collector.cpp +++ b/level_zero/tools/test/black_box_tests/zello_metrics/zello_metrics_collector.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -76,7 +76,7 @@ bool SingleMetricStreamerCollector::start() { &streamerProperties, notificationEvent, &metricStreamer)); - //Initial pause + // Initial pause std::this_thread::sleep_for(std::chrono::milliseconds(100)); return true; } diff --git a/level_zero/tools/test/unit_tests/sources/debug/debug_session_thread_tests.cpp b/level_zero/tools/test/unit_tests/sources/debug/debug_session_thread_tests.cpp index aac418897b..6e07a68010 100644 --- a/level_zero/tools/test/unit_tests/sources/debug/debug_session_thread_tests.cpp +++ b/level_zero/tools/test/unit_tests/sources/debug/debug_session_thread_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -571,7 +571,7 @@ TEST(DebugSessionTest, WhenConvertingThreadIDsForDeviceWithSingleSliceThenSubsli auto sessionMock = std::make_unique(zet_debug_config_t{0x1234}, &deviceImp); ASSERT_NE(nullptr, sessionMock); - //fuse off first slice + // fuse off first slice sessionMock->topologyMap[0].sliceIndices.erase(sessionMock->topologyMap[0].sliceIndices.begin()); hwInfo.gtSystemInfo.SliceCount = 1; sessionMock->topologyMap[0].subsliceIndices.push_back(2); @@ -608,7 +608,7 @@ TEST(DebugSessionTest, WhenConvertingThreadIDsForDeviceWithMultipleSlicesThenSub auto sessionMock = std::make_unique(zet_debug_config_t{0x1234}, &deviceImp); ASSERT_NE(nullptr, sessionMock); - //fuse off first slice + // fuse off first slice sessionMock->topologyMap[0].sliceIndices.erase(sessionMock->topologyMap[0].sliceIndices.begin()); hwInfo.gtSystemInfo.SliceCount = 7; diff --git a/level_zero/tools/test/unit_tests/sources/debug/linux/debug_session_fixtures_linux.h b/level_zero/tools/test/unit_tests/sources/debug/linux/debug_session_fixtures_linux.h index 99e27c5617..43a5d3ad17 100644 --- a/level_zero/tools/test/unit_tests/sources/debug/linux/debug_session_fixtures_linux.h +++ b/level_zero/tools/test/unit_tests/sources/debug/linux/debug_session_fixtures_linux.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -174,7 +174,7 @@ struct MockIoctlHandler : public L0::DebugSessionLinux::IoctlHandler { if (*static_cast(addr) != static_cast(0xaa)) { memoryModifiedInMunmap = true; } - delete[](char *) addr; + delete[] (char *)addr; } else { return -1; } diff --git a/level_zero/tools/test/unit_tests/sources/debug/linux/test_debug_api_linux.cpp b/level_zero/tools/test/unit_tests/sources/debug/linux/test_debug_api_linux.cpp index 205d6953c0..dc94698c95 100644 --- a/level_zero/tools/test/unit_tests/sources/debug/linux/test_debug_api_linux.cpp +++ b/level_zero/tools/test/unit_tests/sources/debug/linux/test_debug_api_linux.cpp @@ -1278,7 +1278,7 @@ TEST_F(DebugApiLinuxTest, GivenDebuggerMmapMemoryAccessFalseWhenCallingReadGpuMe session->clientHandle = MockDebugSessionLinux::mockClientHandle; NEO::SysCalls::closeFuncCalled = 0; - handler->preadRetVal = bufferSize; //16 bytes to read + 
handler->preadRetVal = bufferSize; // 16 bytes to read char output[bufferSize] = {}; auto retVal = session->readGpuMemory(7, output, bufferSize, 0x23000); EXPECT_EQ(0, retVal); diff --git a/level_zero/tools/test/unit_tests/sources/debug/linux/tile_debug_session_linux_tests.cpp b/level_zero/tools/test/unit_tests/sources/debug/linux/tile_debug_session_linux_tests.cpp index 4cf86a07b7..d3aa247fa4 100644 --- a/level_zero/tools/test/unit_tests/sources/debug/linux/tile_debug_session_linux_tests.cpp +++ b/level_zero/tools/test/unit_tests/sources/debug/linux/tile_debug_session_linux_tests.cpp @@ -260,7 +260,7 @@ TEST_F(TileAttachTest, givenTileDeviceWhenCallingDebugAttachAndDetachManyTimesTh zet_debug_config_t config = {}; config.pid = 0x1234; zet_debug_session_handle_t debugSession0 = nullptr; - rootSession->tileSessions[1].second = true; //prevent destroying root session + rootSession->tileSessions[1].second = true; // prevent destroying root session auto result = zetDebugAttach(neoDevice->getSubDevice(0)->getSpecializedDevice()->toHandle(), &config, &debugSession0); EXPECT_EQ(ZE_RESULT_SUCCESS, result); diff --git a/level_zero/tools/test/unit_tests/sources/metrics/test_metric_ip_sampling_streamer.cpp b/level_zero/tools/test/unit_tests/sources/metrics/test_metric_ip_sampling_streamer.cpp index b7e9e6cfe5..26030e73b3 100644 --- a/level_zero/tools/test/unit_tests/sources/metrics/test_metric_ip_sampling_streamer.cpp +++ b/level_zero/tools/test/unit_tests/sources/metrics/test_metric_ip_sampling_streamer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -426,7 +426,7 @@ TEST_F(MetricIpSamplingStreamerTest, GivenAllInputsAreCorrectWhenReadDataIsCalle EXPECT_EQ(zetMetricStreamerOpen(context->toHandle(), device, metricGroupHandle, &streamerDesc, eventHandle, &streamerHandle), ZE_RESULT_SUCCESS); EXPECT_NE(streamerHandle, nullptr); - //Setup data for both subdevices + // Setup data for both subdevices osInterfaceVector[1]->isfillDataEnabled = true; osInterfaceVector[1]->fillData = 2; osInterfaceVector[1]->fillDataSize = 64 * 20; @@ -469,7 +469,7 @@ TEST_F(MetricIpSamplingStreamerTest, GivenNotEnoughMemoryWhileReadingWhenReadDat EXPECT_EQ(zetMetricStreamerOpen(context->toHandle(), device, metricGroupHandle, &streamerDesc, eventHandle, &streamerHandle), ZE_RESULT_SUCCESS); EXPECT_NE(streamerHandle, nullptr); - //Setup data for both subdevices + // Setup data for both subdevices osInterfaceVector[1]->isfillDataEnabled = true; osInterfaceVector[1]->fillData = 2; osInterfaceVector[1]->fillDataSize = osInterfaceVector[1]->getUnitReportSize() * 20; @@ -483,7 +483,7 @@ TEST_F(MetricIpSamplingStreamerTest, GivenNotEnoughMemoryWhileReadingWhenReadDat EXPECT_NE(rawSize, 0u); std::vector rawData = {}; - //Setup memory for only the first sub-device's read to succeed + // Setup memory for only the first sub-device's read to succeed rawSize = osInterfaceVector[1]->fillDataSize; rawData.resize(rawSize); EXPECT_EQ(zetMetricStreamerReadData(streamerHandle, 75, &rawSize, rawData.data()), ZE_RESULT_SUCCESS); diff --git a/level_zero/tools/test/unit_tests/sources/sysman/frequency/linux/test_zes_frequency.cpp b/level_zero/tools/test/unit_tests/sources/sysman/frequency/linux/test_zes_frequency.cpp index 0229bf478b..abc079fe7b 100644 --- a/level_zero/tools/test/unit_tests/sources/sysman/frequency/linux/test_zes_frequency.cpp +++ b/level_zero/tools/test/unit_tests/sources/sysman/frequency/linux/test_zes_frequency.cpp @@ -1,5 +1,5 @@ 
/* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -930,7 +930,7 @@ TEST_F(FreqMultiDeviceFixture, GivenAffinityMaskIsSetWhenCallingFrequencyPropert EXPECT_EQ(nullptr, properties.pNext); EXPECT_EQ(ZES_FREQ_DOMAIN_GPU, properties.type); EXPECT_TRUE(properties.onSubdevice); - EXPECT_EQ(1u, properties.subdeviceId); //Affinity mask 0.1 is set which means only subdevice 1 is exposed + EXPECT_EQ(1u, properties.subdeviceId); // Affinity mask 0.1 is set which means only subdevice 1 is exposed } } diff --git a/level_zero/tools/test/unit_tests/sources/sysman/frequency/linux/test_zes_frequency_prelim.cpp b/level_zero/tools/test/unit_tests/sources/sysman/frequency/linux/test_zes_frequency_prelim.cpp index ef3f0d0605..d64cd52991 100644 --- a/level_zero/tools/test/unit_tests/sources/sysman/frequency/linux/test_zes_frequency_prelim.cpp +++ b/level_zero/tools/test/unit_tests/sources/sysman/frequency/linux/test_zes_frequency_prelim.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -871,7 +871,7 @@ TEST_F(FreqMultiDeviceFixture, GivenAffinityMaskIsSetWhenCallingFrequencyPropert EXPECT_EQ(nullptr, properties.pNext); EXPECT_EQ(ZES_FREQ_DOMAIN_GPU, properties.type); EXPECT_TRUE(properties.onSubdevice); - EXPECT_EQ(1u, properties.subdeviceId); //Affinity mask 0.1 is set which means only subdevice 1 is exposed + EXPECT_EQ(1u, properties.subdeviceId); // Affinity mask 0.1 is set which means only subdevice 1 is exposed } } diff --git a/level_zero/tools/test/unit_tests/sources/sysman/linux/pmu/test_pmu.cpp b/level_zero/tools/test/unit_tests/sources/sysman/linux/pmu/test_pmu.cpp index 5a44cc0e57..8baa1d5325 100644 --- a/level_zero/tools/test/unit_tests/sources/sysman/linux/pmu/test_pmu.cpp +++ b/level_zero/tools/test/unit_tests/sources/sysman/linux/pmu/test_pmu.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -121,7 +121,7 @@ TEST_F(SysmanPmuFixture, GivenValidPmuHandleWhenCallingPmuInterfaceOpenAndPerfEv TEST_F(SysmanPmuFixture, GivenValidPmuHandleWhenAndDomainErrorOccursThenDomainErrorIsReturnedBygetErrorNoFunction) { MockPmuInterfaceImpForSysman *pmuInterface = new MockPmuInterfaceImpForSysman(pLinuxSysmanImp); - log(-1.0); //Domain error injected + log(-1.0); // Domain error injected EXPECT_EQ(EDOM, pmuInterface->getErrorNo()); delete pmuInterface; } diff --git a/level_zero/tools/test/unit_tests/sources/sysman/linux/test_sysman.cpp b/level_zero/tools/test/unit_tests/sources/sysman/linux/test_sysman.cpp index 71e180cf16..c0184feccc 100644 --- a/level_zero/tools/test/unit_tests/sources/sysman/linux/test_sysman.cpp +++ b/level_zero/tools/test/unit_tests/sources/sysman/linux/test_sysman.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -163,7 +163,7 @@ TEST_F(SysmanDeviceFixture, GivenSetValidDrmHandleForDeviceWhenDoingOsSysmanDevi TEST_F(SysmanDeviceFixture, GivenCreateFsAccessHandleWhenCallinggetFsAccessThenCreatedFsAccessHandleWillBeRetrieved) { if (pLinuxSysmanImp->pFsAccess != nullptr) { - //delete previously allocated pFsAccess + // delete previously allocated pFsAccess delete pLinuxSysmanImp->pFsAccess; pLinuxSysmanImp->pFsAccess = nullptr; } @@ -260,7 +260,7 @@ TEST_F(SysmanDeviceFixture, 
GivenInvalidPathnameWhenCallingFsAccessExistsThenErr TEST_F(SysmanDeviceFixture, GivenCreateSysfsAccessHandleWhenCallinggetSysfsAccessThenCreatedSysfsAccessHandleHandleWillBeRetrieved) { if (pLinuxSysmanImp->pSysfsAccess != nullptr) { - //delete previously allocated pSysfsAccess + // delete previously allocated pSysfsAccess delete pLinuxSysmanImp->pSysfsAccess; pLinuxSysmanImp->pSysfsAccess = nullptr; } @@ -270,7 +270,7 @@ TEST_F(SysmanDeviceFixture, GivenCreateSysfsAccessHandleWhenCallinggetSysfsAcces TEST_F(SysmanDeviceFixture, GivenCreateProcfsAccessHandleWhenCallinggetProcfsAccessThenCreatedProcfsAccessHandleWillBeRetrieved) { if (pLinuxSysmanImp->pProcfsAccess != nullptr) { - //delete previously allocated pProcfsAccess + // delete previously allocated pProcfsAccess delete pLinuxSysmanImp->pProcfsAccess; pLinuxSysmanImp->pProcfsAccess = nullptr; } @@ -296,7 +296,7 @@ TEST_F(SysmanDeviceFixture, GivenValidDeviceHandleThenSameHandleIsRetrievedFromO TEST_F(SysmanDeviceFixture, GivenPmuInterfaceHandleWhenCallinggetPmuInterfaceThenCreatedPmuInterfaceHandleWillBeRetrieved) { if (pLinuxSysmanImp->pPmuInterface != nullptr) { - //delete previously allocated pPmuInterface + // delete previously allocated pPmuInterface delete pLinuxSysmanImp->pPmuInterface; pLinuxSysmanImp->pPmuInterface = nullptr; } diff --git a/level_zero/tools/test/unit_tests/sources/sysman/pci/windows/mock_pci.h b/level_zero/tools/test/unit_tests/sources/sysman/pci/windows/mock_pci.h index ea1bbbe278..d1a08c0e8e 100644 --- a/level_zero/tools/test/unit_tests/sources/sysman/pci/windows/mock_pci.h +++ b/level_zero/tools/test/unit_tests/sources/sysman/pci/windows/mock_pci.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -23,7 +23,7 @@ class PciKmdSysManager : public Mock {}; template <> struct Mock : public PciKmdSysManager { - //PciCurrentDevice, PciParentDevice, PciRootPort + // PciCurrentDevice, PciParentDevice, PciRootPort uint32_t mockDomain[3] = {0, 0, 0}; uint32_t mockBus[3] = {0, 0, 3}; uint32_t mockDevice[3] = {2, 0, 0}; diff --git a/level_zero/tools/test/unit_tests/sources/sysman/ras/linux/test_zes_ras_prelim.cpp b/level_zero/tools/test/unit_tests/sources/sysman/ras/linux/test_zes_ras_prelim.cpp index 69dbeebc07..76fbc87e94 100644 --- a/level_zero/tools/test/unit_tests/sources/sysman/ras/linux/test_zes_ras_prelim.cpp +++ b/level_zero/tools/test/unit_tests/sources/sysman/ras/linux/test_zes_ras_prelim.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -742,7 +742,7 @@ TEST_F(SysmanRasAffinityMaskFixture, GivenAffinityMaskIsSetWhenCallingRasPropert EXPECT_EQ(ZE_RESULT_SUCCESS, zesRasGetProperties(handle, &properties)); EXPECT_EQ(properties.pNext, nullptr); EXPECT_EQ(properties.onSubdevice, true); - EXPECT_EQ(properties.subdeviceId, 1u); //Affinity mask 0.1 is set which means only subdevice 1 is exposed + EXPECT_EQ(properties.subdeviceId, 1u); // Affinity mask 0.1 is set which means only subdevice 1 is exposed if (handleIndex == 0u) { EXPECT_EQ(properties.type, ZES_RAS_ERROR_TYPE_CORRECTABLE); diff --git a/level_zero/tools/test/unit_tests/sources/sysman/scheduler/linux/mock_sysfs_scheduler_prelim.h b/level_zero/tools/test/unit_tests/sources/sysman/scheduler/linux/mock_sysfs_scheduler_prelim.h index 0ec28183c6..aa8d2e1b8d 100644 --- a/level_zero/tools/test/unit_tests/sources/sysman/scheduler/linux/mock_sysfs_scheduler_prelim.h 
+++ b/level_zero/tools/test/unit_tests/sources/sysman/scheduler/linux/mock_sysfs_scheduler_prelim.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -245,7 +245,7 @@ class MockSchedulerProcfsAccess : public ProcfsAccess { ADDMETHOD_NOBASE_VOIDRETURN(kill, (const ::pid_t pid)); }; -//class SchedulerSysfsAccess : public SysfsAccess {}; +// class SchedulerSysfsAccess : public SysfsAccess {}; typedef struct SchedulerConfigValues { uint64_t defaultVal; diff --git a/level_zero/tools/test/unit_tests/sources/sysman/standby/linux/test_zes_sysman_standby.cpp b/level_zero/tools/test/unit_tests/sources/sysman/standby/linux/test_zes_sysman_standby.cpp index 058ebd8966..d9541b86a8 100644 --- a/level_zero/tools/test/unit_tests/sources/sysman/standby/linux/test_zes_sysman_standby.cpp +++ b/level_zero/tools/test/unit_tests/sources/sysman/standby/linux/test_zes_sysman_standby.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -416,7 +416,7 @@ TEST_F(StandbyAffinityMaskFixture, GivenAffinityMaskIsSetWhenCallingStandbyPrope EXPECT_EQ(nullptr, properties.pNext); EXPECT_EQ(ZES_STANDBY_TYPE_GLOBAL, properties.type); EXPECT_TRUE(properties.onSubdevice); - EXPECT_EQ(1u, properties.subdeviceId); //Affinity mask 0.1 is set which means only subdevice 1 is exposed + EXPECT_EQ(1u, properties.subdeviceId); // Affinity mask 0.1 is set which means only subdevice 1 is exposed } } diff --git a/manifests/manifest.yml b/manifests/manifest.yml index 72016f76e0..69e8a0f38c 100644 --- a/manifests/manifest.yml +++ b/manifests/manifest.yml @@ -27,7 +27,7 @@ components: branch: master dest_dir: infra fetch_tags: true - revision: v4597 + revision: v4598 type: git internal: branch: master diff --git a/opencl/extensions/public/cl_ext_private.h b/opencl/extensions/public/cl_ext_private.h index a509e0317f..3f99f4b06e 100644 --- a/opencl/extensions/public/cl_ext_private.h +++ b/opencl/extensions/public/cl_ext_private.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -84,12 +84,12 @@ using cl_unified_shared_memory_type_intel = cl_uint; #define CL_MEM_ALLOCATION_HANDLE_INTEL 0x10050 #define CL_MEM_USES_COMPRESSION_INTEL 0x10051 -//Used with createBuffer +// Used with createBuffer #define CL_MEM_ALLOW_UNRESTRICTED_SIZE_INTEL (1 << 23) /****************************** -* UNIFIED MEMORY * -*******************************/ + * UNIFIED MEMORY * + *******************************/ #if !defined(cl_intel_unified_shared_memory) @@ -145,8 +145,8 @@ using cl_unified_shared_memory_type_intel = cl_uint; #define CL_COMMAND_MEMSET_INTEL 0x4204 /****************************** -* THREAD ARBITRATION POLICY * -*******************************/ + * THREAD ARBITRATION POLICY * + *******************************/ /* cl_device_info */ #define CL_DEVICE_SUPPORTED_THREAD_ARBITRATION_POLICY_INTEL 0x4208 @@ -166,8 +166,8 @@ using cl_unified_shared_memory_type_intel = cl_uint; #define CL_KERNEL_EXEC_INFO_THREAD_ARBITRATION_POLICY_STALL_BASED_ROUND_ROBIN_INTEL 0x10026 /****************************** -* SLICE COUNT SELECTING * -*******************************/ + * SLICE COUNT SELECTING * + *******************************/ /* cl_device_info */ #define CL_DEVICE_SLICE_COUNT_INTEL 0x10020 @@ -176,8 +176,8 @@ using cl_unified_shared_memory_type_intel = cl_uint; #define 
CL_QUEUE_SLICE_COUNT_INTEL 0x10021 /****************************** -* QUEUE FAMILY SELECTING * -*******************************/ + * QUEUE FAMILY SELECTING * + *******************************/ #if !defined(cl_intel_command_queue_families) @@ -220,8 +220,8 @@ typedef struct _cl_queue_family_properties_intel { #endif /****************************** -* DEVICE ATTRIBUTE QUERY * -*******************************/ + * DEVICE ATTRIBUTE QUERY * + *******************************/ #if !defined(cl_intel_device_attribute_query) @@ -251,7 +251,7 @@ typedef cl_uint cl_resource_barrier_type; #define CL_RESOURCE_BARRIER_TYPE_DISCARD 0x3 // DISCARD typedef cl_uint cl_resource_memory_scope; -#define CL_MEMORY_SCOPE_DEVICE 0x0 // INCLUDES CROSS-TILE +#define CL_MEMORY_SCOPE_DEVICE 0x0 // INCLUDES CROSS-TILE #define CL_MEMORY_SCOPE_ALL_SVM_DEVICES 0x1 // CL_MEMORY_SCOPE_DEVICE + CROSS-DEVICE #pragma pack(push, 1) @@ -284,8 +284,8 @@ typedef struct _cl_device_pci_bus_info_khr { #endif /************************************************ -* cl_intel_mem_compression_hints extension * -*************************************************/ + * cl_intel_mem_compression_hints extension * + *************************************************/ #define CL_MEM_COMPRESSED_HINT_INTEL (1u << 21) #define CL_MEM_UNCOMPRESSED_HINT_INTEL (1u << 22) @@ -341,4 +341,4 @@ typedef cl_bitfield cl_command_queue_mdapi_properties_intel; // cl_intel_variable_eu_thread_count #define CL_DEVICE_EU_THREAD_COUNTS_INTEL 0x1000A // placeholder -#define CL_KERNEL_EU_THREAD_COUNT_INTEL 0x1000B // placeholder \ No newline at end of file +#define CL_KERNEL_EU_THREAD_COUNT_INTEL 0x1000B // placeholder \ No newline at end of file diff --git a/opencl/source/api/dispatch.cpp b/opencl/source/api/dispatch.cpp index dd2498788c..3e9c6cca19 100644 --- a/opencl/source/api/dispatch.cpp +++ b/opencl/source/api/dispatch.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -108,9 +108,9 @@ SDispatchTable icdGlobalDispatchTable = clEnqueueCopyBufferRect, /* cl_ext_device_fission */ - nullptr, //clCreateSubDevicesEXT, - nullptr, //clRetainDeviceEXT, - nullptr, //clReleaseDeviceEXT, + nullptr, // clCreateSubDevicesEXT, + nullptr, // clRetainDeviceEXT, + nullptr, // clReleaseDeviceEXT, /* cl_khr_gl_event */ nullptr, @@ -148,12 +148,12 @@ SDispatchTable icdGlobalDispatchTable = nullptr, // clEnqueueReleaseDX9MediaSurfacesKHR, /* cl_khr_egl_image */ - nullptr, //clCreateFromEGLImageKHR, - nullptr, //clEnqueueAcquireEGLObjectsKHR, - nullptr, //clEnqueueReleaseEGLObjectsKHR, + nullptr, // clCreateFromEGLImageKHR, + nullptr, // clEnqueueAcquireEGLObjectsKHR, + nullptr, // clEnqueueReleaseEGLObjectsKHR, /* cl_khr_egl_event */ - nullptr, //clCreateEventFromEGLSyncKHR, + nullptr, // clCreateEventFromEGLSyncKHR, /* OpenCL 2.0 */ clCreateCommandQueueWithProperties, diff --git a/opencl/source/api/dispatch.h b/opencl/source/api/dispatch.h index 18ffbc0fc3..2d6db8b529 100644 --- a/opencl/source/api/dispatch.h +++ b/opencl/source/api/dispatch.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -51,10 +51,10 @@ typedef void(CL_CALLBACK *memobj_logging_fn)(cl_mem, void *); typedef void(CL_CALLBACK *svmfree_logging_fn)(cl_command_queue, cl_uint, void *[], void *); /* -* -* function pointer typedefs -* -*/ + * + * function pointer typedefs + * + */ // Platform APIs typedef CL_API_ENTRY 
cl_int(CL_API_CALL *KHRpfn_clGetPlatformIDs)( @@ -1096,13 +1096,13 @@ typedef void (*INTELpfn_clEnqueueReleaseVA_APIMediaSurfacesINTEL)(); #endif /* -* -* vendor dispatch table structure -* -* note that the types in the structure KHRicdVendorDispatch mirror the function -* names listed in the string table khrIcdVendorDispatchFunctionNames -* -*/ + * + * vendor dispatch table structure + * + * note that the types in the structure KHRicdVendorDispatch mirror the function + * names listed in the string table khrIcdVendorDispatchFunctionNames + * + */ struct SDispatchTable { KHRpfn_clGetPlatformIDs clGetPlatformIDs; diff --git a/opencl/source/built_ins/vme_dispatch_builder.h b/opencl/source/built_ins/vme_dispatch_builder.h index 4a30756fc6..5646cfcc61 100644 --- a/opencl/source/built_ins/vme_dispatch_builder.h +++ b/opencl/source/built_ins/vme_dispatch_builder.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -189,7 +189,7 @@ class VmeBuiltinDispatchInfoBuilder : public BuiltinDispatchInfoBuilder { } if (false == img->isTiledAllocation()) { - //VME only works with tiled images. + // VME only works with tiled images. return CL_OUT_OF_RESOURCES; } } diff --git a/opencl/source/command_queue/cpu_data_transfer_handler.cpp b/opencl/source/command_queue/cpu_data_transfer_handler.cpp index 165fd33286..8430dcb43b 100644 --- a/opencl/source/command_queue/cpu_data_transfer_handler.cpp +++ b/opencl/source/command_queue/cpu_data_transfer_handler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -91,7 +91,7 @@ void *CommandQueue::cpuDataTransferHandler(TransferProperties &transferPropertie if (outEventObj) { outEventObj->setSubmitTimeStamp(); } - //wait for the completness of previous commands + // wait for the completness of previous commands if (transferProperties.cmdType != CL_COMMAND_UNMAP_MEM_OBJECT) { if (!transferProperties.memObj->isMemObjZeroCopy() || transferProperties.blocking) { finish(); diff --git a/opencl/source/command_queue/enqueue_write_image.h b/opencl/source/command_queue/enqueue_write_image.h index 7a30c4115d..263aeda28b 100644 --- a/opencl/source/command_queue/enqueue_write_image.h +++ b/opencl/source/command_queue/enqueue_write_image.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -64,7 +64,7 @@ cl_int CommandQueueHw::enqueueWriteImage( if (mapAllocation) { surfaces[1] = &mapSurface; mapSurface.setGraphicsAllocation(mapAllocation); - //get offset between base cpu ptr of map allocation and dst ptr + // get offset between base cpu ptr of map allocation and dst ptr size_t srcOffset = ptrDiff(srcPtr, mapAllocation->getUnderlyingBuffer()); srcPtr = reinterpret_cast(mapAllocation->getGpuAddress() + srcOffset); } else { diff --git a/opencl/source/command_queue/gpgpu_walker_xehp_and_later.inl b/opencl/source/command_queue/gpgpu_walker_xehp_and_later.inl index ac10d0f771..4c54e49acd 100644 --- a/opencl/source/command_queue/gpgpu_walker_xehp_and_later.inl +++ b/opencl/source/command_queue/gpgpu_walker_xehp_and_later.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -61,9 +61,9 @@ size_t GpgpuWalkerHelper::setGpgpuWalkerThreadData( 
walkerCmd->setThreadGroupIdStartingY(static_cast(startWorkGroups[1])); walkerCmd->setThreadGroupIdStartingZ(static_cast(startWorkGroups[2])); - //1) cross-thread inline data will be put into R1, but if kernel uses local ids, then cross-thread should be put further back - //so whenever local ids are driver or hw generated, reserve space by setting right values for emitLocalIds - //2) Auto-generation of local ids should be possible, when in fact local ids are used + // 1) cross-thread inline data will be put into R1, but if kernel uses local ids, then cross-thread should be put further back + // so whenever local ids are driver or hw generated, reserve space by setting right values for emitLocalIds + // 2) Auto-generation of local ids should be possible, when in fact local ids are used if (!localIdsGenerationByRuntime && kernelUsesLocalIds) { uint32_t emitLocalIdsForDim = 0; if (kernelDescriptor.kernelAttributes.localId[0]) { diff --git a/opencl/source/context/driver_diagnostics.cpp b/opencl/source/context/driver_diagnostics.cpp index 3314a1b795..fa923200a5 100644 --- a/opencl/source/context/driver_diagnostics.cpp +++ b/opencl/source/context/driver_diagnostics.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -20,48 +20,48 @@ bool DriverDiagnostics::validFlags(cl_diagnostics_verbose_level flags) const { } const char *const DriverDiagnostics::hintFormat[] = { - "Performance hint: clCreateBuffer with pointer %p and size %u doesn't meet alignment restrictions. Size should be aligned to %u bytes and pointer should be aligned to %u. Buffer is not sharing the same physical memory with CPU.", //CL_BUFFER_DOESNT_MEET_ALIGNMENT_RESTRICTIONS - "Performance hint: clCreateBuffer with pointer %p and size %u meets alignment restrictions and buffer will share the same physical memory with CPU.", //CL_BUFFER_MEETS_ALIGNMENT_RESTRICTIONS - "Performance hint: clCreateBuffer needs to allocate memory for buffer. For subsequent operations the buffer will share the same physical memory with CPU.", //CL_BUFFER_NEEDS_ALLOCATE_MEMORY - "Performance hint: clCreateImage with pointer %p meets alignment restrictions and image will share the same physical memory with CPU.", //CL_IMAGE_MEETS_ALIGNMENT_RESTRICTIONS - "Performance hint: Driver calls internal clFlush on the command queue each time 1 command is enqueued.", //DRIVER_CALLS_INTERNAL_CL_FLUSH - "Performance hint: Profiling adds overhead on all enqueue commands with events.", //PROFILING_ENABLED - "Performance hint: Profiled kernels will be executed with disabled preemption.", //PROFILING_ENABLED_WITH_DISABLED_PREEMPTION - "Performance hint: Subbuffer created from buffer %p shares the same memory with buffer.", //SUBBUFFER_SHARES_MEMORY - "Performance hint: clSVMAlloc with pointer %p and size %u meets alignment restrictions.", //CL_SVM_ALLOC_MEETS_ALIGNMENT_RESTRICTIONS - "Performance hint: clEnqueueReadBuffer call on a buffer %p with pointer %p will require driver to copy the data.Consider using clEnqueueMapBuffer with buffer that shares the same physical memory with CPU.", //CL_ENQUEUE_READ_BUFFER_REQUIRES_COPY_DATA - "Performance hint: clEnqueueReadBuffer call on a buffer %p with pointer %p will not require any data copy as the buffer shares the same physical memory with CPU.", //CL_ENQUEUE_READ_BUFFER_DOESNT_REQUIRE_COPY_DATA - "Performance hint: Pointer %p and size %u passed to clEnqueueReadBuffer doesn't meet alignment restrictions. 
Size should be aligned to %u bytes and pointer should be aligned to %u. Driver needs to disable L3 caching.", //CL_ENQUEUE_READ_BUFFER_DOESNT_MEET_ALIGNMENT_RESTRICTIONS - "Performance hint: clEnqueueReadBufferRect call on a buffer %p with pointer %p will require driver to copy the data.Consider using clEnqueueMapBuffer with buffer that shares the same physical memory with CPU.", //CL_ENQUEUE_READ_BUFFER_RECT_REQUIRES_COPY_DATA - "Performance hint: clEnqueueReadBufferRect call on a buffer %p with pointer %p will not require any data copy as the buffer shares the same physical memory with CPU.", //CL_ENQUEUE_READ_BUFFER_RECT_DOESNT_REQUIRES_COPY_DATA - "Performance hint: Pointer %p and size %u passed to clEnqueueReadBufferRect doesn't meet alignment restrictions. Size should be aligned to %u bytes and pointer should be aligned to %u. Driver needs to disable L3 caching.", //CL_ENQUEUE_READ_BUFFER_RECT_DOESNT_MEET_ALIGNMENT_RESTRICTIONS - "Performance hint: clEnqueueWriteBuffer call on a buffer %p require driver to copy the data. Consider using clEnqueueMapBuffer with buffer that shares the same physical memory with CPU.", //CL_ENQUEUE_WRITE_BUFFER_REQUIRES_COPY_DATA - "Performance hint: clEnqueueWriteBuffer call on a buffer %p with pointer %p will not require any data copy as the buffer shares the same physical memory with CPU.", //CL_ENQUEUE_WRITE_BUFFER_DOESNT_REQUIRE_COPY_DATA - "Performance hint: clEnqueueWriteBufferRect call on a buffer %p require driver to copy the data. Consider using clEnqueueMapBuffer with buffer that shares the same physical memory with CPU.", //CL_ENQUEUE_WRITE_BUFFER_RECT_REQUIRES_COPY_DATA - "Performance hint: clEnqueueWriteBufferRect call on a buffer %p will not require any data copy as the buffer shares the same physical memory with CPU.", //CL_ENQUEUE_WRITE_BUFFER_RECT_DOESNT_REQUIRE_COPY_DATA - "Performance hint: Pointer %p and size %u passed to clEnqueueReadImage doesn't meet alignment restrictions. Size should be aligned to %u bytes and pointer should be aligned to %u. 
Driver needs to disable L3 caching.", //CL_ENQUEUE_READ_IMAGE_DOESNT_MEET_ALIGNMENT_RESTRICTIONS - "Performance hint: clEnqueueReadImage call on an image %p will not require any data copy as the image shares the same physical memory with CPU.", //CL_ENQUEUE_READ_IMAGE_DOESNT_REQUIRES_COPY_DATA - "Performance hint: clEnqueueWriteImage call on an image %p require driver to copy the data.", //CL_ENQUEUE_WRITE_IMAGE_REQUIRES_COPY_DATA - "Performance hint: clEnqueueWriteImage call on an image %p will not require any data copy as the image shares the same physical memory with CPU.", //CL_ENQUEUE_WRITE_IMAGE_DOESNT_REQUIRES_COPY_DATA - "Performance hint: clEnqueueMapBuffer call on a buffer %p will require driver to make a copy as buffer is not sharing the same physical memory with CPU.", //CL_ENQUEUE_MAP_BUFFER_REQUIRES_COPY_DATA - "Performance hint: clEnqueueMapBuffer call on a buffer %p will not require any data copy as buffer shares the same physical memory with CPU.", //CL_ENQUEUE_MAP_BUFFER_DOESNT_REQUIRE_COPY_DATA - "Performance hint: clEnqueueMapImage call on an image %p will require driver to make a copy, as image is not sharing the same physical memory with CPU.", //CL_ENQUEUE_MAP_IMAGE_REQUIRES_COPY_DATA - "Performance hint: clEnqueueMapImage call on an image %p will not require any data copy as image shares the same physical memory with CPU.", //CL_ENQUEUE_MAP_IMAGE_DOESNT_REQUIRE_COPY_DATA - "Performance hint: clEnqueueUnmapMemObject call with pointer %p will not require any data copy.", //CL_ENQUEUE_UNMAP_MEM_OBJ_DOESNT_REQUIRE_COPY_DATA - "Performance hint: clEnqueueUnmapMemObject call with pointer %p will require driver to copy the data to memory object %p.", //CL_ENQUEUE_UNMAP_MEM_OBJ_REQUIRES_COPY_DATA - "Performance hint: clEnqueueSVMMap call with pointer %p will not require any data copy.", //CL_ENQUEUE_SVM_MAP_DOESNT_REQUIRE_COPY_DATA - "Performance hint: Printf detected in kernel %s, it may cause overhead.", //PRINTF_DETECTED_IN_KERNEL - "Performance hint: Null local workgroup size detected ( kernel name: %s ); following sizes will be used for execution : { %u, %u, %u }.", //NULL_LOCAL_WORKGROUP_SIZE - "Performance hint: Local workgroup sizes { %u, %u, %u } selected for this workload ( kernel name: %s ) may not be optimal, consider using following local workgroup size: { %u, %u, %u }.", //BAD_LOCAL_WORKGROUP_SIZE - "Performance hint: Kernel %s register pressure is too high, spill fills will be generated, additional surface needs to be allocated of size %u, consider simplifying your kernel.", //REGISTER_PRESSURE_TOO_HIGH - "Performance hint: Kernel %s private memory usage is too high and exhausts register space, additional surface needs to be allocated of size %u, consider reducing amount of private memory used, avoid using private memory arrays.", //PRIVATE_MEMORY_USAGE_TOO_HIGH - "Performance hint: Kernel %s submission requires coherency with CPU; this will impact performance.", //KERNEL_REQUIRES_COHERENCY - "Performance hint: Kernel %s requires aux translation on argument [%u] = \"%s\"", //KERNEL_ARGUMENT_AUX_TRANSLATION - "Performance hint: Kernel %s requires aux translation for allocation with pointer %p and size %u", //KERNEL_ALLOCATION_AUX_TRANSLATION - "Performance hint: Buffer %p will use compressed memory.", //BUFFER_IS_COMPRESSED - "Performance hint: Buffer %p will not use compressed memory.", //BUFFER_IS_NOT_COMPRESSED - "Performance hint: Image %p will use compressed memory.", //IMAGE_IS_COMPRESSED - "Performance hint: Image %p will not use compressed memory."}; 
//IMAGE_IS_NOT_COMPRESSED + "Performance hint: clCreateBuffer with pointer %p and size %u doesn't meet alignment restrictions. Size should be aligned to %u bytes and pointer should be aligned to %u. Buffer is not sharing the same physical memory with CPU.", // CL_BUFFER_DOESNT_MEET_ALIGNMENT_RESTRICTIONS + "Performance hint: clCreateBuffer with pointer %p and size %u meets alignment restrictions and buffer will share the same physical memory with CPU.", // CL_BUFFER_MEETS_ALIGNMENT_RESTRICTIONS + "Performance hint: clCreateBuffer needs to allocate memory for buffer. For subsequent operations the buffer will share the same physical memory with CPU.", // CL_BUFFER_NEEDS_ALLOCATE_MEMORY + "Performance hint: clCreateImage with pointer %p meets alignment restrictions and image will share the same physical memory with CPU.", // CL_IMAGE_MEETS_ALIGNMENT_RESTRICTIONS + "Performance hint: Driver calls internal clFlush on the command queue each time 1 command is enqueued.", // DRIVER_CALLS_INTERNAL_CL_FLUSH + "Performance hint: Profiling adds overhead on all enqueue commands with events.", // PROFILING_ENABLED + "Performance hint: Profiled kernels will be executed with disabled preemption.", // PROFILING_ENABLED_WITH_DISABLED_PREEMPTION + "Performance hint: Subbuffer created from buffer %p shares the same memory with buffer.", // SUBBUFFER_SHARES_MEMORY + "Performance hint: clSVMAlloc with pointer %p and size %u meets alignment restrictions.", // CL_SVM_ALLOC_MEETS_ALIGNMENT_RESTRICTIONS + "Performance hint: clEnqueueReadBuffer call on a buffer %p with pointer %p will require driver to copy the data.Consider using clEnqueueMapBuffer with buffer that shares the same physical memory with CPU.", // CL_ENQUEUE_READ_BUFFER_REQUIRES_COPY_DATA + "Performance hint: clEnqueueReadBuffer call on a buffer %p with pointer %p will not require any data copy as the buffer shares the same physical memory with CPU.", // CL_ENQUEUE_READ_BUFFER_DOESNT_REQUIRE_COPY_DATA + "Performance hint: Pointer %p and size %u passed to clEnqueueReadBuffer doesn't meet alignment restrictions. Size should be aligned to %u bytes and pointer should be aligned to %u. Driver needs to disable L3 caching.", // CL_ENQUEUE_READ_BUFFER_DOESNT_MEET_ALIGNMENT_RESTRICTIONS + "Performance hint: clEnqueueReadBufferRect call on a buffer %p with pointer %p will require driver to copy the data.Consider using clEnqueueMapBuffer with buffer that shares the same physical memory with CPU.", // CL_ENQUEUE_READ_BUFFER_RECT_REQUIRES_COPY_DATA + "Performance hint: clEnqueueReadBufferRect call on a buffer %p with pointer %p will not require any data copy as the buffer shares the same physical memory with CPU.", // CL_ENQUEUE_READ_BUFFER_RECT_DOESNT_REQUIRES_COPY_DATA + "Performance hint: Pointer %p and size %u passed to clEnqueueReadBufferRect doesn't meet alignment restrictions. Size should be aligned to %u bytes and pointer should be aligned to %u. Driver needs to disable L3 caching.", // CL_ENQUEUE_READ_BUFFER_RECT_DOESNT_MEET_ALIGNMENT_RESTRICTIONS + "Performance hint: clEnqueueWriteBuffer call on a buffer %p require driver to copy the data. 
Consider using clEnqueueMapBuffer with buffer that shares the same physical memory with CPU.", // CL_ENQUEUE_WRITE_BUFFER_REQUIRES_COPY_DATA + "Performance hint: clEnqueueWriteBuffer call on a buffer %p with pointer %p will not require any data copy as the buffer shares the same physical memory with CPU.", // CL_ENQUEUE_WRITE_BUFFER_DOESNT_REQUIRE_COPY_DATA + "Performance hint: clEnqueueWriteBufferRect call on a buffer %p require driver to copy the data. Consider using clEnqueueMapBuffer with buffer that shares the same physical memory with CPU.", // CL_ENQUEUE_WRITE_BUFFER_RECT_REQUIRES_COPY_DATA + "Performance hint: clEnqueueWriteBufferRect call on a buffer %p will not require any data copy as the buffer shares the same physical memory with CPU.", // CL_ENQUEUE_WRITE_BUFFER_RECT_DOESNT_REQUIRE_COPY_DATA + "Performance hint: Pointer %p and size %u passed to clEnqueueReadImage doesn't meet alignment restrictions. Size should be aligned to %u bytes and pointer should be aligned to %u. Driver needs to disable L3 caching.", // CL_ENQUEUE_READ_IMAGE_DOESNT_MEET_ALIGNMENT_RESTRICTIONS + "Performance hint: clEnqueueReadImage call on an image %p will not require any data copy as the image shares the same physical memory with CPU.", // CL_ENQUEUE_READ_IMAGE_DOESNT_REQUIRES_COPY_DATA + "Performance hint: clEnqueueWriteImage call on an image %p require driver to copy the data.", // CL_ENQUEUE_WRITE_IMAGE_REQUIRES_COPY_DATA + "Performance hint: clEnqueueWriteImage call on an image %p will not require any data copy as the image shares the same physical memory with CPU.", // CL_ENQUEUE_WRITE_IMAGE_DOESNT_REQUIRES_COPY_DATA + "Performance hint: clEnqueueMapBuffer call on a buffer %p will require driver to make a copy as buffer is not sharing the same physical memory with CPU.", // CL_ENQUEUE_MAP_BUFFER_REQUIRES_COPY_DATA + "Performance hint: clEnqueueMapBuffer call on a buffer %p will not require any data copy as buffer shares the same physical memory with CPU.", // CL_ENQUEUE_MAP_BUFFER_DOESNT_REQUIRE_COPY_DATA + "Performance hint: clEnqueueMapImage call on an image %p will require driver to make a copy, as image is not sharing the same physical memory with CPU.", // CL_ENQUEUE_MAP_IMAGE_REQUIRES_COPY_DATA + "Performance hint: clEnqueueMapImage call on an image %p will not require any data copy as image shares the same physical memory with CPU.", // CL_ENQUEUE_MAP_IMAGE_DOESNT_REQUIRE_COPY_DATA + "Performance hint: clEnqueueUnmapMemObject call with pointer %p will not require any data copy.", // CL_ENQUEUE_UNMAP_MEM_OBJ_DOESNT_REQUIRE_COPY_DATA + "Performance hint: clEnqueueUnmapMemObject call with pointer %p will require driver to copy the data to memory object %p.", // CL_ENQUEUE_UNMAP_MEM_OBJ_REQUIRES_COPY_DATA + "Performance hint: clEnqueueSVMMap call with pointer %p will not require any data copy.", // CL_ENQUEUE_SVM_MAP_DOESNT_REQUIRE_COPY_DATA + "Performance hint: Printf detected in kernel %s, it may cause overhead.", // PRINTF_DETECTED_IN_KERNEL + "Performance hint: Null local workgroup size detected ( kernel name: %s ); following sizes will be used for execution : { %u, %u, %u }.", // NULL_LOCAL_WORKGROUP_SIZE + "Performance hint: Local workgroup sizes { %u, %u, %u } selected for this workload ( kernel name: %s ) may not be optimal, consider using following local workgroup size: { %u, %u, %u }.", // BAD_LOCAL_WORKGROUP_SIZE + "Performance hint: Kernel %s register pressure is too high, spill fills will be generated, additional surface needs to be allocated of size %u, consider simplifying your 
kernel.", // REGISTER_PRESSURE_TOO_HIGH + "Performance hint: Kernel %s private memory usage is too high and exhausts register space, additional surface needs to be allocated of size %u, consider reducing amount of private memory used, avoid using private memory arrays.", // PRIVATE_MEMORY_USAGE_TOO_HIGH + "Performance hint: Kernel %s submission requires coherency with CPU; this will impact performance.", // KERNEL_REQUIRES_COHERENCY + "Performance hint: Kernel %s requires aux translation on argument [%u] = \"%s\"", // KERNEL_ARGUMENT_AUX_TRANSLATION + "Performance hint: Kernel %s requires aux translation for allocation with pointer %p and size %u", // KERNEL_ALLOCATION_AUX_TRANSLATION + "Performance hint: Buffer %p will use compressed memory.", // BUFFER_IS_COMPRESSED + "Performance hint: Buffer %p will not use compressed memory.", // BUFFER_IS_NOT_COMPRESSED + "Performance hint: Image %p will use compressed memory.", // IMAGE_IS_COMPRESSED + "Performance hint: Image %p will not use compressed memory."}; // IMAGE_IS_NOT_COMPRESSED PerformanceHints DriverDiagnostics::obtainHintForTransferOperation(cl_command_type commandType, bool transferRequired) { PerformanceHints hint; diff --git a/opencl/source/event/async_events_handler.cpp b/opencl/source/event/async_events_handler.cpp index 374ba05bb7..c4cd375f8b 100644 --- a/opencl/source/event/async_events_handler.cpp +++ b/opencl/source/event/async_events_handler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -29,7 +29,7 @@ AsyncEventsHandler::~AsyncEventsHandler() { void AsyncEventsHandler::registerEvent(Event *event) { std::unique_lock lock(asyncMtx); - //Create on first use + // Create on first use openThread(); event->incRefInternal(); diff --git a/opencl/source/event/event.h b/opencl/source/event/event.h index 7468754286..9b42029f12 100644 --- a/opencl/source/event/event.h +++ b/opencl/source/event/event.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -206,13 +206,13 @@ class Event : public BaseObject<_cl_event>, public IDNode { return submittedCmd != nullptr; } - //commands blocked by user event depencies + // commands blocked by user event dependencies bool isReadyForSubmission(); // adds a callback (execution state change listener) to this event's list of callbacks void addCallback(Callback::ClbFuncT fn, cl_int type, void *data); - //if(blocking==false), will return with WaitStatus::NotReady instead of blocking while waiting for completion + // if(blocking==false), will return with WaitStatus::NotReady instead of blocking while waiting for completion virtual WaitStatus wait(bool blocking, bool useQuickKmdSleep); bool isUserEvent() const { @@ -347,7 +347,7 @@ class Event : public BaseObject<_cl_event>, public IDNode { // guarantees that newStatus <= oldStatus void transitionExecutionStatus(int32_t newExecutionStatus) const; - //vector storing events that needs to be notified when this event is ready to go + // vector storing events that needs to be notified when this event is ready to go IFRefList childEventsToNotify; void unblockEventsBlockedByThis(int32_t transitionStatus); void submitCommand(bool abortBlockedTasks); @@ -387,9 +387,9 @@ class Event : public BaseObject<_cl_event>, public IDNode { TagNodeBase *timeStampNode = nullptr; TagNodeBase *perfCounterNode = nullptr; std::unique_ptr timestampPacketContainer; - //number of events 
this event depends on + // number of events this event depends on std::atomic parentCount; - //event parents + // event parents std::vector parentEvents; private: diff --git a/opencl/source/event/event_builder.cpp b/opencl/source/event/event_builder.cpp index 75e4322695..d3c9f1da5e 100644 --- a/opencl/source/event/event_builder.cpp +++ b/opencl/source/event/event_builder.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -52,8 +52,8 @@ void EventBuilder::finalize() { sentinel.addChild(*this->event); for (Event *parent : parentEvents) { - //do not add as child if: - //parent has no parents and is not blocked + // do not add as child if: + // parent has no parents and is not blocked if (!(parent->peekIsBlocked() == false && parent->taskLevel != CompletionStamp::notReady) || (!parent->isEventWithoutCommand() && !parent->peekIsCmdSubmitted())) { parent->addChild(*this->event); diff --git a/opencl/source/helpers/convert_color.h b/opencl/source/helpers/convert_color.h index 6a168a32de..17b2ab085c 100644 --- a/opencl/source/helpers/convert_color.h +++ b/opencl/source/helpers/convert_color.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -88,7 +88,7 @@ inline void convertFillColor(const void *fillColor, iFillColor[i] = static_cast(normalizingFactor * fFillColor[i]); } } else if (oldImageFormat.image_channel_data_type == CL_HALF_FLOAT) { - //float to half convert. + // float to half convert. for (auto i = 0; i < 4; i++) { uint16_t temp = Math::float2Half(fFillColor[i]); iFillColor[i] = temp; diff --git a/opencl/source/helpers/dispatch_info.h b/opencl/source/helpers/dispatch_info.h index 46fa85b2a7..6be963f687 100644 --- a/opencl/source/helpers/dispatch_info.h +++ b/opencl/source/helpers/dispatch_info.h @@ -75,14 +75,14 @@ class DispatchInfo { Kernel *kernel = nullptr; uint32_t dim = 0; - Vec3 gws{0, 0, 0}; //global work size - Vec3 elws{0, 0, 0}; //enqueued local work size - Vec3 offset{0, 0, 0}; //global offset - Vec3 agws{0, 0, 0}; //actual global work size - Vec3 lws{0, 0, 0}; //local work size - Vec3 twgs{0, 0, 0}; //total number of work groups - Vec3 nwgs{0, 0, 0}; //number of work groups - Vec3 swgs{0, 0, 0}; //start of work groups + Vec3 gws{0, 0, 0}; // global work size + Vec3 elws{0, 0, 0}; // enqueued local work size + Vec3 offset{0, 0, 0}; // global offset + Vec3 agws{0, 0, 0}; // actual global work size + Vec3 lws{0, 0, 0}; // local work size + Vec3 twgs{0, 0, 0}; // total number of work groups + Vec3 nwgs{0, 0, 0}; // number of work groups + Vec3 swgs{0, 0, 0}; // start of work groups }; struct MultiDispatchInfo { diff --git a/opencl/source/sharings/gl/windows/gl_sharing_windows.h b/opencl/source/sharings/gl/windows/gl_sharing_windows.h index 79c8b54504..14707df988 100644 --- a/opencl/source/sharings/gl/windows/gl_sharing_windows.h +++ b/opencl/source/sharings/gl/windows/gl_sharing_windows.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -13,7 +13,7 @@ #include namespace NEO { -//OpenGL API names +// OpenGL API names typedef GLboolean(OSAPI *PFNOGLSetSharedOCLContextStateINTEL)(GLDisplay hdcHandle, GLContext contextHandle, GLboolean state, GLvoid *pContextInfo); typedef GLboolean(OSAPI *PFNOGLAcquireSharedBufferINTEL)(GLDisplay hdcHandle, GLContext contextHandle, GLContext 
backupContextHandle, GLvoid *pBufferInfo); typedef GLboolean(OSAPI *PFNOGLAcquireSharedRenderBufferINTEL)(GLDisplay hdcHandle, GLContext contextHandle, GLContext backupContextHandle, GLvoid *pResourceInfo); @@ -33,7 +33,7 @@ typedef const GLubyte *(OSAPI *PFNglGetStringi)(GLenum name, GLuint index); typedef void(OSAPI *PFNglGetIntegerv)(GLenum pname, GLint *params); typedef void(OSAPI *PFNglBindTexture)(GLenum target, GLuint texture); -//wgl +// wgl typedef BOOL(OSAPI *PFNwglMakeCurrent)(HDC, HGLRC); typedef GLContext(OSAPI *PFNwglCreateContext)(GLDisplay hdcHandle); typedef int(OSAPI *PFNwglShareLists)(GLContext contextHandle, GLContext backupContextHandle); diff --git a/opencl/source/tracing/tracing_types.h b/opencl/source/tracing/tracing_types.h index b69a39c662..13962028f6 100644 --- a/opencl/source/tracing/tracing_types.h +++ b/opencl/source/tracing/tracing_types.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2020 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -175,7 +175,7 @@ typedef enum _cl_function_id { \param[in] userData User-defined data pointer passed through clCreateTracingHandleINTEL() function - Thread Safety: must be guaranteed by customer + Thread Safety: must be guaranteed by customer */ typedef void (*cl_tracing_callback)(cl_function_id fid, cl_callback_data *callbackData, void *userData); diff --git a/opencl/test/unit_test/api/cl_enqueue_copy_buffer_rect_tests.inl b/opencl/test/unit_test/api/cl_enqueue_copy_buffer_rect_tests.inl index 63d73d50da..cee2fc2606 100644 --- a/opencl/test/unit_test/api/cl_enqueue_copy_buffer_rect_tests.inl +++ b/opencl/test/unit_test/api/cl_enqueue_copy_buffer_rect_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -34,16 +34,16 @@ TEST_F(clEnqueueCopyBufferRectTests, GivenCorrectParametersWhenEnqueingCopyBuffe auto retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region, - 10, //srcRowPitch - 0, //srcSlicePitch - 10, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 10, // srcRowPitch + 0, // srcSlicePitch + 10, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -52,16 +52,16 @@ TEST_F(clEnqueueCopyBufferRectTests, GivenNullCommandQueueWhenEnqueingCopyBufferRectThenInvalidCommandQueueErrorIsReturned) { auto retVal = clEnqueueCopyBufferRect( - nullptr, //command_queue - nullptr, //srcBuffer - nullptr, //dstBuffer - nullptr, //srcOrigin - nullptr, //dstOrigin - nullptr, //retion - 0, //srcRowPitch - 0, //srcSlicePitch - 0, //dstRowPitch - 0, //dstSlicePitch + nullptr, // command_queue + nullptr, // srcBuffer + nullptr, // dstBuffer + nullptr, // srcOrigin + nullptr, // dstOrigin + nullptr, // region + 0, // srcRowPitch + 0, // srcSlicePitch + 0, // dstRowPitch + 0, // dstSlicePitch 0, nullptr, nullptr); @@ -79,16 +79,16 @@ TEST_F(clEnqueueCopyBufferRectTests, GivenQueueIncapableWhenEnqueingCopyBufferRe this->disableQueueCapabilities(CL_QUEUE_CAPABILITY_TRANSFER_BUFFER_RECT_INTEL); auto retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region, - 10, //srcRowPitch - 0, //srcSlicePitch - 10, //dstRowPitch - 0, 
//dstSlicePitch - 0, //numEventsInWaitList + 10, // srcRowPitch + 0, // srcSlicePitch + 10, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -104,16 +104,16 @@ TEST_F(clEnqueueCopyBufferRectTests, givenPitchesEqualZeroAndZerosInRegionWhenCa auto retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region, - 0, //srcRowPitch - 0, //srcSlicePitch - 0, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 0, // srcRowPitch + 0, // srcSlicePitch + 0, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -129,16 +129,16 @@ TEST_F(clEnqueueCopyBufferRectTests, givenZeroInRegionWhenCallClEnqueueCopyBuffe auto retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region, - 10, //srcRowPitch - 0, //srcSlicePitch - 10, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 10, // srcRowPitch + 0, // srcSlicePitch + 10, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -148,16 +148,16 @@ TEST_F(clEnqueueCopyBufferRectTests, givenZeroInRegionWhenCallClEnqueueCopyBuffe retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region1, - 10, //srcRowPitch - 0, //srcSlicePitch - 10, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 10, // srcRowPitch + 0, // srcSlicePitch + 10, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -166,16 +166,16 @@ TEST_F(clEnqueueCopyBufferRectTests, givenZeroInRegionWhenCallClEnqueueCopyBuffe retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region2, - 10, //srcRowPitch - 0, //srcSlicePitch - 10, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 10, // srcRowPitch + 0, // srcSlicePitch + 10, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -184,16 +184,16 @@ TEST_F(clEnqueueCopyBufferRectTests, givenZeroInRegionWhenCallClEnqueueCopyBuffe retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region3, - 10, //srcRowPitch - 0, //srcSlicePitch - 10, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 10, // srcRowPitch + 0, // srcSlicePitch + 10, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -210,16 +210,16 @@ TEST_F(clEnqueueCopyBufferRectTests, givenNonProperSrcBufferSizeWhenCallClEnqueu auto retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region, - 10, //srcRowPitch - 0, //srcSlicePitch - 10, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 10, // srcRowPitch + 0, // srcSlicePitch + 10, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -236,16 +236,16 @@ TEST_F(clEnqueueCopyBufferRectTests, givenNonProperDstBufferSizeWhenCallClEnqueu auto retVal = clEnqueueCopyBufferRect( pCommandQueue, - &srcBuffer, //srcBuffer - 
&dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region, - 10, //srcRowPitch - 0, //srcSlicePitch - 10, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 10, // srcRowPitch + 0, // srcSlicePitch + 10, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); @@ -284,16 +284,16 @@ TEST_F(clEnqueueCopyBufferRectTests, givenPitchesEqualZeroAndNotZeroRegionWhenCa auto retVal = clEnqueueCopyBufferRect( commandQueue.get(), - &srcBuffer, //srcBuffer - &dstBuffer, //dstBuffer + &srcBuffer, // srcBuffer + &dstBuffer, // dstBuffer srcOrigin, dstOrigin, region, - 0, //srcRowPitch - 0, //srcSlicePitch - 0, //dstRowPitch - 0, //dstSlicePitch - 0, //numEventsInWaitList + 0, // srcRowPitch + 0, // srcSlicePitch + 0, // dstRowPitch + 0, // dstSlicePitch + 0, // numEventsInWaitList nullptr, nullptr); diff --git a/opencl/test/unit_test/api/cl_enqueue_copy_buffer_to_image_tests.inl b/opencl/test/unit_test/api/cl_enqueue_copy_buffer_to_image_tests.inl index db2a329ac6..a1f36b4578 100644 --- a/opencl/test/unit_test/api/cl_enqueue_copy_buffer_to_image_tests.inl +++ b/opencl/test/unit_test/api/cl_enqueue_copy_buffer_to_image_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -53,13 +53,13 @@ TEST_F(clEnqueueCopyBufferToImageTests, GivenInvalidCmdQueueWhenCopyingBufferToI size_t region[] = {10, 10, 0}; auto retVal = clEnqueueCopyBufferToImage( - nullptr, //commandQueue - nullptr, //srcBuffer - nullptr, //dstBuffer - 0u, //src_offset + nullptr, // commandQueue + nullptr, // srcBuffer + nullptr, // dstBuffer + 0u, // src_offset dstOrigin, region, - 0, //numEventsInWaitList + 0, // numEventsInWaitList nullptr, nullptr); @@ -72,12 +72,12 @@ TEST_F(clEnqueueCopyBufferToImageTests, GivenInvalidSrcBufferWhenCopyingBufferTo auto retVal = clEnqueueCopyBufferToImage( pCommandQueue, - nullptr, //srcBuffer - nullptr, //dstBuffer - 0u, //src_offset + nullptr, // srcBuffer + nullptr, // dstBuffer + 0u, // src_offset dstOrigin, region, - 0, //numEventsInWaitList + 0, // numEventsInWaitList nullptr, nullptr); @@ -99,10 +99,10 @@ TEST_F(clEnqueueCopyBufferToImageTests, GivenValidParametersWhenCopyingBufferToI pCommandQueue, srcBuffer.get(), dstImage, - 0u, //src_offset + 0u, // src_offset dstOrigin, region, - 0, //numEventsInWaitList + 0, // numEventsInWaitList nullptr, nullptr); @@ -127,10 +127,10 @@ TEST_F(clEnqueueCopyBufferToImageTests, GivenQueueIncapableWhenCopyingBufferToIm pCommandQueue, srcBuffer.get(), dstImage, - 0u, //src_offset + 0u, // src_offset dstOrigin, region, - 0, //numEventsInWaitList + 0, // numEventsInWaitList nullptr, nullptr); diff --git a/opencl/test/unit_test/api/cl_enqueue_copy_image_to_buffer_tests.inl b/opencl/test/unit_test/api/cl_enqueue_copy_image_to_buffer_tests.inl index 37d416c7b5..45e0f1c8e4 100644 --- a/opencl/test/unit_test/api/cl_enqueue_copy_image_to_buffer_tests.inl +++ b/opencl/test/unit_test/api/cl_enqueue_copy_image_to_buffer_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -53,12 +53,12 @@ TEST_F(clEnqueueCopyImageToBufferTests, GivenInvalidQueueWhenCopyingImageToBuffe auto retVal = clEnqueueCopyImageToBuffer( nullptr, - nullptr, //srcBuffer - nullptr, //dstBuffer + nullptr, // srcBuffer + nullptr, // dstBuffer srcOrigin, region, - 0, //dstOffset - 0, //numEventsInWaitList + 0, // 
dstOffset + 0, // numEventsInWaitList nullptr, nullptr); @@ -71,12 +71,12 @@ TEST_F(clEnqueueCopyImageToBufferTests, GivenInvalidBufferWhenCopyingImageToBuff auto retVal = clEnqueueCopyImageToBuffer( pCommandQueue, - nullptr, //srcBuffer - nullptr, //dstBuffer + nullptr, // srcBuffer + nullptr, // dstBuffer srcOrigin, region, - 0, //dstOffset - 0, //numEventsInWaitList + 0, // dstOffset + 0, // numEventsInWaitList nullptr, nullptr); @@ -99,8 +99,8 @@ TEST_F(clEnqueueCopyImageToBufferTests, GivenValidParametersWhenCopyingImageToBu dstBuffer.get(), srcOrigin, region, - 0, //dstOffset - 0, //numEventsInWaitList + 0, // dstOffset + 0, // numEventsInWaitList nullptr, nullptr); @@ -124,8 +124,8 @@ TEST_F(clEnqueueCopyImageToBufferTests, GivenQueueIncapableWhenCopyingImageToBuf dstBuffer.get(), srcOrigin, region, - 0, //dstOffset - 0, //numEventsInWaitList + 0, // dstOffset + 0, // numEventsInWaitList nullptr, nullptr); diff --git a/opencl/test/unit_test/api/cl_enqueue_native_kernel_tests.inl b/opencl/test/unit_test/api/cl_enqueue_native_kernel_tests.inl index 0d2b614174..9b9a4d4018 100644 --- a/opencl/test/unit_test/api/cl_enqueue_native_kernel_tests.inl +++ b/opencl/test/unit_test/api/cl_enqueue_native_kernel_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -23,7 +23,7 @@ TEST_F(clEnqueueNativeKernelTests, GivenAnyParametersWhenExecutingNativeKernelTh nullptr, // mem_list nullptr, // args_mem_loc 0, // num_events - nullptr, //event_list + nullptr, // event_list nullptr // event ); EXPECT_EQ(CL_OUT_OF_HOST_MEMORY, retVal); diff --git a/opencl/test/unit_test/api/cl_enqueue_read_buffer_rect_tests.inl b/opencl/test/unit_test/api/cl_enqueue_read_buffer_rect_tests.inl index a9c55f4876..3a45049c17 100644 --- a/opencl/test/unit_test/api/cl_enqueue_read_buffer_rect_tests.inl +++ b/opencl/test/unit_test/api/cl_enqueue_read_buffer_rect_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -33,12 +33,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenInvalidBufferWhenReadingRectangularRegi buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -59,12 +59,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenNullCommandQueueWhenReadingRectangularR buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -92,12 +92,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenNullHostPtrWhenReadingRectangularRegion buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - nullptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + nullptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -129,12 +129,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenValidParametersWhenReadingRectangularRe buffOrigin, hostOrigin, region, - 10, 
//bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -159,12 +159,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenQueueIncapableWhenReadingRectangularReg buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -195,12 +195,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenInvalidPitchWhenReadingRectangularRegio buffOrigin, hostOrigin, region, - bufferRowPitch, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + bufferRowPitch, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -214,12 +214,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenInvalidPitchWhenReadingRectangularRegio buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - hostRowPitch, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + hostRowPitch, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -233,12 +233,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenInvalidPitchWhenReadingRectangularRegio buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - bufferSlicePitch, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + bufferSlicePitch, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -252,12 +252,12 @@ TEST_F(clEnqueueReadBufferRectTest, GivenInvalidPitchWhenReadingRectangularRegio buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - hostSlicePitch, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + hostSlicePitch, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -318,12 +318,12 @@ TEST_P(EnqueueReadReadBufferRectTests, GivenNoReadFlagsWhenReadingRectangularReg buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); EXPECT_EQ(CL_INVALID_OPERATION, retVal); diff --git a/opencl/test/unit_test/api/cl_enqueue_write_buffer_rect_tests.inl b/opencl/test/unit_test/api/cl_enqueue_write_buffer_rect_tests.inl index ca78bba7f4..0eaf98222d 100644 --- a/opencl/test/unit_test/api/cl_enqueue_write_buffer_rect_tests.inl +++ b/opencl/test/unit_test/api/cl_enqueue_write_buffer_rect_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -32,12 +32,12 @@ TEST_F(clEnqueueWriteBufferRectTests, 
GivenInvalidBufferWhenWritingRectangularRe buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -58,12 +58,12 @@ TEST_F(clEnqueueWriteBufferRectTests, GivenNullCommandQueueWhenWritingRectangula buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -91,12 +91,12 @@ TEST_F(clEnqueueWriteBufferRectTests, GivenNullHostPtrWhenWritingRectangularRegi buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - nullptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + nullptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -121,12 +121,12 @@ TEST_F(clEnqueueWriteBufferRectTests, GivenCorrectParametersWhenWritingRectangul buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); @@ -150,12 +150,12 @@ TEST_F(clEnqueueWriteBufferRectTests, GivenQueueIncapableWhenWritingRectangularR buffOrigin, hostOrigin, region, - 10, //bufferRowPitch - 0, //bufferSlicePitch - 10, //hostRowPitch - 0, //hostSlicePitch - ptr, //hostPtr - 0, //numEventsInWaitList + 10, // bufferRowPitch + 0, // bufferSlicePitch + 10, // hostRowPitch + 0, // hostSlicePitch + ptr, // hostPtr + 0, // numEventsInWaitList nullptr, nullptr); diff --git a/opencl/test/unit_test/api/cl_enqueue_write_buffer_tests.inl b/opencl/test/unit_test/api/cl_enqueue_write_buffer_tests.inl index 055ffc3af7..d5b931cb0b 100644 --- a/opencl/test/unit_test/api/cl_enqueue_write_buffer_tests.inl +++ b/opencl/test/unit_test/api/cl_enqueue_write_buffer_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -60,11 +60,11 @@ TEST_F(clEnqueueWriteBufferTests, GivenNullCommandQueueWhenWritingBufferThenInva retVal = clEnqueueWriteBuffer( nullptr, buffer, - CL_FALSE, //blocking write - 0, //offset - 0, //sb + CL_FALSE, // blocking write + 0, // offset + 0, // sb nullptr, - 0, //numEventsInWaitList + 0, // numEventsInWaitList nullptr, nullptr); @@ -77,9 +77,9 @@ TEST_F(clEnqueueWriteBufferTests, GivenNullBufferWhenWritingBufferThenInvalidMem retVal = clEnqueueWriteBuffer( pCommandQueue, nullptr, - CL_FALSE, //blocking write - 0, //offset - 0, //cb + CL_FALSE, // blocking write + 0, // offset + 0, // cb ptr, 0, nullptr, diff --git a/opencl/test/unit_test/api/cl_get_kernel_sub_group_info_khr_tests.inl b/opencl/test/unit_test/api/cl_get_kernel_sub_group_info_khr_tests.inl index db62800ab0..7e35d73ea0 100644 --- a/opencl/test/unit_test/api/cl_get_kernel_sub_group_info_khr_tests.inl +++ b/opencl/test/unit_test/api/cl_get_kernel_sub_group_info_khr_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel 
Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -284,7 +284,7 @@ TEST_P(KernelSubGroupInfoKhrInputParamsTest, GivenInvalidInputWhenGettingKernelS } TEST_P(KernelSubGroupInfoKhrInputParamsTest, GivenInvalidParamSizeWhenGettingKernelSubGroupInfoThenInvalidValueErrorIsReturned) { - //param_value_size < sizeof(size_t) + // param_value_size < sizeof(size_t) retVal = clGetKernelSubGroupInfoKHR( pMultiDeviceKernel, pClDevice, diff --git a/opencl/test/unit_test/api/cl_get_kernel_suggested_local_work_size_intel_tests.inl b/opencl/test/unit_test/api/cl_get_kernel_suggested_local_work_size_intel_tests.inl index 47a79b2192..f7843a901a 100644 --- a/opencl/test/unit_test/api/cl_get_kernel_suggested_local_work_size_intel_tests.inl +++ b/opencl/test/unit_test/api/cl_get_kernel_suggested_local_work_size_intel_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2021 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -96,7 +96,7 @@ TEST_F(clGetKernelSuggestedLocalWorkSizeTests, GivenVariousInputWhenGettingSugge EXPECT_EQ(expectedLws.y, suggestedLocalWorkSize[1]); EXPECT_EQ(expectedLws.z, suggestedLocalWorkSize[2]); - //null global work offset is fine + // null global work offset is fine retVal = clGetKernelSuggestedLocalWorkSizeINTEL(pCommandQueue, pMultiDeviceKernel, 3, nullptr, globalWorkSize, suggestedLocalWorkSize); EXPECT_EQ(CL_SUCCESS, retVal); EXPECT_EQ(expectedLws.x, suggestedLocalWorkSize[0]); diff --git a/opencl/test/unit_test/api/cl_get_kernel_suggested_local_work_size_khr_tests.inl b/opencl/test/unit_test/api/cl_get_kernel_suggested_local_work_size_khr_tests.inl index ad64cb3568..96d80bb935 100644 --- a/opencl/test/unit_test/api/cl_get_kernel_suggested_local_work_size_khr_tests.inl +++ b/opencl/test/unit_test/api/cl_get_kernel_suggested_local_work_size_khr_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2021 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -96,7 +96,7 @@ TEST_F(clGetKernelSuggestedLocalWorkSizeKHRTests, GivenVariousInputWhenGettingSu EXPECT_EQ(expectedLws.y, suggestedLocalWorkSize[1]); EXPECT_EQ(expectedLws.z, suggestedLocalWorkSize[2]); - //null global work offset is fine + // null global work offset is fine retVal = clGetKernelSuggestedLocalWorkSizeKHR(pCommandQueue, pMultiDeviceKernel, 3, nullptr, globalWorkSize, suggestedLocalWorkSize); EXPECT_EQ(CL_SUCCESS, retVal); EXPECT_EQ(expectedLws.x, suggestedLocalWorkSize[0]); diff --git a/opencl/test/unit_test/api/cl_release_event_tests.inl b/opencl/test/unit_test/api/cl_release_event_tests.inl index 67fefca709..f75d29c027 100644 --- a/opencl/test/unit_test/api/cl_release_event_tests.inl +++ b/opencl/test/unit_test/api/cl_release_event_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -41,7 +41,7 @@ TEST_F(clEventTests, GivenValidEventWhenReleasingEventThenSuccessIsReturned) { cl_event event = (cl_event)pEvent; auto retVal = clReleaseEvent(event); EXPECT_EQ(CL_SUCCESS, retVal); - //no delete operation. clReleaseEvent should do this for us + // no delete operation. 
clReleaseEvent should do this for us } TEST_F(clEventTests, GivenValidEventWhenRetainedAndReleasedThenReferenceCountIsUpdated) { diff --git a/opencl/test/unit_test/api/cl_svm_alloc_tests.inl b/opencl/test/unit_test/api/cl_svm_alloc_tests.inl index 1f1f675faa..76d8ec52c1 100644 --- a/opencl/test/unit_test/api/cl_svm_alloc_tests.inl +++ b/opencl/test/unit_test/api/cl_svm_alloc_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -62,24 +62,24 @@ TEST(clSVMAllocTest, givenPlatformWithoutDevicesWhenClSVMAllocIsCalledThenDevice TEST_P(clSVMAllocValidFlagsTests, GivenSvmSupportWhenAllocatingSvmThenSvmIsAllocated) { cl_mem_flags flags = GetParam(); const ClDeviceInfo &devInfo = pDevice->getDeviceInfo(); - //check for svm support + // check for svm support if (devInfo.svmCapabilities != 0) { - //fg svm flag + // fg svm flag if (flags & CL_MEM_SVM_FINE_GRAIN_BUFFER) { - //fg svm flag, fg svm support - expected success + // fg svm flag, fg svm support - expected success if (devInfo.svmCapabilities & CL_DEVICE_SVM_FINE_GRAIN_BUFFER) { auto svmPtr = clSVMAlloc(pContext, flags, 4096 /* Size*/, 128 /* alignment */); EXPECT_NE(nullptr, svmPtr); clSVMFree(pContext, svmPtr); } - //fg svm flag no fg svm support + // fg svm flag no fg svm support else { auto svmPtr = clSVMAlloc(pContext, flags, 4096 /* Size*/, 128 /* alignment */); EXPECT_EQ(nullptr, svmPtr); } } - //no fg svm flag, svm support - expected success + // no fg svm flag, svm support - expected success else { auto svmPtr = clSVMAlloc(pContext, flags, 4096 /* Size*/, 128 /* alignment */); EXPECT_NE(nullptr, svmPtr); @@ -87,7 +87,7 @@ TEST_P(clSVMAllocValidFlagsTests, GivenSvmSupportWhenAllocatingSvmThenSvmIsAlloc clSVMFree(pContext, svmPtr); } } else { - //no svm support -expected fail + // no svm support -expected fail auto svmPtr = clSVMAlloc(pContext, flags, 4096 /* Size*/, 128 /* alignment */); EXPECT_EQ(nullptr, svmPtr); } @@ -125,26 +125,26 @@ TEST_P(clSVMAllocFtrFlagsTests, GivenCorrectFlagsWhenAllocatingSvmThenSvmIsAlloc cl_mem_flags flags = GetParam(); void *svmPtr = nullptr; - //1: no svm - no flags supported + // 1: no svm - no flags supported pHwInfo->capabilityTable.ftrSvm = false; pHwInfo->capabilityTable.ftrSupportsCoherency = false; svmPtr = clSVMAlloc(pContext, flags, 4096, 128); EXPECT_EQ(nullptr, svmPtr); - //2: coarse svm - normal flags supported + // 2: coarse svm - normal flags supported pHwInfo->capabilityTable.ftrSvm = true; svmPtr = clSVMAlloc(pContext, flags, 4096, 128); if (flags & CL_MEM_SVM_FINE_GRAIN_BUFFER) { - //fg svm flags not supported + // fg svm flags not supported EXPECT_EQ(nullptr, svmPtr); } else { - //no fg svm flags supported + // no fg svm flags supported EXPECT_NE(nullptr, svmPtr); clSVMFree(pContext, svmPtr); } - //3: fg svm - all flags supported + // 3: fg svm - all flags supported pHwInfo->capabilityTable.ftrSupportsCoherency = true; svmPtr = clSVMAlloc(pContext, flags, 4096, 128); EXPECT_NE(nullptr, svmPtr); diff --git a/opencl/test/unit_test/api/cl_unified_shared_memory_tests.inl b/opencl/test/unit_test/api/cl_unified_shared_memory_tests.inl index 38b15ba097..150a2c5530 100644 --- a/opencl/test/unit_test/api/cl_unified_shared_memory_tests.inl +++ b/opencl/test/unit_test/api/cl_unified_shared_memory_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -717,7 +717,7 @@ 
TEST(clUnifiedSharedMemoryTests, whenDeviceSupportSharedMemoryAllocationsAndSyst EXPECT_EQ(retVal, CL_SUCCESS); EXPECT_TRUE(kernel->isAnyKernelArgumentUsingSystemMemory()); - //check if cross thread is updated + // check if cross thread is updated auto crossThreadLocation = reinterpret_cast(ptrOffset(mockKernel.mockKernel->getCrossThreadData(), mockKernel.kernelInfo.argAsPtr(0).stateless)); auto systemAddress = reinterpret_cast(systemPointer); diff --git a/opencl/test/unit_test/aub_tests/command_queue/aub_inline_data_local_id_tests_xehp_and_later.cpp b/opencl/test/unit_test/aub_tests/command_queue/aub_inline_data_local_id_tests_xehp_and_later.cpp index 9a0fc238be..cfe9ea748d 100644 --- a/opencl/test/unit_test/aub_tests/command_queue/aub_inline_data_local_id_tests_xehp_and_later.cpp +++ b/opencl/test/unit_test/aub_tests/command_queue/aub_inline_data_local_id_tests_xehp_and_later.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -180,8 +180,8 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, XeHPAndLaterAubInlineDataTest, givenCrossThreadFitI EXPECT_EQ(expectedEmitLocal, walker->getEmitLocalId()); EXPECT_EQ(0, memcmp(walker->getInlineDataPointer(), kernels[4]->getCrossThreadData(), sizeof(INLINE_DATA))); - //this kernel does nothing, so no expectMemory because only such kernel can fit into single GRF - //this is for sake of testing inline data data copying by COMPUTE_WALKER + // this kernel does nothing, so no expectMemory because only such kernel can fit into single GRF + // this is for sake of testing inline data data copying by COMPUTE_WALKER } HWCMDTEST_F(IGFX_XE_HP_CORE, XeHPAndLaterAubInlineDataTest, givenCrossThreadSizeMoreThanSingleGrfWhenInlineDataAllowedThenCopyGrfCrossThreadToInline) { @@ -461,7 +461,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, XeHPAndLaterAubHwLocalIdsWithSubgroupsTest, givenKe pCmdQ->finish(); - //we expect sequence of local ids from 0..199 + // we expect sequence of local ids from 0..199 auto expectedMemory = reinterpret_cast(variables[0].expectedMemory); auto currentWorkItem = 0u; diff --git a/opencl/test/unit_test/aub_tests/command_queue/aub_postsync_write_tests_xehp_and_later.cpp b/opencl/test/unit_test/aub_tests/command_queue/aub_postsync_write_tests_xehp_and_later.cpp index 3135bc6fe2..02a1aa9dbb 100644 --- a/opencl/test/unit_test/aub_tests/command_queue/aub_postsync_write_tests_xehp_and_later.cpp +++ b/opencl/test/unit_test/aub_tests/command_queue/aub_postsync_write_tests_xehp_and_later.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -94,7 +94,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, PostSyncWriteXeHPTests, givenTwoBatchedEnqueuesWhen std::fill(writePattern2, writePattern2 + sizeof(writePattern2), 1); auto buffer = std::unique_ptr(Buffer::create(&context, CL_MEM_COPY_HOST_PTR, bufferSize, initialMemory, retVal)); - //make sure that GPU copy is used + // make sure that GPU copy is used buffer->forceDisallowCPUCopy = true; cl_event outEvent1, outEvent2; diff --git a/opencl/test/unit_test/aub_tests/command_queue/enqueue_copy_buffer_rect_aub_tests.cpp b/opencl/test/unit_test/aub_tests/command_queue/enqueue_copy_buffer_rect_aub_tests.cpp index 4d4253236b..e4537a9477 100644 --- a/opencl/test/unit_test/aub_tests/command_queue/enqueue_copy_buffer_rect_aub_tests.cpp +++ b/opencl/test/unit_test/aub_tests/command_queue/enqueue_copy_buffer_rect_aub_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 
Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -42,7 +42,7 @@ struct CopyBufferRectHw typedef CopyBufferRectHw AUBCopyBufferRect; HWTEST_P(AUBCopyBufferRect, WhenCopyingThenExpectationsMet) { - //3D UINT8 buffer 20x20x20 + // 3D UINT8 buffer 20x20x20 static const size_t rowPitch = 20; static const size_t slicePitch = rowPitch * rowPitch; static const size_t elementCount = slicePitch * rowPitch; @@ -137,10 +137,10 @@ static size_t zero[] = {0}; INSTANTIATE_TEST_CASE_P(AUBCopyBufferRect, AUBCopyBufferRect, ::testing::Combine( - ::testing::Values(0, 3), //srcOrigin + ::testing::Values(0, 3), // srcOrigin ::testing::ValuesIn(zero), ::testing::Values(0, 7), - ::testing::Values(0, 3), //dstPrigin + ::testing::Values(0, 3), // dstOrigin ::testing::ValuesIn(zero), ::testing::Values(0, 7), ::testing::Values(true, false))); diff --git a/opencl/test/unit_test/aub_tests/command_queue/enqueue_fill_image_aub_tests.cpp b/opencl/test/unit_test/aub_tests/command_queue/enqueue_fill_image_aub_tests.cpp index e0ddd415e3..1202a64fd7 100644 --- a/opencl/test/unit_test/aub_tests/command_queue/enqueue_fill_image_aub_tests.cpp +++ b/opencl/test/unit_test/aub_tests/command_queue/enqueue_fill_image_aub_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -92,7 +92,7 @@ struct AubFillImage auto channelOrder = std::get<1>(GetParam()); if (dataType != CL_UNORM_INT8 && (channelOrder == CL_sRGBA || channelOrder == CL_sBGRA)) { - //sRGBA and sBGRA support only unorm int8 type + // sRGBA and sBGRA support only unorm int8 type GTEST_SKIP(); } CommandDeviceFixture::setUp(cl_command_queue_properties(0)); diff --git a/opencl/test/unit_test/aub_tests/command_queue/enqueue_read_buffer_rect_aub_tests.cpp b/opencl/test/unit_test/aub_tests/command_queue/enqueue_read_buffer_rect_aub_tests.cpp index 17016585b4..7750c5258a 100644 --- a/opencl/test/unit_test/aub_tests/command_queue/enqueue_read_buffer_rect_aub_tests.cpp +++ b/opencl/test/unit_test/aub_tests/command_queue/enqueue_read_buffer_rect_aub_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -95,7 +95,7 @@ HWTEST_P(AUBReadBufferRect, Given3dWhenReadingBufferThenExpectationsAreMet) { char *ptr = new char[slicePitch]; memset(ptr, 0, slicePitch); for (unsigned int i = 0; i < rowPitch; i++) { - //one slice will be copied from src. all others should be zeros + // one slice will be copied from src. 
all others should be zeros if (i == zHostOffs) { AUBCommandStreamFixture::expectMemory(destMemory + slicePitch * i, srcMemory + slicePitch * zBuffOffs, slicePitch); } else { diff --git a/opencl/test/unit_test/aub_tests/command_queue/enqueue_write_image_aub_tests.cpp b/opencl/test/unit_test/aub_tests/command_queue/enqueue_write_image_aub_tests.cpp index 1ed769a2fb..a88ca97c6e 100644 --- a/opencl/test/unit_test/aub_tests/command_queue/enqueue_write_image_aub_tests.cpp +++ b/opencl/test/unit_test/aub_tests/command_queue/enqueue_write_image_aub_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -227,7 +227,7 @@ struct AUBWriteImage auto srcMemoryAligned = alignedMalloc(4 + pixelSize * numPixels, MemoryConstants::cacheLineSize); memset(srcMemoryAligned, 0x0, 4 + pixelSize * numPixels); - auto srcMemoryUnaligned = ptrOffset(reinterpret_cast(srcMemoryAligned), 4); //ensure non cacheline-aligned (but aligned to 4) hostPtr to create non-zerocopy image + auto srcMemoryUnaligned = ptrOffset(reinterpret_cast(srcMemoryAligned), 4); // ensure non cacheline-aligned (but aligned to 4) hostPtr to create non-zerocopy image cl_mem_flags flags = CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE; auto surfaceFormat = Image::getSurfaceFormatFromTable(flags, &imageFormat, pClDevice->getHardwareInfo().capabilityTable.supportsOcl21Features); diff --git a/opencl/test/unit_test/aub_tests/command_stream/aub_walker_partition_tests_xehp_and_later.cpp b/opencl/test/unit_test/aub_tests/command_stream/aub_walker_partition_tests_xehp_and_later.cpp index 9a636e0364..0434497e33 100644 --- a/opencl/test/unit_test/aub_tests/command_stream/aub_walker_partition_tests_xehp_and_later.cpp +++ b/opencl/test/unit_test/aub_tests/command_stream/aub_walker_partition_tests_xehp_and_later.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -116,7 +116,7 @@ struct AubWalkerPartitionFixture : public KernelAUBFixture for (auto partitionId = 0; partitionId < DebugManager.flags.ExperimentalSetWalkerPartitionCount.get(); partitionId++) { expectNotEqualMemory(reinterpret_cast(postSyncAddress), ¬ExpectedValue, sizeof(notExpectedValue)); - postSyncAddress += 16; //next post sync needs to be right after the previous one + postSyncAddress += 16; // next post sync needs to be right after the previous one } auto dstGpuAddress = reinterpret_cast(dstBuffer->getGraphicsAllocation(rootDeviceIndex)->getGpuAddress()); @@ -168,7 +168,7 @@ struct AubWalkerPartitionTest : public AubWalkerPartitionFixture, partitionType = (rand() % 3 + 1); partitionCount = rand() % 16 + 1; - //now generate dimensions that makes sense + // now generate dimensions that makes sense auto goodWorkingSizeGenerated = false; while (!goodWorkingSizeGenerated) { dispatchParamters.localWorkSize[0] = rand() % 128 + 1; @@ -340,7 +340,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, whenPipeControlIsBeingE flushStream(); expectNotEqualMemory(reinterpret_cast(writeAddress), &writeValue, 4u); - //write needs to happen after 8 bytes + // write needs to happen after 8 bytes expectMemory(reinterpret_cast(writeAddress + 8), &writeValue, 4u); } @@ -389,7 +389,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode uint32_t writeValue = 7u; uint32_t pipeControlNotExecutedValue = 0u; - //this pipe control should be executed + // this pipe control should be executed 
void *pipeControlAddress = taskStream->getSpace(0); PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( @@ -404,10 +404,10 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode pipeControl->setAddressHigh(static_cast(writeAddress >> 32)); }; - //we have now command buffer that has conditional batch buffer end and pipe control that tests whether batch buffer end acted correctly + // we have now command buffer that has conditional batch buffer end and pipe control that tests whether batch buffer end acted correctly - //MAD_GREATER_THAN_IDD If Indirect fetched data is greater than inline data then continue. - //continue test + // MAD_GREATER_THAN_IDD If Indirect fetched data is greater than inline data then continue. + // continue test conditionalBatchBufferEnd->setCompareOperation(CONDITIONAL_BATCH_BUFFER_END::COMPARE_OPERATION::COMPARE_OPERATION_MAD_GREATER_THAN_IDD); *compareAddress = 11; auto inlineData = 10u; @@ -416,7 +416,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode programPipeControl(); flushStream(); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); - //terminate test + // terminate test *compareAddress = 10; inlineData = 10u; writeAddress += sizeof(uint64_t); @@ -427,9 +427,9 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &pipeControlNotExecutedValue, sizeof(pipeControlNotExecutedValue)); - //MAD_GREATER_THAN_OR_EQUAL_IDD If Indirect fetched data is greater than or equal to inline data then continue. + // MAD_GREATER_THAN_OR_EQUAL_IDD If Indirect fetched data is greater than or equal to inline data then continue. - //continue test - greater + // continue test - greater conditionalBatchBufferEnd->setCompareOperation(CONDITIONAL_BATCH_BUFFER_END::COMPARE_OPERATION::COMPARE_OPERATION_MAD_GREATER_THAN_OR_EQUAL_IDD); *compareAddress = 11; inlineData = 10u; @@ -442,7 +442,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); - //continue test - equal + // continue test - equal *compareAddress = 10; inlineData = 10u; @@ -454,7 +454,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); - //terminate test + // terminate test *compareAddress = 9; inlineData = 10u; writeAddress += sizeof(uint64_t); @@ -465,9 +465,9 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &pipeControlNotExecutedValue, sizeof(pipeControlNotExecutedValue)); - //MAD_LESS_THAN_IDD If Indirect fetched data is less than inline data then continue. + // MAD_LESS_THAN_IDD If Indirect fetched data is less than inline data then continue. 
- //continue test + // continue test conditionalBatchBufferEnd->setCompareOperation(CONDITIONAL_BATCH_BUFFER_END::COMPARE_OPERATION::COMPARE_OPERATION_MAD_LESS_THAN_IDD); *compareAddress = 9; inlineData = 10u; @@ -480,7 +480,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); - //terminate test + // terminate test *compareAddress = 10; inlineData = 10u; writeAddress += sizeof(uint64_t); @@ -491,9 +491,9 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &pipeControlNotExecutedValue, sizeof(pipeControlNotExecutedValue)); - //MAD_LESS_THAN_OR_EQUAL_IDD If Indirect fetched data is less than or equal to inline data then continue. + // MAD_LESS_THAN_OR_EQUAL_IDD If Indirect fetched data is less than or equal to inline data then continue. - //continue test - less + // continue test - less conditionalBatchBufferEnd->setCompareOperation(CONDITIONAL_BATCH_BUFFER_END::COMPARE_OPERATION::COMPARE_OPERATION_MAD_LESS_THAN_OR_EQUAL_IDD); *compareAddress = 9; inlineData = 10u; @@ -506,7 +506,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); - //continue test - equal + // continue test - equal *compareAddress = 10; inlineData = 10u; @@ -518,7 +518,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); - //terminate test + // terminate test *compareAddress = 11; inlineData = 10u; writeAddress += sizeof(uint64_t); @@ -529,9 +529,9 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &pipeControlNotExecutedValue, sizeof(pipeControlNotExecutedValue)); - //MAD_EQUAL_IDD If Indirect fetched data is equal to inline data then continue. + // MAD_EQUAL_IDD If Indirect fetched data is equal to inline data then continue. - //continue test equal + // continue test equal conditionalBatchBufferEnd->setCompareOperation(CONDITIONAL_BATCH_BUFFER_END::COMPARE_OPERATION::COMPARE_OPERATION_MAD_EQUAL_IDD); *compareAddress = 10; inlineData = 10u; @@ -544,7 +544,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); - //terminate test + // terminate test *compareAddress = 0; inlineData = 10u; writeAddress += sizeof(uint64_t); @@ -555,9 +555,9 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &pipeControlNotExecutedValue, sizeof(pipeControlNotExecutedValue)); - //MAD_NOT_EQUAL_IDD If Indirect fetched data is not equal to inline data then continue. + // MAD_NOT_EQUAL_IDD If Indirect fetched data is not equal to inline data then continue. 
- //continue test not equal + // continue test not equal conditionalBatchBufferEnd->setCompareOperation(CONDITIONAL_BATCH_BUFFER_END::COMPARE_OPERATION::COMPARE_OPERATION_MAD_NOT_EQUAL_IDD); *compareAddress = 11; inlineData = 10u; @@ -570,7 +570,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenVariousCompareMode flushStream(); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); - //terminate test + // terminate test *compareAddress = 10; inlineData = 10u; writeAddress += sizeof(uint64_t); @@ -585,11 +585,11 @@ template struct MultiLevelBatchAubFixture : public AUBFixture { void setUp() { if (enableNesting) { - //turn on Batch Buffer nesting + // turn on Batch Buffer nesting DebugManager.flags.AubDumpAddMmioRegistersList.set( "0x1A09C;0x10001000"); } else { - //turn off Batch Buffer nesting + // turn off Batch Buffer nesting DebugManager.flags.AubDumpAddMmioRegistersList.set( "0x1A09C;0x10000000"); } @@ -662,13 +662,13 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithNesting, givenConditionalBa getSimulatedCsr()->initializeEngine(); writeMMIO(0x1A09C, 0x10001000); - //nest to second level + // nest to second level auto batchBufferStart = reinterpret_cast(taskStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(secondLevelBatch->getGpuAddress()); batchBufferStart->setNestedLevelBatchBuffer(BATCH_BUFFER_START::NESTED_LEVEL_BATCH_BUFFER::NESTED_LEVEL_BATCH_BUFFER_NESTED); - //nest to third level + // nest to third level batchBufferStart = reinterpret_cast(secondLevelBatchStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(thirdLevelBatch->getGpuAddress()); @@ -683,7 +683,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithNesting, givenConditionalBa writeAddress += sizeof(uint64_t); auto writeValue = 7u; - //this pipe control should be executed + // this pipe control should be executed PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( *secondLevelBatchStream, PostSyncMode::ImmediateData, @@ -722,13 +722,13 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithNesting, givenConditionalBa using BATCH_BUFFER_START = typename FamilyType::MI_BATCH_BUFFER_START; using PIPE_CONTROL = typename FamilyType::PIPE_CONTROL; - //nest to second level + // nest to second level auto batchBufferStart = reinterpret_cast(taskStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(secondLevelBatch->getGpuAddress()); batchBufferStart->setNestedLevelBatchBuffer(BATCH_BUFFER_START::NESTED_LEVEL_BATCH_BUFFER::NESTED_LEVEL_BATCH_BUFFER_NESTED); - //nest to third level + // nest to third level batchBufferStart = reinterpret_cast(secondLevelBatchStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(thirdLevelBatch->getGpuAddress()); @@ -743,7 +743,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithNesting, givenConditionalBa writeAddress += sizeof(uint64_t); auto writeValue = 7u; - //this pipe control should NOT be executed + // this pipe control should NOT be executed PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( *secondLevelBatchStream, PostSyncMode::ImmediateData, @@ -768,7 +768,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithNesting, givenConditionalBa writeAddress = helperSurface->getGpuAddress() + 
sizeof(uint64_t); writeValue = 0u; - //pipe controls are not emitted + // pipe controls are not emitted expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); writeAddress += sizeof(uint64_t); expectMemory(reinterpret_cast(writeAddress), &writeValue, sizeof(writeValue)); @@ -781,14 +781,14 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithNesting, givenCommandBuffer using BATCH_BUFFER_END = typename FamilyType::MI_BATCH_BUFFER_END; using BATCH_BUFFER_START = typename FamilyType::MI_BATCH_BUFFER_START; - //nest to second level + // nest to second level auto batchBufferStart = reinterpret_cast(taskStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(secondLevelBatch->getGpuAddress()); batchBufferStart->setEnableCommandCache(1u); batchBufferStart->setNestedLevelBatchBuffer(BATCH_BUFFER_START::NESTED_LEVEL_BATCH_BUFFER::NESTED_LEVEL_BATCH_BUFFER_NESTED); - //this pipe control should be executed + // this pipe control should be executed PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( *secondLevelBatchStream, PostSyncMode::ImmediateData, @@ -811,13 +811,13 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithoutNesting, givenConditiona using CONDITIONAL_BATCH_BUFFER_END = typename FamilyType::MI_CONDITIONAL_BATCH_BUFFER_END; using BATCH_BUFFER_START = typename FamilyType::MI_BATCH_BUFFER_START; - //nest to second level + // nest to second level auto batchBufferStart = reinterpret_cast(taskStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(secondLevelBatch->getGpuAddress()); batchBufferStart->setSecondLevelBatchBuffer(BATCH_BUFFER_START::SECOND_LEVEL_BATCH_BUFFER::SECOND_LEVEL_BATCH_BUFFER_SECOND_LEVEL_BATCH); - //nest to third level + // nest to third level batchBufferStart = reinterpret_cast(secondLevelBatchStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(thirdLevelBatch->getGpuAddress()); @@ -832,7 +832,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithoutNesting, givenConditiona writeAddress += sizeof(uint64_t); auto writeValue = 7u; - //this pipe control should't be executed + // this pipe control should't be executed PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( *secondLevelBatchStream, PostSyncMode::ImmediateData, @@ -846,7 +846,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithoutNesting, givenConditiona writeAddress += sizeof(uint64_t); writeValue++; - //and this shouldn't as well, we returned to ring + // and this shouldn't as well, we returned to ring MemorySynchronizationCommands::addBarrierWithPostSyncOperation( *taskStream, PostSyncMode::ImmediateData, writeAddress, writeValue, device->getHardwareInfo(), args); @@ -871,13 +871,13 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithoutNesting, givenConditiona using CONDITIONAL_BATCH_BUFFER_END = typename FamilyType::MI_CONDITIONAL_BATCH_BUFFER_END; using BATCH_BUFFER_START = typename FamilyType::MI_BATCH_BUFFER_START; - //nest to second level + // nest to second level auto batchBufferStart = reinterpret_cast(taskStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(secondLevelBatch->getGpuAddress()); batchBufferStart->setSecondLevelBatchBuffer(BATCH_BUFFER_START::SECOND_LEVEL_BATCH_BUFFER::SECOND_LEVEL_BATCH_BUFFER_SECOND_LEVEL_BATCH); - //nest 
to third level + // nest to third level batchBufferStart = reinterpret_cast(secondLevelBatchStream->getSpace(sizeof(BATCH_BUFFER_START))); batchBufferStart->init(); batchBufferStart->setBatchBufferStartAddress(thirdLevelBatch->getGpuAddress()); @@ -892,7 +892,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithoutNesting, givenConditiona writeAddress += sizeof(uint64_t); auto writeValue = 7u; - //this pipe control should't be executed + // this pipe control should't be executed PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( *secondLevelBatchStream, PostSyncMode::ImmediateData, @@ -906,7 +906,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, MultiLevelBatchTestsWithoutNesting, givenConditiona writeAddress += sizeof(uint64_t); writeValue++; - //and this should , we returned to primary batch + // and this should , we returned to primary batch MemorySynchronizationCommands::addBarrierWithPostSyncOperation( *taskStream, PostSyncMode::ImmediateData, writeAddress, writeValue, device->getHardwareInfo(), args); @@ -1006,11 +1006,11 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenPredicatedCommandB auto expectedGpuAddress = taskStream->getGraphicsAllocation()->getGpuAddress() + WalkerPartition::computeControlSectionOffset(testArgs); - //16 partitions updated atomic to value 16 - //17th partition updated it to 17 and was predicated out of the batch buffer + // 16 partitions updated atomic to value 16 + // 17th partition updated it to 17 and was predicated out of the batch buffer uint32_t expectedValue = 17u; expectMemory(reinterpret_cast(expectedGpuAddress), &expectedValue, sizeof(expectedValue)); - //this is 1 tile scenario + // this is 1 tile scenario uint32_t expectedTileValue = 1u; expectMemory(reinterpret_cast(expectedGpuAddress + 4llu), &expectedTileValue, sizeof(expectedTileValue)); } @@ -1031,7 +1031,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenGeneralPurposeRegi WalkerPartition::programMiLoadRegisterReg(streamCpuPointer, totalBytesProgrammed, generalPurposeRegister5, wparidCCSOffset); WalkerPartition::programWparidMask(streamCpuPointer, totalBytesProgrammed, 4u); WalkerPartition::programWparidPredication(streamCpuPointer, totalBytesProgrammed, true); - //this command must not execute + // this command must not execute taskStream->getSpace(totalBytesProgrammed); PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( @@ -1054,23 +1054,23 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenPredicationWhenItI auto addressShift = 8u; auto writeAddress = helperSurface->getGpuAddress(); - //program WPARID mask to 16 partitions + // program WPARID mask to 16 partitions WalkerPartition::programWparidMask(streamCpuPointer, totalBytesProgrammed, 16u); streamCpuPointer = taskStream->getSpace(totalBytesProgrammed); - //program WPARID to value within 0-19 + // program WPARID to value within 0-19 for (uint32_t wparid = 0u; wparid < 20; wparid++) { totalBytesProgrammed = 0; streamCpuPointer = taskStream->getSpace(0); WalkerPartition::programRegisterWithValue(streamCpuPointer, WalkerPartition::wparidCCSOffset, totalBytesProgrammed, wparid); WalkerPartition::programWparidPredication(streamCpuPointer, totalBytesProgrammed, true); taskStream->getSpace(totalBytesProgrammed); - //emit pipe control + // emit pipe control PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( *taskStream, PostSyncMode::ImmediateData, writeAddress, writeValue, device->getHardwareInfo(), 
args); - //turn off predication + // turn off predication streamCpuPointer = taskStream->getSpace(0); totalBytesProgrammed = 0; WalkerPartition::programWparidPredication(streamCpuPointer, totalBytesProgrammed, false); @@ -1105,10 +1105,10 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenPredicationWhenItI auto writeAddress = helperSurface->getGpuAddress(); WalkerPartition::programRegisterWithValue(streamCpuPointer, WalkerPartition::addressOffsetCCSOffset, totalBytesProgrammed, addressShift); - //program WPARID mask to 8 partitions + // program WPARID mask to 8 partitions WalkerPartition::programWparidMask(streamCpuPointer, totalBytesProgrammed, 8u); streamCpuPointer = taskStream->getSpace(totalBytesProgrammed); - //program WPARID to value within 0-13 + // program WPARID to value within 0-13 for (uint32_t wparid = 0u; wparid < 13; wparid++) { totalBytesProgrammed = 0; streamCpuPointer = taskStream->getSpace(0); @@ -1116,7 +1116,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenPredicationWhenItI WalkerPartition::programWparidPredication(streamCpuPointer, totalBytesProgrammed, true); taskStream->getSpace(totalBytesProgrammed); - //emit pipe control + // emit pipe control void *pipeControlAddress = taskStream->getSpace(0); PipeControlArgs args; MemorySynchronizationCommands::addBarrierWithPostSyncOperation( @@ -1126,7 +1126,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, AubWalkerPartitionZeroTest, givenPredicationWhenItI auto pipeControl = retrieveSyncPipeControl(pipeControlAddress, device->getHardwareInfo()); ASSERT_NE(nullptr, pipeControl); pipeControl->setWorkloadPartitionIdOffsetEnable(true); - //turn off predication + // turn off predication streamCpuPointer = taskStream->getSpace(0); totalBytesProgrammed = 0; WalkerPartition::programWparidPredication(streamCpuPointer, totalBytesProgrammed, false); diff --git a/opencl/test/unit_test/aub_tests/gen11/batch_buffer/aub_batch_buffer_tests_gen11.h b/opencl/test/unit_test/aub_tests/gen11/batch_buffer/aub_batch_buffer_tests_gen11.h index 5b5421363c..728d326c99 100644 --- a/opencl/test/unit_test/aub_tests/gen11/batch_buffer/aub_batch_buffer_tests_gen11.h +++ b/opencl/test/unit_test/aub_tests/gen11/batch_buffer/aub_batch_buffer_tests_gen11.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -146,7 +146,7 @@ void setupAUBWithBatchBuffer(const NEO::Device *pDevice, aub_stream::EngineType // Poll until HW complete using AubMemDump::CmdServicesMemTraceRegisterPoll; aubFile.registerPoll( - AubMemDump::computeRegisterOffset(mmioBase, 0x2234), //EXECLIST_STATUS + AubMemDump::computeRegisterOffset(mmioBase, 0x2234), // EXECLIST_STATUS 0x00008000, 0x00008000, false, diff --git a/opencl/test/unit_test/aub_tests/gen9/batch_buffer/aub_batch_buffer_tests.h b/opencl/test/unit_test/aub_tests/gen9/batch_buffer/aub_batch_buffer_tests.h index a71d605f43..1b1a56b25f 100644 --- a/opencl/test/unit_test/aub_tests/gen9/batch_buffer/aub_batch_buffer_tests.h +++ b/opencl/test/unit_test/aub_tests/gen9/batch_buffer/aub_batch_buffer_tests.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -148,7 +148,7 @@ void setupAUBWithBatchBuffer(const NEO::Device *pDevice, aub_stream::EngineType // Poll until HW complete using AubMemDump::CmdServicesMemTraceRegisterPoll; aubFile.registerPoll( - AubMemDump::computeRegisterOffset(mmioBase, 0x2234), //EXECLIST_STATUS + 
AubMemDump::computeRegisterOffset(mmioBase, 0x2234), // EXECLIST_STATUS 0x100, 0x100, false, diff --git a/opencl/test/unit_test/aub_tests/gen9/skl/command_queue/run_kernel_aub_tests_skl.cpp b/opencl/test/unit_test/aub_tests/gen9/skl/command_queue/run_kernel_aub_tests_skl.cpp index 70085c581a..123e967f3d 100644 --- a/opencl/test/unit_test/aub_tests/gen9/skl/command_queue/run_kernel_aub_tests_skl.cpp +++ b/opencl/test/unit_test/aub_tests/gen9/skl/command_queue/run_kernel_aub_tests_skl.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -122,7 +122,7 @@ SKLTEST_F(AUBRunKernelIntegrateTest, GivenOoqExecutionThenExpectationsMet) { retVal); ASSERT_NE(nullptr, destinationBuffer1); - //buffer may not be zero copied + // buffer may not be zero copied pDestinationMemory1 = reinterpret_cast(destinationBuffer1->getGraphicsAllocation(pClDevice->getRootDeviceIndex())->getGpuAddress()); auto destinationBuffer2 = Buffer::create( @@ -133,7 +133,7 @@ SKLTEST_F(AUBRunKernelIntegrateTest, GivenOoqExecutionThenExpectationsMet) { retVal); ASSERT_NE(nullptr, destinationBuffer2); - //buffer may not be zero copied + // buffer may not be zero copied pDestinationMemory2 = reinterpret_cast(destinationBuffer2->getGraphicsAllocation(pClDevice->getRootDeviceIndex())->getGpuAddress()); cl_mem arg2 = intermediateBuffer; @@ -382,7 +382,7 @@ SKLTEST_F(AUBRunKernelIntegrateTest, GivenDeviceSideVmeThenExpectationsMet) { ASSERT_NE(nullptr, shapesBuffer); // kernel decl: - //void block_motion_estimate_intel_noacc( + // void block_motion_estimate_intel_noacc( // __read_only image2d_t srcImg, // IN // __read_only image2d_t refImg, // IN // __global short2* prediMVbuffer, // IN diff --git a/opencl/test/unit_test/command_queue/command_queue_fixture.cpp b/opencl/test/unit_test/command_queue/command_queue_fixture.cpp index f04b96f6ba..648ea10e88 100644 --- a/opencl/test/unit_test/command_queue/command_queue_fixture.cpp +++ b/opencl/test/unit_test/command_queue/command_queue_fixture.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -82,7 +82,7 @@ void CommandQueueHwFixture::setUp( } void CommandQueueHwFixture::tearDown() { - //resolve event dependencies + // resolve event dependencies if (pCmdQ) { auto blocked = pCmdQ->isQueueBlocked(); UNRECOVERABLE_IF(blocked); diff --git a/opencl/test/unit_test/command_queue/command_queue_hw_2_tests.cpp b/opencl/test/unit_test/command_queue/command_queue_hw_2_tests.cpp index 61820e0fdd..fcbb69e408 100644 --- a/opencl/test/unit_test/command_queue/command_queue_hw_2_tests.cpp +++ b/opencl/test/unit_test/command_queue/command_queue_hw_2_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -276,7 +276,7 @@ HWTEST_F(OOQueueHwTest, givenBlockedOutOfOrderCmdQueueAndAsynchronouslyCompleted cmdQHw->taskLevel = 23; cmdQHw->enqueueKernel(mockKernel, 1, &offset, &size, &size, 1, &blockedEvent, nullptr); - //new virtual event is created on enqueue, bind it to the created virtual event + // new virtual event is created on enqueue, bind it to the created virtual event EXPECT_NE(cmdQHw->virtualEvent, &virtualEvent); event.setStatus(CL_SUBMITTED); @@ -285,7 +285,7 @@ HWTEST_F(OOQueueHwTest, givenBlockedOutOfOrderCmdQueueAndAsynchronouslyCompleted EXPECT_FALSE(cmdQHw->isQueueBlocked()); //+1 due to dependency 
between virtual event & new virtual event - //new virtual event is actually responsible for command delivery + // new virtual event is actually responsible for command delivery EXPECT_EQ(virtualEventTaskLevel + 1, cmdQHw->taskLevel); EXPECT_EQ(virtualEventTaskLevel + 1, mockCSR->lastTaskLevelToFlushTask); } diff --git a/opencl/test/unit_test/command_queue/enqueue_barrier_tests.cpp b/opencl/test/unit_test/command_queue/enqueue_barrier_tests.cpp index d5df8cabbb..12ea38dd55 100644 --- a/opencl/test/unit_test/command_queue/enqueue_barrier_tests.cpp +++ b/opencl/test/unit_test/command_queue/enqueue_barrier_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -53,7 +53,7 @@ HWTEST_F(BarrierTest, givenCsrWithHigherLevelThenCommandQueueWhenEnqueueBarrierI EXPECT_EQ(2u, commandStreamReceiver.peekTaskLevel()); EXPECT_EQ(3u, pCmdQ->taskLevel); - //make sure nothing was added to CommandStream or CSR-CommandStream and command queue still uses this stream + // make sure nothing was added to CommandStream or CSR-CommandStream and command queue still uses this stream EXPECT_EQ(used, commandStream.getUsed()); EXPECT_EQ(&commandStream, &pCmdQ->getCS(0)); @@ -294,7 +294,7 @@ HWTEST_F(BarrierTest, givenEmptyCommandStreamAndBlockedBarrierCommandWhenUserEve size_t barrierCmdStreamSize = NEO::EnqueueOperation::getSizeRequiredCS(CL_COMMAND_BARRIER, false, false, *pCmdQ, nullptr, {}); commandStream.getSpace(commandStream.getMaxAvailableSpace() - barrierCmdStreamSize); - //now trigger event + // now trigger event event2.setStatus(CL_COMPLETE); auto commandStreamStart2 = commandStream.getUsed(); diff --git a/opencl/test/unit_test/command_queue/enqueue_copy_image_tests.cpp b/opencl/test/unit_test/command_queue/enqueue_copy_image_tests.cpp index 5ebf2252d3..3a5600f27a 100644 --- a/opencl/test/unit_test/command_queue/enqueue_copy_image_tests.cpp +++ b/opencl/test/unit_test/command_queue/enqueue_copy_image_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -57,7 +57,7 @@ HWCMDTEST_F(IGFX_GEN8_CORE, EnqueueCopyImageTest, WhenCopyingImageThenGpgpuWalke } HWTEST_F(EnqueueCopyImageTest, WhenCopyingImageThenTaskCountIsAlignedWithCsr) { - //this test case assumes IOQ + // this test case assumes IOQ auto &csr = pDevice->getUltCommandStreamReceiver(); csr.taskCount = pCmdQ->taskCount + 100; csr.taskLevel = pCmdQ->taskLevel + 50; diff --git a/opencl/test/unit_test/command_queue/enqueue_kernel_event_tests.cpp b/opencl/test/unit_test/command_queue/enqueue_kernel_event_tests.cpp index a849716586..ea4968c4f0 100644 --- a/opencl/test/unit_test/command_queue/enqueue_kernel_event_tests.cpp +++ b/opencl/test/unit_test/command_queue/enqueue_kernel_event_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -82,7 +82,7 @@ TEST_F(EventTests, WhenWaitingForEventThenPipeControlIsNotInserted) { retVal = Event::waitForEvents(1, &event); EXPECT_EQ(CL_SUCCESS, retVal); - //we expect event is completed + // we expect event is completed TaskCountType taskCountOfEvent = pEvent->peekTaskCount(); EXPECT_LE(taskCountOfEvent, pCmdQ->getHwTag()); // no more tasks after WFE, no need to write PC diff --git a/opencl/test/unit_test/command_queue/enqueue_kernel_mt_tests.cpp b/opencl/test/unit_test/command_queue/enqueue_kernel_mt_tests.cpp index 
992beba26f..395af4596d 100644 --- a/opencl/test/unit_test/command_queue/enqueue_kernel_mt_tests.cpp +++ b/opencl/test/unit_test/command_queue/enqueue_kernel_mt_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -34,7 +34,7 @@ HWTEST_F(EnqueueKernelTest, givenCsrInBatchingModeWhenFinishIsCalledThenBatchesS auto threadCount = 4; auto function = [&]() { - //wait until we are signalled + // wait until we are signalled while (!startEnqueueProcess) ; for (int enqueue = 0; enqueue < enqueueCount; enqueue++) { @@ -51,7 +51,7 @@ HWTEST_F(EnqueueKernelTest, givenCsrInBatchingModeWhenFinishIsCalledThenBatchesS startEnqueueProcess = true; - //call a flush while other threads enqueue, we can't drop anything + // call a flush while other threads enqueue, we can't drop anything while (currentTaskCount < enqueueCount * threadCount) { clFlush(pCmdQ); auto locker = mockCsr->obtainUniqueOwnership(); diff --git a/opencl/test/unit_test/command_queue/enqueue_map_buffer_tests.cpp b/opencl/test/unit_test/command_queue/enqueue_map_buffer_tests.cpp index 7c53dce283..06813906ef 100644 --- a/opencl/test/unit_test/command_queue/enqueue_map_buffer_tests.cpp +++ b/opencl/test/unit_test/command_queue/enqueue_map_buffer_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -107,12 +107,12 @@ TEST_F(EnqueueMapBufferTest, GivenCmdqAndValidArgsWhenMappingBufferThenSuccessIs } TEST_F(EnqueueMapBufferTest, GivenChangesInHostBufferWhenMappingBufferThenChangesArePropagatedToDeviceMemory) { - //size not aligned to cacheline size + // size not aligned to cacheline size int bufferSize = 20; void *ptrHost = malloc(bufferSize); char *charHostPtr = static_cast(ptrHost); - //first fill with data + // first fill with data for (int i = 0; i < bufferSize; i++) { charHostPtr[i] = 1; } @@ -141,10 +141,10 @@ TEST_F(EnqueueMapBufferTest, GivenChangesInHostBufferWhenMappingBufferThenChange EXPECT_EQ(CL_SUCCESS, retVal); EXPECT_EQ(ptrResult, charHostPtr) << "Map Buffer should return host_pointer used during creation with CL_MEM_USE_HOST_PTR"; - //check data + // check data for (int i = 0; i < bufferSize; i++) { EXPECT_EQ(charHostPtr[i], 1); - //change the data + // change the data charHostPtr[i] = 2; } @@ -157,7 +157,7 @@ TEST_F(EnqueueMapBufferTest, GivenChangesInHostBufferWhenMappingBufferThenChange nullptr); EXPECT_EQ(CL_SUCCESS, retVal); - //now map again and see if data propagated + // now map again and see if data propagated clEnqueueMapBuffer( pCmdQ, buffer, @@ -170,7 +170,7 @@ TEST_F(EnqueueMapBufferTest, GivenChangesInHostBufferWhenMappingBufferThenChange nullptr, &retVal); - //check data + // check data for (int i = 0; i < bufferSize; i++) { EXPECT_EQ(charHostPtr[i], 2); } @@ -181,13 +181,13 @@ TEST_F(EnqueueMapBufferTest, GivenChangesInHostBufferWhenMappingBufferThenChange } TEST_F(EnqueueMapBufferTest, GivenChangesInHostBufferWithOffsetWhenMappingBufferThenChangesArePropagatedToDeviceMemory) { - //size not aligned to cacheline size + // size not aligned to cacheline size int bufferSize = 20; void *ptrHost = malloc(bufferSize); char *charHostPtr = static_cast(ptrHost); size_t offset = 4; - //first fill with data + // first fill with data for (int i = 0; i < bufferSize; i++) { charHostPtr[i] = 1; } @@ -216,7 +216,7 @@ TEST_F(EnqueueMapBufferTest, GivenChangesInHostBufferWithOffsetWhenMappingBuffer EXPECT_EQ(CL_SUCCESS, 
retVal); EXPECT_EQ(ptrResult, charHostPtr + offset) << "Map Buffer should return host_pointer used during creation with CL_MEM_USE_HOST_PTR"; - //check data + // check data for (int i = (int)offset; i < (int)(bufferSize - (int)offset); i++) { EXPECT_EQ(charHostPtr[i], 1); } @@ -314,16 +314,16 @@ HWTEST_F(EnqueueMapBufferTest, givenNonBlockingReadOnlyMapBufferOnZeroCopyBuffer EXPECT_NE(nullptr, ptrResult); EXPECT_EQ(CL_SUCCESS, retVal); - //no dc flush required at this point + // no dc flush required at this point EXPECT_EQ(1u, commandStreamReceiver.peekTaskCount()); taskCount = commandStreamReceiver.peekTaskCount(); EXPECT_EQ(1u, taskCount); auto neoEvent = castToObject(mapEventReturned); - //if task count of csr is higher then event task count with proper dc flushing then we are fine + // if task count of csr is higher then event task count with proper dc flushing then we are fine EXPECT_EQ(1u, neoEvent->getCompletionStamp()); - //this can't be completed as task count is not reached yet + // this can't be completed as task count is not reached yet EXPECT_FALSE(neoEvent->updateStatusAndCheckCompletion()); EXPECT_TRUE(CL_COMMAND_MAP_BUFFER == neoEvent->getCommandType()); @@ -333,11 +333,11 @@ HWTEST_F(EnqueueMapBufferTest, givenNonBlockingReadOnlyMapBufferOnZeroCopyBuffer clSetEventCallback(mapEventReturned, CL_COMPLETE, E2Clb::signalEv2, (void *)&callbackCalled); - //wait for events needs to flush DC as event requires this. + // wait for events needs to flush DC as event requires this. retVal = clWaitForEvents(1, &mapEventReturned); EXPECT_EQ(CL_SUCCESS, retVal); - //wait for event do not sent flushTask + // wait for event do not sent flushTask EXPECT_EQ(1u, commandStreamReceiver.peekTaskCount()); EXPECT_EQ(1u, mockCmdQueue.latestTaskCountWaited); @@ -487,7 +487,7 @@ TEST_F(EnqueueMapBufferTest, givenNonBlockingMapBufferAfterL3IsAlreadyFlushedThe auto ndRcompletionStamp = commandStreamReceiver.peekTaskCount(); - //simulate that NDR is done and DC was flushed + // simulate that NDR is done and DC was flushed auto forcedLatestSentDC = ndRcompletionStamp + 1; *pTagMemory = forcedLatestSentDC; @@ -509,14 +509,14 @@ TEST_F(EnqueueMapBufferTest, givenNonBlockingMapBufferAfterL3IsAlreadyFlushedThe EXPECT_EQ(1u, taskCount); auto neoEvent = castToObject(eventReturned); - //if task count of csr is higher then event task count with proper dc flushing then we are fine + // if task count of csr is higher then event task count with proper dc flushing then we are fine EXPECT_EQ(1u, neoEvent->getCompletionStamp()); EXPECT_TRUE(neoEvent->updateStatusAndCheckCompletion()); - //flush task was not called + // flush task was not called EXPECT_EQ(1u, commandStreamReceiver.peekLatestSentTaskCount()); - //wait for events shouldn't call flush task + // wait for events shouldn't call flush task retVal = clWaitForEvents(1, &eventReturned); EXPECT_EQ(CL_SUCCESS, retVal); diff --git a/opencl/test/unit_test/command_queue/enqueue_read_buffer_rect_fixture.h b/opencl/test/unit_test/command_queue/enqueue_read_buffer_rect_fixture.h index cfc29052d1..2e92cff195 100644 --- a/opencl/test/unit_test/command_queue/enqueue_read_buffer_rect_fixture.h +++ b/opencl/test/unit_test/command_queue/enqueue_read_buffer_rect_fixture.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -22,7 +22,7 @@ struct EnqueueReadBufferRectTest : public CommandEnqueueFixture, context.reset(new MockContext(pCmdQ->getDevice().getSpecializedDevice())); 
BufferDefaults::context = context.get(); - //For 3D + // For 3D hostPtr = ::alignedMalloc(slicePitch * rowPitch, 4096); auto retVal = CL_INVALID_VALUE; @@ -57,7 +57,7 @@ struct EnqueueReadBufferRectTest : public CommandEnqueueFixture, size_t region[] = {50, 50, 1}; auto retVal = pCmdQ->enqueueReadBufferRect( buffer.get(), - blocking, //non-blocking + blocking, // non-blocking bufferOrigin, hostOrigin, region, diff --git a/opencl/test/unit_test/command_queue/enqueue_svm_tests.cpp b/opencl/test/unit_test/command_queue/enqueue_svm_tests.cpp index 7d3a38900e..453af12354 100644 --- a/opencl/test/unit_test/command_queue/enqueue_svm_tests.cpp +++ b/opencl/test/unit_test/command_queue/enqueue_svm_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -1679,7 +1679,7 @@ HWTEST_F(EnqueueSvmTest, whenInternalAllocationsAreMadeResidentThenOnlyNonSvmAll svmManager->makeInternalAllocationsResident(commandStreamReceiver, InternalMemoryType::DEVICE_UNIFIED_MEMORY); - //only unified memory allocation is made resident + // only unified memory allocation is made resident EXPECT_EQ(1u, residentAllocations.size()); EXPECT_EQ(residentAllocations[0]->getGpuAddress(), castToUint64(unifiedMemoryPtr)); @@ -1703,7 +1703,7 @@ HWTEST_F(EnqueueSvmTest, whenInternalAllocationsAreAddedToResidencyContainerThen residencyContainer, InternalMemoryType::DEVICE_UNIFIED_MEMORY); - //only unified memory allocation is added to residency container + // only unified memory allocation is added to residency container EXPECT_EQ(1u, residencyContainer.size()); EXPECT_EQ(residencyContainer[0]->getGpuAddress(), castToUint64(unifiedMemoryPtr)); @@ -1727,7 +1727,7 @@ HWTEST_F(EnqueueSvmTest, whenInternalAllocationIsTriedToBeAddedTwiceToResidencyC residencyContainer, InternalMemoryType::DEVICE_UNIFIED_MEMORY); - //only unified memory allocation is added to residency container + // only unified memory allocation is added to residency container EXPECT_EQ(1u, residencyContainer.size()); EXPECT_EQ(residencyContainer[0]->getGpuAddress(), castToUint64(unifiedMemoryPtr)); diff --git a/opencl/test/unit_test/command_queue/enqueue_thread_tests.cpp b/opencl/test/unit_test/command_queue/enqueue_thread_tests.cpp index 7c167ffde7..62cad8120b 100644 --- a/opencl/test/unit_test/command_queue/enqueue_thread_tests.cpp +++ b/opencl/test/unit_test/command_queue/enqueue_thread_tests.cpp @@ -53,13 +53,13 @@ class CommandStreamReceiverMock : public UltCommandStreamReceiver { ~CommandStreamReceiverMock() override { EXPECT_FALSE(pClDevice->hasOwnership()); if (expectedToFreeCount == (size_t)-1) { - EXPECT_GT(toFree.size(), 0u); //make sure flush was called + EXPECT_GT(toFree.size(), 0u); // make sure flush was called } else { EXPECT_EQ(toFree.size(), expectedToFreeCount); } auto memoryManager = this->getMemoryManager(); - //Now free memory. if CQ/CSR did the same, we will hit double-free + // Now free memory. 
if CQ/CSR did the same, we will hit double-free for (auto p : toFree) memoryManager->freeGraphicsMemory(p); } diff --git a/opencl/test/unit_test/command_queue/enqueue_write_buffer_rect_fixture.h b/opencl/test/unit_test/command_queue/enqueue_write_buffer_rect_fixture.h index 1bb5ea02f8..6efa19c580 100644 --- a/opencl/test/unit_test/command_queue/enqueue_write_buffer_rect_fixture.h +++ b/opencl/test/unit_test/command_queue/enqueue_write_buffer_rect_fixture.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -23,7 +23,7 @@ struct EnqueueWriteBufferRectTest : public CommandEnqueueFixture, context.reset(new MockContext(pClDevice)); BufferDefaults::context = context.get(); - //For 3D + // For 3D hostPtr = ::alignedMalloc(slicePitch * rowPitch, 4096); auto retVal = CL_INVALID_VALUE; diff --git a/opencl/test/unit_test/command_queue/ooq_task_tests.cpp b/opencl/test/unit_test/command_queue/ooq_task_tests.cpp index 72b2214308..52ab88dc8b 100644 --- a/opencl/test/unit_test/command_queue/ooq_task_tests.cpp +++ b/opencl/test/unit_test/command_queue/ooq_task_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -59,7 +59,7 @@ TYPED_TEST_P(OOQTaskTypedTests, givenNonBlockingCallWhenDoneOnOutOfOrderQueueThe auto blockingCall = isBlockingCall(TypeParam::Traits::cmdType); auto taskLevelClosed = blockingCall ? 1u : 0u; // for blocking commands task level will be closed - //for non blocking calls make sure that resources are added to defer free list instaed of being destructed in place + // for non blocking calls make sure that resources are added to defer free list instaed of being destructed in place if (!blockingCall) { *tagAddress = 0; } @@ -85,7 +85,7 @@ TYPED_TEST_P(OOQTaskTypedTests, givenTaskWhenEnqueuedOnOutOfOrderQueueThenTaskCo auto tagAddress = commandStreamReceiver.getTagAddress(); auto blockingCall = isBlockingCall(TypeParam::Traits::cmdType); - //for non blocking calls make sure that resources are added to defer free list instaed of being destructed in place + // for non blocking calls make sure that resources are added to defer free list instaed of being destructed in place if (!blockingCall) { *tagAddress = 0; } diff --git a/opencl/test/unit_test/command_queue/read_write_buffer_cpu_copy.cpp b/opencl/test/unit_test/command_queue/read_write_buffer_cpu_copy.cpp index e5ed8d4ec4..55b08f572c 100644 --- a/opencl/test/unit_test/command_queue/read_write_buffer_cpu_copy.cpp +++ b/opencl/test/unit_test/command_queue/read_write_buffer_cpu_copy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -270,7 +270,7 @@ TEST(ReadWriteBufferOnCpu, givenNoHostPtrAndAlignedSizeWhenMemoryAllocationIsInN EXPECT_TRUE(buffer->isReadWriteOnCpuAllowed(device->getDevice())); EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(reinterpret_cast(0x1000), MemoryConstants::pageSize, device->getDevice())); reinterpret_cast(buffer->getGraphicsAllocation(device->getRootDeviceIndex()))->overrideMemoryPool(MemoryPool::SystemCpuInaccessible); - //read write on CPU is allowed, but not preferred. We can access this memory via Lock. + // read write on CPU is allowed, but not preferred. We can access this memory via Lock. 
EXPECT_TRUE(buffer->isReadWriteOnCpuAllowed(device->getDevice())); EXPECT_FALSE(buffer->isReadWriteOnCpuPreferred(reinterpret_cast(0x1000), MemoryConstants::pageSize, device->getDevice())); } diff --git a/opencl/test/unit_test/command_stream/command_stream_receiver_flush_task_2_tests.cpp b/opencl/test/unit_test/command_stream/command_stream_receiver_flush_task_2_tests.cpp index 1297de6e10..a2848be9be 100644 --- a/opencl/test/unit_test/command_stream/command_stream_receiver_flush_task_2_tests.cpp +++ b/opencl/test/unit_test/command_stream/command_stream_receiver_flush_task_2_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -175,7 +175,7 @@ HWTEST_F(CommandStreamReceiverFlushTaskTests, GivenEmptyQueueWhenFinishingThenTa mockCmdQueue.finish(); EXPECT_EQ(0u, commandStreamReceiver.peekLatestSentTaskCount()); mockCmdQueue.finish(); - //nothings sent to the HW, no need to bump tags + // nothings sent to the HW, no need to bump tags EXPECT_EQ(0u, commandStreamReceiver.peekLatestSentTaskCount()); EXPECT_EQ(0u, mockCmdQueue.latestTaskCountWaited); } @@ -270,7 +270,7 @@ HWTEST_F(CommandStreamReceiverFlushTaskTests, GivenDcFlushWhenFinishingThenTaskC EXPECT_EQ(retVal, CL_SUCCESS); EXPECT_EQ(1u, commandStreamReceiver.peekLatestSentTaskCount()); - //cleanup + // cleanup retVal = mockCmdQueue.enqueueUnmapMemObject(buffer, ptr, 0, nullptr, nullptr); EXPECT_EQ(retVal, CL_SUCCESS); @@ -291,7 +291,7 @@ HWCMDTEST_F(IGFX_GEN8_CORE, CommandStreamReceiverFlushTaskTests, givenPowerOfTwo ASSERT_NE(itorCmd, cmdList.end()); auto cmdGpGpuWalker = genCmdCast(*itorCmd); - //execution masks should be all active + // execution masks should be all active EXPECT_EQ(0xffffffffu, cmdGpGpuWalker->getBottomExecutionMask()); EXPECT_EQ(0xffffffffu, cmdGpGpuWalker->getRightExecutionMask()); } @@ -778,7 +778,7 @@ HWCMDTEST_F(IGFX_GEN8_CORE, CommandStreamReceiverFlushTaskTests, givenTwoConsecu } } - //now re-try to see if SBA is not programmed + // now re-try to see if SBA is not programmed scratchSize *= 2; kernel.kernelInfo.setPerThreadScratchSize(scratchSize, 0); @@ -1229,7 +1229,7 @@ HWCMDTEST_F(IGFX_GEN8_CORE, CommandStreamReceiverFlushTaskTests, givenCsrInNonDi EXPECT_TRUE(mockedSubmissionsAggregator->peekCmdBufferList().peekIsEmpty()); - //surfaces are non resident + // surfaces are non resident auto &surfacesForResidency = mockCsr->getResidencyAllocations(); EXPECT_EQ(0u, surfacesForResidency.size()); } @@ -1290,16 +1290,16 @@ HWTEST_F(CommandStreamReceiverFlushTaskTests, givenPageTableManagerPointerWhenCa EXPECT_FALSE(bcsCsr->pageTableManagerInitialized); EXPECT_FALSE(bcsCsr2->pageTableManagerInitialized); - auto blitProperties = BlitProperties::constructPropertiesForCopy(graphicsAllocation, //dstAllocation - graphicsAllocation, //srcAllocation - 0, //dstOffset - 0, //srcOffset - 0, //copySize - 0, //srcRowPitch - 0, //srcSlicePitch - 0, //dstRowPitch - 0, //dstSlicePitch - bcsCsr->getClearColorAllocation() //clearColorAllocation + auto blitProperties = BlitProperties::constructPropertiesForCopy(graphicsAllocation, // dstAllocation + graphicsAllocation, // srcAllocation + 0, // dstOffset + 0, // srcOffset + 0, // copySize + 0, // srcRowPitch + 0, // srcSlicePitch + 0, // dstRowPitch + 0, // dstSlicePitch + bcsCsr->getClearColorAllocation() // clearColorAllocation ); BlitPropertiesContainer container; container.push_back(blitProperties); @@ -1335,16 +1335,16 @@ HWTEST_F(CommandStreamReceiverFlushTaskTests, 
givenPageTableManagerPointerWhenCa EXPECT_FALSE(bcsCsr->pageTableManagerInitialized); - auto blitProperties = BlitProperties::constructPropertiesForCopy(graphicsAllocation, //dstAllocation - graphicsAllocation, //srcAllocation - 0, //dstOffset - 0, //srcOffset - 0, //copySize - 0, //srcRowPitch - 0, //srcSlicePitch - 0, //dstRowPitch - 0, //dstSlicePitch - bcsCsr->getClearColorAllocation() //clearColorAllocation + auto blitProperties = BlitProperties::constructPropertiesForCopy(graphicsAllocation, // dstAllocation + graphicsAllocation, // srcAllocation + 0, // dstOffset + 0, // srcOffset + 0, // copySize + 0, // srcRowPitch + 0, // srcSlicePitch + 0, // dstRowPitch + 0, // dstSlicePitch + bcsCsr->getClearColorAllocation() // clearColorAllocation ); BlitPropertiesContainer container; container.push_back(blitProperties); @@ -1376,16 +1376,16 @@ HWTEST_F(CommandStreamReceiverFlushTaskTests, givenNullPageTableManagerWhenCallB EXPECT_FALSE(bcsCsr->pageTableManagerInitialized); EXPECT_FALSE(bcsCsr2->pageTableManagerInitialized); - auto blitProperties = BlitProperties::constructPropertiesForCopy(graphicsAllocation, //dstAllocation - graphicsAllocation, //srcAllocation - 0, //dstOffset - 0, //srcOffset - 0, //copySize - 0, //srcRowPitch - 0, //srcSlicePitch - 0, //dstRowPitch - 0, //dstSlicePitch - bcsCsr->getClearColorAllocation() //clearColorAllocation + auto blitProperties = BlitProperties::constructPropertiesForCopy(graphicsAllocation, // dstAllocation + graphicsAllocation, // srcAllocation + 0, // dstOffset + 0, // srcOffset + 0, // copySize + 0, // srcRowPitch + 0, // srcSlicePitch + 0, // dstRowPitch + 0, // dstSlicePitch + bcsCsr->getClearColorAllocation() // clearColorAllocation ); BlitPropertiesContainer container; container.push_back(blitProperties); diff --git a/opencl/test/unit_test/command_stream/command_stream_receiver_flush_task_tests_xehp_and_later.cpp b/opencl/test/unit_test/command_stream/command_stream_receiver_flush_task_tests_xehp_and_later.cpp index d1e8b50ee8..c0f064b69e 100644 --- a/opencl/test/unit_test/command_stream/command_stream_receiver_flush_task_tests_xehp_and_later.cpp +++ b/opencl/test/unit_test/command_stream/command_stream_receiver_flush_task_tests_xehp_and_later.cpp @@ -360,7 +360,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, CommandStreamReceiverFlushTaskXeHPAndLaterTests, gi mockCsr.getCS(1024u); auto &csrCommandStream = mockCsr.commandStream; - //we do level change that will emit PPC, fill all the space so only BB end fits. + // we do level change that will emit PPC, fill all the space so only BB end fits. taskLevel++; auto ppcSize = MemorySynchronizationCommands::getSizeForSingleBarrier(false); auto fillSize = MemoryConstants::cacheLineSize - ppcSize - sizeof(typename FamilyType::MI_BATCH_BUFFER_END); @@ -659,7 +659,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, CommandStreamReceiverFlushTaskXeHPAndLaterTests, gi EXPECT_TRUE(mockedSubmissionsAggregator->peekCmdBufferList().peekIsEmpty()); - //surfaces are non resident + // surfaces are non resident auto &surfacesForResidency = mockCsr->getResidencyAllocations(); EXPECT_EQ(0u, surfacesForResidency.size()); } @@ -705,7 +705,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, CommandStreamReceiverFlushTaskXeHPAndLaterTests, gi EXPECT_FALSE(cmdBufferList.peekIsEmpty()); auto cmdBuffer = cmdBufferList.peekHead(); - //preemption allocation + sip kernel + // preemption allocation + sip kernel size_t csrSurfaceCount = (pDevice->getPreemptionMode() == PreemptionMode::MidThread) ? 
2 : 0; csrSurfaceCount -= pDevice->getHardwareInfo().capabilityTable.supportsImages ? 0 : 1; csrSurfaceCount += mockCsr->globalFenceAllocation ? 1 : 0; @@ -714,7 +714,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, CommandStreamReceiverFlushTaskXeHPAndLaterTests, gi EXPECT_EQ(4u + csrSurfaceCount, cmdBuffer->surfaces.size()); - //copy those surfaces + // copy those surfaces std::vector residentSurfaces = cmdBuffer->surfaces; for (auto &graphicsAllocation : residentSurfaces) { diff --git a/opencl/test/unit_test/command_stream/command_stream_receiver_hw_1_tests.cpp b/opencl/test/unit_test/command_stream/command_stream_receiver_hw_1_tests.cpp index f8945bb9c2..12caf89da5 100644 --- a/opencl/test/unit_test/command_stream/command_stream_receiver_hw_1_tests.cpp +++ b/opencl/test/unit_test/command_stream/command_stream_receiver_hw_1_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -1216,19 +1216,19 @@ HWTEST_P(BcsDetaliedTestsWithParams, givenBltSizeWithLeftoverWhenDispatchedThenP auto allocation = buffer->getGraphicsAllocation(pDevice->getRootDeviceIndex()); auto memoryManager = static_cast(pDevice->getMemoryManager()); memoryManager->returnFakeAllocation = true; - auto blitProperties = BlitProperties::constructPropertiesForReadWrite(std::get<1>(GetParam()), //blitDirection - csr, allocation, //commandStreamReceiver - nullptr, //memObjAllocation - hostPtr, //preallocatedHostAllocation - allocation->getGpuAddress(), //memObjGpuVa - 0, //hostAllocGpuVa - hostPtrOffset, //hostPtrOffset - copyOffset, //copyOffset - bltSize, //copySize - dstRowPitch, //hostRowPitch - dstSlicePitch, //hostSlicePitch - srcRowPitch, //gpuRowPitch - srcSlicePitch //gpuSlicePitch + auto blitProperties = BlitProperties::constructPropertiesForReadWrite(std::get<1>(GetParam()), // blitDirection + csr, allocation, // commandStreamReceiver + nullptr, // memObjAllocation + hostPtr, // preallocatedHostAllocation + allocation->getGpuAddress(), // memObjGpuVa + 0, // hostAllocGpuVa + hostPtrOffset, // hostPtrOffset + copyOffset, // copyOffset + bltSize, // copySize + dstRowPitch, // hostRowPitch + dstSlicePitch, // hostSlicePitch + srcRowPitch, // gpuRowPitch + srcSlicePitch // gpuSlicePitch ); memoryManager->returnFakeAllocation = false; @@ -1321,19 +1321,19 @@ HWTEST_P(BcsDetaliedTestsWithParams, givenBltSizeWithLeftoverWhenDispatchedThenP auto memoryManager = static_cast(pDevice->getMemoryManager()); memoryManager->returnFakeAllocation = true; - auto blitProperties = BlitProperties::constructPropertiesForReadWrite(std::get<1>(GetParam()), //blitDirection - csr, allocation, //commandStreamReceiver - nullptr, //memObjAllocation - hostPtr, //preallocatedHostAllocation - allocation->getGpuAddress(), //memObjGpuVa - 0, //hostAllocGpuVa - hostPtrOffset, //hostPtrOffset - copyOffset, //copyOffset - bltSize, //copySize - dstRowPitch, //hostRowPitch - dstSlicePitch, //hostSlicePitch - srcRowPitch, //gpuRowPitch - srcSlicePitch //gpuSlicePitch + auto blitProperties = BlitProperties::constructPropertiesForReadWrite(std::get<1>(GetParam()), // blitDirection + csr, allocation, // commandStreamReceiver + nullptr, // memObjAllocation + hostPtr, // preallocatedHostAllocation + allocation->getGpuAddress(), // memObjGpuVa + 0, // hostAllocGpuVa + hostPtrOffset, // hostPtrOffset + copyOffset, // copyOffset + bltSize, // copySize + dstRowPitch, // hostRowPitch + dstSlicePitch, // hostSlicePitch + srcRowPitch, // gpuRowPitch + srcSlicePitch // 
gpuSlicePitch ); memoryManager->returnFakeAllocation = false; @@ -1417,16 +1417,16 @@ HWTEST_P(BcsDetaliedTestsWithParams, givenBltSizeWithLeftoverWhenDispatchedThenP size_t buffer2SlicePitch = std::get<0>(GetParam()).srcSlicePitch; auto allocation = buffer1->getGraphicsAllocation(pDevice->getRootDeviceIndex()); - auto blitProperties = BlitProperties::constructPropertiesForCopy(allocation, //dstAllocation - allocation, //srcAllocation - buffer1Offset, //dstOffset - buffer2Offset, //srcOffset - bltSize, //copySize - buffer1RowPitch, //srcRowPitch - buffer1SlicePitch, //srcSlicePitch - buffer2RowPitch, //dstRowPitch - buffer2SlicePitch, //dstSlicePitch - csr.getClearColorAllocation() //clearColorAllocation + auto blitProperties = BlitProperties::constructPropertiesForCopy(allocation, // dstAllocation + allocation, // srcAllocation + buffer1Offset, // dstOffset + buffer2Offset, // srcOffset + bltSize, // copySize + buffer1RowPitch, // srcRowPitch + buffer1SlicePitch, // srcSlicePitch + buffer2RowPitch, // dstRowPitch + buffer2SlicePitch, // dstSlicePitch + csr.getClearColorAllocation() // clearColorAllocation ); flushBcsTask(&csr, blitProperties, true, *pDevice); diff --git a/opencl/test/unit_test/command_stream/command_stream_receiver_hw_3_tests.cpp b/opencl/test/unit_test/command_stream/command_stream_receiver_hw_3_tests.cpp index c25a5efe77..0d1f32eb68 100644 --- a/opencl/test/unit_test/command_stream/command_stream_receiver_hw_3_tests.cpp +++ b/opencl/test/unit_test/command_stream/command_stream_receiver_hw_3_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -25,7 +25,7 @@ TEST_F(ScratchSpaceControllerTest, whenScratchSpaceControllerIsDestroyedThenItRe MockScratchSpaceController scratchSpaceController(pDevice->getRootDeviceIndex(), *pDevice->getExecutionEnvironment(), *pDevice->getGpgpuCommandStreamReceiver().getInternalAllocationStorage()); scratchSpaceController.privateScratchAllocation = pDevice->getExecutionEnvironment()->memoryManager->allocateGraphicsMemoryInPreferredPool(MockAllocationProperties{pDevice->getRootDeviceIndex(), MemoryConstants::pageSize}, nullptr); EXPECT_NE(nullptr, scratchSpaceController.privateScratchAllocation); - //no memory leak is expected + // no memory leak is expected } HWTEST_F(BcsTests, given3dImageWhenBlitBufferIsCalledThenBlitCmdIsFoundZtimes) { diff --git a/opencl/test/unit_test/command_stream/experimental_command_buffer_tests.cpp b/opencl/test/unit_test/command_stream/experimental_command_buffer_tests.cpp index 566560cb37..39c50fd526 100644 --- a/opencl/test/unit_test/command_stream/experimental_command_buffer_tests.cpp +++ b/opencl/test/unit_test/command_stream/experimental_command_buffer_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -94,7 +94,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe GenCmdList::iterator end = hwParserExCmdBuffer.cmdList.end(); if (MemorySynchronizationCommands::isBarrierWaRequired(pDevice->getHardwareInfo())) { - //1st PIPE_CONTROL with CS Stall + // 1st PIPE_CONTROL with CS Stall ASSERT_NE(end, it); pipeControl = genCmdCast(*it); ASSERT_NE(nullptr, pipeControl); @@ -105,7 +105,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe } } - //2nd PIPE_CONTROL with ts addr + // 2nd PIPE_CONTROL with ts addr uint64_t timeStampAddress = 
mockExCmdBuffer->timestamps->getGpuAddress(); ASSERT_NE(end, it); pipeControl = genCmdCast(*it); @@ -118,7 +118,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe it++; } - //MI_SEMAPHORE_WAIT + // MI_SEMAPHORE_WAIT it++; ASSERT_NE(end, it); semaphoreCmd = genCmdCast(*it); @@ -128,7 +128,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe EXPECT_EQ(MI_SEMAPHORE_WAIT::COMPARE_OPERATION_SAD_EQUAL_SDD, semaphoreCmd->getCompareOperation()); if (MemorySynchronizationCommands::isBarrierWaRequired(pDevice->getHardwareInfo())) { - //3rd PIPE_CONTROL with CS stall + // 3rd PIPE_CONTROL with CS stall it++; ASSERT_NE(end, it); pipeControl = genCmdCast(*it); @@ -139,7 +139,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe } } - //4th PIPE_CONTROL with ts addr + // 4th PIPE_CONTROL with ts addr timeStampAddress = mockExCmdBuffer->timestamps->getGpuAddress() + sizeof(uint64_t); it++; ASSERT_NE(end, it); @@ -153,7 +153,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe it++; } - //BB_END + // BB_END it++; ASSERT_NE(end, it); bbEnd = genCmdCast(*it); @@ -205,7 +205,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe flushTask(commandStreamReceiver); - //two pairs of TS + // two pairs of TS constexpr uint32_t expectedTsOffset = 4 * sizeof(uint64_t); EXPECT_EQ(expectedTsOffset, mockExCmdBuffer->timestampsOffset); constexpr uint32_t expectedExOffset = 0; @@ -236,7 +236,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe } } - //2nd PIPE_CONTROL + // 2nd PIPE_CONTROL uint64_t timeStampAddress = mockExCmdBuffer->timestamps->getGpuAddress() + 2 * sizeof(uint64_t); ASSERT_NE(end, it); pipeControl = genCmdCast(*it); @@ -244,7 +244,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe EXPECT_EQ(1u, pipeControl->getCommandStreamerStallEnable()); EXPECT_EQ(PIPE_CONTROL::POST_SYNC_OPERATION_WRITE_TIMESTAMP, pipeControl->getPostSyncOperation()); EXPECT_EQ(timeStampAddress, NEO::UnitTestHelper::getPipeControlPostSyncAddress(*pipeControl)); - //omit SEMAPHORE_WAIT and 3rd PIPE_CONTROL + // omit SEMAPHORE_WAIT and 3rd PIPE_CONTROL if (MemorySynchronizationCommands::isBarrierWaRequired(pDevice->getHardwareInfo())) { it++; if (UnitTestHelper::isAdditionalSynchronizationRequired()) { @@ -253,7 +253,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe } it++; - //get 4th PIPE_CONTROL + // get 4th PIPE_CONTROL timeStampAddress = mockExCmdBuffer->timestamps->getGpuAddress() + 3 * sizeof(uint64_t); it++; if (UnitTestHelper::isAdditionalSynchronizationRequired()) { @@ -273,7 +273,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe commandStreamReceiver.storeMakeResidentAllocations = true; MemoryManager *memoryManager = commandStreamReceiver.getMemoryManager(); - //Make two allocations, since CSR will try to reuse it also + // Make two allocations, since CSR will try to reuse it also auto rootDeviceIndex = pDevice->getRootDeviceIndex(); auto allocation = memoryManager->allocateGraphicsMemoryWithProperties({rootDeviceIndex, 3 * MemoryConstants::pageSize64k, AllocationType::COMMAND_BUFFER, pDevice->getDeviceBitfield()}); storage->storeAllocation(std::unique_ptr(allocation), REUSABLE_ALLOCATION); @@ -306,7 +306,7 @@ HWTEST_F(MockExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhe ASSERT_NE(nullptr, 
mockExCmdBuffer->currentStream->getGraphicsAllocation()); uintptr_t oldCmdBufferAddress = reinterpret_cast(mockExCmdBuffer->currentStream->getGraphicsAllocation()); uint64_t oldExCmdBufferGpuAddr = mockExCmdBuffer->currentStream->getGraphicsAllocation()->getGpuAddress(); - //leave space for single DWORD + // leave space for single DWORD mockExCmdBuffer->currentStream->getSpace(mockExCmdBuffer->currentStream->getAvailableSpace() - sizeof(uint32_t)); HardwareParse hwParserCsr; @@ -352,7 +352,7 @@ HWTEST_F(ExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhenCom auto &commandStreamReceiver = pDevice->getUltCommandStreamReceiver(); flushTask(commandStreamReceiver); - //forced dtor to get printed timestamps + // forced dtor to get printed timestamps testing::internal::CaptureStdout(); commandStreamReceiver.setExperimentalCmdBuffer(std::move(std::unique_ptr(nullptr))); std::string output = testing::internal::GetCapturedStdout(); @@ -362,7 +362,7 @@ HWTEST_F(ExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhenCom HWTEST_F(ExperimentalCommandBufferTest, givenEnabledExperimentalCmdBufferWhenCommandStreamReceiverIsNotFlushedThenExpectNoPrintAfterDtor) { auto &commandStreamReceiver = pDevice->getUltCommandStreamReceiver(); - //forced dtor to try to get printed timestamps + // forced dtor to try to get printed timestamps testing::internal::CaptureStdout(); commandStreamReceiver.setExperimentalCmdBuffer(std::move(std::unique_ptr(nullptr))); std::string output = testing::internal::GetCapturedStdout(); diff --git a/opencl/test/unit_test/command_stream/submissions_aggregator_tests.cpp b/opencl/test/unit_test/command_stream/submissions_aggregator_tests.cpp index 87586fdd53..0c58542d80 100644 --- a/opencl/test/unit_test/command_stream/submissions_aggregator_tests.cpp +++ b/opencl/test/unit_test/command_stream/submissions_aggregator_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -42,7 +42,7 @@ TEST(SubmissionsAggregator, givenCommandBufferWhenItIsPassedToSubmissionsAggrega EXPECT_EQ(cmdBuffer, submissionsAggregator.peekCommandBuffersList().peekHead()); EXPECT_EQ(cmdBuffer, submissionsAggregator.peekCommandBuffersList().peekTail()); EXPECT_EQ(cmdBuffer->surfaces.size(), 0u); - //idlist holds the ownership + // idlist holds the ownership } TEST(SubmissionsAggregator, givenTwoCommandBuffersWhenMergeResourcesIsCalledThenDuplicatesAreEliminated) { @@ -96,7 +96,7 @@ TEST(SubmissionsAggregator, givenTwoCommandBuffersWhenMergeResourcesIsCalledThen submissionsAggregator.aggregateCommandBuffers(resourcePackage, totalUsedSize, totalMemoryBudget, 0u); - //command buffer 2 is aggregated to command buffer 1 + // command buffer 2 is aggregated to command buffer 1 auto primaryBatchInstepctionId = submissionsAggregator.peekCommandBuffersList().peekHead()->inspectionId; EXPECT_EQ(primaryBatchInstepctionId, submissionsAggregator.peekCommandBuffersList().peekHead()->next->inspectionId); EXPECT_EQ(submissionsAggregator.peekCommandBuffersList().peekHead(), cmdBuffer); @@ -157,7 +157,7 @@ TEST(SubmissionsAggregator, givenSubmissionAggregatorWhenThreeCommandBuffersAreS submissionsAggregator.aggregateCommandBuffers(resourcePackage, totalUsedSize, totalMemoryBudget, 0u); - //command buffer 3 and 2 is aggregated to command buffer 1 + // command buffer 3 and 2 is aggregated to command buffer 1 auto primaryBatchInstepctionId = 
submissionsAggregator.peekCommandBuffersList().peekHead()->inspectionId; EXPECT_EQ(primaryBatchInstepctionId, submissionsAggregator.peekCommandBuffersList().peekHead()->next->inspectionId); @@ -184,20 +184,20 @@ TEST(SubmissionsAggregator, givenMultipleCommandBuffersWhenTheyAreAggreagateWith MockGraphicsAllocation alloc6(nullptr, 6); MockGraphicsAllocation alloc7(nullptr, 7); - //14 bytes consumed + // 14 bytes consumed cmdBuffer->surfaces.push_back(&alloc5); cmdBuffer->surfaces.push_back(&alloc6); cmdBuffer->surfaces.push_back(&alloc5); cmdBuffer->surfaces.push_back(&alloc3); cmdBuffer->surfaces.push_back(&alloc6); - //12 bytes total , only 7 new + // 12 bytes total , only 7 new cmdBuffer2->surfaces.push_back(&alloc1); cmdBuffer2->surfaces.push_back(&alloc2); cmdBuffer2->surfaces.push_back(&alloc5); cmdBuffer2->surfaces.push_back(&alloc4); - //12 bytes total, only 7 new + // 12 bytes total, only 7 new cmdBuffer3->surfaces.push_back(&alloc7); cmdBuffer3->surfaces.push_back(&alloc5); @@ -211,7 +211,7 @@ TEST(SubmissionsAggregator, givenMultipleCommandBuffersWhenTheyAreAggreagateWith submissionsAggregator.aggregateCommandBuffers(resourcePackage, totalUsedSize, totalMemoryBudget, 0u); - //command buffer 2 is aggregated to command buffer 1, comand buffer 3 becomes command buffer 2 + // command buffer 2 is aggregated to command buffer 1, comand buffer 3 becomes command buffer 2 EXPECT_EQ(submissionsAggregator.peekCommandBuffersList().peekHead(), cmdBuffer); EXPECT_EQ(submissionsAggregator.peekCommandBuffersList().peekTail(), cmdBuffer3); EXPECT_EQ(cmdBuffer->next, cmdBuffer2); @@ -240,20 +240,20 @@ TEST(SubmissionsAggregator, givenMultipleCommandBuffersWhenAggregateIsCalledMult MockGraphicsAllocation alloc6(nullptr, 6); MockGraphicsAllocation alloc7(nullptr, 7); - //14 bytes consumed + // 14 bytes consumed cmdBuffer->surfaces.push_back(&alloc5); cmdBuffer->surfaces.push_back(&alloc6); cmdBuffer->surfaces.push_back(&alloc5); cmdBuffer->surfaces.push_back(&alloc3); cmdBuffer->surfaces.push_back(&alloc6); - //12 bytes total , only 7 new + // 12 bytes total , only 7 new cmdBuffer2->surfaces.push_back(&alloc1); cmdBuffer2->surfaces.push_back(&alloc2); cmdBuffer2->surfaces.push_back(&alloc5); cmdBuffer2->surfaces.push_back(&alloc4); - //12 bytes total, only 7 new + // 12 bytes total, only 7 new cmdBuffer3->surfaces.push_back(&alloc7); cmdBuffer3->surfaces.push_back(&alloc5); @@ -267,18 +267,18 @@ TEST(SubmissionsAggregator, givenMultipleCommandBuffersWhenAggregateIsCalledMult submissionsAggregator.aggregateCommandBuffers(resourcePackage, totalUsedSize, totalMemoryBudget, 0u); - //command buffers not aggregated due to too low limit + // command buffers not aggregated due to too low limit EXPECT_EQ(submissionsAggregator.peekCommandBuffersList().peekHead(), cmdBuffer); EXPECT_EQ(cmdBuffer->next, cmdBuffer2); EXPECT_EQ(submissionsAggregator.peekCommandBuffersList().peekTail(), cmdBuffer3); - //budget is now larger we can fit everything + // budget is now larger we can fit everything totalMemoryBudget = 28; resourcePackage.clear(); totalUsedSize = 0; submissionsAggregator.aggregateCommandBuffers(resourcePackage, totalUsedSize, totalMemoryBudget, 0u); - //all cmd buffers are merged to 1 + // all cmd buffers are merged to 1 EXPECT_EQ(cmdBuffer3->inspectionId, cmdBuffer2->inspectionId); EXPECT_EQ(cmdBuffer->inspectionId, cmdBuffer2->inspectionId); @@ -300,10 +300,10 @@ TEST(SubmissionsAggregator, givenMultipleCommandBuffersWithDifferentGraphicsAllo MockGraphicsAllocation alloc5(nullptr, 5); 
MockGraphicsAllocation alloc7(nullptr, 7); - //5 bytes consumed + // 5 bytes consumed cmdBuffer->surfaces.push_back(&alloc5); - //10 bytes total + // 10 bytes total cmdBuffer2->surfaces.push_back(&alloc1); cmdBuffer2->surfaces.push_back(&alloc2); cmdBuffer2->batchBuffer.commandBufferAllocation = &alloc7; @@ -336,7 +336,7 @@ TEST(SubmissionsAggregator, givenTwoCommandBufferWhereSecondContainsFirstOnResou cmdBuffer->batchBuffer.commandBufferAllocation = &cmdBufferAllocation1; cmdBuffer2->batchBuffer.commandBufferAllocation = &cmdBufferAllocation2; - //cmdBuffer2 has commandBufferAllocation on the surface list + // cmdBuffer2 has commandBufferAllocation on the surface list cmdBuffer2->surfaces.push_back(&cmdBufferAllocation1); cmdBuffer2->surfaces.push_back(&alloc7); @@ -351,7 +351,7 @@ TEST(SubmissionsAggregator, givenTwoCommandBufferWhereSecondContainsFirstOnResou submissionsAggregator.aggregateCommandBuffers(resourcePackage, totalUsedSize, totalMemoryBudget, 0u); - //resource pack shuold have 3 surfaces + // resource pack shuold have 3 surfaces EXPECT_EQ(3u, resourcePackage.size()); EXPECT_EQ(14u, totalUsedSize); } @@ -370,7 +370,7 @@ TEST(SubmissionsAggregator, givenTwoCommandBufferWhereSecondContainsTheFirstComm cmdBuffer->batchBuffer.commandBufferAllocation = &cmdBufferAllocation1; cmdBuffer2->batchBuffer.commandBufferAllocation = &cmdBufferAllocation1; - //cmdBuffer2 has commandBufferAllocation on the surface list + // cmdBuffer2 has commandBufferAllocation on the surface list cmdBuffer2->surfaces.push_back(&alloc7); cmdBuffer->surfaces.push_back(&alloc5); @@ -383,7 +383,7 @@ TEST(SubmissionsAggregator, givenTwoCommandBufferWhereSecondContainsTheFirstComm submissionsAggregator.aggregateCommandBuffers(resourcePackage, totalUsedSize, totalMemoryBudget, 0u); - //resource pack shuold have 3 surfaces + // resource pack shuold have 3 surfaces EXPECT_EQ(2u, resourcePackage.size()); EXPECT_EQ(12u, totalUsedSize); } diff --git a/opencl/test/unit_test/context/context_negative_tests.cpp b/opencl/test/unit_test/context/context_negative_tests.cpp index 638a1ed1e8..e0cb29b4dc 100644 --- a/opencl/test/unit_test/context/context_negative_tests.cpp +++ b/opencl/test/unit_test/context/context_negative_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -26,7 +26,7 @@ typedef Test ContextFailureInjection; TEST_F(ContextFailureInjection, GivenFailedAllocationInjectionWhenCreatingContextThenOutOfHostMemoryErrorIsReturned) { DebugManagerStateRestore restorer; - DebugManager.flags.ExperimentalSmallBufferPoolAllocator.set(0); //failing to allocate pool buffer is non-critical + DebugManager.flags.ExperimentalSmallBufferPoolAllocator.set(0); // failing to allocate pool buffer is non-critical auto device = std::make_unique(MockDevice::createWithNewExecutionEnvironment(nullptr)); cl_device_id deviceID = device.get(); diff --git a/opencl/test/unit_test/context/context_tests.cpp b/opencl/test/unit_test/context/context_tests.cpp index 7739a14c9c..515e81d935 100644 --- a/opencl/test/unit_test/context/context_tests.cpp +++ b/opencl/test/unit_test/context/context_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -188,7 +188,7 @@ TEST_F(ContextTest, givenSpecialCmdQueueWithContextWhenBeingCreatedNextAutoDelet context.overrideSpecialQueueAndDecrementRefCount(cmdQ, 0u); EXPECT_EQ(1, 
context.getRefInternalCount()); - //special queue is to be deleted implicitly by context + // special queue is to be deleted implicitly by context } TEST_F(ContextTest, givenSpecialCmdQueueWithContextWhenBeingCreatedNextDeletedThenContextRefCountShouldNeitherBeIncrementedNorNextDecremented) { diff --git a/opencl/test/unit_test/event/async_events_handler_tests.cpp b/opencl/test/unit_test/event/async_events_handler_tests.cpp index 9cea668c1b..5da8d5a50b 100644 --- a/opencl/test/unit_test/event/async_events_handler_tests.cpp +++ b/opencl/test/unit_test/event/async_events_handler_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -31,7 +31,7 @@ class AsyncEventsHandlerTests : public ::testing::Test { handler.reset(new MockHandler()); } int getExecutionStatus() { - //return execution status without updating + // return execution status without updating return executionStatus.load(); } void setTaskStamp(TaskCountType taskLevel, TaskCountType taskCount) { diff --git a/opencl/test/unit_test/event/user_events_tests.cpp b/opencl/test/unit_test/event/user_events_tests.cpp index b747200bfa..dc7c8e628a 100644 --- a/opencl/test/unit_test/event/user_events_tests.cpp +++ b/opencl/test/unit_test/event/user_events_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -168,24 +168,24 @@ TEST_F(MockEventTests, GivenBlockedUserEventWhenEnqueueingNdRangeWithoutReturnEv auto &csr = pCmdQ->getGpgpuCommandStreamReceiver(); auto taskCount = csr.peekTaskCount(); - //call NDR + // call NDR auto retVal = callOneWorkItemNDRKernel(eventWaitList, 1); auto taskCountAfter = csr.peekTaskCount(); - //queue should be in blocked state at this moment, task level should be inherited from user event + // queue should be in blocked state at this moment, task level should be inherited from user event EXPECT_EQ(CompletionStamp::notReady, pCmdQ->taskLevel); - //queue should be in blocked state at this moment, task count should be inherited from user event + // queue should be in blocked state at this moment, task count should be inherited from user event EXPECT_EQ(CompletionStamp::notReady, pCmdQ->taskCount); - //queue should be in blocked state + // queue should be in blocked state EXPECT_EQ(pCmdQ->isQueueBlocked(), true); - //and virtual event should be created + // and virtual event should be created ASSERT_NE(nullptr, pCmdQ->virtualEvent); - //check if kernel was in fact not submitted + // check if kernel was in fact not submitted EXPECT_EQ(taskCountAfter, taskCount); EXPECT_EQ(CL_SUCCESS, retVal); @@ -201,30 +201,30 @@ TEST_F(MockEventTests, GivenBlockedUserEventWhenEnqueueingNdRangeWithReturnEvent auto &csr = pCmdQ->getGpgpuCommandStreamReceiver(); auto taskCount = csr.peekTaskCount(); - //call NDR + // call NDR auto retVal = callOneWorkItemNDRKernel(eventWaitList, 1, &retEvent); auto taskCountAfter = csr.peekTaskCount(); - //queue should be in blocked state at this moment, task level should be inherited from user event + // queue should be in blocked state at this moment, task level should be inherited from user event EXPECT_EQ(CompletionStamp::notReady, pCmdQ->taskLevel); - //queue should be in blocked state at this moment, task count should be inherited from user event + // queue should be in blocked state at this moment, task count should be inherited from user event EXPECT_EQ(CompletionStamp::notReady, 
pCmdQ->taskCount); - //queue should be in blocked state + // queue should be in blocked state EXPECT_EQ(pCmdQ->isQueueBlocked(), true); - //and virtual event should be created + // and virtual event should be created ASSERT_NE(nullptr, pCmdQ->virtualEvent); - //that matches the retEvent + // that matches the retEvent EXPECT_EQ(retEvent, pCmdQ->virtualEvent); - //check if kernel was in fact not submitted + // check if kernel was in fact not submitted EXPECT_EQ(taskCountAfter, taskCount); - //and if normal event inherited status from user event + // and if normal event inherited status from user event Event *returnEvent = castToObject(retEvent); EXPECT_EQ(returnEvent->taskLevel, CompletionStamp::notReady); @@ -242,18 +242,18 @@ TEST_F(MockEventTests, WhenAddingChildEventThenConnectionIsCreatedAndCountOnRetu cl_event *eventWaitList = &userEvent; - //call NDR + // call NDR callOneWorkItemNDRKernel(eventWaitList, 1, &retEvent); - //check if dependency count is increased + // check if dependency count is increased Event *returnEvent = castToObject(retEvent); EXPECT_EQ(1U, returnEvent->peekNumEventsBlockingThis()); - //check if user event knows his childs + // check if user event knows his childs EXPECT_TRUE(uEvent->peekHasChildEvents()); - //make sure that proper event is set as child + // make sure that proper event is set as child Event *childEvent = pCmdQ->virtualEvent; EXPECT_EQ(childEvent, uEvent->peekChildEvents()->ref); @@ -281,31 +281,31 @@ TEST_F(MockEventTests, WhenAddingTwoChildEventsThenConnectionIsCreatedAndCountOn cl_event eventWaitList[] = {uEvent.get(), uEvent2.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); - //check if dependency count is increased + // check if dependency count is increased Event *returnEvent = castToObject(retEvent); ASSERT_EQ(2U, returnEvent->peekNumEventsBlockingThis()); - //check if user event knows his childs + // check if user event knows his childs EXPECT_TRUE(uEvent->peekHasChildEvents()); - //check if user event knows his childs + // check if user event knows his childs EXPECT_TRUE(uEvent2->peekHasChildEvents()); - //make sure that proper event is set as child + // make sure that proper event is set as child Event *childEvent = pCmdQ->virtualEvent; EXPECT_EQ(childEvent, uEvent->peekChildEvents()->ref); EXPECT_FALSE(childEvent->isReadyForSubmission()); - //make sure that proper event is set as child + // make sure that proper event is set as child EXPECT_EQ(childEvent, uEvent2->peekChildEvents()->ref); - //signal one user event, child event after this operation isn't ready for submission + // signal one user event, child event after this operation isn't ready for submission uEvent->setStatus(0); - //check if user event knows his children + // check if user event knows his children EXPECT_FALSE(uEvent->peekHasChildEvents()); EXPECT_EQ(1U, returnEvent->peekNumEventsBlockingThis()); EXPECT_FALSE(returnEvent->isReadyForSubmission()); @@ -323,49 +323,49 @@ TEST_F(MockEventTests, GivenTwoUserEvenstWhenCountOnNdr1IsInjectedThenItIsPropag cl_event eventWaitList[] = {uEvent.get(), uEvent2.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR, no return Event + // call NDR, no return Event auto retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, nullptr); EXPECT_EQ(CL_SUCCESS, retVal); - //check if dependency count is increased + // check if dependency count is increased Event *returnEvent1 = 
castToObject(pCmdQ->virtualEvent); ASSERT_EQ(2U, returnEvent1->peekNumEventsBlockingThis()); - //check if user event knows his childs + // check if user event knows his childs EXPECT_TRUE(uEvent->peekHasChildEvents()); - //check if user event knows his childs + // check if user event knows his childs EXPECT_TRUE(uEvent2->peekHasChildEvents()); - //make sure that proper event is set as child + // make sure that proper event is set as child Event *childEvent = pCmdQ->virtualEvent; EXPECT_EQ(childEvent, uEvent->peekChildEvents()->ref); - //make sure that proper event is set as child + // make sure that proper event is set as child EXPECT_EQ(childEvent, uEvent2->peekChildEvents()->ref); - //call NDR, no events, Virtual Event mustn't leak and will be bind to previous Virtual Event + // call NDR, no events, Virtual Event mustn't leak and will be bind to previous Virtual Event retVal = callOneWorkItemNDRKernel(); EXPECT_EQ(CL_SUCCESS, retVal); - //queue must be in blocked state + // queue must be in blocked state EXPECT_EQ(pCmdQ->isQueueBlocked(), true); - //check if virtual event2 is a child of virtual event 1 + // check if virtual event2 is a child of virtual event 1 VirtualEvent *returnEvent2 = castToObject(pCmdQ->virtualEvent); ASSERT_TRUE(returnEvent1->peekHasChildEvents()); EXPECT_EQ(returnEvent2, returnEvent1->peekChildEvents()->ref); - //now signal both parents and see if all childs are notified + // now signal both parents and see if all childs are notified uEvent->setStatus(CL_COMPLETE); uEvent2->setStatus(CL_COMPLETE); - //queue shoud be in unblocked state + // queue shoud be in unblocked state EXPECT_EQ(pCmdQ->isQueueBlocked(), false); - //finish returns immidieatly + // finish returns immidieatly retVal = clFinish(pCmdQ); EXPECT_EQ(CL_SUCCESS, retVal); } @@ -393,7 +393,7 @@ TEST_F(MockEventTests, GivenUserEventSignalingWhenFinishThenExecutionIsNotBlocke cl_event eventWaitList[] = {uEvent.get(), uEvent2.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR, no return Event + // call NDR, no return Event auto retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, nullptr); EXPECT_EQ(CL_SUCCESS, retVal); @@ -410,13 +410,13 @@ TEST_F(MockEventTests, WhenCompletingUserEventThenStatusPropagatedToNormalEvent) cl_event eventWaitList[] = {uEvent.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); - //set user event status + // set user event status uEvent->setStatus(CL_COMPLETE); - //wait for returned event + // wait for returned event auto retVal = clWaitForEvents(1, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); @@ -437,7 +437,7 @@ HWTEST_F(EventTests, WhenSignalingThenUserEventObtainsProperTaskLevel) { csr.taskLevel = 2; csr.taskCount = 5; uEvent.setStatus(CL_COMPLETE); - //even though csr taskLevel has changed, user event taskLevel should remain constant + // even though csr taskLevel has changed, user event taskLevel should remain constant EXPECT_EQ(0u, uEvent.taskLevel); } @@ -450,19 +450,19 @@ TEST_F(MockEventTests, GivenUserEventWhenSettingStatusCompleteThenTaskLevelIsUpd cl_event eventWaitList[] = {uEvent.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); - //check if dependency count is increased + // check if dependency count is increased Event *returnEvent = castToObject(retEvent); 
EXPECT_EQ(CompletionStamp::notReady, returnEvent->taskLevel); EXPECT_EQ(CompletionStamp::notReady, returnEvent->peekTaskCount()); - //now set user event for complete status, this triggers update of childs. + // now set user event for complete status, this triggers update of childs. uEvent->setStatus(CL_COMPLETE); - //child event should have the same taskLevel as parentEvent, as parent event is top of the tree and doesn't have any commands. + // child event should have the same taskLevel as parentEvent, as parent event is top of the tree and doesn't have any commands. EXPECT_EQ(returnEvent->taskLevel, taskLevel); EXPECT_EQ(csr.peekTaskCount(), returnEvent->peekTaskCount()); @@ -477,15 +477,15 @@ TEST_F(MockEventTests, GivenCompleteParentWhenWaitingForEventsThenChildrenAreCom cl_event eventWaitList[] = {uEvent.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); - //check if dependency count is increased + // check if dependency count is increased Event *returnEvent = castToObject(retEvent); EXPECT_EQ(CompletionStamp::notReady, returnEvent->taskLevel); - //now set user event for complete status, this triggers update of childs. + // now set user event for complete status, this triggers update of childs. uEvent->setStatus(CL_COMPLETE); retVal = clWaitForEvents(1, &retEvent); @@ -500,7 +500,7 @@ TEST_F(EventTests, WhenStatusIsAbortedWhenWaitingForEventsThenErrorIsReturned) { cl_event eventWaitList[] = {&uEvent}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //negative values indicate abortion + // negative values indicate abortion uEvent.setStatus(-1); retVal = clWaitForEvents(sizeOfWaitList, eventWaitList); @@ -517,11 +517,11 @@ TEST_F(MockEventTests, GivenAbortedUserEventWhenEnqueingNdrThenDoNotFlushToCsr) auto &csr = pCmdQ->getGpgpuCommandStreamReceiver(); auto taskCount = csr.peekTaskCount(); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); - //negative values indicate abortion + // negative values indicate abortion uEvent->setStatus(-1); auto taskCountAfter = csr.peekTaskCount(); @@ -576,15 +576,15 @@ TEST_F(MockEventTests, GivenAbortedParentWhenDestroyingChildEventThenDoNotProces auto &csr = pCmdQ->getGpgpuCommandStreamReceiver(); auto taskCount = csr.peekTaskCount(); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); - //call second NDR to create Virtual Event + // call second NDR to create Virtual Event retVal = callOneWorkItemNDRKernel(&retEvent, 1, nullptr); EXPECT_EQ(CL_SUCCESS, retVal); - //negative values indicate abortion + // negative values indicate abortion uEvent->setStatus(-1); auto taskCountAfter = csr.peekTaskCount(); @@ -613,11 +613,11 @@ TEST_F(MockEventTests, GivenAbortedUserEventWhenWaitingForEventThenErrorIsReturn int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); cl_event retEvent = nullptr; - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); - //negative values indicate abortion + // negative values indicate abortion uEvent->setStatus(-1); eventWaitList[0] = retEvent; @@ -636,11 +636,11 @@ TEST_F(MockEventTests, GivenAbortedUserEventAndTwoInputsWhenWaitingForEventThenE int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); 
cl_event retEvent = nullptr; - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); - //negative values indicate abortion + // negative values indicate abortion uEvent->setStatus(-1); eventWaitList[0] = retEvent; @@ -662,14 +662,14 @@ TEST_F(MockEventTests, GivenAbortedQueueWhenFinishingThenSuccessIsReturned) { cl_event eventWaitList[] = {uEvent.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList); EXPECT_EQ(CL_SUCCESS, retVal); - //negative values indicate abortion + // negative values indicate abortion uEvent->setStatus(-1); - //make sure we didn't asked CSR for task level for this event, as it is aborted + // make sure we didn't asked CSR for task level for this event, as it is aborted EXPECT_NE(taskLevel, uEvent->taskLevel); retVal = clFinish(pCmdQ); @@ -681,10 +681,10 @@ TEST_F(MockEventTests, GivenUserEventWhenEnqueingThenDependantPacketIsRegistered cl_event eventWaitList[] = {uEvent.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList); - //virtual event should register for this command packet + // virtual event should register for this command packet ASSERT_NE(nullptr, pCmdQ->virtualEvent); EXPECT_NE(nullptr, pCmdQ->virtualEvent->peekCommand()); EXPECT_FALSE(pCmdQ->virtualEvent->peekIsCmdSubmitted()); @@ -695,10 +695,10 @@ TEST_F(MockEventTests, GivenUserEventWhenEnqueingThenCommandPacketContainsValidC cl_event eventWaitList[] = {uEvent.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList); - //virtual event should register for this command packet + // virtual event should register for this command packet ASSERT_NE(nullptr, pCmdQ->virtualEvent); auto cmd = static_cast(pCmdQ->virtualEvent->peekCommand()); EXPECT_NE(0u, cmd->getCommandStream()->getUsed()); @@ -712,14 +712,14 @@ TEST_F(MockEventTests, WhenStatusIsSetThenBlockedPacketsAreSent) { int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList); EXPECT_EQ(CL_SUCCESS, retVal); - //task level untouched as queue blocked by user event + // task level untouched as queue blocked by user event EXPECT_EQ(csr.peekTaskLevel(), 0u); - //virtual event have stored command packet + // virtual event have stored command packet Event *childEvent = pCmdQ->virtualEvent; EXPECT_NE(nullptr, childEvent); EXPECT_NE(nullptr, childEvent->peekCommand()); @@ -727,7 +727,7 @@ TEST_F(MockEventTests, WhenStatusIsSetThenBlockedPacketsAreSent) { EXPECT_NE(nullptr, childEvent->peekCommand()); - //signal the input user event + // signal the input user event uEvent->setStatus(0); EXPECT_EQ(csr.peekTaskLevel(), 1u); @@ -739,19 +739,19 @@ TEST_F(MockEventTests, WhenFinishingThenVirtualEventIsNullAndReleaseEventReturns int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); cl_event retEvent; - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); uEvent->setStatus(0); - //call finish multiple times + // call finish multiple times retVal |= clFinish(pCmdQ); retVal |= clFinish(pCmdQ); retVal |= clFinish(pCmdQ); EXPECT_EQ(CL_SUCCESS, retVal); - //Virtual Event is gone, but retEvent still 
lives. + // Virtual Event is gone, but retEvent still lives. EXPECT_EQ(nullptr, pCmdQ->virtualEvent); retVal = clReleaseEvent(retEvent); EXPECT_EQ(CL_SUCCESS, retVal); @@ -766,7 +766,7 @@ TEST_F(MockEventTests, givenBlockedQueueThenCommandStreamDoesNotChangeWhileEnque auto &cs = pCmdQ->getCS(1024); auto used = cs.getSpace(0); - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); @@ -778,7 +778,7 @@ TEST_F(MockEventTests, givenBlockedQueueThenCommandStreamDoesNotChangeWhileEnque auto used3 = cs.getSpace(0); - //call finish multiple times + // call finish multiple times retVal |= clFinish(pCmdQ); EXPECT_EQ(CL_SUCCESS, retVal); @@ -814,7 +814,7 @@ TEST_F(EventTests, givenUserEventThatHasCallbackAndBlockQueueWhenQueueIsQueriedF int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); cl_event retEvent; - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); ASSERT_EQ(retVal, CL_SUCCESS); @@ -850,7 +850,7 @@ TEST_F(EventTests, GivenEventCallbackWithWaitWhenWaitingForEventsThenSuccessIsRe }; cl_event retEvent; - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(nullptr, 0, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); @@ -881,7 +881,7 @@ TEST_F(EventTests, GivenEventCallbackWithoutWaitWhenWaitingForEventsThenSuccessI }; cl_event retEvent; - //call NDR + // call NDR retVal = callOneWorkItemNDRKernel(nullptr, 0, &retEvent); EXPECT_EQ(CL_SUCCESS, retVal); diff --git a/opencl/test/unit_test/helpers/base_object_tests.cpp b/opencl/test/unit_test/helpers/base_object_tests.cpp index ce65fbe406..79400ce9f3 100644 --- a/opencl/test/unit_test/helpers/base_object_tests.cpp +++ b/opencl/test/unit_test/helpers/base_object_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -94,11 +94,11 @@ class MockObject : public MockObjectBase { typedef ::testing::Types< MockPlatform, IntelAccelerator, - //Context, - //Program, - //Kernel, - //Sampler - //others... + // Context, + // Program, + // Kernel, + // Sampler + // others... 
MockCommandQueue> BaseObjectTypes; @@ -224,7 +224,7 @@ TYPED_TEST(BaseObjectTests, WhenCastingToDispatchTableThenEntriesAreCorrect) { TEST(BaseObjectTests, WhenSettingSharedContextFlagThenItIsSetCorrectly) { MockContext newContext; - //cast to cl_context + // cast to cl_context cl_context clContext = &newContext; EXPECT_FALSE(newContext.isSharedContext); diff --git a/opencl/test/unit_test/helpers/dispatch_info_builder_tests.cpp b/opencl/test/unit_test/helpers/dispatch_info_builder_tests.cpp index 85cf4e60a1..fcd1525765 100644 --- a/opencl/test/unit_test/helpers/dispatch_info_builder_tests.cpp +++ b/opencl/test/unit_test/helpers/dispatch_info_builder_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -297,7 +297,7 @@ TEST_F(DispatchInfoBuilderTest, GivenSplitWhenCheckingIfBuiltinThenReturnTrue) { EXPECT_TRUE(dispatchInfo.getKernel()->isBuiltIn); } - //2D + // 2D diBuilder2D->setKernel(RegionCoordX::Left, RegionCoordY::Bottom, pKernel); diBuilder2D->setDispatchGeometry(RegionCoordX::Left, RegionCoordY::Bottom, Vec3(256, 256, 0), Vec3(16, 16, 0), Vec3(0, 0, 0)); MultiDispatchInfo mdi2D; @@ -307,7 +307,7 @@ TEST_F(DispatchInfoBuilderTest, GivenSplitWhenCheckingIfBuiltinThenReturnTrue) { EXPECT_TRUE(dispatchInfo.getKernel()->isBuiltIn); } - //3D + // 3D diBuilder3D->setKernel(RegionCoordX::Right, RegionCoordY::Bottom, RegionCoordZ::Back, pKernel); diBuilder3D->setDispatchGeometry(RegionCoordX::Right, RegionCoordY::Bottom, RegionCoordZ::Back, Vec3(256, 256, 256), Vec3(16, 16, 16), Vec3(0, 0, 0)); MultiDispatchInfo mdi3D; @@ -499,7 +499,7 @@ TEST_F(DispatchInfoBuilderTest, GivenSplitWhenGettingWalkerInfoThenCorrectValues dispatchId++; } - //2D + // 2D diBuilder2D->setKernel(pKernel); diBuilder2D->setDispatchGeometry(Vec3(256, 256, 0), Vec3(15, 15, 0), Vec3(0, 0, 0)); MultiDispatchInfo mdi2D; @@ -582,7 +582,7 @@ TEST_F(DispatchInfoBuilderTest, GivenSplitWhenGettingWalkerInfoThenCorrectValues dispatchId++; } - //3D + // 3D diBuilder3D->setKernel(pKernel); diBuilder3D->setDispatchGeometry(Vec3(256, 256, 256), Vec3(15, 15, 15), Vec3(0, 0, 0)); MultiDispatchInfo mdi3D; @@ -902,7 +902,7 @@ TEST_F(DispatchInfoBuilderTest, GivenSplitWhenSettingKernelArgThenAddressesAreCo builder1D.bake(mdi2D); builder1D.bake(mdi3D); - //Set arg + // Set arg clearCrossThreadData(); builder1D.setArg(SplitDispatch::RegionCoordX::Left, static_cast(0), sizeof(cl_mem *), pVal); for (auto &dispatchInfo : mdi1D) { @@ -919,7 +919,7 @@ TEST_F(DispatchInfoBuilderTest, GivenSplitWhenSettingKernelArgThenAddressesAreCo EXPECT_EQ(buffer->getCpuAddress(), *reinterpret_cast((dispatchInfo.getKernel()->getCrossThreadData() + 0x10))); } - //Set arg SVM + // Set arg SVM clearCrossThreadData(); builder1D.setArgSvm(SplitDispatch::RegionCoordX::Left, 1, sizeof(svmPtr), svmPtr, nullptr, 0u); for (auto &dispatchInfo : mdi1D) { diff --git a/opencl/test/unit_test/helpers/hardware_commands_helper_tests.cpp b/opencl/test/unit_test/helpers/hardware_commands_helper_tests.cpp index 3b96c7c63e..eeaac811ef 100644 --- a/opencl/test/unit_test/helpers/hardware_commands_helper_tests.cpp +++ b/opencl/test/unit_test/helpers/hardware_commands_helper_tests.cpp @@ -753,7 +753,7 @@ HWCMDTEST_F(IGFX_GEN8_CORE, HardwareCommandsTest, WhenGettingBindingTableStateTh program.setGlobalSurface(nullptr); program.setConstantSurface(nullptr); - //exhaust space to trigger reload + // exhaust space to trigger reload ssh.getSpace(ssh.getAvailableSpace()); 
dsh.getSpace(dsh.getAvailableSpace()); } @@ -866,7 +866,7 @@ HWCMDTEST_F(IGFX_GEN8_CORE, HardwareCommandsTest, GivenKernelWithInvalidSamplerS auto isCcsUsed = EngineHelpers::isCcs(cmdQ.getGpgpuEngine().osContext->getEngineType()); auto kernelUsesLocalIds = HardwareCommandsHelper::kernelUsesLocalIds(*mockKernelWithInternal->mockKernel); - //Undefined Offset, Defined BorderColorOffset + // Undefined Offset, Defined BorderColorOffset mockKernelWithInternal->kernelInfo.setSamplerTable(0, 2, undefined); HardwareCommandsHelper::sendIndirectState( @@ -891,7 +891,7 @@ HWCMDTEST_F(IGFX_GEN8_CORE, HardwareCommandsTest, GivenKernelWithInvalidSamplerS EXPECT_EQ(0U, interfaceDescriptor->getSamplerStatePointer()); EXPECT_EQ(0U, interfaceDescriptor->getSamplerCount()); - //Defined Offset, Undefined BorderColorOffset + // Defined Offset, Undefined BorderColorOffset mockKernelWithInternal->kernelInfo.setSamplerTable(undefined, 2, 0); HardwareCommandsHelper::sendIndirectState( diff --git a/opencl/test/unit_test/helpers/kmd_notify_tests.cpp b/opencl/test/unit_test/helpers/kmd_notify_tests.cpp index 8fcc6dce7f..25e710f94e 100644 --- a/opencl/test/unit_test/helpers/kmd_notify_tests.cpp +++ b/opencl/test/unit_test/helpers/kmd_notify_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -154,7 +154,7 @@ HWTEST_F(KmdNotifyTests, givenNotReadyTaskCountWhenWaitUntilCompletionCalledThen csr->waitForCompletionWithTimeoutResult = WaitStatus::NotReady; - //we have unrecoverable for this case, this will throw. + // we have unrecoverable for this case, this will throw. EXPECT_THROW(cmdQ->waitUntilComplete(taskCountToWait, {}, flushStampToWait, false), std::exception); EXPECT_EQ(1u, csr->waitForFlushStampCalled); EXPECT_EQ(flushStampToWait, csr->waitForFlushStampParamsPassed[0].flushStampToWait); diff --git a/opencl/test/unit_test/linux/main_linux_dll.cpp b/opencl/test/unit_test/linux/main_linux_dll.cpp index 04104cc5b2..2aa04a2a05 100644 --- a/opencl/test/unit_test/linux/main_linux_dll.cpp +++ b/opencl/test/unit_test/linux/main_linux_dll.cpp @@ -598,7 +598,7 @@ TEST_F(DrmTests, GivenFailOnParamBoostWhenCreatingDrmThenDrmIsCreated) { failOnParamBoost = -1; auto drm = DrmWrap::createDrm(*rootDeviceEnvironment); - //non-fatal error - issue warning only + // non-fatal error - issue warning only EXPECT_NE(drm, nullptr); } diff --git a/opencl/test/unit_test/main.cpp b/opencl/test/unit_test/main.cpp index a7cefdf908..3dad4b98aa 100644 --- a/opencl/test/unit_test/main.cpp +++ b/opencl/test/unit_test/main.cpp @@ -90,16 +90,16 @@ void applyWorkarounds() { ss >> val; } - //intialize rand + // intialize rand srand(static_cast(time(nullptr))); - //Create at least on thread to prevent false memory leaks in tests using threads + // Create at least on thread to prevent false memory leaks in tests using threads std::thread t([&]() { }); tempThreadID = t.get_id(); t.join(); - //Create FileLogger to prevent false memory leaks + // Create FileLogger to prevent false memory leaks { NEO::fileLoggerInstance(); } @@ -190,7 +190,7 @@ int main(int argc, char **argv) { dumpTestStats = true; ++i; dumpTestStatsFileName = std::string(argv[i]); - } else if (!strcmp("--disable_pagefaulting_tests", argv[i])) { //disable tests which raise page fault signal during execution + } else if (!strcmp("--disable_pagefaulting_tests", argv[i])) { // disable tests which raise page fault signal during execution NEO::PagaFaultManagerTestConfig::disabled = true; } 
else if (!strcmp("--tbx", argv[i])) { if (testMode == TestMode::AubTests) { diff --git a/opencl/test/unit_test/mem_obj/buffer_set_arg_tests.cpp b/opencl/test/unit_test/mem_obj/buffer_set_arg_tests.cpp index 9f04e4149c..6678af9f38 100644 --- a/opencl/test/unit_test/mem_obj/buffer_set_arg_tests.cpp +++ b/opencl/test/unit_test/mem_obj/buffer_set_arg_tests.cpp @@ -217,7 +217,7 @@ TEST_F(BufferSetArgTest, givenCurbeTokenThatSizeIs4BytesWhenStatelessArgIsPatche auto pKernelArg = (void **)(pKernel->getCrossThreadData() + pKernelInfo->argAsPtr(0).stateless); - //fill 8 bytes with 0xffffffffffffffff; + // fill 8 bytes with 0xffffffffffffffff; uint64_t fillValue = -1; uint64_t *pointer64bytes = (uint64_t *)pKernelArg; *pointer64bytes = fillValue; @@ -227,7 +227,7 @@ TEST_F(BufferSetArgTest, givenCurbeTokenThatSizeIs4BytesWhenStatelessArgIsPatche buffer->setArgStateless(pKernelArg, sizeOf4Bytes, pClDevice->getRootDeviceIndex(), false); - //make sure only 4 bytes are patched + // make sure only 4 bytes are patched auto bufferAddress = buffer->getGraphicsAllocation(pClDevice->getRootDeviceIndex())->getGpuAddress(); uint32_t address32bits = static_cast(bufferAddress); uint64_t curbeValue = *pointer64bytes; diff --git a/opencl/test/unit_test/mem_obj/image_array_size_tests.cpp b/opencl/test/unit_test/mem_obj/image_array_size_tests.cpp index d50d12dc05..6b69822c37 100644 --- a/opencl/test/unit_test/mem_obj/image_array_size_tests.cpp +++ b/opencl/test/unit_test/mem_obj/image_array_size_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -232,7 +232,7 @@ TEST_P(CheckImageType, GivenImageTypeWhenImageTypeIsCheckedThenProperValueIsRetu } static cl_mem_object_type AllImageTypes[] = { - 0, //negative scenario + 0, // negative scenario CL_MEM_OBJECT_IMAGE1D, CL_MEM_OBJECT_IMAGE1D_BUFFER, CL_MEM_OBJECT_IMAGE2D, @@ -246,7 +246,7 @@ INSTANTIATE_TEST_CASE_P( testing::ValuesIn(AllImageTypes)); static cl_mem_object_type AllImageTypesWithBadOne[] = { - 0, //negative scenario + 0, // negative scenario CL_MEM_OBJECT_BUFFER, CL_MEM_OBJECT_IMAGE1D, CL_MEM_OBJECT_IMAGE1D_BUFFER, diff --git a/opencl/test/unit_test/mem_obj/image_set_arg_tests.cpp b/opencl/test/unit_test/mem_obj/image_set_arg_tests.cpp index ac3f685f7b..7842beb015 100644 --- a/opencl/test/unit_test/mem_obj/image_set_arg_tests.cpp +++ b/opencl/test/unit_test/mem_obj/image_set_arg_tests.cpp @@ -842,7 +842,7 @@ HWTEST_F(ImageSetArgTest, GivenImageWithClLuminanceFormatWhenSettingKernelArgThe auto surfaceState = reinterpret_cast( ptrOffset(pKernel->getSurfaceStateHeap(), pKernelInfo->argAsImg(0).bindful)); - //for CL_LUMINANCE format we override channels to RED to be spec compliant. + // for CL_LUMINANCE format we override channels to RED to be spec compliant. 
EXPECT_EQ(RENDER_SURFACE_STATE::SHADER_CHANNEL_SELECT_RED, surfaceState->getShaderChannelSelectRed()); EXPECT_EQ(RENDER_SURFACE_STATE::SHADER_CHANNEL_SELECT_RED, surfaceState->getShaderChannelSelectGreen()); EXPECT_EQ(RENDER_SURFACE_STATE::SHADER_CHANNEL_SELECT_RED, surfaceState->getShaderChannelSelectBlue()); diff --git a/opencl/test/unit_test/mem_obj/image_tests.cpp b/opencl/test/unit_test/mem_obj/image_tests.cpp index 1b179b384f..abbaba3b37 100644 --- a/opencl/test/unit_test/mem_obj/image_tests.cpp +++ b/opencl/test/unit_test/mem_obj/image_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -36,7 +36,7 @@ using namespace NEO; static const unsigned int testImageDimensions = 45; auto channelType = CL_UNORM_INT8; auto channelOrder = CL_RGBA; -auto const elementSize = 4; //sizeof CL_RGBA * CL_UNORM_INT8 +auto const elementSize = 4; // sizeof CL_RGBA * CL_UNORM_INT8 class CreateImageTest : public ClDeviceFixture, public testing::TestWithParam, @@ -579,7 +579,7 @@ TEST(TestCreateImageUseHostPtr, GivenDifferenHostPtrAlignmentsWhenCheckingMemory imageDesc.image_height = height; imageDesc.image_depth = 0; imageDesc.image_array_size = 0; - imageDesc.image_row_pitch = alignUp(alignUp(width, 4) * 4, 0x80); //row pitch for tiled img + imageDesc.image_row_pitch = alignUp(alignUp(width, 4) * 4, 0x80); // row pitch for tiled img imageDesc.image_slice_pitch = 0; void *pageAlignedPointer = alignedMalloc(imageDesc.image_row_pitch * height * 1 * 4 + 256, 4096); @@ -765,7 +765,7 @@ TEST_P(CreateImageHostPtr, WhenCheckingAddressThenAlllocationDependsOnSizeRelati } if (flags & CL_MEM_USE_HOST_PTR) { - //if size fits within a page then zero copy can be applied, if not RT needs to do a copy of image + // if size fits within a page then zero copy can be applied, if not RT needs to do a copy of image auto computedSize = imageDesc.image_width * elementSize * alignUp(imageDesc.image_height, 4) * imageDesc.image_array_size; auto ptrSize = imageDesc.image_width * elementSize * imageDesc.image_height * imageDesc.image_array_size; auto alignedRequiredSize = alignSizeWholePage(static_cast(pHostPtr), computedSize); diff --git a/opencl/test/unit_test/mem_obj/map_operations_handler_tests.cpp b/opencl/test/unit_test/mem_obj/map_operations_handler_tests.cpp index 8aa062ac9c..539e2b2b85 100644 --- a/opencl/test/unit_test/mem_obj/map_operations_handler_tests.cpp +++ b/opencl/test/unit_test/mem_obj/map_operations_handler_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -132,14 +132,14 @@ TEST_F(MapOperationsHandlerTests, givenReadOnlyOverlappingPtrWhenAddingThenRetur const std::tuple overlappingCombinations[] = { // mappedPtrStart, mappedPtrLength, requestPtrStart, requestPtrLength, expectOverlap - std::make_tuple((void *)5000, 50, (void *)4000, 1, false), //requested before, non-overlapping - std::make_tuple((void *)5000, 50, (void *)4999, 10, true), //requested before, overlapping inside - std::make_tuple((void *)5000, 50, (void *)4999, 100, true), //requested before, overlapping outside - std::make_tuple((void *)5000, 50, (void *)5001, 1, true), //requested inside, overlapping inside - std::make_tuple((void *)5000, 50, (void *)5001, 100, true), //requested inside, overlapping outside - std::make_tuple((void *)5000, 50, (void *)6000, 1, false), //requested after, non-overlapping - std::make_tuple((void *)5000, 
50, (void *)5000, 1, true), //requested on start, overlapping inside - std::make_tuple((void *)5000, 50, (void *)5000, 100, true), //requested on start, overlapping outside + std::make_tuple((void *)5000, 50, (void *)4000, 1, false), // requested before, non-overlapping + std::make_tuple((void *)5000, 50, (void *)4999, 10, true), // requested before, overlapping inside + std::make_tuple((void *)5000, 50, (void *)4999, 100, true), // requested before, overlapping outside + std::make_tuple((void *)5000, 50, (void *)5001, 1, true), // requested inside, overlapping inside + std::make_tuple((void *)5000, 50, (void *)5001, 100, true), // requested inside, overlapping outside + std::make_tuple((void *)5000, 50, (void *)6000, 1, false), // requested after, non-overlapping + std::make_tuple((void *)5000, 50, (void *)5000, 1, true), // requested on start, overlapping inside + std::make_tuple((void *)5000, 50, (void *)5000, 100, true), // requested on start, overlapping outside }; struct MapOperationsHandlerOverlapTests : public ::testing::WithParamInterface>, diff --git a/opencl/test/unit_test/mem_obj/pipe_tests.cpp b/opencl/test/unit_test/mem_obj/pipe_tests.cpp index 6396339168..eb7f9b445d 100644 --- a/opencl/test/unit_test/mem_obj/pipe_tests.cpp +++ b/opencl/test/unit_test/mem_obj/pipe_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -17,7 +17,7 @@ using namespace NEO; -//Tests for pipes +// Tests for pipes class PipeTest : public ::testing::Test, public MemoryManagementFixture { public: diff --git a/opencl/test/unit_test/mem_obj/zero_copy_tests.cpp b/opencl/test/unit_test/mem_obj/zero_copy_tests.cpp index d4cb3798b3..9db92168d5 100644 --- a/opencl/test/unit_test/mem_obj/zero_copy_tests.cpp +++ b/opencl/test/unit_test/mem_obj/zero_copy_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -71,7 +71,7 @@ std::tuple Inputs[] = {std::make_tup TEST_P(ZeroCopyBufferTest, GivenCacheAlignedPointerWhenCreatingBufferThenZeroCopy) { char *passedPtr = (char *)host_ptr; - //misalign the pointer + // misalign the pointer if (MisalignPointer && passedPtr) { passedPtr += 1; } @@ -90,7 +90,7 @@ TEST_P(ZeroCopyBufferTest, GivenCacheAlignedPointerWhenCreatingBufferThenZeroCop EXPECT_NE(nullptr, buffer->getCpuAddress()); - //check if buffer always have properly aligned storage ( PAGE ) + // check if buffer always have properly aligned storage ( PAGE ) EXPECT_EQ(alignUp(buffer->getCpuAddress(), MemoryConstants::cacheLineSize), buffer->getCpuAddress()); delete buffer; diff --git a/opencl/test/unit_test/mock_gl/windows/mock_opengl32.cpp b/opencl/test/unit_test/mock_gl/windows/mock_opengl32.cpp index 7c9fba8202..964844a4ed 100644 --- a/opencl/test/unit_test/mock_gl/windows/mock_opengl32.cpp +++ b/opencl/test/unit_test/mock_gl/windows/mock_opengl32.cpp @@ -1,11 +1,11 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * */ -#define _GDI32_ //It causes that definitions of functions are not loaded from dll in file wingdi.h because they are in this file. +#define _GDI32_ // It causes that definitions of functions are not loaded from dll in file wingdi.h because they are in this file. 
#include "opencl/test/unit_test/helpers/windows/mock_function.h" #include "opencl/test/unit_test/mocks/gl/windows/mock_gl_sharing_windows.h" diff --git a/opencl/test/unit_test/mt_tests/api/cl_create_user_event_tests_mt.inl b/opencl/test/unit_test/mt_tests/api/cl_create_user_event_tests_mt.inl index 07897d158e..08dbf283b0 100644 --- a/opencl/test/unit_test/mt_tests/api/cl_create_user_event_tests_mt.inl +++ b/opencl/test/unit_test/mt_tests/api/cl_create_user_event_tests_mt.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -29,16 +29,16 @@ TEST_F(clCreateUserEventMtTests, GivenClCompleteEventWhenWaitingForEventThenWait waitForEventsCompleted = true; }); - //wait for the thread to start + // wait for the thread to start while (!threadStarted) ; - //now wait a while. + // now wait a while. while (!waitForEventsCompleted && counter++ < deadline) ; ASSERT_EQ(waitForEventsCompleted, false) << "WaitForEvents returned while user event is not signaled!"; - //set event to CL_COMPLETE + // set event to CL_COMPLETE retVal = clSetUserEventStatus(userEvent, CL_COMPLETE); t.join(); diff --git a/opencl/test/unit_test/mt_tests/event/user_events_tests_mt.cpp b/opencl/test/unit_test/mt_tests/event/user_events_tests_mt.cpp index 09c64a25c3..50df8800ec 100644 --- a/opencl/test/unit_test/mt_tests/event/user_events_tests_mt.cpp +++ b/opencl/test/unit_test/mt_tests/event/user_events_tests_mt.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -21,7 +21,7 @@ TEST_F(MockEventTests, GivenEventCreatedFromUserEventsThatIsNotSignaledThenDoNot cl_event eventWaitList[] = {uEvent.get()}; int sizeOfWaitList = sizeof(eventWaitList) / sizeof(cl_event); - //call NDR + // call NDR auto retVal = callOneWorkItemNDRKernel(eventWaitList, sizeOfWaitList, &retEvent); auto &csr = pCmdQ->getGpgpuCommandStreamReceiver(); @@ -36,14 +36,14 @@ TEST_F(MockEventTests, GivenEventCreatedFromUserEventsThatIsNotSignaledThenDoNot std::thread t([&]() { threadStarted = true; - //call WaitForEvents + // call WaitForEvents clWaitForEvents(1, &retEvent); waitForEventsCompleted = true; }); - //wait for the thread to start + // wait for the thread to start while (!threadStarted) ; - //now wait a while. + // now wait a while. 
while (!waitForEventsCompleted && counter++ < deadline) ; @@ -53,7 +53,7 @@ TEST_F(MockEventTests, GivenEventCreatedFromUserEventsThatIsNotSignaledThenDoNot EXPECT_EQ(taskLevelBeforeWaitForEvents, csr.peekTaskLevel()); - //set event to CL_COMPLETE + // set event to CL_COMPLETE uEvent->setStatus(CL_COMPLETE); t.join(); diff --git a/opencl/test/unit_test/os_interface/linux/drm_gem_close_worker_tests.cpp b/opencl/test/unit_test/os_interface/linux/drm_gem_close_worker_tests.cpp index e65353cffb..4f403b2db6 100644 --- a/opencl/test/unit_test/os_interface/linux/drm_gem_close_worker_tests.cpp +++ b/opencl/test/unit_test/os_interface/linux/drm_gem_close_worker_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -53,7 +53,7 @@ class DrmMockForWorker : public Drm { class DrmGemCloseWorkerFixture { public: DrmGemCloseWorkerFixture() : executionEnvironment(defaultHwInfo.get()){}; - //max loop count for while + // max loop count for while static const uint32_t deadCntInit = 10 * 1000 * 1000; DrmMemoryManager *mm; @@ -118,13 +118,13 @@ TEST_F(DrmGemCloseWorkerTests, GivenMultipleThreadsWhenClosingGemThenSucceeds) { worker->push(bo); - //wait for worker to complete or deadCnt drops + // wait for worker to complete or deadCnt drops while (!worker->isEmpty() && (deadCnt-- > 0)) - sched_yield(); //yield to another threads + sched_yield(); // yield to another threads worker->close(false); - //and check if GEM was closed + // and check if GEM was closed EXPECT_EQ(1, this->drmMock->gem_close_cnt.load()); delete worker; @@ -139,11 +139,11 @@ TEST_F(DrmGemCloseWorkerTests, GivenMultipleThreadsAndCloseFalseWhenClosingGemTh worker->push(bo); worker->close(false); - //wait for worker to complete or deadCnt drops + // wait for worker to complete or deadCnt drops while (!worker->isEmpty() && (deadCnt-- > 0)) - sched_yield(); //yield to another threads + sched_yield(); // yield to another threads - //and check if GEM was closed + // and check if GEM was closed EXPECT_EQ(1, this->drmMock->gem_close_cnt.load()); delete worker; diff --git a/opencl/test/unit_test/platform/platform_icd_tests.cpp b/opencl/test/unit_test/platform/platform_icd_tests.cpp index 767beee473..027609bd4a 100644 --- a/opencl/test/unit_test/platform/platform_icd_tests.cpp +++ b/opencl/test/unit_test/platform/platform_icd_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -55,7 +55,7 @@ class PlatformTestedSharingBuilderFactory : public SharingBuilderFactory { return "--extension--"; }; void fillGlobalDispatchTable() override { - icdGlobalDispatchTable.clCreateFromGLBuffer = (decltype(icdGlobalDispatchTable.clCreateFromGLBuffer)) & fakeGlF; + icdGlobalDispatchTable.clCreateFromGLBuffer = (decltype(icdGlobalDispatchTable.clCreateFromGLBuffer))&fakeGlF; }; void *getExtensionFunctionAddress(const std::string &functionName) override { return nullptr; diff --git a/opencl/test/unit_test/program/kernel_data.cpp b/opencl/test/unit_test/program/kernel_data.cpp index ac6698f90a..969b8c4200 100644 --- a/opencl/test/unit_test/program/kernel_data.cpp +++ b/opencl/test/unit_test/program/kernel_data.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -348,7 +348,7 @@ TEST_F(KernelDataTest, whenWorkgroupOrderIsSpecifiedViaPatchTokenThenProperWorkG 
executionEnvironment.Token = PATCH_TOKEN_EXECUTION_ENVIRONMENT; executionEnvironment.Size = sizeof(SPatchExecutionEnvironment); - //dim0 : [0 : 1]; dim1 : [2 : 3]; dim2 : [4 : 5] + // dim0 : [0 : 1]; dim1 : [2 : 3]; dim2 : [4 : 5] executionEnvironment.WorkgroupWalkOrderDims = 1 | (2 << 2); pPatchList = &executionEnvironment; @@ -367,7 +367,7 @@ TEST_F(KernelDataTest, whenWorkgroupOrderIsSpecifiedViaPatchToken2ThenProperWork executionEnvironment.Token = PATCH_TOKEN_EXECUTION_ENVIRONMENT; executionEnvironment.Size = sizeof(SPatchExecutionEnvironment); - //dim0 : [0 : 1]; dim1 : [2 : 3]; dim2 : [4 : 5] + // dim0 : [0 : 1]; dim1 : [2 : 3]; dim2 : [4 : 5] executionEnvironment.WorkgroupWalkOrderDims = 2 | (1 << 4); pPatchList = &executionEnvironment; @@ -1261,7 +1261,7 @@ TEST_F(KernelDataTest, givenRelocationTablePatchTokenThenLinkerInputIsCreated) { token.Token = PATCH_TOKEN_PROGRAM_RELOCATION_TABLE; token.Size = static_cast(sizeof(SPatchFunctionTableInfo)); token.NumEntries = 0; - kernelHeapSize = 0x100; //force creating kernel allocation for ISA + kernelHeapSize = 0x100; // force creating kernel allocation for ISA auto kernelHeapData = std::make_unique(kernelHeapSize); pKernelHeap = kernelHeapData.get(); diff --git a/opencl/test/unit_test/program/process_elf_binary_tests.cpp b/opencl/test/unit_test/program/process_elf_binary_tests.cpp index be568cb8ba..b8e00c09a8 100644 --- a/opencl/test/unit_test/program/process_elf_binary_tests.cpp +++ b/opencl/test/unit_test/program/process_elf_binary_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -66,11 +66,11 @@ TEST_F(ProcessElfBinaryTests, GivenValidBinaryWhenCreatingProgramFromBinaryThenS } TEST_F(ProcessElfBinaryTests, GivenValidSpirBinaryWhenCreatingProgramFromBinaryThenSuccessIsReturned) { - //clCreateProgramWithIL => SPIR-V stored as source code + // clCreateProgramWithIL => SPIR-V stored as source code const uint32_t spirvBinary[2] = {0x03022307, 0x07230203}; size_t spirvBinarySize = sizeof(spirvBinary); - //clCompileProgram => SPIR-V stored as IR binary + // clCompileProgram => SPIR-V stored as IR binary program->isSpirV = true; program->irBinary = makeCopy(spirvBinary, spirvBinarySize); program->irBinarySize = spirvBinarySize; @@ -79,20 +79,20 @@ TEST_F(ProcessElfBinaryTests, GivenValidSpirBinaryWhenCreatingProgramFromBinaryT EXPECT_NE(0u, program->irBinarySize); EXPECT_TRUE(program->getIsSpirV()); - //clGetProgramInfo => SPIR-V stored as ELF binary + // clGetProgramInfo => SPIR-V stored as ELF binary cl_int retVal = program->packDeviceBinary(*device); EXPECT_EQ(CL_SUCCESS, retVal); EXPECT_NE(nullptr, program->buildInfos[rootDeviceIndex].packedDeviceBinary); EXPECT_NE(0u, program->buildInfos[rootDeviceIndex].packedDeviceBinarySize); - //use ELF reader to parse and validate ELF binary + // use ELF reader to parse and validate ELF binary std::string decodeErrors; std::string decodeWarnings; auto elf = NEO::Elf::decodeElf(ArrayRef(reinterpret_cast(program->buildInfos[rootDeviceIndex].packedDeviceBinary.get()), program->buildInfos[rootDeviceIndex].packedDeviceBinarySize), decodeErrors, decodeWarnings); auto header = elf.elfFileHeader; ASSERT_NE(nullptr, header); - //check if ELF binary contains section SECTION_HEADER_TYPE_SPIRV + // check if ELF binary contains section SECTION_HEADER_TYPE_SPIRV bool hasSpirvSection = false; for (const auto &elfSectionHeader : elf.sectionHeaders) { if (elfSectionHeader.header->type == 
NEO::Elf::SHT_OPENCL_SPIRV) { @@ -102,7 +102,7 @@ TEST_F(ProcessElfBinaryTests, GivenValidSpirBinaryWhenCreatingProgramFromBinaryT } EXPECT_TRUE(hasSpirvSection); - //clCreateProgramWithBinary => new program should recognize SPIR-V binary + // clCreateProgramWithBinary => new program should recognize SPIR-V binary program->isSpirV = false; auto elfBinary = makeCopy(program->buildInfos[rootDeviceIndex].packedDeviceBinary.get(), program->buildInfos[rootDeviceIndex].packedDeviceBinarySize); retVal = program->createProgramFromBinary(elfBinary.get(), program->buildInfos[rootDeviceIndex].packedDeviceBinarySize, *device); diff --git a/opencl/test/unit_test/program/program_data_tests.cpp b/opencl/test/unit_test/program/program_data_tests.cpp index d591975261..947ea46bc1 100644 --- a/opencl/test/unit_test/program/program_data_tests.cpp +++ b/opencl/test/unit_test/program/program_data_tests.cpp @@ -162,7 +162,7 @@ void ProgramDataTestBase::buildAndDecodeProgramPatchList() { pCurPtr += programPatchListSize; auto rootDeviceIndex = pPlatform->getClDevice(0)->getRootDeviceIndex(); - //as we use mock compiler in unit test, replace the genBinary here. + // as we use mock compiler in unit test, replace the genBinary here. pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinary = makeCopy(pProgramData, headerSize + programBinaryHeader.PatchListSize); pProgram->buildInfos[rootDeviceIndex].unpackedDeviceBinarySize = headerSize + programBinaryHeader.PatchListSize; diff --git a/opencl/test/unit_test/sampler/sampler_tests.cpp b/opencl/test/unit_test/sampler/sampler_tests.cpp index 035136d57f..b9093867cd 100644 --- a/opencl/test/unit_test/sampler/sampler_tests.cpp +++ b/opencl/test/unit_test/sampler/sampler_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -63,7 +63,7 @@ TEST_P(CreateSampler, GivenModeWhenSamplerIsCreatedThenParamsAreSetCorrectly) { EXPECT_EQ(addressingMode, sampler->getAddressingMode()); EXPECT_EQ(filterMode, sampler->getFilterMode()); - //check for SnapWA + // check for SnapWA bool snapWaNeeded = addressingMode == CL_ADDRESS_CLAMP && filterMode == CL_FILTER_NEAREST; auto snapWaValue = snapWaNeeded ? 
iOpenCL::CONSTANT_REGISTER_BOOLEAN_TRUE : iOpenCL::CONSTANT_REGISTER_BOOLEAN_FALSE; EXPECT_EQ(snapWaValue, sampler->getSnapWaValue()); diff --git a/opencl/test/unit_test/scenarios/blocked_enqueue_with_callback_scenario_tests.cpp b/opencl/test/unit_test/scenarios/blocked_enqueue_with_callback_scenario_tests.cpp index 26b24e87c8..348294c6e9 100644 --- a/opencl/test/unit_test/scenarios/blocked_enqueue_with_callback_scenario_tests.cpp +++ b/opencl/test/unit_test/scenarios/blocked_enqueue_with_callback_scenario_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -38,7 +38,7 @@ void CL_CALLBACK callback(cl_event event, cl_int status, void *data) { cl_event callbackEvent = callbackData->signalCallbackDoneEvent; clSetUserEventStatus(callbackEvent, CL_COMPLETE); // No need to reatin and release this synchronization event - //clReleaseEvent(callbackEvent); + // clReleaseEvent(callbackEvent); } } diff --git a/opencl/test/unit_test/sharings/gl/windows/gl_texture_tests.cpp b/opencl/test/unit_test/sharings/gl/windows/gl_texture_tests.cpp index 24ef5c4d89..56f3e5142e 100644 --- a/opencl/test/unit_test/sharings/gl/windows/gl_texture_tests.cpp +++ b/opencl/test/unit_test/sharings/gl/windows/gl_texture_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -103,7 +103,7 @@ TEST_F(GlSharingTextureTests, givenMockGlWhen1dGlTextureIsCreatedThenMemObjectHa EXPECT_EQ(1, glSharing->dllParam->getParam("GLAcquireSharedTextureCalled")); EXPECT_EQ(CL_SUCCESS, retVal); - EXPECT_EQ(textureId, glSharing->dllParam->getTextureInfo().name); //input + EXPECT_EQ(textureId, glSharing->dllParam->getTextureInfo().name); // input auto handler = glTexture->peekSharingHandler(); ASSERT_NE(nullptr, handler); diff --git a/opencl/test/unit_test/xe_hp_core/xehp/test_local_work_size_xehp.inl b/opencl/test/unit_test/xe_hp_core/xehp/test_local_work_size_xehp.inl index ca24a0b006..ac8447d754 100644 --- a/opencl/test/unit_test/xe_hp_core/xehp/test_local_work_size_xehp.inl +++ b/opencl/test/unit_test/xe_hp_core/xehp/test_local_work_size_xehp.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -59,7 +59,7 @@ XEHPTEST_F(XeHPComputeWorkgroupSizeTest, giveXeHpA0WhenKernelIsaIsBelowThreshold } mockKernel.kernelInfo.kernelDescriptor.kernelAttributes.barrierCount = 0u; - //on B0 algorithm is disabled + // on B0 algorithm is disabled hwInfo.platform.usRevId = productHelper.getHwRevIdFromStepping(REVISION_B, hwInfo); { auto expectedLws = computeWorkgroupSize(dispatchInfo); diff --git a/shared/offline_compiler/source/multi_command.cpp b/shared/offline_compiler/source/multi_command.cpp index 886682ecaa..fde18b39de 100644 --- a/shared/offline_compiler/source/multi_command.cpp +++ b/shared/offline_compiler/source/multi_command.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -112,7 +112,7 @@ int MultiCommand::initialize(const std::vector &args) { } } - //save file with builds arguments to vector of strings, line by line + // save file with builds arguments to vector of strings, line by line if (argHelper->fileExists(pathToCommandFile)) { argHelper->readFileToVectorOfStrings(pathToCommandFile, lines); if (lines.empty()) { diff --git 
a/shared/offline_compiler/source/ocloc_api.cpp b/shared/offline_compiler/source/ocloc_api.cpp index 4f5cd07e71..1fbf27bc4c 100644 --- a/shared/offline_compiler/source/ocloc_api.cpp +++ b/shared/offline_compiler/source/ocloc_api.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -60,12 +60,12 @@ int oclocInvoke(unsigned int numArgs, const char *argv[], int oclocFreeOutput(uint32_t *numOutputs, uint8_t ***dataOutputs, uint64_t **lenOutputs, char ***nameOutputs) { for (uint32_t i = 0; i < *numOutputs; i++) { - delete[](*dataOutputs)[i]; - delete[](*nameOutputs)[i]; + delete[] (*dataOutputs)[i]; + delete[] (*nameOutputs)[i]; } - delete[](*dataOutputs); - delete[](*lenOutputs); - delete[](*nameOutputs); + delete[] (*dataOutputs); + delete[] (*lenOutputs); + delete[] (*nameOutputs); return 0; } diff --git a/shared/source/aub/aub_subcapture.h b/shared/source/aub/aub_subcapture.h index e1830e8392..9f572edd2e 100644 --- a/shared/source/aub/aub_subcapture.h +++ b/shared/source/aub/aub_subcapture.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -20,9 +20,9 @@ class SettingsReader; class AubSubCaptureCommon { public: enum class SubCaptureMode { - Off = 0, //subcapture off - Filter, //subcapture kernel specified by filter (static regkey) - Toggle //toggle subcapture on/off (dynamic regkey) + Off = 0, // subcapture off + Filter, // subcapture kernel specified by filter (static regkey) + Toggle // toggle subcapture on/off (dynamic regkey) } subCaptureMode = SubCaptureMode::Off; struct SubCaptureFilter { diff --git a/shared/source/aub_mem_dump/aub_alloc_dump.inl b/shared/source/aub_mem_dump/aub_alloc_dump.inl index 87b77ac5dd..e374a8ba9a 100644 --- a/shared/source/aub_mem_dump/aub_alloc_dump.inl +++ b/shared/source/aub_mem_dump/aub_alloc_dump.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -168,7 +168,7 @@ void dumpImageInTreFormat(GraphicsAllocation &gfxAllocation, AubMemDump::AubFile using RENDER_SURFACE_STATE = typename GfxFamily::RENDER_SURFACE_STATE; auto gmm = gfxAllocation.getDefaultGmm(); if ((gmm->gmmResourceInfo->getNumSamples() > 1) || (gfxAllocation.isCompressionEnabled())) { - DEBUG_BREAK_IF(true); //unsupported + DEBUG_BREAK_IF(true); // unsupported return; } diff --git a/shared/source/aub_mem_dump/aub_mem_dump.h b/shared/source/aub_mem_dump/aub_mem_dump.h index 754104019d..c5727f2719 100644 --- a/shared/source/aub_mem_dump/aub_mem_dump.h +++ b/shared/source/aub_mem_dump/aub_mem_dump.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -321,12 +321,12 @@ struct LrcaHelper { uint32_t numNoops0 = 3; - uint32_t offsetLRI1 = offsetLRI0 + (1 + numRegsLRI0 * 2 + numNoops0) * sizeof(uint32_t); //offsetLRI == 0x21 * sizeof(uint32_t); + uint32_t offsetLRI1 = offsetLRI0 + (1 + numRegsLRI0 * 2 + numNoops0) * sizeof(uint32_t); // offsetLRI == 0x21 * sizeof(uint32_t); uint32_t numRegsLRI1 = 9; uint32_t numNoops1 = 13; - uint32_t offsetLRI2 = offsetLRI1 + (1 + numRegsLRI1 * 2 + numNoops1) * sizeof(uint32_t); //offsetLR2 == 0x41 * sizeof(uint32_t); + uint32_t offsetLRI2 = offsetLRI1 + (1 + numRegsLRI1 * 2 + numNoops1) * sizeof(uint32_t); // offsetLR2 == 0x41 * sizeof(uint32_t); uint32_t numRegsLRI2 = 1; 
uint32_t offsetRingRegisters = offsetLRI0 + (3 * sizeof(uint32_t)); diff --git a/shared/source/aub_mem_dump/aub_mem_dump_pvc_and_later.inl b/shared/source/aub_mem_dump/aub_mem_dump_pvc_and_later.inl index dee046b69b..f6e27e2ada 100644 --- a/shared/source/aub_mem_dump/aub_mem_dump_pvc_and_later.inl +++ b/shared/source/aub_mem_dump/aub_mem_dump_pvc_and_later.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -170,7 +170,7 @@ const MMIOList AUBFamilyMapper::globalMMIO = { MMIOPair(0x0000B098, 0x00300010), MMIOPair(0x0000B09C, 0x00300010), - //PAT_INDEX + // PAT_INDEX MMIOPair(0x00004100, 0x0000000), MMIOPair(0x00004104, 0x0000000), MMIOPair(0x00004108, 0x0000000), @@ -180,42 +180,42 @@ const MMIOList AUBFamilyMapper::globalMMIO = { MMIOPair(0x00004118, 0x0000000), MMIOPair(0x0000411c, 0x0000000), - MMIOPair(0x00004b80, 0xffff1001), //GACB_PERF_CTRL_REG - MMIOPair(0x00007000, 0xffff0000), //CACHE_MODE_0 - MMIOPair(0x00007004, 0xffff0000), //CACHE_MODE_1 - MMIOPair(0x000043F8, 0x00000000), //Gen12 (A-step) chicken bit for AuxT granularity - MMIOPair(0x00009008, 0x00000200), //IDICR - MMIOPair(0x0000900c, 0x00001b40), //SNPCR - MMIOPair(0x0000b120, 0x14000002), //LTCDREG - MMIOPair(0x00042080, 0x00000000), //CHICKEN_MISC_1 - MMIOPair(0x000020D4, 0xFFFF0000), //Chicken bit for CSFE - MMIOPair(0x0000B0A0, 0x00000000), //SCRATCH 2 for LNCF unit - MMIOPair(0x000094D4, 0x00000000), //Slice unit Level Clock Gating Control + MMIOPair(0x00004b80, 0xffff1001), // GACB_PERF_CTRL_REG + MMIOPair(0x00007000, 0xffff0000), // CACHE_MODE_0 + MMIOPair(0x00007004, 0xffff0000), // CACHE_MODE_1 + MMIOPair(0x000043F8, 0x00000000), // Gen12 (A-step) chicken bit for AuxT granularity + MMIOPair(0x00009008, 0x00000200), // IDICR + MMIOPair(0x0000900c, 0x00001b40), // SNPCR + MMIOPair(0x0000b120, 0x14000002), // LTCDREG + MMIOPair(0x00042080, 0x00000000), // CHICKEN_MISC_1 + MMIOPair(0x000020D4, 0xFFFF0000), // Chicken bit for CSFE + MMIOPair(0x0000B0A0, 0x00000000), // SCRATCH 2 for LNCF unit + MMIOPair(0x000094D4, 0x00000000), // Slice unit Level Clock Gating Control // Capture Perf MMIO register programming - MMIOPair(0x0000B004, 0x2FC0100B), //KM_ARBITER_CTRL_REG - MMIOPair(0x0000B404, 0x00000160), //KM_GLOBAL_INVALIDATION_REG - MMIOPair(0x00008708, 0x00000000), //KM_GEN12_IDI_CONTROL_REGISTER + MMIOPair(0x0000B004, 0x2FC0100B), // KM_ARBITER_CTRL_REG + MMIOPair(0x0000B404, 0x00000160), // KM_GLOBAL_INVALIDATION_REG + MMIOPair(0x00008708, 0x00000000), // KM_GEN12_IDI_CONTROL_REGISTER // Tiled Resources VA Translation Table L3 Pointer - MMIOPair(0x00004410, 0xffffffff), //GEN12_TRTT_NULL_TILE_REG - MMIOPair(0x00004414, 0xfffffffe), //GEN12_TRTT_INVD_TILE_REG - MMIOPair(0x00004404, 0x000000ff), //GEN12_TRTT_VA_MASKDATA_REG - MMIOPair(0x00004408, 0x00000000), //LDWORD GMM_GEN12_TRTT_L3_POINTER - MMIOPair(0x0000440C, 0x00000000), //UDWORD GMM_GEN12_TRTT_L3_POINTER - MMIOPair(0x00004400, 0x00000001), //GEN12_TRTT_TABLE_CONTROL - MMIOPair(0x00004DFC, 0x00000000), //GEN9_TR_CHICKEN_BIT_VECTOR + MMIOPair(0x00004410, 0xffffffff), // GEN12_TRTT_NULL_TILE_REG + MMIOPair(0x00004414, 0xfffffffe), // GEN12_TRTT_INVD_TILE_REG + MMIOPair(0x00004404, 0x000000ff), // GEN12_TRTT_VA_MASKDATA_REG + MMIOPair(0x00004408, 0x00000000), // LDWORD GMM_GEN12_TRTT_L3_POINTER + MMIOPair(0x0000440C, 0x00000000), // UDWORD GMM_GEN12_TRTT_L3_POINTER + MMIOPair(0x00004400, 0x00000001), // GEN12_TRTT_TABLE_CONTROL + MMIOPair(0x00004DFC, 0x00000000), // 
GEN9_TR_CHICKEN_BIT_VECTOR }; static const MMIOList mmioListRCS = { - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x00002058), 0x00000000), //CTX_WA_PTR_RCSUNIT - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000020a8), 0x00000000), //IMR - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x00002058), 0x00000000), // CTX_WA_PTR_RCSUNIT + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000020a8), 0x00000000), // IMR + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE - MMIOPair(0x00002090, 0xffff0000), //CHICKEN_PWR_CTX_RASTER_1 - MMIOPair(0x000020e0, 0xffff4000), //FF_SLICE_CS_CHICKEN1_RCSUNIT - MMIOPair(0x000020e4, 0xffff0000), //FF_SLICE_CS_CHICKEN2_RCSUNIT - MMIOPair(0x000020ec, 0xffff0051), //CS_DEBUG_MODE1 + MMIOPair(0x00002090, 0xffff0000), // CHICKEN_PWR_CTX_RASTER_1 + MMIOPair(0x000020e0, 0xffff4000), // FF_SLICE_CS_CHICKEN1_RCSUNIT + MMIOPair(0x000020e4, 0xffff0000), // FF_SLICE_CS_CHICKEN2_RCSUNIT + MMIOPair(0x000020ec, 0xffff0051), // CS_DEBUG_MODE1 // FORCE_TO_NONPRIV MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024d0), 0x00007014), @@ -231,31 +231,31 @@ static const MMIOList mmioListRCS = { MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024f8), 0x0000e000), MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024fc), 0x0000e000), - MMIOPair(0x00002580, 0xffff0005), //CS_CHICKEN1 - MMIOPair(0x0000e194, 0xffff0002), //CHICKEN_SAMPLER_2 + MMIOPair(0x00002580, 0xffff0005), // CS_CHICKEN1 + MMIOPair(0x0000e194, 0xffff0002), // CHICKEN_SAMPLER_2 - MMIOPair(0x0000B134, 0xA0000000) //L3ALLOCREG + MMIOPair(0x0000B134, 0xA0000000) // L3ALLOCREG }; static const MMIOList mmioListBCS = { - MMIOPair(AubMemDump::computeRegisterOffset(bcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(bcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static const MMIOList mmioListVCS = { - MMIOPair(AubMemDump::computeRegisterOffset(vcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(vcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static const MMIOList mmioListVECS = { - MMIOPair(AubMemDump::computeRegisterOffset(vecs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(vecs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static MMIOList mmioListCCSInstance(uint32_t mmioBase) { MMIOList mmioList; - mmioList.push_back(MMIOPair(0x0000ce90, 0x00030003)); //GFX_MULT_CTXT_CTL - enable multi-context with 4CCS - mmioList.push_back(MMIOPair(0x0000b170, 0x00030003)); //MULT_CTXT_CTL - enable multi-context with 4CCS - mmioList.push_back(MMIOPair(0x00014800, 0xFFFF0001)); //RCU_MODE - mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x0000229c), 0xffff8280)); //GFX_MODE + mmioList.push_back(MMIOPair(0x0000ce90, 0x00030003)); // GFX_MULT_CTXT_CTL - enable multi-context with 4CCS + mmioList.push_back(MMIOPair(0x0000b170, 0x00030003)); // MULT_CTXT_CTL - enable multi-context with 4CCS + mmioList.push_back(MMIOPair(0x00014800, 0xFFFF0001)); // RCU_MODE + mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x0000229c), 0xffff8280)); // GFX_MODE // FORCE_TO_NONPRIV mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x000024d0), 0x0000e000)); @@ -271,7 +271,7 @@ static MMIOList 
mmioListCCSInstance(uint32_t mmioBase) { mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x000024f8), 0x0000e000)); mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x000024fc), 0x0000e000)); - mmioList.push_back(MMIOPair(0x0000B234, 0xA0000000)); //L3ALLOCREG_CCS0 + mmioList.push_back(MMIOPair(0x0000B234, 0xA0000000)); // L3ALLOCREG_CCS0 return mmioList; }; diff --git a/shared/source/aub_mem_dump/aub_mem_dump_xehp_and_later.inl b/shared/source/aub_mem_dump/aub_mem_dump_xehp_and_later.inl index 06a42912a7..a596d0bd70 100644 --- a/shared/source/aub_mem_dump/aub_mem_dump_xehp_and_later.inl +++ b/shared/source/aub_mem_dump/aub_mem_dump_xehp_and_later.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -152,7 +152,7 @@ const MMIOList AUBFamilyMapper::globalMMIO = { MMIOPair(0x0000B098, 0x00300010), MMIOPair(0x0000B09C, 0x00300010), - //PAT_INDEX + // PAT_INDEX MMIOPair(0x00004100, 0x0000000), MMIOPair(0x00004104, 0x0000000), MMIOPair(0x00004108, 0x0000000), @@ -162,42 +162,42 @@ const MMIOList AUBFamilyMapper::globalMMIO = { MMIOPair(0x00004118, 0x0000000), MMIOPair(0x0000411c, 0x0000000), - MMIOPair(0x00004b80, 0xffff1001), //GACB_PERF_CTRL_REG - MMIOPair(0x00007000, 0xffff0000), //CACHE_MODE_0 - MMIOPair(0x00007004, 0xffff0000), //CACHE_MODE_1 - MMIOPair(0x000043F8, 0x00000000), //Gen12 (A-step) chicken bit for AuxT granularity - MMIOPair(0x00009008, 0x00000200), //IDICR - MMIOPair(0x0000900c, 0x00001b40), //SNPCR - MMIOPair(0x0000b120, 0x14000002), //LTCDREG - MMIOPair(0x00042080, 0x00000000), //CHICKEN_MISC_1 - MMIOPair(0x000020D4, 0xFFFF0000), //Chicken bit for CSFE - MMIOPair(0x0000B0A0, 0x00000000), //SCRATCH 2 for LNCF unit - MMIOPair(0x000094D4, 0x00000000), //Slice unit Level Clock Gating Control + MMIOPair(0x00004b80, 0xffff1001), // GACB_PERF_CTRL_REG + MMIOPair(0x00007000, 0xffff0000), // CACHE_MODE_0 + MMIOPair(0x00007004, 0xffff0000), // CACHE_MODE_1 + MMIOPair(0x000043F8, 0x00000000), // Gen12 (A-step) chicken bit for AuxT granularity + MMIOPair(0x00009008, 0x00000200), // IDICR + MMIOPair(0x0000900c, 0x00001b40), // SNPCR + MMIOPair(0x0000b120, 0x14000002), // LTCDREG + MMIOPair(0x00042080, 0x00000000), // CHICKEN_MISC_1 + MMIOPair(0x000020D4, 0xFFFF0000), // Chicken bit for CSFE + MMIOPair(0x0000B0A0, 0x00000000), // SCRATCH 2 for LNCF unit + MMIOPair(0x000094D4, 0x00000000), // Slice unit Level Clock Gating Control // Capture Perf MMIO register programming - MMIOPair(0x0000B004, 0x2FC0100B), //KM_ARBITER_CTRL_REG - MMIOPair(0x0000B404, 0x00000160), //KM_GLOBAL_INVALIDATION_REG - MMIOPair(0x00008708, 0x00000000), //KM_GEN12_IDI_CONTROL_REGISTER + MMIOPair(0x0000B004, 0x2FC0100B), // KM_ARBITER_CTRL_REG + MMIOPair(0x0000B404, 0x00000160), // KM_GLOBAL_INVALIDATION_REG + MMIOPair(0x00008708, 0x00000000), // KM_GEN12_IDI_CONTROL_REGISTER // Tiled Resources VA Translation Table L3 Pointer - MMIOPair(0x00004410, 0xffffffff), //GEN12_TRTT_NULL_TILE_REG - MMIOPair(0x00004414, 0xfffffffe), //GEN12_TRTT_INVD_TILE_REG - MMIOPair(0x00004404, 0x000000ff), //GEN12_TRTT_VA_MASKDATA_REG - MMIOPair(0x00004408, 0x00000000), //LDWORD GMM_GEN12_TRTT_L3_POINTER - MMIOPair(0x0000440C, 0x00000000), //UDWORD GMM_GEN12_TRTT_L3_POINTER - MMIOPair(0x00004400, 0x00000001), //GEN12_TRTT_TABLE_CONTROL - MMIOPair(0x00004DFC, 0x00000000), //GEN9_TR_CHICKEN_BIT_VECTOR + MMIOPair(0x00004410, 0xffffffff), // GEN12_TRTT_NULL_TILE_REG + MMIOPair(0x00004414, 0xfffffffe), 
// GEN12_TRTT_INVD_TILE_REG + MMIOPair(0x00004404, 0x000000ff), // GEN12_TRTT_VA_MASKDATA_REG + MMIOPair(0x00004408, 0x00000000), // LDWORD GMM_GEN12_TRTT_L3_POINTER + MMIOPair(0x0000440C, 0x00000000), // UDWORD GMM_GEN12_TRTT_L3_POINTER + MMIOPair(0x00004400, 0x00000001), // GEN12_TRTT_TABLE_CONTROL + MMIOPair(0x00004DFC, 0x00000000), // GEN9_TR_CHICKEN_BIT_VECTOR }; static const MMIOList mmioListRCS = { - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x00002058), 0x00000000), //CTX_WA_PTR_RCSUNIT - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000020a8), 0x00000000), //IMR - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x00002058), 0x00000000), // CTX_WA_PTR_RCSUNIT + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000020a8), 0x00000000), // IMR + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE - MMIOPair(0x00002090, 0xffff0000), //CHICKEN_PWR_CTX_RASTER_1 - MMIOPair(0x000020e0, 0xffff4000), //FF_SLICE_CS_CHICKEN1_RCSUNIT - MMIOPair(0x000020e4, 0xffff0000), //FF_SLICE_CS_CHICKEN2_RCSUNIT - MMIOPair(0x000020ec, 0xffff0051), //CS_DEBUG_MODE1 + MMIOPair(0x00002090, 0xffff0000), // CHICKEN_PWR_CTX_RASTER_1 + MMIOPair(0x000020e0, 0xffff4000), // FF_SLICE_CS_CHICKEN1_RCSUNIT + MMIOPair(0x000020e4, 0xffff0000), // FF_SLICE_CS_CHICKEN2_RCSUNIT + MMIOPair(0x000020ec, 0xffff0051), // CS_DEBUG_MODE1 // FORCE_TO_NONPRIV MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024d0), 0x00007014), @@ -213,31 +213,31 @@ static const MMIOList mmioListRCS = { MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024f8), 0x0000e000), MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024fc), 0x0000e000), - MMIOPair(0x00002580, 0xffff0005), //CS_CHICKEN1 - MMIOPair(0x0000e194, 0xffff0002), //CHICKEN_SAMPLER_2 + MMIOPair(0x00002580, 0xffff0005), // CS_CHICKEN1 + MMIOPair(0x0000e194, 0xffff0002), // CHICKEN_SAMPLER_2 - MMIOPair(0x0000B134, 0xA0000000) //L3ALLOCREG + MMIOPair(0x0000B134, 0xA0000000) // L3ALLOCREG }; static const MMIOList mmioListBCS = { - MMIOPair(AubMemDump::computeRegisterOffset(bcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(bcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static const MMIOList mmioListVCS = { - MMIOPair(AubMemDump::computeRegisterOffset(vcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(vcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static const MMIOList mmioListVECS = { - MMIOPair(AubMemDump::computeRegisterOffset(vecs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(vecs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static MMIOList mmioListCCSInstance(uint32_t mmioBase) { MMIOList mmioList; - mmioList.push_back(MMIOPair(0x0000ce90, 0x00030003)); //GFX_MULT_CTXT_CTL - enable multi-context with 4CCS - mmioList.push_back(MMIOPair(0x0000b170, 0x00030003)); //MULT_CTXT_CTL - enable multi-context with 4CCS - mmioList.push_back(MMIOPair(0x00014800, 0xFFFF0001)); //RCU_MODE - mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x0000229c), 0xffff8280)); //GFX_MODE + mmioList.push_back(MMIOPair(0x0000ce90, 0x00030003)); // GFX_MULT_CTXT_CTL - enable multi-context with 4CCS + mmioList.push_back(MMIOPair(0x0000b170, 0x00030003)); // MULT_CTXT_CTL - enable multi-context with 4CCS + 
mmioList.push_back(MMIOPair(0x00014800, 0xFFFF0001)); // RCU_MODE + mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x0000229c), 0xffff8280)); // GFX_MODE // FORCE_TO_NONPRIV mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x000024d0), 0x0000e000)); @@ -253,7 +253,7 @@ static MMIOList mmioListCCSInstance(uint32_t mmioBase) { mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x000024f8), 0x0000e000)); mmioList.push_back(MMIOPair(AubMemDump::computeRegisterOffset(mmioBase, 0x000024fc), 0x0000e000)); - mmioList.push_back(MMIOPair(0x0000B234, 0xA0000000)); //L3ALLOCREG_CCS0 + mmioList.push_back(MMIOPair(0x0000B234, 0xA0000000)); // L3ALLOCREG_CCS0 return mmioList; }; diff --git a/shared/source/command_container/implicit_scaling.cpp b/shared/source/command_container/implicit_scaling.cpp index 40fd76022a..89efd18363 100644 --- a/shared/source/command_container/implicit_scaling.cpp +++ b/shared/source/command_container/implicit_scaling.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ bool ImplicitScalingHelper::isImplicitScalingEnabled(const DeviceBitfield &devic if (DebugManager.flags.EnableWalkerPartition.get() != -1) { partitionWalker = !!DebugManager.flags.EnableWalkerPartition.get(); } - //we can't do this without local memory + // we can't do this without local memory partitionWalker &= OSInterface::osEnableLocalMemory; return partitionWalker; diff --git a/shared/source/command_stream/aub_command_stream_receiver.cpp b/shared/source/command_stream/aub_command_stream_receiver.cpp index 0d638c7e37..84b80ca6e9 100644 --- a/shared/source/command_stream/aub_command_stream_receiver.cpp +++ b/shared/source/command_stream/aub_command_stream_receiver.cpp @@ -117,8 +117,8 @@ bool AubFileStream::init(uint32_t stepping, uint32_t device) { header.metal = 0; header.device = device; header.csxSwizzling = CmdServicesMemTraceVersion::CsxSwizzlingValues::Disabled; - //Which recording method used: - // Phys is required for GGTT memory to be written directly to phys vs through aperture. + // Which recording method used: + // Phys is required for GGTT memory to be written directly to phys vs through aperture. 
header.recordingMethod = CmdServicesMemTraceVersion::RecordingMethodValues::Phy; header.pch = CmdServicesMemTraceVersion::PchValues::Default; header.captureTool = CmdServicesMemTraceVersion::CaptureToolValues::GenKmdCapture; @@ -141,7 +141,7 @@ void AubFileStream::writeMemory(uint64_t physAddress, const void *memory, size_t auto sizeRemainder = size % sizeof(uint32_t); if (sizeRemainder) { - //if input size is not 4 byte aligned, write extra zeros to AUB + // if input size is not 4 byte aligned, write extra zeros to AUB uint32_t zero = 0; write(reinterpret_cast(&zero), sizeof(uint32_t) - sizeRemainder); } @@ -263,7 +263,7 @@ void AubFileStream::expectMemory(uint64_t physAddress, const void *memory, size_ auto remainder = sizeThisIteration & (sizeof(uint32_t) - 1); if (remainder) { - //if size is not 4 byte aligned, write extra zeros to AUB + // if size is not 4 byte aligned, write extra zeros to AUB uint32_t zero = 0; write(reinterpret_cast(&zero), sizeof(uint32_t) - remainder); } @@ -289,7 +289,7 @@ bool AubFileStream::addComment(const char *message) { write(message, messageLen); auto remainder = messageLen & (sizeof(uint32_t) - 1); if (remainder) { - //if size is not 4 byte aligned, write extra zeros to AUB + // if size is not 4 byte aligned, write extra zeros to AUB uint32_t zero = 0; write(reinterpret_cast(&zero), sizeof(uint32_t) - remainder); } diff --git a/shared/source/command_stream/aub_command_stream_receiver_hw_base.inl b/shared/source/command_stream/aub_command_stream_receiver_hw_base.inl index c13919a98a..c8fe144dff 100644 --- a/shared/source/command_stream/aub_command_stream_receiver_hw_base.inl +++ b/shared/source/command_stream/aub_command_stream_receiver_hw_base.inl @@ -607,7 +607,7 @@ void AUBCommandStreamReceiverHw::pollForCompletionImpl() { const uint32_t mask = getMaskAndValueForPollForCompletion(); const uint32_t value = mask; stream->registerPoll( - AubMemDump::computeRegisterOffset(mmioBase, 0x2234), //EXECLIST_STATUS + AubMemDump::computeRegisterOffset(mmioBase, 0x2234), // EXECLIST_STATUS mask, value, pollNotEqual, @@ -712,7 +712,7 @@ void AUBCommandStreamReceiverHw::writeMMIO(uint32_t offset, uint32_t template void AUBCommandStreamReceiverHw::expectMMIO(uint32_t mmioRegister, uint32_t expectedValue) { if (hardwareContextController) { - //Add support for expectMMIO to AubStream + // Add support for expectMMIO to AubStream return; } this->getAubStream()->expectMMIO(mmioRegister, expectedValue); diff --git a/shared/source/command_stream/command_stream_receiver_simulated_common_hw_xehp_and_later.inl b/shared/source/command_stream/command_stream_receiver_simulated_common_hw_xehp_and_later.inl index dce2f9b734..82c1c043fa 100644 --- a/shared/source/command_stream/command_stream_receiver_simulated_common_hw_xehp_and_later.inl +++ b/shared/source/command_stream/command_stream_receiver_simulated_common_hw_xehp_and_later.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -22,16 +22,16 @@ void CommandStreamReceiverSimulatedCommonHw::initGlobalMMIO() { } if (this->localMemoryEnabled) { - MMIOPair guCntl = {0x00101010, 0x00000080}; //GU_CNTL + MMIOPair guCntl = {0x00101010, 0x00000080}; // GU_CNTL stream->writeMMIO(guCntl.first, guCntl.second); - MMIOPair lmemCfg = {0x0000cf58, 0x80000000}; //LMEM_CFG + MMIOPair lmemCfg = {0x0000cf58, 0x80000000}; // LMEM_CFG stream->writeMMIO(lmemCfg.first, lmemCfg.second); MMIOPair tileAddrRange[] = {{0x00004900, 0x0001}, {0x00004904, 0x0001}, 
{0x00004908, 0x0001}, - {0x0000490c, 0x0001}}; //XEHP_TILE_ADDR_RANGE + {0x0000490c, 0x0001}}; // XEHP_TILE_ADDR_RANGE const uint32_t numberOfTiles = 4; const uint32_t localMemorySizeGB = static_cast(AubHelper::getPerTileLocalMemorySize(&this->peekHwInfo()) / MemoryConstants::gigaByte); diff --git a/shared/source/command_stream/experimental_command_buffer.inl b/shared/source/command_stream/experimental_command_buffer.inl index ba9cd5e8ba..a190b19a5e 100644 --- a/shared/source/command_stream/experimental_command_buffer.inl +++ b/shared/source/command_stream/experimental_command_buffer.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -38,15 +38,15 @@ size_t ExperimentalCommandBuffer::programExperimentalCommandBuffer() { size_t returnOffset = currentStream->getUsed(); - //begin timestamp + // begin timestamp addTimeStampPipeControl(); addExperimentalCommands(); - //end timestamp + // end timestamp addTimeStampPipeControl(); - //end + // end auto pCmd = currentStream->getSpaceForCmd(); *pCmd = GfxFamily::cmdInitBatchBufferEnd; @@ -80,7 +80,7 @@ void ExperimentalCommandBuffer::addTimeStampPipeControl() { *commandStreamReceiver->peekExecutionEnvironment().rootDeviceEnvironments[commandStreamReceiver->getRootDeviceIndex()]->getHardwareInfo(), args); - //moving to next chunk + // moving to next chunk timestampsOffset += sizeof(uint64_t); DEBUG_BREAK_IF(timestamps->getUnderlyingBufferSize() < timestampsOffset); diff --git a/shared/source/command_stream/scratch_space_controller_base.cpp b/shared/source/command_stream/scratch_space_controller_base.cpp index 05e5d9ebf9..48ad8fa987 100644 --- a/shared/source/command_stream/scratch_space_controller_base.cpp +++ b/shared/source/command_stream/scratch_space_controller_base.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -61,14 +61,14 @@ uint64_t ScratchSpaceControllerBase::calculateNewGSH() { return gsh; } uint64_t ScratchSpaceControllerBase::getScratchPatchAddress() { - //for 32 bit scratch space pointer is being programmed in Media VFE State and is relative to 0 as General State Base Address - //for 64 bit, scratch space pointer is being programmed as "General State Base Address - scratchSpaceOffsetFor64bit" - // and "0 + scratchSpaceOffsetFor64bit" is being programmed in Media VFE state + // for 32 bit scratch space pointer is being programmed in Media VFE State and is relative to 0 as General State Base Address + // for 64 bit, scratch space pointer is being programmed as "General State Base Address - scratchSpaceOffsetFor64bit" + // and "0 + scratchSpaceOffsetFor64bit" is being programmed in Media VFE state uint64_t scratchAddress = 0; if (scratchAllocation) { scratchAddress = scratchAllocation->getGpuAddressToPatch(); if (is64bit && !getMemoryManager()->peekForce32BitAllocations()) { - //this is to avoid scractch allocation offset "0" + // this is to avoid scractch allocation offset "0" scratchAddress = ScratchSpaceConstants::scratchSpaceOffsetFor64Bit; } } diff --git a/shared/source/direct_submission/windows/wddm_direct_submission.inl b/shared/source/direct_submission/windows/wddm_direct_submission.inl index 820052b9d7..a80e2610af 100644 --- a/shared/source/direct_submission/windows/wddm_direct_submission.inl +++ b/shared/source/direct_submission/windows/wddm_direct_submission.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel 
Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -50,7 +50,7 @@ WddmDirectSubmission::~WddmDirectSubmission() { template bool WddmDirectSubmission::allocateOsResources() { - //for now only WDDM2.0 + // for now only WDDM2.0 UNRECOVERABLE_IF(wddm->getWddmVersion() != WddmVersion::WDDM_2_0); bool ret = wddm->getWddmInterface()->createMonitoredFence(ringFence); diff --git a/shared/source/gen12lp/aub_mem_dump_gen12lp.cpp b/shared/source/gen12lp/aub_mem_dump_gen12lp.cpp index 5066e3e5c5..40df6d7b72 100644 --- a/shared/source/gen12lp/aub_mem_dump_gen12lp.cpp +++ b/shared/source/gen12lp/aub_mem_dump_gen12lp.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -147,7 +147,7 @@ const MMIOList AUBFamilyMapper::globalMMIO = { MMIOPair(0x0000B098, 0x00300010), MMIOPair(0x0000B09C, 0x00300010), - //PAT_INDEX + // PAT_INDEX MMIOPair(0x00004100, 0x0000000), MMIOPair(0x00004104, 0x0000000), MMIOPair(0x00004108, 0x0000000), @@ -157,24 +157,24 @@ const MMIOList AUBFamilyMapper::globalMMIO = { MMIOPair(0x00004118, 0x0000000), MMIOPair(0x0000411c, 0x0000000), - MMIOPair(0x00004b80, 0xffff1001), //GACB_PERF_CTRL_REG - MMIOPair(0x00007000, 0xffff0000), //CACHE_MODE_0 - MMIOPair(0x00007004, 0xffff0000), //CACHE_MODE_1 - MMIOPair(0x00009008, 0x00000200), //IDICR - MMIOPair(0x0000900c, 0x00001b40), //SNPCR - MMIOPair(0x0000b120, 0x14000002), //LTCDREG - MMIOPair(0x00042080, 0x00000000), //CHICKEN_MISC_1 + MMIOPair(0x00004b80, 0xffff1001), // GACB_PERF_CTRL_REG + MMIOPair(0x00007000, 0xffff0000), // CACHE_MODE_0 + MMIOPair(0x00007004, 0xffff0000), // CACHE_MODE_1 + MMIOPair(0x00009008, 0x00000200), // IDICR + MMIOPair(0x0000900c, 0x00001b40), // SNPCR + MMIOPair(0x0000b120, 0x14000002), // LTCDREG + MMIOPair(0x00042080, 0x00000000), // CHICKEN_MISC_1 }; static const MMIOList mmioListRCS = { - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x00002058), 0x00000000), //CTX_WA_PTR_RCSUNIT - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000020a8), 0x00000000), //IMR - MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x00002058), 0x00000000), // CTX_WA_PTR_RCSUNIT + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000020a8), 0x00000000), // IMR + MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE - MMIOPair(0x00002090, 0xffff0000), //CHICKEN_PWR_CTX_RASTER_1 - MMIOPair(0x000020e0, 0xffff4000), //FF_SLICE_CS_CHICKEN1_RCSUNIT - MMIOPair(0x000020e4, 0xffff0000), //FF_SLICE_CS_CHICKEN2_RCSUNIT - MMIOPair(0x000020ec, 0xffff0051), //CS_DEBUG_MODE1 + MMIOPair(0x00002090, 0xffff0000), // CHICKEN_PWR_CTX_RASTER_1 + MMIOPair(0x000020e0, 0xffff4000), // FF_SLICE_CS_CHICKEN1_RCSUNIT + MMIOPair(0x000020e4, 0xffff0000), // FF_SLICE_CS_CHICKEN2_RCSUNIT + MMIOPair(0x000020ec, 0xffff0051), // CS_DEBUG_MODE1 // FORCE_TO_NONPRIV MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024d0), 0x00007014), @@ -190,28 +190,28 @@ static const MMIOList mmioListRCS = { MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024f8), 0x0000e000), MMIOPair(AubMemDump::computeRegisterOffset(rcs.mmioBase, 0x000024fc), 0x0000e000), - MMIOPair(0x00002580, 0xffff0005), //CS_CHICKEN1 - MMIOPair(0x0000e194, 0xffff0002), //CHICKEN_SAMPLER_2 + MMIOPair(0x00002580, 0xffff0005), // CS_CHICKEN1 + MMIOPair(0x0000e194, 
0xffff0002), // CHICKEN_SAMPLER_2 - MMIOPair(0x0000B134, 0xD0000020) //L3ALLOCREG + MMIOPair(0x0000B134, 0xD0000020) // L3ALLOCREG }; static const MMIOList mmioListBCS = { - MMIOPair(AubMemDump::computeRegisterOffset(bcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(bcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static const MMIOList mmioListVCS = { - MMIOPair(AubMemDump::computeRegisterOffset(vcs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(vcs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static const MMIOList mmioListVECS = { - MMIOPair(AubMemDump::computeRegisterOffset(vecs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(AubMemDump::computeRegisterOffset(vecs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE }; static const MMIOList mmioListCCS = { - MMIOPair(0x0000ce90, 0x00010001), //GFX_MULT_CTXT_CTL - MMIOPair(0x00014800, 0x00010001), //RCU_MODE - MMIOPair(AubMemDump::computeRegisterOffset(ccs.mmioBase, 0x0000229c), 0xffff8280), //GFX_MODE + MMIOPair(0x0000ce90, 0x00010001), // GFX_MULT_CTXT_CTL + MMIOPair(0x00014800, 0x00010001), // RCU_MODE + MMIOPair(AubMemDump::computeRegisterOffset(ccs.mmioBase, 0x0000229c), 0xffff8280), // GFX_MODE // FORCE_TO_NONPRIV MMIOPair(AubMemDump::computeRegisterOffset(ccs.mmioBase, 0x000024d0), 0x00007014), @@ -227,7 +227,7 @@ static const MMIOList mmioListCCS = { MMIOPair(AubMemDump::computeRegisterOffset(ccs.mmioBase, 0x000024f8), 0x0000e000), MMIOPair(AubMemDump::computeRegisterOffset(ccs.mmioBase, 0x000024fc), 0x0000e000), - MMIOPair(0x0000B234, 0xD0000020) //L3ALLOCREG_CCS0 + MMIOPair(0x0000B234, 0xD0000020) // L3ALLOCREG_CCS0 }; const MMIOList *AUBFamilyMapper::perEngineMMIO[aub_stream::NUM_ENGINES] = { diff --git a/shared/source/gen12lp/command_stream_receiver_simulated_common_hw_gen12lp.cpp b/shared/source/gen12lp/command_stream_receiver_simulated_common_hw_gen12lp.cpp index 03c9b398ba..433052e43f 100644 --- a/shared/source/gen12lp/command_stream_receiver_simulated_common_hw_gen12lp.cpp +++ b/shared/source/gen12lp/command_stream_receiver_simulated_common_hw_gen12lp.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -17,7 +17,7 @@ void CommandStreamReceiverSimulatedCommonHw::initGlobalMMIO() { } if (this->isLocalMemoryEnabled()) { - MMIOPair lmemCfg = {0x0000cf58, 0x80000000}; //LMEM_CFG + MMIOPair lmemCfg = {0x0000cf58, 0x80000000}; // LMEM_CFG stream->writeMMIO(lmemCfg.first, lmemCfg.second); } } diff --git a/shared/source/gen8/hw_cmds_base.h b/shared/source/gen8/hw_cmds_base.h index 6259bf9d48..d341556164 100644 --- a/shared/source/gen8/hw_cmds_base.h +++ b/shared/source/gen8/hw_cmds_base.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -15,7 +15,7 @@ #include #include -//forward declaration for parsing logic +// forward declaration for parsing logic template struct CmdParse; diff --git a/shared/source/generated/xe_hp_core/hw_cmds_generated_xe_hp_core.inl b/shared/source/generated/xe_hp_core/hw_cmds_generated_xe_hp_core.inl index b2aea223a7..461cf739a1 100644 --- a/shared/source/generated/xe_hp_core/hw_cmds_generated_xe_hp_core.inl +++ b/shared/source/generated/xe_hp_core/hw_cmds_generated_xe_hp_core.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * 
SPDX-License-Identifier: MIT * @@ -5685,7 +5685,7 @@ typedef struct tagMI_ARB_CHECK { } inline void setPreFetchDisable(const uint32_t value) { TheStructure.Common.Pre_FetchDisable = value; - TheStructure.Common.MaskBits = 1 << 0; //PreFetchDisable is at bit0, so set bit0 of mask to 1 + TheStructure.Common.MaskBits = 1 << 0; // PreFetchDisable is at bit0, so set bit0 of mask to 1 } inline uint32_t getPreFetchDisable() const { return TheStructure.Common.Pre_FetchDisable; diff --git a/shared/source/generated/xe_hpc_core/hw_cmds_generated_xe_hpc_core.inl b/shared/source/generated/xe_hpc_core/hw_cmds_generated_xe_hpc_core.inl index d6ce276d9b..4c0701543d 100644 --- a/shared/source/generated/xe_hpc_core/hw_cmds_generated_xe_hpc_core.inl +++ b/shared/source/generated/xe_hpc_core/hw_cmds_generated_xe_hpc_core.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -6011,7 +6011,7 @@ typedef struct tagMI_ARB_CHECK { } inline void setPreParserDisable(const bool value) { TheStructure.Common.PreParserDisable = value; - TheStructure.Common.MaskBits = 1 << 0; //PreParserDisable is at bit0, so set bit0 of mask to 1 + TheStructure.Common.MaskBits = 1 << 0; // PreParserDisable is at bit0, so set bit0 of mask to 1 } inline bool getPreParserDisable() const { return TheStructure.Common.PreParserDisable; diff --git a/shared/source/generated/xe_hpg_core/hw_cmds_generated_xe_hpg_core.inl b/shared/source/generated/xe_hpg_core/hw_cmds_generated_xe_hpg_core.inl index b27a191343..51e60940fb 100644 --- a/shared/source/generated/xe_hpg_core/hw_cmds_generated_xe_hpg_core.inl +++ b/shared/source/generated/xe_hpg_core/hw_cmds_generated_xe_hpg_core.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -5768,7 +5768,7 @@ typedef struct tagMI_ARB_CHECK { } inline void setPreFetchDisable(const uint32_t value) { TheStructure.Common.Pre_FetchDisable = value; - TheStructure.Common.MaskBits = 1 << 0; //PreFetchDisable is at bit0, so set bit0 of mask to 1 + TheStructure.Common.MaskBits = 1 << 0; // PreFetchDisable is at bit0, so set bit0 of mask to 1 } inline uint32_t getPreFetchDisable() const { return TheStructure.Common.Pre_FetchDisable; diff --git a/shared/source/helpers/basic_math.h b/shared/source/helpers/basic_math.h index 69e84a3ede..3f7e47d600 100644 --- a/shared/source/helpers/basic_math.h +++ b/shared/source/helpers/basic_math.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -106,11 +106,11 @@ inline uint16_t float2Half(float f) { uint32_t fsign = (u.u >> 16) & 0x8000; float x = std::fabs(f); - //Nan + // Nan if (x != x) { u.u >>= (24 - 11); u.u &= 0x7fff; - u.u |= 0x0200; //silence the NaN + u.u |= 0x0200; // silence the NaN return u.u | fsign; } diff --git a/shared/source/helpers/hw_walk_order.h b/shared/source/helpers/hw_walk_order.h index 85374b88e1..5d3fbed473 100644 --- a/shared/source/helpers/hw_walk_order.h +++ b/shared/source/helpers/hw_walk_order.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -12,7 +12,7 @@ namespace NEO { namespace HwWalkOrderHelper { -//make sure table below matches Hardware Spec +// make sure table below matches Hardware Spec inline constexpr uint32_t walkOrderPossibilties = 6u; inline constexpr 
uint8_t X = 0; inline constexpr uint8_t Y = 1; diff --git a/shared/source/helpers/register_offsets.h b/shared/source/helpers/register_offsets.h index 4a67407df2..2f62c0ccbd 100644 --- a/shared/source/helpers/register_offsets.h +++ b/shared/source/helpers/register_offsets.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -14,7 +14,7 @@ inline constexpr uint32_t L3SQC_REG4 = 0xB118; inline constexpr uint32_t GPGPU_WALKER_COOKIE_VALUE_BEFORE_WALKER = 0xFFFFFFFF; inline constexpr uint32_t GPGPU_WALKER_COOKIE_VALUE_AFTER_WALKER = 0x00000000; -//Threads Dimension X/Y/Z +// Threads Dimension X/Y/Z inline constexpr uint32_t GPUGPU_DISPATCHDIMX = 0x2500; inline constexpr uint32_t GPUGPU_DISPATCHDIMY = 0x2504; inline constexpr uint32_t GPUGPU_DISPATCHDIMZ = 0x2508; @@ -42,7 +42,7 @@ inline constexpr uint32_t CS_PREDICATE_RESULT = 0x2418; inline constexpr uint32_t CS_PREDICATE_RESULT_2 = 0x23BC; inline constexpr uint32_t SEMA_WAIT_POLL = 0x0224c; -//Alu opcodes +// Alu opcodes inline constexpr uint32_t NUM_ALU_INST_FOR_READ_MODIFY_WRITE = 4; enum class AluRegisters : uint32_t { diff --git a/shared/source/helpers/uint16_avx2.h b/shared/source/helpers/uint16_avx2.h index fb93157b8f..00abf81ad8 100644 --- a/shared/source/helpers/uint16_avx2.h +++ b/shared/source/helpers/uint16_avx2.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ struct uint16x16_t { } uint16x16_t(uint16_t a) { - value = _mm256_set1_epi16(a); //AVX + value = _mm256_set1_epi16(a); // AVX } explicit uint16x16_t(const void *alignedPtr) { @@ -54,33 +54,33 @@ struct uint16x16_t { inline void load(const void *alignedPtr) { DEBUG_BREAK_IF(!isAligned<32>(alignedPtr)); - value = _mm256_load_si256(reinterpret_cast(alignedPtr)); //AVX + value = _mm256_load_si256(reinterpret_cast(alignedPtr)); // AVX } inline void loadUnaligned(const void *ptr) { - value = _mm256_loadu_si256(reinterpret_cast(ptr)); //AVX + value = _mm256_loadu_si256(reinterpret_cast(ptr)); // AVX } inline void store(void *alignedPtr) { DEBUG_BREAK_IF(!isAligned<32>(alignedPtr)); - _mm256_store_si256(reinterpret_cast<__m256i *>(alignedPtr), value); //AVX + _mm256_store_si256(reinterpret_cast<__m256i *>(alignedPtr), value); // AVX } inline void storeUnaligned(void *ptr) { - _mm256_storeu_si256(reinterpret_cast<__m256i *>(ptr), value); //AVX + _mm256_storeu_si256(reinterpret_cast<__m256i *>(ptr), value); // AVX } inline operator bool() const { - return _mm256_testz_si256(value, mask().value) ? false : true; //AVX + return _mm256_testz_si256(value, mask().value) ? 
false : true; // AVX } inline uint16x16_t &operator-=(const uint16x16_t &a) { - value = _mm256_sub_epi16(value, a.value); //AVX2 + value = _mm256_sub_epi16(value, a.value); // AVX2 return *this; } inline uint16x16_t &operator+=(const uint16x16_t &a) { - value = _mm256_add_epi16(value, a.value); //AVX2 + value = _mm256_add_epi16(value, a.value); // AVX2 return *this; } @@ -88,13 +88,13 @@ struct uint16x16_t { uint16x16_t result; result.value = _mm256_xor_si256(mask().value, - _mm256_cmpgt_epi16(b.value, a.value)); //AVX2 + _mm256_cmpgt_epi16(b.value, a.value)); // AVX2 return result; } inline friend uint16x16_t operator&&(const uint16x16_t &a, const uint16x16_t &b) { uint16x16_t result; - result.value = _mm256_and_si256(a.value, b.value); //AVX2 + result.value = _mm256_and_si256(a.value, b.value); // AVX2 return result; } @@ -104,7 +104,7 @@ struct uint16x16_t { // Have to swap arguments to get intended calling semantics result.value = - _mm256_blendv_epi8(b.value, a.value, mask.value); //AVX2 + _mm256_blendv_epi8(b.value, a.value, mask.value); // AVX2 return result; } }; diff --git a/shared/source/helpers/uint16_sse4.h b/shared/source/helpers/uint16_sse4.h index 804a985818..d9178ff39f 100644 --- a/shared/source/helpers/uint16_sse4.h +++ b/shared/source/helpers/uint16_sse4.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -31,7 +31,7 @@ struct uint16x8_t { } uint16x8_t(uint16_t a) { - value = _mm_set1_epi16(a); //SSE2 + value = _mm_set1_epi16(a); // SSE2 } explicit uint16x8_t(const void *alignedPtr) { @@ -57,33 +57,33 @@ struct uint16x8_t { inline void load(const void *alignedPtr) { DEBUG_BREAK_IF(!isAligned<16>(alignedPtr)); - value = _mm_load_si128(reinterpret_cast(alignedPtr)); //SSE2 + value = _mm_load_si128(reinterpret_cast(alignedPtr)); // SSE2 } inline void loadUnaligned(const void *ptr) { - value = _mm_loadu_si128(reinterpret_cast(ptr)); //SSE2 + value = _mm_loadu_si128(reinterpret_cast(ptr)); // SSE2 } inline void store(void *alignedPtr) { DEBUG_BREAK_IF(!isAligned<16>(alignedPtr)); - _mm_store_si128(reinterpret_cast<__m128i *>(alignedPtr), value); //SSE2 + _mm_store_si128(reinterpret_cast<__m128i *>(alignedPtr), value); // SSE2 } inline void storeUnaligned(void *ptr) { - _mm_storeu_si128(reinterpret_cast<__m128i *>(ptr), value); //SSE2 + _mm_storeu_si128(reinterpret_cast<__m128i *>(ptr), value); // SSE2 } inline operator bool() const { - return _mm_test_all_zeros(value, mask().value) ? false : true; //SSE4.1 alternatives? + return _mm_test_all_zeros(value, mask().value) ? false : true; // SSE4.1 alternatives? 
} inline uint16x8_t &operator-=(const uint16x8_t &a) { - value = _mm_sub_epi16(value, a.value); //SSE2 + value = _mm_sub_epi16(value, a.value); // SSE2 return *this; } inline uint16x8_t &operator+=(const uint16x8_t &a) { - value = _mm_add_epi16(value, a.value); //SSE2 + value = _mm_add_epi16(value, a.value); // SSE2 return *this; } @@ -91,13 +91,13 @@ struct uint16x8_t { uint16x8_t result; result.value = _mm_xor_si128(mask().value, - _mm_cmplt_epi16(a.value, b.value)); //SSE2 + _mm_cmplt_epi16(a.value, b.value)); // SSE2 return result; } inline friend uint16x8_t operator&&(const uint16x8_t &a, const uint16x8_t &b) { uint16x8_t result; - result.value = _mm_and_si128(a.value, b.value); //SSE2 + result.value = _mm_and_si128(a.value, b.value); // SSE2 return result; } @@ -107,7 +107,7 @@ struct uint16x8_t { // Have to swap arguments to get intended calling semantics result.value = - _mm_blendv_epi8(b.value, a.value, mask.value); //SSE4.1 alternatives? + _mm_blendv_epi8(b.value, a.value, mask.value); // SSE4.1 alternatives? return result; } }; diff --git a/shared/source/helpers/x86_64/local_id_gen.cpp b/shared/source/helpers/x86_64/local_id_gen.cpp index 921e5ce3c8..ba1346f27e 100644 --- a/shared/source/helpers/x86_64/local_id_gen.cpp +++ b/shared/source/helpers/x86_64/local_id_gen.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -43,7 +43,7 @@ LocalIDHelper::LocalIDHelper() { LocalIDHelper LocalIDHelper::initializer; -//traditional function to generate local IDs +// traditional function to generate local IDs void generateLocalIDs(void *buffer, uint16_t simd, const std::array &localWorkgroupSize, const std::array &dimensionsOrder, bool isImageOnlyKernel, uint32_t grfSize) { auto threadsPerWorkGroup = static_cast(getThreadsPerWG(simd, localWorkgroupSize[0] * localWorkgroupSize[1] * localWorkgroupSize[2])); bool useLayoutForImages = isImageOnlyKernel && isCompatibleWithLayoutForImages(localWorkgroupSize, dimensionsOrder, simd); diff --git a/shared/source/os_interface/linux/engine_info.cpp b/shared/source/os_interface/linux/engine_info.cpp index ab45bb9bb2..10a4f5a029 100644 --- a/shared/source/os_interface/linux/engine_info.cpp +++ b/shared/source/os_interface/linux/engine_info.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -143,7 +143,7 @@ void EngineInfo::setSupportedEnginesInfo(const RootDeviceEnvironment &rootDevice uint32_t EngineInfo::getEngineTileIndex(const EngineClassInstance &engine) { uint32_t tile = 0; if (tileToEngineMap.empty()) { - return tile; //Empty map + return tile; // Empty map } for (auto itr = tileToEngineMap.begin(); itr != tileToEngineMap.end(); itr++) { diff --git a/shared/source/os_interface/linux/local/dg1/ioctl_helper_dg1.cpp b/shared/source/os_interface/linux/local/dg1/ioctl_helper_dg1.cpp index b4b646f388..41bf928242 100644 --- a/shared/source/os_interface/linux/local/dg1/ioctl_helper_dg1.cpp +++ b/shared/source/os_interface/linux/local/dg1/ioctl_helper_dg1.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -26,7 +26,7 @@ uint32_t IoctlHelperImpl::createGemExt(const MemRegionsVec &memClass if (ret == 0) { return ret; } - //fallback to PROD_DG1 kernel + // fallback to PROD_DG1 kernel handle = 0u; uint32_t regionsSize = static_cast(memClassInstances.size()); 
std::vector regions(regionsSize); diff --git a/shared/source/os_interface/os_context.cpp b/shared/source/os_interface/os_context.cpp index 6b17598de0..2c885dbf2e 100644 --- a/shared/source/os_interface/os_context.cpp +++ b/shared/source/os_interface/os_context.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -121,7 +121,7 @@ bool OsContext::checkDirectSubmissionSupportsEngine(const DirectSubmissionProper startOnInit = renderOverrideKey == 1 ? true : false; } } else { - //assume else is CCS + // assume else is CCS int32_t computeOverrideKey = DebugManager.flags.DirectSubmissionOverrideComputeSupport.get(); if (computeOverrideKey != -1) { supported = computeOverrideKey == 0 ? false : true; @@ -129,7 +129,7 @@ bool OsContext::checkDirectSubmissionSupportsEngine(const DirectSubmissionProper } } - //enable start in context only when default support is overridden and enabled + // enable start in context only when default support is overridden and enabled if (supported && !directSubmissionProperty.engineSupported) { startInContext = true; } diff --git a/shared/source/page_fault_manager/windows/cpu_page_fault_manager_windows.cpp b/shared/source/page_fault_manager/windows/cpu_page_fault_manager_windows.cpp index f3cdea2a79..6c375530be 100644 --- a/shared/source/page_fault_manager/windows/cpu_page_fault_manager_windows.cpp +++ b/shared/source/page_fault_manager/windows/cpu_page_fault_manager_windows.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2021 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -23,11 +23,11 @@ PageFaultManagerWindows::PageFaultManagerWindows() { pageFaultHandler = [this](struct _EXCEPTION_POINTERS *exceptionInfo) { if (exceptionInfo->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { if (this->verifyPageFault(reinterpret_cast(exceptionInfo->ExceptionRecord->ExceptionInformation[1]))) { - //this is our fault that we serviced, continue app execution + // this is our fault that we serviced, continue app execution return EXCEPTION_CONTINUE_EXECUTION; } } - //not our exception + // not our exception return EXCEPTION_CONTINUE_SEARCH; }; diff --git a/shared/source/xe_hpc_core/command_stream_receiver_hw_xe_hpc_core.cpp b/shared/source/xe_hpc_core/command_stream_receiver_hw_xe_hpc_core.cpp index d7855d1015..4980d37d7e 100644 --- a/shared/source/xe_hpc_core/command_stream_receiver_hw_xe_hpc_core.cpp +++ b/shared/source/xe_hpc_core/command_stream_receiver_hw_xe_hpc_core.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -76,7 +76,7 @@ void BlitCommandsHelper::appendBlitCommandsMemCopy(const BlitProperties } auto cachePolicy = GMM_RESOURCE_USAGE_OCL_BUFFER; - //if transfer size bigger then L3 size, copy with L3 disabled + // if transfer size bigger then L3 size, copy with L3 disabled if (blitProperites.copySize.x * blitProperites.copySize.y * blitProperites.copySize.z * blitProperites.bytesPerPixel >= (rootDeviceEnvironment.getHardwareInfo()->gtSystemInfo.L3CacheSizeInKb * KB / 2)) { cachePolicy = GMM_RESOURCE_USAGE_OCL_BUFFER_CACHELINE_MISALIGNED; } diff --git a/shared/test/common/helpers/memory_management.cpp b/shared/test/common/helpers/memory_management.cpp index 3558d7fb6d..f44838b5d7 100644 --- a/shared/test/common/helpers/memory_management.cpp +++ b/shared/test/common/helpers/memory_management.cpp @@ -1,5 +1,5 @@ /* 
- * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -272,7 +272,7 @@ size_t enumerateLeak(size_t indexAllocationTop, size_t indexDeallocationTop, boo if (eventDeallocation.address == eventAllocation.address && eventDeallocation.event != AllocationEvent::EVENT_UNKNOWN) { - //this memory was once freed, now it is allocated but not freed + // this memory was once freed, now it is allocated but not freed if (requireCallStack && eventDeallocation.frames == 0) { potentialLeak = true; potentialLeakIndex = currentIndex; diff --git a/shared/test/common/libult/linux/drm_mock.h b/shared/test/common/libult/linux/drm_mock.h index 5da9bd7ebc..ad4e0e0678 100644 --- a/shared/test/common/libult/linux/drm_mock.h +++ b/shared/test/common/libult/linux/drm_mock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -215,34 +215,34 @@ class DrmMock : public Drm { bool queryPageFaultSupportCalled = false; - //DRM_IOCTL_I915_GEM_EXECBUFFER2 + // DRM_IOCTL_I915_GEM_EXECBUFFER2 std::vector execBuffers{}; std::vector receivedBos{}; int execBufferResult = 0; - //DRM_IOCTL_I915_GEM_CREATE + // DRM_IOCTL_I915_GEM_CREATE uint64_t createParamsSize = 0; uint32_t createParamsHandle = 0; - //DRM_IOCTL_I915_GEM_SET_TILING + // DRM_IOCTL_I915_GEM_SET_TILING uint32_t setTilingMode = 0; uint32_t setTilingHandle = 0; uint32_t setTilingStride = 0; - //DRM_IOCTL_PRIME_FD_TO_HANDLE + // DRM_IOCTL_PRIME_FD_TO_HANDLE uint32_t outputHandle = 0; int32_t inputFd = 0; int fdToHandleRetVal = 0; - //DRM_IOCTL_HANDLE_TO_FD + // DRM_IOCTL_HANDLE_TO_FD int32_t outputFd = 0; bool incrementOutputFdAfterCall = false; - //DRM_IOCTL_I915_GEM_USERPTR + // DRM_IOCTL_I915_GEM_USERPTR uint32_t returnHandle = 0; uint64_t gpuMemSize = 3u * MemoryConstants::gigaByte; - //DRM_IOCTL_I915_QUERY + // DRM_IOCTL_I915_QUERY QueryItem storedQueryItem = {}; - //DRM_IOCTL_I915_GEM_WAIT + // DRM_IOCTL_I915_GEM_WAIT GemWait receivedGemWait = {}; - //DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT + // DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT uint32_t storedDrmContextId{}; - //DRM_IOCTL_GEM_CLOSE + // DRM_IOCTL_GEM_CLOSE int storedRetValForGemClose = 0; GemVmControl receivedGemVmControl{}; diff --git a/shared/test/common/mocks/mock_elf.h b/shared/test/common/mocks/mock_elf.h index 742b5dfafa..4587bb42aa 100644 --- a/shared/test/common/mocks/mock_elf.h +++ b/shared/test/common/mocks/mock_elf.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -118,7 +118,7 @@ struct MockElfEncoder : public NEO::Elf::ElfEncoder { auto symTabSectionHeader = elfEncoder.getSectionHeader(symTabSectionIndex); symTabSectionHeader->info = 2; - symTabSectionHeader->link = elfEncoder.getLastSectionHeaderIndex() + 1; //strtab section added as last + symTabSectionHeader->link = elfEncoder.getLastSectionHeaderIndex() + 1; // strtab section added as last return elfEncoder.encode(); } }; diff --git a/shared/test/common/os_interface/linux/device_command_stream_fixture.h b/shared/test/common/os_interface/linux/device_command_stream_fixture.h index f195630f9d..25ca2b2fcb 100644 --- a/shared/test/common/os_interface/linux/device_command_stream_fixture.h +++ b/shared/test/common/os_interface/linux/device_command_stream_fixture.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * 
SPDX-License-Identifier: MIT * @@ -170,45 +170,45 @@ class DrmMockCustom : public Drm { std::atomic ioctl_res; std::atomic ioctl_res_ext; - //DRM_IOCTL_I915_GEM_EXECBUFFER2 + // DRM_IOCTL_I915_GEM_EXECBUFFER2 NEO::MockExecBuffer execBuffer{}; - //First exec object + // First exec object NEO::MockExecObject execBufferBufferObjects{}; - //DRM_IOCTL_I915_GEM_CREATE + // DRM_IOCTL_I915_GEM_CREATE uint64_t createParamsSize = 0; uint32_t createParamsHandle = 0; - //DRM_IOCTL_I915_GEM_SET_TILING + // DRM_IOCTL_I915_GEM_SET_TILING uint32_t setTilingMode = 0; uint32_t setTilingHandle = 0; uint32_t setTilingStride = 0; - //DRM_IOCTL_I915_GEM_GET_TILING + // DRM_IOCTL_I915_GEM_GET_TILING uint32_t getTilingModeOut = 0; uint32_t getTilingHandleIn = 0; - //DRM_IOCTL_PRIME_FD_TO_HANDLE + // DRM_IOCTL_PRIME_FD_TO_HANDLE uint32_t outputHandle = 0; int32_t inputFd = 0; - //DRM_IOCTL_PRIME_HANDLE_TO_FD + // DRM_IOCTL_PRIME_HANDLE_TO_FD uint32_t inputHandle = 0; int32_t outputFd = 0; bool incrementOutputFdAfterCall = false; int32_t inputFlags = 0; - //DRM_IOCTL_I915_GEM_USERPTR + // DRM_IOCTL_I915_GEM_USERPTR uint32_t returnHandle = 0; - //DRM_IOCTL_I915_GEM_SET_DOMAIN + // DRM_IOCTL_I915_GEM_SET_DOMAIN uint32_t setDomainHandle = 0; uint32_t setDomainReadDomains = 0; uint32_t setDomainWriteDomain = 0; - //DRM_IOCTL_I915_GETPARAM + // DRM_IOCTL_I915_GETPARAM NEO::GetParam recordedGetParam = {0}; int getParamRetValue = 0; - //DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM + // DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM NEO::GemContextParam recordedGetContextParam = {0}; uint64_t getContextParamRetValue = 0; - //DRM_IOCTL_I915_GEM_WAIT + // DRM_IOCTL_I915_GEM_WAIT int64_t gemWaitTimeout = 0; - //DRM_IOCTL_I915_GEM_MMAP_OFFSET + // DRM_IOCTL_I915_GEM_MMAP_OFFSET uint32_t mmapOffsetHandle = 0; uint32_t mmapOffsetPad = 0; uint64_t mmapOffsetExpected = 0; @@ -216,7 +216,7 @@ class DrmMockCustom : public Drm { bool failOnMmapOffset = false; bool failOnPrimeFdToHandle = false; - //DRM_IOCTL_I915_GEM_CREATE_EXT + // DRM_IOCTL_I915_GEM_CREATE_EXT uint64_t createExtSize = 0; uint32_t createExtHandle = 0; uint64_t createExtExtensions = 0; diff --git a/shared/test/common/os_interface/linux/device_command_stream_fixture_context.h b/shared/test/common/os_interface/linux/device_command_stream_fixture_context.h index a3d6b43429..e7c4e99104 100644 --- a/shared/test/common/os_interface/linux/device_command_stream_fixture_context.h +++ b/shared/test/common/os_interface/linux/device_command_stream_fixture_context.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -10,16 +10,16 @@ #include "shared/test/common/libult/linux/drm_mock_prelim_context.h" struct DrmMockCustomPrelimContext { - //PRELIM_DRM_IOCTL_I915_GEM_CREATE_EXT + // PRELIM_DRM_IOCTL_I915_GEM_CREATE_EXT uint64_t createExtSize = 0; uint32_t createExtHandle = 0; uint64_t createExtExtensions = 0; - //PRELIM_DRM_IOCTL_I915_GEM_WAIT_USER_FENCE + // PRELIM_DRM_IOCTL_I915_GEM_WAIT_USER_FENCE WaitUserFence receivedGemWaitUserFence{}; uint32_t gemWaitUserFenceCalled = 0; - //PRELIM_DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE + // PRELIM_DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE uint64_t completionAddress = 0; uint64_t completionValue = 0; diff --git a/shared/test/common/os_interface/linux/drm_command_stream_fixture.h b/shared/test/common/os_interface/linux/drm_command_stream_fixture.h index 101a68a05d..a21516d502 100644 --- a/shared/test/common/os_interface/linux/drm_command_stream_fixture.h +++ 
b/shared/test/common/os_interface/linux/drm_command_stream_fixture.h @@ -35,7 +35,7 @@ class DrmCommandStreamTest : public ::testing::Test { template void setUpT() { - //make sure this is disabled, we don't want to test this now + // make sure this is disabled, we don't want to test this now DebugManager.flags.EnableForcePin.set(false); mock = new DrmMock(mockFd, *executionEnvironment.rootDeviceEnvironments[0]); @@ -68,7 +68,7 @@ class DrmCommandStreamTest : public ::testing::Test { // Memory manager creates pinBB with ioctl, expect one call EXPECT_EQ(1u, mock->ioctlCallsCount); - //assert we have memory manager + // assert we have memory manager ASSERT_NE(nullptr, memoryManager); mock->ioctlCount.reset(); mock->ioctlTearDownExpected.reset(); @@ -118,7 +118,7 @@ class DrmCommandStreamEnhancedTemplate : public ::testing::Test { executionEnvironment->incRefInternal(); executionEnvironment->initGmm(); this->dbgState = std::make_unique(); - //make sure this is disabled, we don't want to test this now + // make sure this is disabled, we don't want to test this now DebugManager.flags.EnableForcePin.set(false); mock = new DrmType(*executionEnvironment->rootDeviceEnvironments[rootDeviceIndex]); @@ -199,7 +199,7 @@ class DrmCommandStreamEnhancedWithFailingExecTemplate : public ::testing::Test { executionEnvironment->incRefInternal(); executionEnvironment->initGmm(); this->dbgState = std::make_unique(); - //make sure this is disabled, we don't want to test this now + // make sure this is disabled, we don't want to test this now DebugManager.flags.EnableForcePin.set(false); mock = new T(*executionEnvironment->rootDeviceEnvironments[rootDeviceIndex]); diff --git a/shared/test/common/os_interface/linux/drm_memory_manager_fixture.cpp b/shared/test/common/os_interface/linux/drm_memory_manager_fixture.cpp index 05c461e736..c7d09176a2 100644 --- a/shared/test/common/os_interface/linux/drm_memory_manager_fixture.cpp +++ b/shared/test/common/os_interface/linux/drm_memory_manager_fixture.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -62,7 +62,7 @@ void DrmMemoryManagerFixture::setUp(DrmMockCustom *mock, bool localMemoryEnabled memoryManager = new (std::nothrow) TestedDrmMemoryManager(localMemoryEnabled, false, false, *executionEnvironment); executionEnvironment->memoryManager.reset(memoryManager); - //assert we have memory manager + // assert we have memory manager ASSERT_NE(nullptr, memoryManager); if (memoryManager->getgemCloseWorker()) { memoryManager->getgemCloseWorker()->close(true); diff --git a/shared/test/unit_test/compiler_interface/compiler_cache_tests.cpp b/shared/test/unit_test/compiler_interface/compiler_cache_tests.cpp index 56aecae933..891f1de3f0 100644 --- a/shared/test/unit_test/compiler_interface/compiler_cache_tests.cpp +++ b/shared/test/unit_test/compiler_interface/compiler_cache_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -36,13 +36,13 @@ TEST(HashGeneration, givenMisalignedBufferWhenPassedToUpdateFunctionThenProperPt char *misalignedPtr = (char *)originalPtr; misalignedPtr++; - //values really used + // values really used misalignedPtr[0] = 1; misalignedPtr[1] = 2; misalignedPtr[2] = 3; misalignedPtr[3] = 4; misalignedPtr[4] = 5; - //values not used should be ommitted + // values not used should be ommitted misalignedPtr[5] = 6; misalignedPtr[6] = 7; @@ -82,9 +82,9 @@ 
TEST(HashGeneration, givenMisalignedBufferWithSizeOneWhenPassedToUpdateFunctionT char *misalignedPtr = (char *)originalPtr; misalignedPtr++; - //values really used + // values really used misalignedPtr[0] = 1; - //values not used should be ommitted + // values not used should be omitted misalignedPtr[1] = 2; misalignedPtr[2] = 3; misalignedPtr[3] = 4; diff --git a/shared/test/unit_test/device_binary_format/elf/elf_decoder_tests.cpp b/shared/test/unit_test/device_binary_format/elf/elf_decoder_tests.cpp index f013056c9e..00194a5cdd 100644 --- a/shared/test/unit_test/device_binary_format/elf/elf_decoder_tests.cpp +++ b/shared/test/unit_test/device_binary_format/elf/elf_decoder_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -120,7 +120,7 @@ class TestElf { auto symTabSectionHeader = elfEncoder.getSectionHeader(symTabSectionIndex); symTabSectionHeader->info = 4; // one greater than last LOCAL symbol - symTabSectionHeader->link = elfEncoder.getLastSectionHeaderIndex() + 1; //strtab section added as last + symTabSectionHeader->link = elfEncoder.getLastSectionHeaderIndex() + 1; // strtab section added as last return elfEncoder.encode(); } const int64_t relaAddend = 16; @@ -477,7 +477,7 @@ TEST(ElfDecoder, WhenElfContainsInvalidSymbolSectionHeaderThenDecodingFailsAndEr sectionHeader0.type = SECTION_HEADER_TYPE::SHT_SYMTAB; sectionHeader0.size = sizeof(sectionHeader0); sectionHeader0.offset = header.shOff; - sectionHeader0.entsize = sizeof(ElfSymbolEntry) + 4; //invalid entSize + sectionHeader0.entsize = sizeof(ElfSymbolEntry) + 4; // invalid entSize storage.insert(storage.end(), reinterpret_cast(&sectionHeader0), reinterpret_cast(&sectionHeader0 + 1)); diff --git a/shared/test/unit_test/direct_submission/windows/wddm_direct_submission_tests.cpp b/shared/test/unit_test/direct_submission/windows/wddm_direct_submission_tests.cpp index 579a7df8e7..b9f8064e2e 100644 --- a/shared/test/unit_test/direct_submission/windows/wddm_direct_submission_tests.cpp +++ b/shared/test/unit_test/direct_submission/windows/wddm_direct_submission_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -188,7 +188,7 @@ HWTEST_F(WddmDirectSubmissionTest, givenWddmWhenAllocateOsResourcesResidencyFail } EXPECT_EQ(0u, wddmMockInterface->createMonitoredFenceCalled); - //expect 2 makeResident calls, due to fail on 1st and then retry (which also fails) + // expect 2 makeResident calls, due to fail on 1st and then retry (which also fails) EXPECT_EQ(2u, wddm->makeResidentResult.called); EXPECT_EQ(expectedAllocationsCnt, wddm->makeResidentResult.handleCount); } diff --git a/shared/test/unit_test/encoders/test_encode_dispatch_kernel_dg2_and_later.cpp b/shared/test/unit_test/encoders/test_encode_dispatch_kernel_dg2_and_later.cpp index 8fd15198ff..caff77c260 100644 --- a/shared/test/unit_test/encoders/test_encode_dispatch_kernel_dg2_and_later.cpp +++ b/shared/test/unit_test/encoders/test_encode_dispatch_kernel_dg2_and_later.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -81,7 +81,7 @@ HWTEST2_F(CommandEncodeStatesTestDg2AndLater, GivenVariousSlmTotalSizesAndSettin {0, PREFERRED_SLM_ALLOCATION_SIZE::PREFERRED_SLM_ALLOCATION_SIZE_0K}, {16 * KB, PREFERRED_SLM_ALLOCATION_SIZE::PREFERRED_SLM_ALLOCATION_SIZE_16K}, {32 * KB,
PREFERRED_SLM_ALLOCATION_SIZE::PREFERRED_SLM_ALLOCATION_SIZE_32K}, - //since we can't set 48KB as SLM size for workgroup, we need to ask for 64KB here. + // since we can't set 48KB as SLM size for workgroup, we need to ask for 64KB here. {64 * KB, PREFERRED_SLM_ALLOCATION_SIZE::PREFERRED_SLM_ALLOCATION_SIZE_64K}, }; diff --git a/shared/test/unit_test/encoders/walker_partition_tests_pvc_and_later.cpp b/shared/test/unit_test/encoders/walker_partition_tests_pvc_and_later.cpp index 4290d99268..6cfc4a4732 100644 --- a/shared/test/unit_test/encoders/walker_partition_tests_pvc_and_later.cpp +++ b/shared/test/unit_test/encoders/walker_partition_tests_pvc_and_later.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -61,10 +61,10 @@ HWTEST2_F(WalkerPartitionPvcAndLaterTests, givenProgramBatchBufferStartCommandWh EXPECT_EQ(expectedUsedSize, totalBytesProgrammed); if (gfxCoreFamily == IGFX_XE_HPC_CORE) { - //bits 57-63 are zeroed + // bits 57-63 are zeroed EXPECT_EQ((gpuAddress & 0x1FFFFFFFFFFFFFF), batchBufferStart->getBatchBufferStartAddress()); } else { - //bits 48-63 are zeroed + // bits 48-63 are zeroed EXPECT_EQ((gpuAddress & 0xFFFFFFFFFFFF), batchBufferStart->getBatchBufferStartAddress()); } EXPECT_TRUE(batchBufferStart->getPredicationEnable()); diff --git a/shared/test/unit_test/encoders/walker_partition_tests_xehp_and_later_1.cpp b/shared/test/unit_test/encoders/walker_partition_tests_xehp_and_later_1.cpp index 4ea9228e16..51f98131fc 100644 --- a/shared/test/unit_test/encoders/walker_partition_tests_xehp_and_later_1.cpp +++ b/shared/test/unit_test/encoders/walker_partition_tests_xehp_and_later_1.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -89,7 +89,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenWalkerPartitionWhenConst auto batchBufferStart = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStart); EXPECT_TRUE(batchBufferStart->getPredicationEnable()); - //address routes to WALKER section which is before control section + // address routes to WALKER section which is before control section auto address = batchBufferStart->getBatchBufferStartAddress(); EXPECT_EQ(address, gpuVirtualAddress + expectedCommandUsedSize - walkerSectionCommands); parsedOffset += sizeof(WalkerPartition::BATCH_BUFFER_START); @@ -125,7 +125,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenWalkerPartitionWhenConst parsedOffset += sizeof(WalkerPartition::MI_SEMAPHORE_WAIT); - //final batch buffer start that routes at the end of the batch buffer + // final batch buffer start that routes at the end of the batch buffer auto batchBufferStartFinal = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); EXPECT_NE(nullptr, batchBufferStartFinal); EXPECT_EQ(batchBufferStartFinal->getBatchBufferStartAddress(), gpuVirtualAddress + optionalBatchBufferEndOffset); @@ -1115,7 +1115,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenDebugModesForWalkerParti auto batchBufferStart = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStart); EXPECT_TRUE(batchBufferStart->getPredicationEnable()); - //address routes to WALKER section which is before control section + // address routes to WALKER section which is before control section auto address = batchBufferStart->getBatchBufferStartAddress(); EXPECT_EQ(address, gpuVirtualAddress + 
expectedCommandUsedSize - walkerSectionCommands); parsedOffset += sizeof(WalkerPartition::BATCH_BUFFER_START); @@ -1144,7 +1144,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenDebugModesForWalkerParti miSemaphoreWait = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); } - //final batch buffer start that routes at the end of the batch buffer + // final batch buffer start that routes at the end of the batch buffer auto batchBufferStartFinal = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); EXPECT_NE(nullptr, batchBufferStartFinal); EXPECT_EQ(batchBufferStartFinal->getBatchBufferStartAddress(), gpuVirtualAddress + optionalBatchBufferEndOffset); @@ -1336,7 +1336,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenStaticPartitionIsPreferr auto batchBufferStart = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStart); EXPECT_TRUE(batchBufferStart->getPredicationEnable()); - //address routes to WALKER section which is before control section + // address routes to WALKER section which is before control section auto address = batchBufferStart->getBatchBufferStartAddress(); EXPECT_EQ(address, gpuVirtualAddress + expectedCommandUsedSize - walkerSectionCommands); parsedOffset += sizeof(WalkerPartition::BATCH_BUFFER_START); @@ -1374,7 +1374,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenStaticPartitionIsPreferr EXPECT_EQ(wparidCCSOffset, loadRegisterMem->getRegisterAddress()); parsedOffset += sizeof(WalkerPartition::LOAD_REGISTER_MEM); - //final batch buffer start that routes at the end of the batch buffer + // final batch buffer start that routes at the end of the batch buffer auto batchBufferStartFinal = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); EXPECT_NE(nullptr, batchBufferStartFinal); EXPECT_EQ(batchBufferStartFinal->getBatchBufferStartAddress(), gpuVirtualAddress + totalProgrammedSize); diff --git a/shared/test/unit_test/encoders/walker_partition_tests_xehp_and_later_2.cpp b/shared/test/unit_test/encoders/walker_partition_tests_xehp_and_later_2.cpp index 8212b02f14..06b18e17d1 100644 --- a/shared/test/unit_test/encoders/walker_partition_tests_xehp_and_later_2.cpp +++ b/shared/test/unit_test/encoders/walker_partition_tests_xehp_and_later_2.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -309,7 +309,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenMiAtomicWhenItIsProgramm EXPECT_FALSE(miAtomic->getWorkloadPartitionIdOffsetEnable()); auto memoryAddress = UnitTestHelper::getAtomicMemoryAddress(*miAtomic); - //bits 48-63 are zeroed + // bits 48-63 are zeroed EXPECT_EQ((gpuAddress & 0xFFFFFFFFFFFF), memoryAddress); } @@ -381,7 +381,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenProgramBatchBufferStartC ASSERT_NE(nullptr, batchBufferStart); EXPECT_EQ(expectedUsedSize, totalBytesProgrammed); - //bits 48-63 are zeroed + // bits 48-63 are zeroed EXPECT_EQ((gpuAddress & 0xFFFFFFFFFFFF), batchBufferStart->getBatchBufferStartAddress()); EXPECT_TRUE(batchBufferStart->getPredicationEnable()); @@ -427,7 +427,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenProgramComputeWalkerWhen EXPECT_EQ(COMPUTE_WALKER::PARTITION_TYPE::PARTITION_TYPE_Z, walkerCommand->getPartitionType()); EXPECT_EQ(6u, walkerCommand->getPartitionSize()); - //if we program with partition Count == 1 then do not trigger partition stuff + // if we program with partition Count == 1 then do not trigger partition stuff 
walker.setPartitionType(COMPUTE_WALKER::PARTITION_TYPE::PARTITION_TYPE_DISABLED); walkerCommandAddress = cmdBufferAddress; programPartitionedWalker(cmdBufferAddress, totalBytesProgrammed, &walker, 1u, false); @@ -919,7 +919,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenSelfCleanupSectionWhenDe auto batchBufferStart = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStart); EXPECT_TRUE(batchBufferStart->getPredicationEnable()); - //address routes to WALKER section which is before control section + // address routes to WALKER section which is before control section auto address = batchBufferStart->getBatchBufferStartAddress(); EXPECT_EQ(address, gpuVirtualAddress + expectedCommandUsedSize - walkerSectionCommands); parsedOffset += sizeof(WalkerPartition::BATCH_BUFFER_START); @@ -961,7 +961,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenSelfCleanupSectionWhenDe parsedOffset += sizeof(WalkerPartition::MI_SEMAPHORE_WAIT); - //final batch buffer start that routes at the end of the batch buffer + // final batch buffer start that routes at the end of the batch buffer auto batchBufferStartFinal = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStartFinal); EXPECT_EQ(batchBufferStartFinal->getBatchBufferStartAddress(), gpuVirtualAddress + cleanupSectionOffset); @@ -1123,7 +1123,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenSelfCleanupAndAtomicsUse auto batchBufferStart = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStart); EXPECT_TRUE(batchBufferStart->getPredicationEnable()); - //address routes to WALKER section which is before control section + // address routes to WALKER section which is before control section auto address = batchBufferStart->getBatchBufferStartAddress(); EXPECT_EQ(address, gpuVirtualAddress + expectedCommandUsedSize - walkerSectionCommands); parsedOffset += sizeof(WalkerPartition::BATCH_BUFFER_START); @@ -1166,7 +1166,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenSelfCleanupAndAtomicsUse parsedOffset += sizeof(WalkerPartition::MI_SEMAPHORE_WAIT); - //final batch buffer start that routes at the end of the batch buffer + // final batch buffer start that routes at the end of the batch buffer auto batchBufferStartFinal = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStartFinal); EXPECT_EQ(batchBufferStartFinal->getBatchBufferStartAddress(), gpuVirtualAddress + cleanupSectionOffset); @@ -1329,7 +1329,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenDynamicPartitioningWhenP auto batchBufferStart = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStart); EXPECT_TRUE(batchBufferStart->getPredicationEnable()); - //address routes to WALKER section which is before control section + // address routes to WALKER section which is before control section auto address = batchBufferStart->getBatchBufferStartAddress(); EXPECT_EQ(address, gpuVirtualAddress + expectedCommandUsedSize - walkerSectionCommands); parsedOffset += sizeof(WalkerPartition::BATCH_BUFFER_START); @@ -1340,7 +1340,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, WalkerPartitionTests, givenDynamicPartitioningWhenP EXPECT_EQ(miSetPredicate->getPredicateEnable(), MI_SET_PREDICATE::PREDICATE_ENABLE::PREDICATE_ENABLE_PREDICATE_DISABLE); parsedOffset += sizeof(WalkerPartition::MI_SET_PREDICATE); - //final batch buffer start that routes at the end of the batch buffer + // final batch buffer start that routes at the 
end of the batch buffer auto batchBufferStartFinal = genCmdCast *>(ptrOffset(cmdBuffer, parsedOffset)); ASSERT_NE(nullptr, batchBufferStartFinal); EXPECT_EQ(batchBufferStartFinal->getBatchBufferStartAddress(), gpuVirtualAddress + cleanupSectionOffset); diff --git a/shared/test/unit_test/helpers/flattened_id_tests.cpp b/shared/test/unit_test/helpers/flattened_id_tests.cpp index b6f352d43e..94ce8f19b7 100644 --- a/shared/test/unit_test/helpers/flattened_id_tests.cpp +++ b/shared/test/unit_test/helpers/flattened_id_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -251,7 +251,7 @@ INSTANTIATE_TEST_CASE_P(AllCombinations, FlattenedIDFixture, ::testing::Combine( // NOTE: You'll need a unique test prefix INSTANTIATE_TEST_CASE_P(SingleTest, FlattenedIDFixture, ::testing::Combine( - ::testing::Values(32), //SIMD - ::testing::Values(5), //LWSX - ::testing::Values(6), //LWSY - ::testing::Values(7))); //LWSZ + ::testing::Values(32), // SIMD + ::testing::Values(5), // LWSX + ::testing::Values(6), // LWSY + ::testing::Values(7))); // LWSZ diff --git a/shared/test/unit_test/helpers/flush_stamp_tests.cpp b/shared/test/unit_test/helpers/flush_stamp_tests.cpp index 11f070ade4..5e00e5a3ae 100644 --- a/shared/test/unit_test/helpers/flush_stamp_tests.cpp +++ b/shared/test/unit_test/helpers/flush_stamp_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ TEST(FlushStampTest, WhenSettingStampValueThenItIsSet) { TEST(FlushStampTest, WhenReplacingStampObjectThenRefCountIsUpdated) { FlushStampTracker flushStampTracker(true); - EXPECT_EQ(1, flushStampTracker.getStampReference()->getRefInternalCount()); //obj to release + EXPECT_EQ(1, flushStampTracker.getStampReference()->getRefInternalCount()); // obj to release auto stampObj = new FlushStampTrackingObj(); EXPECT_EQ(0, stampObj->getRefInternalCount()); // no owner diff --git a/shared/test/unit_test/helpers/local_id_tests.cpp b/shared/test/unit_test/helpers/local_id_tests.cpp index 85ce43478b..a318ba6d15 100644 --- a/shared/test/unit_test/helpers/local_id_tests.cpp +++ b/shared/test/unit_test/helpers/local_id_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -488,8 +488,8 @@ INSTANTIATE_TEST_CASE_P(LayoutForImagesTests, LocalIdsLayoutForImagesTest, ::tes // NOTE: You'll need a unique test prefix INSTANTIATE_TEST_CASE_P(SingleTest, LocalIDFixture, ::testing::Combine( - ::testing::Values(32), //SIMD - ::testing::Values(32), //GRF - ::testing::Values(5), //LWSX - ::testing::Values(6), //LWSY - ::testing::Values(7))); //LWSZ + ::testing::Values(32), // SIMD + ::testing::Values(32), // GRF + ::testing::Values(5), // LWSX + ::testing::Values(6), // LWSY + ::testing::Values(7))); // LWSZ diff --git a/shared/test/unit_test/helpers/string_tests.cpp b/shared/test/unit_test/helpers/string_tests.cpp index ec66c3725a..6c3169f22d 100644 --- a/shared/test/unit_test/helpers/string_tests.cpp +++ b/shared/test/unit_test/helpers/string_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -16,9 +16,9 @@ TEST(StringHelpers, GivenParamsWhenUsingStrncpyThenReturnIsCorrect) { char dst[1024] = ""; char src[1024] = "HelloWorld"; - //preconditions + // 
preconditions ASSERT_EQ(sizeof(dst), sizeof(src)); - //String must be smaller than array capacity + // String must be smaller than array capacity ASSERT_LT(strlen(src), sizeof(src)); auto ret = strncpy_s(nullptr, 1024, src, 1024); @@ -147,9 +147,9 @@ TEST(StringHelpers, GivenParamsWhenUsingMemcpyThenReturnIsCorrect) { char dst[1024] = ""; char src[1024] = "HelloWorld"; - //preconditions + // preconditions ASSERT_EQ(sizeof(dst), sizeof(src)); - //String must be smaller than array capacity + // String must be smaller than array capacity ASSERT_LT(strlen(src), sizeof(src)); auto ret = memcpy_s(nullptr, sizeof(dst), src, sizeof(src)); diff --git a/shared/test/unit_test/main.cpp b/shared/test/unit_test/main.cpp index 6a824140e7..45999d199a 100644 --- a/shared/test/unit_test/main.cpp +++ b/shared/test/unit_test/main.cpp @@ -90,16 +90,16 @@ void applyWorkarounds() { ss >> val; } - //intialize rand + // initialize rand srand(static_cast(time(nullptr))); - //Create at least on thread to prevent false memory leaks in tests using threads + // Create at least one thread to prevent false memory leaks in tests using threads std::thread t([&]() { }); tempThreadID = t.get_id(); t.join(); - //Create FileLogger to prevent false memory leaks + // Create FileLogger to prevent false memory leaks { NEO::fileLoggerInstance(); } @@ -184,7 +184,7 @@ int main(int argc, char **argv) { dumpTestStats = true; ++i; dumpTestStatsFileName = std::string(argv[i]); - } else if (!strcmp("--disable_pagefaulting_tests", argv[i])) { //disable tests which raise page fault signal during execution + } else if (!strcmp("--disable_pagefaulting_tests", argv[i])) { // disable tests which raise page fault signal during execution NEO::PagaFaultManagerTestConfig::disabled = true; } else if (!strcmp("--tbx", argv[i])) { if (testMode == TestMode::AubTests) { diff --git a/shared/test/unit_test/memory_manager/deferrable_allocation_deletion_tests.cpp b/shared/test/unit_test/memory_manager/deferrable_allocation_deletion_tests.cpp index 45d09e268a..2561248d27 100644 --- a/shared/test/unit_test/memory_manager/deferrable_allocation_deletion_tests.cpp +++ b/shared/test/unit_test/memory_manager/deferrable_allocation_deletion_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -166,7 +166,7 @@ TEST_F(DeferrableAllocationDeletionTest, givenNotUsedAllocationWhenApplyDeletion EXPECT_FALSE(allocation->isUsed()); EXPECT_EQ(0u, memoryManager->freeGraphicsMemoryCalled); while (!asyncDeleter->doWorkInBackground) - std::this_thread::yield(); //wait for start async thread work + std::this_thread::yield(); // wait for the async thread to start work std::unique_lock lock(asyncDeleter->queueMutex); asyncDeleter->allowExit = true; lock.unlock(); diff --git a/shared/test/unit_test/memory_manager/host_ptr_manager_tests.cpp b/shared/test/unit_test/memory_manager/host_ptr_manager_tests.cpp index 179124512b..061ecb8ec4 100644 --- a/shared/test/unit_test/memory_manager/host_ptr_manager_tests.cpp +++ b/shared/test/unit_test/memory_manager/host_ptr_manager_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -433,7 +433,7 @@ TEST_F(HostPtrManagerTest, GivenHostPtrFilledWith3TripleFragmentsWhenAskedForPop ASSERT_EQ(3u, reqs.requiredFragmentsCount); FragmentStorage fragments[maxFragmentsCount]; - //check all fragments + // check all fragments for (int i = 0; i <
maxFragmentsCount; i++) { fragments[i].fragmentCpuPointer = const_cast(reqs.allocationFragments[i].allocationPtr); fragments[i].fragmentSize = reqs.allocationFragments[i].allocationSize; @@ -701,7 +701,7 @@ TEST_F(HostPtrManagerTest, GivenHostPtrManagerFilledWithBigFragmentWhenAskedForF EXPECT_EQ(OverlapStatus::FRAGMENT_NOT_OVERLAPING_WITH_ANY_OTHER, overlapStatus); EXPECT_EQ(nullptr, oustideFragment); - //partialOverlap + // partialOverlap auto ptrPartial = (void *)(((uintptr_t)bigPtr + bigSize) - 100); auto partialBigSize = MemoryConstants::pageSize * 100; diff --git a/shared/test/unit_test/memory_manager/internal_allocation_storage_tests.cpp b/shared/test/unit_test/memory_manager/internal_allocation_storage_tests.cpp index ad15a98d07..20e5de78bc 100644 --- a/shared/test/unit_test/memory_manager/internal_allocation_storage_tests.cpp +++ b/shared/test/unit_test/memory_manager/internal_allocation_storage_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -53,26 +53,26 @@ TEST_F(InternalAllocationStorageTest, whenCleanAllocationListThenRemoveOnlyCompl storage->storeAllocation(std::unique_ptr(allocation2), TEMPORARY_ALLOCATION); storage->storeAllocation(std::unique_ptr(allocation3), TEMPORARY_ALLOCATION); - //head point to alloc 2, tail points to alloc3 + // head points to alloc 2, tail points to alloc3 EXPECT_TRUE(csr->getTemporaryAllocations().peekContains(*allocation)); EXPECT_TRUE(csr->getTemporaryAllocations().peekContains(*allocation2)); EXPECT_TRUE(csr->getTemporaryAllocations().peekContains(*allocation3)); EXPECT_EQ(-1, verifyDListOrder(csr->getTemporaryAllocations().peekHead(), allocation, allocation2, allocation3)); - //now remove element form the middle + // now remove element from the middle storage->cleanAllocationList(6, TEMPORARY_ALLOCATION); EXPECT_TRUE(csr->getTemporaryAllocations().peekContains(*allocation)); EXPECT_FALSE(csr->getTemporaryAllocations().peekContains(*allocation2)); EXPECT_TRUE(csr->getTemporaryAllocations().peekContains(*allocation3)); EXPECT_EQ(-1, verifyDListOrder(csr->getTemporaryAllocations().peekHead(), allocation, allocation3)); - //now remove head + // now remove head storage->cleanAllocationList(11, TEMPORARY_ALLOCATION); EXPECT_FALSE(csr->getTemporaryAllocations().peekContains(*allocation)); EXPECT_FALSE(csr->getTemporaryAllocations().peekContains(*allocation2)); EXPECT_TRUE(csr->getTemporaryAllocations().peekContains(*allocation3)); - //now remove tail + // now remove tail storage->cleanAllocationList(16, TEMPORARY_ALLOCATION); EXPECT_TRUE(csr->getTemporaryAllocations().peekIsEmpty()); } diff --git a/shared/test/unit_test/memory_manager/multi_graphics_allocation_tests.cpp b/shared/test/unit_test/memory_manager/multi_graphics_allocation_tests.cpp index 7e423683be..060eb9cb10 100644 --- a/shared/test/unit_test/memory_manager/multi_graphics_allocation_tests.cpp +++ b/shared/test/unit_test/memory_manager/multi_graphics_allocation_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -124,11 +124,11 @@ struct MultiGraphicsAllocationTests : ::testing::Test { TEST_F(MultiGraphicsAllocationTests, whenCreatingMultiGraphicsAllocationWithSharedStorageThenMigrationIsNotRequired) { AllocationProperties allocationProperties{0u, - true, //allocateMemory + true, // allocateMemory MemoryConstants::pageSize, AllocationType::BUFFER_HOST_MEMORY, - false,
//multiOsContextCapable - false, //isMultiStorageAllocationParam + false, // multiOsContextCapable + false, // isMultiStorageAllocationParam systemMemoryBitfield}; auto ptr = memoryManager->createMultiGraphicsAllocationInSystemMemoryPool(rootDeviceIndices, allocationProperties, multiGraphicsAllocation); @@ -147,11 +147,11 @@ TEST_F(MultiGraphicsAllocationTests, whenCreatingMultiGraphicsAllocationWithExis uint8_t hostPtr[MemoryConstants::pageSize]{}; AllocationProperties allocationProperties{0u, - false, //allocateMemory + false, // allocateMemory MemoryConstants::pageSize, AllocationType::BUFFER_HOST_MEMORY, - false, //multiOsContextCapable - false, //isMultiStorageAllocationParam + false, // multiOsContextCapable + false, // isMultiStorageAllocationParam systemMemoryBitfield}; multiGraphicsAllocation.addAllocation(memoryManager->allocateGraphicsMemoryWithProperties(allocationProperties, hostPtr)); @@ -168,11 +168,11 @@ TEST_F(MultiGraphicsAllocationTests, whenCreatingMultiGraphicsAllocationWithExis TEST_F(MultiGraphicsAllocationTests, whenCreatingMultiGraphicsAllocationWithSeparatedStorageThenMigrationIsRequired) { AllocationProperties allocationProperties{0u, - true, //allocateMemory + true, // allocateMemory MemoryConstants::pageSize, AllocationType::BUFFER_HOST_MEMORY, - false, //multiOsContextCapable - false, //isMultiStorageAllocationParam + false, // multiOsContextCapable + false, // isMultiStorageAllocationParam systemMemoryBitfield}; multiGraphicsAllocation.addAllocation(memoryManager->allocateGraphicsMemoryWithProperties(allocationProperties)); @@ -188,11 +188,11 @@ TEST_F(MultiGraphicsAllocationTests, whenCreatingMultiGraphicsAllocationWithSepa TEST_F(MultiGraphicsAllocationTests, givenMultiGraphicsAllocationThatRequiresMigrationWhenCopyOrMoveMultiGraphicsAllocationThenTheCopyStillRequiresMigration) { AllocationProperties allocationProperties{0u, - true, //allocateMemory + true, // allocateMemory MemoryConstants::pageSize, AllocationType::BUFFER_HOST_MEMORY, - false, //multiOsContextCapable - false, //isMultiStorageAllocationParam + false, // multiOsContextCapable + false, // isMultiStorageAllocationParam systemMemoryBitfield}; multiGraphicsAllocation.addAllocation(memoryManager->allocateGraphicsMemoryWithProperties(allocationProperties)); @@ -220,11 +220,11 @@ struct MigrationSyncDataTests : public MultiGraphicsAllocationTests { void SetUp() override { MultiGraphicsAllocationTests::SetUp(); AllocationProperties allocationProperties{0u, - true, //allocateMemory + true, // allocateMemory MemoryConstants::pageSize, AllocationType::BUFFER_HOST_MEMORY, - false, //multiOsContextCapable - false, //isMultiStorageAllocationParam + false, // multiOsContextCapable + false, // isMultiStorageAllocationParam systemMemoryBitfield}; multiGraphicsAllocation.addAllocation(memoryManager->allocateGraphicsMemoryWithProperties(allocationProperties)); diff --git a/shared/test/unit_test/os_interface/linux/drm_buffer_object_tests.cpp b/shared/test/unit_test/os_interface/linux/drm_buffer_object_tests.cpp index b59ae691c5..ce5b203e40 100644 --- a/shared/test/unit_test/os_interface/linux/drm_buffer_object_tests.cpp +++ b/shared/test/unit_test/os_interface/linux/drm_buffer_object_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -55,14 +55,14 @@ TEST_F(DrmBufferObjectTest, GivenDetectedGpuHangDuringEvictUnusedAllocationsWhen } TEST_F(DrmBufferObjectTest, WhenSettingTilingThenCallSucceeds) { - 
mock->ioctl_expected.total = 1; //set_tiling + mock->ioctl_expected.total = 1; // set_tiling auto tilingY = mock->getIoctlHelper()->getDrmParamValue(DrmParam::TilingY); auto ret = bo->setTiling(tilingY, 0); EXPECT_TRUE(ret); } TEST_F(DrmBufferObjectTest, WhenSettingSameTilingThenCallSucceeds) { - mock->ioctl_expected.total = 0; //set_tiling + mock->ioctl_expected.total = 0; // set_tiling auto tilingY = mock->getIoctlHelper()->getDrmParamValue(DrmParam::TilingY); bo->tilingMode = tilingY; auto ret = bo->setTiling(tilingY, 0); @@ -70,7 +70,7 @@ TEST_F(DrmBufferObjectTest, WhenSettingSameTilingThenCallSucceeds) { } TEST_F(DrmBufferObjectTest, GivenInvalidTilingWhenSettingTilingThenCallFails) { - mock->ioctl_expected.total = 1; //set_tiling + mock->ioctl_expected.total = 1; // set_tiling auto tilingY = mock->getIoctlHelper()->getDrmParamValue(DrmParam::TilingY); mock->ioctl_res = -1; auto ret = bo->setTiling(tilingY, 0); @@ -91,7 +91,7 @@ TEST_F(DrmBufferObjectTest, givenAddressThatWhenSizeIsAddedCrosses32BitBoundaryW bo->setAddress(((uint64_t)1u << 32) - 0x1000u); bo->setSize(0x1000); bo->fillExecObject(execObject, osContext.get(), 0, 1); - //base address + size > size of 32bit address space + // base address + size > size of 32bit address space EXPECT_TRUE(execObject.has48BAddressSupportFlag()); } @@ -102,7 +102,7 @@ TEST_F(DrmBufferObjectTest, givenAddressThatWhenSizeIsAddedWithin32BitBoundaryWh bo->setAddress(((uint64_t)1u << 32) - 0x1000u); bo->setSize(0xFFF); bo->fillExecObject(execObject, osContext.get(), 0, 1); - //base address + size < size of 32bit address space + // base address + size < size of 32bit address space EXPECT_TRUE(execObject.has48BAddressSupportFlag()); } diff --git a/shared/test/unit_test/os_interface/linux/drm_command_stream_tests.cpp b/shared/test/unit_test/os_interface/linux/drm_command_stream_tests.cpp index 4c2c6da56e..eb14f6bdf3 100644 --- a/shared/test/unit_test/os_interface/linux/drm_command_stream_tests.cpp +++ b/shared/test/unit_test/os_interface/linux/drm_command_stream_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -16,7 +16,7 @@ namespace NEO { extern ApiSpecificConfig::ApiType apiTypeForUlts; -} //namespace NEO +} // namespace NEO using namespace NEO; HWTEST_TEMPLATED_F(DrmCommandStreamTest, givenL0ApiConfigWhenCreatingDrmCsrThenEnableImmediateDispatch) { diff --git a/shared/test/unit_test/os_interface/linux/drm_command_stream_xehp_and_later_tests.cpp b/shared/test/unit_test/os_interface/linux/drm_command_stream_xehp_and_later_tests.cpp index 627e88a5ec..290902f6fe 100644 --- a/shared/test/unit_test/os_interface/linux/drm_command_stream_xehp_and_later_tests.cpp +++ b/shared/test/unit_test/os_interface/linux/drm_command_stream_xehp_and_later_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -163,7 +163,7 @@ HWCMDTEST_F(IGFX_XE_HP_CORE, DrmCommandStreamMultiTileMemExecTest, GivenDrmSuppo volatile TagAddressType *completionAddress = defaultEngine.commandStreamReceiver->getTagAddress(); completionAddress += (TagAllocationLayout::completionFenceOffset / sizeof(TagAddressType)); - *completionAddress = 2; //1st context is ready + *completionAddress = 2; // 1st context is ready completionAddress += (postSyncOffset / sizeof(TagAddressType)); *completionAddress = 1; diff --git 
a/shared/test/unit_test/os_interface/linux/drm_memory_manager_localmem_upstream_tests.cpp b/shared/test/unit_test/os_interface/linux/drm_memory_manager_localmem_upstream_tests.cpp index beb829a0e8..d59924ac75 100644 --- a/shared/test/unit_test/os_interface/linux/drm_memory_manager_localmem_upstream_tests.cpp +++ b/shared/test/unit_test/os_interface/linux/drm_memory_manager_localmem_upstream_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -653,7 +653,7 @@ TEST_F(DrmMemoryManagerLocalMemoryWithCustomMockTest, givenDrmMemoryManagerWithL using DrmMemoryManagerFailInjectionTest = Test; HWTEST2_F(DrmMemoryManagerFailInjectionTest, givenEnabledLocalMemoryWhenNewFailsThenAllocateInDevicePoolReturnsStatusErrorAndNullallocation, NonDefaultIoctlsSupported) { - mock->ioctl_expected.total = -1; //don't care + mock->ioctl_expected.total = -1; // don't care class MockGfxPartition : public GfxPartition { public: MockGfxPartition() : GfxPartition(reservedCpuAddressRange) { diff --git a/shared/test/unit_test/os_interface/linux/drm_mock_impl.h b/shared/test/unit_test/os_interface/linux/drm_mock_impl.h index 4952d7e913..06cb7675ab 100644 --- a/shared/test/unit_test/os_interface/linux/drm_mock_impl.h +++ b/shared/test/unit_test/os_interface/linux/drm_mock_impl.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -27,13 +27,13 @@ class DrmTipMock : public DrmMock { uint32_t i915QuerySuccessCount = std::numeric_limits::max(); uint32_t queryMemoryRegionInfoSuccessCount = std::numeric_limits::max(); - //DRM_IOCTL_I915_GEM_CREATE_EXT + // DRM_IOCTL_I915_GEM_CREATE_EXT drm_i915_gem_create_ext createExt{}; MemoryClassInstance memRegions{}; uint32_t numRegions = 0; int gemCreateExtRetVal = 0; - //DRM_IOCTL_I915_GEM_MMAP_OFFSET + // DRM_IOCTL_I915_GEM_MMAP_OFFSET __u64 mmapOffsetFlagsReceived = 0; __u64 offset = 0; int mmapOffsetRetVal = 0; diff --git a/shared/test/unit_test/os_interface/windows/wddm_command_stream_l0_tests.cpp b/shared/test/unit_test/os_interface/windows/wddm_command_stream_l0_tests.cpp index 7ac77cb6c5..2f5239a831 100644 --- a/shared/test/unit_test/os_interface/windows/wddm_command_stream_l0_tests.cpp +++ b/shared/test/unit_test/os_interface/windows/wddm_command_stream_l0_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -39,7 +39,7 @@ namespace NEO { extern ApiSpecificConfig::ApiType apiTypeForUlts; -} //namespace NEO +} // namespace NEO using namespace NEO; template diff --git a/shared/test/unit_test/utilities/debug_file_reader_tests.inl b/shared/test/unit_test/utilities/debug_file_reader_tests.inl index bee6cbbf43..e67474e0bc 100644 --- a/shared/test/unit_test/utilities/debug_file_reader_tests.inl +++ b/shared/test/unit_test/utilities/debug_file_reader_tests.inl @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -134,7 +134,7 @@ TEST(SettingsFileReader, givenHexNumbersSemiColonSeparatedListInInputStreamWhenP auto reader = std::make_unique(); ASSERT_NE(nullptr, reader); - //No settings should be parsed initially + // No settings should be parsed initially EXPECT_EQ(0u, reader->getStringSettingsCount()); std::stringstream inputLineWithSemiColonList("KeyName = 0x1234;0x5555"); diff --git 
a/shared/test/unit_test/utilities/heap_allocator_tests.cpp b/shared/test/unit_test/utilities/heap_allocator_tests.cpp index bdd28df867..89b4e9a1a0 100644 --- a/shared/test/unit_test/utilities/heap_allocator_tests.cpp +++ b/shared/test/unit_test/utilities/heap_allocator_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -771,7 +771,7 @@ TEST(HeapAllocatorTest, WhenMemoryIsAllocatedThenAllocationsDoNotOverlap) { } } - //at this point we should be able to allocate full size + // at this point we should be able to allocate full size size_t totalSize = (size_t)(allocatorSize - reqAlignment); auto finalPtr = heapAllocator->allocate(totalSize); EXPECT_NE(0llu, finalPtr); diff --git a/shared/test/unit_test/utilities/perf_profiler_tests.cpp b/shared/test/unit_test/utilities/perf_profiler_tests.cpp index ff4ec44833..b8b540e126 100644 --- a/shared/test/unit_test/utilities/perf_profiler_tests.cpp +++ b/shared/test/unit_test/utilities/perf_profiler_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2021 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation * * SPDX-License-Identifier: MIT * @@ -68,7 +68,7 @@ TEST(PerfProfiler, WhenDestroyingAllThenAllObjectsAreDestroyed) { EXPECT_EQ(0, PerfProfiler::getCurrentCounter()); PerfProfilerMock::addNullObjects(); // skip null objects EXPECT_EQ(1, PerfProfiler::getCurrentCounter()); - PerfProfiler::destroyAll(); //destroy no object although counter is incorrect + PerfProfiler::destroyAll(); // destroy no object although counter is incorrect EXPECT_EQ(0, PerfProfiler::getCurrentCounter()); EXPECT_EQ(nullptr, PerfProfiler::getObject(0)); }