Mirror of https://github.com/intel/compute-runtime.git (synced 2026-01-03 14:55:24 +08:00)
fix: Overhead in zeDeviceGetGlobalTimestamps
Related-To: NEO-11908

There is overhead when the submission method is used for zeDeviceGetGlobalTimestamps. This change fixes it.

Signed-off-by: Chandio, Bibrak Qamar <bibrak.qamar.chandio@intel.com>
Committed by: Compute-Runtime-Automation
Commit: 8cf4804fcd
Parent: 4f12ee40e9
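For context, zeDeviceGetGlobalTimestamps is the Level Zero API that returns a correlated pair of host and device timestamps for a device. Below is a minimal usage sketch, not the driver-side fix itself; queryGlobalTimestamps is a hypothetical helper, and the device handle is assumed to come from the usual driver/device enumeration done elsewhere.

#include <level_zero/ze_api.h>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: query a correlated host/device timestamp pair for an
// already-initialized device. hDevice is assumed to come from zeDriverGet /
// zeDeviceGet enumeration performed elsewhere; error handling is abbreviated.
void queryGlobalTimestamps(ze_device_handle_t hDevice) {
    uint64_t hostTimestamp = 0;
    uint64_t deviceTimestamp = 0;
    ze_result_t result = zeDeviceGetGlobalTimestamps(hDevice, &hostTimestamp, &deviceTimestamp);
    if (result == ZE_RESULT_SUCCESS) {
        std::printf("host: %llu, device: %llu\n",
                    static_cast<unsigned long long>(hostTimestamp),
                    static_cast<unsigned long long>(deviceTimestamp));
    }
}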
@@ -76,24 +76,29 @@ TEST_F(OSTimeWinTest, given36BitGpuTimeStampWhenGpuTimeStampOverflowThenGpuTimeD
    deviceTime->gpuCpuTimeValue.gpuTimeStamp = 200ll;
    error = osTime->getGpuCpuTime(&gpuCpuTime);
    EXPECT_EQ(error, TimeQueryStatus::success);
    EXPECT_EQ(200ull, gpuCpuTime.gpuTimeStamp);

    osTime->maxGpuTimeStamp = 1ull << 36;

    deviceTime->gpuCpuTimeValue.gpuTimeStamp = 10ull; // read below initial value
    error = osTime->getGpuCpuTime(&gpuCpuTime);
    EXPECT_EQ(error, TimeQueryStatus::success);
    EXPECT_EQ(osTime->maxGpuTimeStamp + 10ull, gpuCpuTime.gpuTimeStamp);

    deviceTime->gpuCpuTimeValue.gpuTimeStamp = 30ull; // second read below initial value
    error = osTime->getGpuCpuTime(&gpuCpuTime);
    EXPECT_EQ(error, TimeQueryStatus::success);
    EXPECT_EQ(osTime->maxGpuTimeStamp + 30ull, gpuCpuTime.gpuTimeStamp);

    deviceTime->gpuCpuTimeValue.gpuTimeStamp = 110ull;
    error = osTime->getGpuCpuTime(&gpuCpuTime);
    EXPECT_EQ(error, TimeQueryStatus::success);
    EXPECT_EQ(osTime->maxGpuTimeStamp + 110ull, gpuCpuTime.gpuTimeStamp);

    deviceTime->gpuCpuTimeValue.gpuTimeStamp = 70ull; // second overflow
    error = osTime->getGpuCpuTime(&gpuCpuTime);
    EXPECT_EQ(error, TimeQueryStatus::success);
    EXPECT_EQ(2ull * osTime->maxGpuTimeStamp + 70ull, gpuCpuTime.gpuTimeStamp);
}
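The test above exercises wraparound handling for a 36-bit GPU timestamp counter: once maxGpuTimeStamp is set, a raw reading that is smaller than the previous one is treated as a counter wrap, and one full counter period is added per wrap (so 10 becomes max + 10, and the later drop from 110 to 70 becomes 2 * max + 70). A standalone sketch of that accumulation idea is shown below; TimestampUnwinder and its members are hypothetical names, not the driver's actual implementation.

#include <cstdint>

// Hypothetical sketch of the unwrapping logic the test exercises: raw values
// come from an N-bit hardware counter, and whenever a new raw value is smaller
// than the previous one, the counter is assumed to have wrapped once, so one
// full period (maxGpuTimeStamp) is added to the running total.
struct TimestampUnwinder {
    uint64_t maxGpuTimeStamp = 1ull << 36; // counter period for a 36-bit counter
    uint64_t overflowCount = 0;
    uint64_t lastRawTimestamp = 0;

    uint64_t unwrap(uint64_t rawTimestamp) {
        if (rawTimestamp < lastRawTimestamp) {
            ++overflowCount; // counter wrapped since the previous read
        }
        lastRawTimestamp = rawTimestamp;
        return overflowCount * maxGpuTimeStamp + rawTimestamp;
    }
};

// Fed the same sequence as the test (200, 10, 30, 110, 70), unwrap() yields
// 200, max + 10, max + 30, max + 110, and 2 * max + 70 respectively.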