performance: Reuse GPU timestamp instead of KMD escape

This can be enabled only if the related
debug flag is set.

Related-To: NEO-10615

Signed-off-by: Szymon Morek <szymon.morek@intel.com>
Author: Szymon Morek
Date: 2024-04-30 10:59:04 +00:00
Committer: Compute-Runtime-Automation
parent c1004b77bf
commit 83e8ae4a20
17 changed files with 601 additions and 74 deletions
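
The change threads a new boolean through OSTime::getGpuCpuTime so that most callers can reuse a recently queried GPU timestamp instead of paying for a KMD escape on every call. Below is a minimal sketch of that reuse scheme, assuming hypothetical names (GpuTimestampCache, forceKmdCall, reuseEnabled, queryKmd); the actual NEO types, debug-flag plumbing, and tick conversion differ.

    // gpu_timestamp_cache_sketch.cpp -- illustrative only
    #include <chrono>
    #include <cstdint>

    // Stand-in for NEO::TimeStampData.
    struct TimeStampData {
        uint64_t gpuTimeStamp = 0; // GPU ticks
        uint64_t cpuTimeinNS = 0;  // host clock, nanoseconds
    };

    class GpuTimestampCache {
      public:
        GpuTimestampCache(uint64_t gpuTicksPerSecond, uint64_t refreshIntervalNs)
            : gpuTicksPerSecond(gpuTicksPerSecond), refreshIntervalNs(refreshIntervalNs) {}

        // forceKmdCall == true always performs the KMD escape, as at the call
        // sites changed in this commit; false permits reuse when the
        // (hypothetical) reuseEnabled debug toggle is on.
        bool getGpuCpuTime(TimeStampData *out, bool forceKmdCall) {
            const uint64_t nowNs = hostTimeNs();
            const bool neverQueried = (cached.cpuTimeinNS == 0);
            const bool stale = !neverQueried &&
                               (nowNs - cached.cpuTimeinNS) > refreshIntervalNs;
            if (forceKmdCall || !reuseEnabled || neverQueried || stale) {
                if (!queryKmd(&cached)) { // expensive round trip to the kernel-mode driver
                    return false;
                }
            } else {
                // Reuse path: extrapolate the GPU timestamp from elapsed host
                // time instead of asking the kernel-mode driver again.
                const uint64_t elapsedNs = nowNs - cached.cpuTimeinNS;
                cached.gpuTimeStamp += elapsedNs * gpuTicksPerSecond / 1'000'000'000ull;
                cached.cpuTimeinNS = nowNs;
            }
            *out = cached;
            return true;
        }

        bool reuseEnabled = false; // stand-in for the gating debug flag

      private:
        static uint64_t hostTimeNs() {
            using namespace std::chrono;
            return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
        }

        // Placeholder for the real KMD escape; here it just mirrors the host clock.
        bool queryKmd(TimeStampData *out) {
            out->cpuTimeinNS = hostTimeNs();
            out->gpuTimeStamp = out->cpuTimeinNS * gpuTicksPerSecond / 1'000'000'000ull;
            return true;
        }

        TimeStampData cached{};
        uint64_t gpuTicksPerSecond;
        uint64_t refreshIntervalNs;
    };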


@@ -1048,7 +1048,7 @@ ze_result_t DeviceImp::getProperties(ze_device_properties_t *pDeviceProperties)
 ze_result_t DeviceImp::getGlobalTimestamps(uint64_t *hostTimestamp, uint64_t *deviceTimestamp) {
     NEO::TimeStampData queueTimeStamp;
-    bool retVal = this->neoDevice->getOSTime()->getGpuCpuTime(&queueTimeStamp);
+    bool retVal = this->neoDevice->getOSTime()->getGpuCpuTime(&queueTimeStamp, true);
     if (!retVal)
         return ZE_RESULT_ERROR_DEVICE_LOST;


@@ -489,7 +489,7 @@ void Event::setReferenceTs(uint64_t currentCpuTimeStamp) {
     const auto recalculate =
         (currentCpuTimeStamp - referenceTs.cpuTimeinNS) > timestampRefreshIntervalInNanoSec;
     if (referenceTs.cpuTimeinNS == 0 || recalculate) {
-        device->getNEODevice()->getOSTime()->getGpuCpuTime(&referenceTs);
+        device->getNEODevice()->getOSTime()->getGpuCpuTime(&referenceTs, true);
     }
 }
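
Both call sites changed in this commit pass true as the new second argument, which, going by the commit's intent, should force the full KMD query at places that establish a fresh reference timestamp (zeDeviceGetGlobalTimestamps is specified to return synchronized host and device values, and Event::setReferenceTs is itself the periodic refresh point). A hypothetical call pattern, with the parameter name forceKmdCall as an assumption:

    NEO::TimeStampData ts;
    osTime->getGpuCpuTime(&ts, true);  // always performs the KMD escape
    osTime->getGpuCpuTime(&ts, false); // may reuse a cached timestamp when the debug flag is set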