/*
 * Copyright (C) 2017-2020 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "opencl/source/event/event.h"

#include "shared/source/command_stream/command_stream_receiver.h"
#include "shared/source/device/device.h"
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/get_info.h"
#include "shared/source/helpers/timestamp_packet.h"
#include "shared/source/memory_manager/internal_allocation_storage.h"
#include "shared/source/utilities/range.h"
#include "shared/source/utilities/stackvec.h"
#include "shared/source/utilities/tag_allocator.h"

#include "opencl/extensions/public/cl_ext_private.h"
#include "opencl/source/api/cl_types.h"
#include "opencl/source/command_queue/command_queue.h"
#include "opencl/source/context/context.h"
#include "opencl/source/event/async_events_handler.h"
#include "opencl/source/event/event_tracker.h"
#include "opencl/source/helpers/get_info_status_mapper.h"
#include "opencl/source/helpers/hardware_commands_helper.h"
#include "opencl/source/mem_obj/mem_obj.h"

#define OCLRT_NUM_TIMESTAMP_BITS (32)

namespace NEO {

Event::Event(
    Context *ctx,
    CommandQueue *cmdQueue,
    cl_command_type cmdType,
    uint32_t taskLevel,
    uint32_t taskCount)
    : taskLevel(taskLevel),
      currentCmdQVirtualEvent(false),
      cmdToSubmit(nullptr),
      submittedCmd(nullptr),
      ctx(ctx),
      cmdQueue(cmdQueue),
      cmdType(cmdType),
      dataCalculated(false),
      taskCount(taskCount) {
    if (NEO::DebugManager.flags.EventsTrackerEnable.get()) {
        EventsTracker::getEventsTracker().notifyCreation(this);
    }
    parentCount = 0;
    executionStatus = CL_QUEUED;
    flushStamp.reset(new FlushStampTracker(true));

    DBG_LOG(EventsDebugEnable, "Event()", this);

    // An Event can live longer than the command queue that created it,
    // hence the command queue refCount must be incremented.
    // A non-null command queue is only passed when the base Event object is created;
    // any other Event type must increment the refcount when setting its command queue.
    if (cmdQueue != nullptr) {
        cmdQueue->incRefInternal();
    }

    if ((this->ctx == nullptr) && (cmdQueue != nullptr)) {
        this->ctx = &cmdQueue->getContext();
        if (cmdQueue->getGpgpuCommandStreamReceiver().peekTimestampPacketWriteEnabled()) {
            timestampPacketContainer = std::make_unique<TimestampPacketContainer>();
        }
    }

    if (this->ctx != nullptr) {
        this->ctx->incRefInternal();
    }

    queueTimeStamp = {0, 0};
    submitTimeStamp = {0, 0};
    startTimeStamp = 0;
    endTimeStamp = 0;
    completeTimeStamp = 0;

    profilingEnabled = !isUserEvent() &&
                       (cmdQueue ? cmdQueue->getCommandQueueProperties() & CL_QUEUE_PROFILING_ENABLE : false);
    profilingCpuPath = ((cmdType == CL_COMMAND_MAP_BUFFER) || (cmdType == CL_COMMAND_MAP_IMAGE)) && profilingEnabled;

    perfCountersEnabled = cmdQueue ? cmdQueue->isPerfCountersEnabled() : false;
}

Event::Event(
    CommandQueue *cmdQueue,
    cl_command_type cmdType,
    uint32_t taskLevel,
    uint32_t taskCount)
    : Event(nullptr, cmdQueue, cmdType, taskLevel, taskCount) {
}

Event::~Event() {
    if (NEO::DebugManager.flags.EventsTrackerEnable.get()) {
        EventsTracker::getEventsTracker().notifyDestruction(this);
    }

    DBG_LOG(EventsDebugEnable, "~Event()", this);
    // no commands should be registered
    DEBUG_BREAK_IF(this->cmdToSubmit.load());

    submitCommand(true);

    int32_t lastStatus = executionStatus;
    if (isStatusCompleted(lastStatus) == false) {
        transitionExecutionStatus(-1);
        DEBUG_BREAK_IF(peekHasCallbacks() || peekHasChildEvents());
    }

    // Note from OCL spec:
    //    "All callbacks registered for an event object must be called.
    //     All enqueued callbacks shall be called before the event object is destroyed."
    if (peekHasCallbacks()) {
        executeCallbacks(lastStatus);
    }

    {
        // clean-up submitted command if needed
        std::unique_ptr<Command> submittedCommand(submittedCmd.exchange(nullptr));
    }

    if (cmdQueue != nullptr) {
        if (timeStampNode != nullptr) {
            timeStampNode->returnTag();
        }
        if (perfCounterNode != nullptr) {
            perfCounterNode->returnTag();
        }
        cmdQueue->decRefInternal();
    }

    if (ctx != nullptr) {
        ctx->decRefInternal();
    }

    // in case the event did not unblock child events before
    unblockEventsBlockedByThis(executionStatus);
}

cl_int Event::getEventProfilingInfo(cl_profiling_info paramName,
                                    size_t paramValueSize,
                                    void *paramValue,
                                    size_t *paramValueSizeRet) {
    cl_int retVal;
    const void *src = nullptr;
    size_t srcSize = GetInfo::invalidSourceSize;

    // CL_PROFILING_INFO_NOT_AVAILABLE if event refers to the clEnqueueSVMFree command
    if (isUserEvent() != CL_FALSE ||         // or is a user event object,
        !updateStatusAndCheckCompletion() || // or the execution status of the command identified by event is not CL_COMPLETE,
        !profilingEnabled)                   // or the CL_QUEUE_PROFILING_ENABLE flag is not set for the command-queue
    {
        return CL_PROFILING_INFO_NOT_AVAILABLE;
    }

    // if paramValue is NULL, it is ignored
    switch (paramName) {
    case CL_PROFILING_COMMAND_QUEUED:
        src = &queueTimeStamp.CPUTimeinNS;
        if (DebugManager.flags.ReturnRawGpuTimestamps.get()) {
            src = &queueTimeStamp.GPUTimeStamp;
        }
        srcSize = sizeof(cl_ulong);
        break;

    case CL_PROFILING_COMMAND_SUBMIT:
        src = &submitTimeStamp.CPUTimeinNS;
        if (DebugManager.flags.ReturnRawGpuTimestamps.get()) {
            src = &submitTimeStamp.GPUTimeStamp;
        }
        srcSize = sizeof(cl_ulong);
        break;

    case CL_PROFILING_COMMAND_START:
        calcProfilingData();
        src = &startTimeStamp;
        srcSize = sizeof(cl_ulong);
        break;

    case CL_PROFILING_COMMAND_END:
        calcProfilingData();
        src = &endTimeStamp;
        srcSize = sizeof(cl_ulong);
        break;

    case CL_PROFILING_COMMAND_COMPLETE:
        calcProfilingData();
        src = &completeTimeStamp;
        srcSize = sizeof(cl_ulong);
        break;

    case CL_PROFILING_COMMAND_PERFCOUNTERS_INTEL:
        if (!perfCountersEnabled) {
            return CL_INVALID_VALUE;
        }
        if (!cmdQueue->getPerfCounters()->getApiReport(paramValueSize,
                                                       paramValue,
                                                       paramValueSizeRet,
                                                       updateStatusAndCheckCompletion())) {
            return CL_PROFILING_INFO_NOT_AVAILABLE;
        }
        return CL_SUCCESS;
    default:
        return CL_INVALID_VALUE;
    }

    auto getInfoStatus = GetInfo::getInfo(paramValue, paramValueSize, src, srcSize);
    retVal = changeGetInfoStatusToCLResultType(getInfoStatus);
    GetInfo::setParamValueReturnSize(paramValueSizeRet, srcSize, getInfoStatus);

    return retVal;
}

uint32_t Event::getCompletionStamp() const {
    return this->taskCount;
}

void Event::updateCompletionStamp(uint32_t taskCount, uint32_t tasklevel, FlushStamp flushStamp) {
    this->taskCount = taskCount;
    this->taskLevel = tasklevel;
    this->flushStamp->setStamp(flushStamp);
}

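// Returns endTime - startTime with both values masked to OCLRT_NUM_TIMESTAMP_BITS bits,
// accounting for a single wrap-around of the timestamp counter when startTime > endTime.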
cl_ulong Event::getDelta(cl_ulong startTime,
                         cl_ulong endTime) {
    cl_ulong Max = maxNBitValue(OCLRT_NUM_TIMESTAMP_BITS);
    cl_ulong Delta = 0;

    startTime &= Max;
    endTime &= Max;

    if (startTime > endTime) {
        Delta = Max - startTime;
        Delta += endTime;
    } else {
        Delta = endTime - startTime;
    }

    return Delta;
}

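// Converts the GPU timestamps captured for this event into host-visible profiling values.
// With timestamp packets enabled, the earliest start and latest end across all packets are used;
// otherwise the HW timestamp tag node is used. The result is computed once and cached via dataCalculated.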
bool Event::calcProfilingData() {
    if (!dataCalculated && !profilingCpuPath) {
        if (timestampPacketContainer && timestampPacketContainer->peekNodes().size() > 0) {
            const auto timestamps = timestampPacketContainer->peekNodes();
            auto isMultiOsContextCapable = this->getCommandQueue()->getGpgpuCommandStreamReceiver().isMultiOsContextCapable();

            if (DebugManager.flags.PrintTimestampPacketContents.get()) {
                for (auto i = 0u; i < timestamps.size(); i++) {
                    for (auto j = 0u; j < timestamps[i]->tagForCpuAccess->packetsUsed; j++) {
                        const auto &packet = timestamps[i]->tagForCpuAccess->packets[j];
                        std::cout << "Timestamp " << i << ", packet " << j << ": "
                                  << "global start: " << packet.globalStart << ", "
                                  << "global end: " << packet.globalEnd << ", "
                                  << "context start: " << packet.contextStart << ", "
                                  << "context end: " << packet.contextEnd << std::endl;
                    }
                }
            }

            if (isMultiOsContextCapable) {
                auto globalStartTS = timestamps[0]->tagForCpuAccess->packets[0].globalStart;
                uint64_t globalEndTS = timestamps[0]->tagForCpuAccess->packets[0].globalEnd;

                for (const auto &timestamp : timestamps) {
                    for (auto i = 0u; i < timestamp->tagForCpuAccess->packetsUsed; ++i) {
                        const auto &packet = timestamp->tagForCpuAccess->packets[i];
                        if (globalStartTS > packet.globalStart) {
                            globalStartTS = packet.globalStart;
                        }
                        if (globalEndTS < packet.globalEnd) {
                            globalEndTS = packet.globalEnd;
                        }
                    }
                }
                calculateProfilingDataInternal(globalStartTS, globalEndTS, &globalEndTS, globalStartTS);
            } else {
                auto contextStartTS = timestamps[0]->tagForCpuAccess->packets[0].contextStart;
                uint64_t contextEndTS = timestamps[0]->tagForCpuAccess->packets[0].contextEnd;
                auto globalStartTS = timestamps[0]->tagForCpuAccess->packets[0].globalStart;

                for (const auto &timestamp : timestamps) {
                    const auto &packet = timestamp->tagForCpuAccess->packets[0];
                    if (contextStartTS > packet.contextStart) {
                        contextStartTS = packet.contextStart;
                    }
                    if (contextEndTS < packet.contextEnd) {
                        contextEndTS = packet.contextEnd;
                    }
                    if (globalStartTS > packet.globalStart) {
                        globalStartTS = packet.globalStart;
                    }
                }
                calculateProfilingDataInternal(contextStartTS, contextEndTS, &contextEndTS, globalStartTS);
            }
        } else if (timeStampNode) {
            calculateProfilingDataInternal(
                timeStampNode->tagForCpuAccess->ContextStartTS,
                timeStampNode->tagForCpuAccess->ContextEndTS,
                &timeStampNode->tagForCpuAccess->ContextCompleteTS,
                timeStampNode->tagForCpuAccess->GlobalStartTS);
        }
    }
    return dataCalculated;
}

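// Translates GPU timestamps into CPU-domain start/end/complete values using the device timer
// resolution and an offset derived from the queue timestamp pair, compensating for a possible
// wrap of the global timestamp. With ReturnRawGpuTimestamps set, the raw GPU values are reported instead.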
void Event::calculateProfilingDataInternal(uint64_t contextStartTS, uint64_t contextEndTS, uint64_t *contextCompleteTS, uint64_t globalStartTS) {
    uint64_t gpuDuration = 0;
    uint64_t cpuDuration = 0;

    uint64_t gpuCompleteDuration = 0;
    uint64_t cpuCompleteDuration = 0;

    auto &hwHelper = HwHelper::get(this->cmdQueue->getDevice().getHardwareInfo().platform.eRenderCoreFamily);
    auto frequency = cmdQueue->getDevice().getDeviceInfo().profilingTimerResolution;
    auto gpuTimeStamp = queueTimeStamp.GPUTimeStamp;

    int64_t c0 = queueTimeStamp.CPUTimeinNS - hwHelper.getGpuTimeStampInNS(gpuTimeStamp, frequency);

    startTimeStamp = static_cast<uint64_t>(globalStartTS * frequency) + c0;
    if (startTimeStamp < queueTimeStamp.CPUTimeinNS) {
        c0 += static_cast<uint64_t>((1ULL << (hwHelper.getGlobalTimeStampBits())) * frequency);
        startTimeStamp = static_cast<uint64_t>(globalStartTS * frequency) + c0;
    }

    /* calculation based on the equation
       CpuTime = GpuTime * scalar + const (== c0)
       scalar = DeltaCpu (== dCpu) / DeltaGpu (== dGpu)
       to determine the value of the const we can use one pair of values
       const = CpuTimeQueue - GpuTimeQueue * scalar
    */

    // If device enqueue has not updated the complete timestamp, assign the end timestamp
    gpuDuration = getDelta(contextStartTS, contextEndTS);
    if (*contextCompleteTS == 0) {
        *contextCompleteTS = contextEndTS;
        gpuCompleteDuration = gpuDuration;
    } else {
        gpuCompleteDuration = getDelta(contextStartTS, *contextCompleteTS);
    }
    cpuDuration = static_cast<uint64_t>(gpuDuration * frequency);
    cpuCompleteDuration = static_cast<uint64_t>(gpuCompleteDuration * frequency);

    endTimeStamp = startTimeStamp + cpuDuration;
    completeTimeStamp = startTimeStamp + cpuCompleteDuration;

    if (DebugManager.flags.ReturnRawGpuTimestamps.get()) {
        startTimeStamp = contextStartTS;
        endTimeStamp = contextEndTS;
        completeTimeStamp = *contextCompleteTS;
    }

    dataCalculated = true;
}

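// Waits until the command associated with this event completes on the GPU.
// Returns false immediately when non-blocking and the event has no task count assigned yet;
// otherwise waits for the task count, refreshes the execution status and frees temporary allocations.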
inline bool Event::wait(bool blocking, bool useQuickKmdSleep) {
    while (this->taskCount == CompletionStamp::levelNotReady) {
        if (blocking == false) {
            return false;
        }
    }

    cmdQueue->waitUntilComplete(taskCount.load(), flushStamp->peekStamp(), useQuickKmdSleep);
    updateExecutionStatus();

    DEBUG_BREAK_IF(this->taskLevel == CompletionStamp::levelNotReady && this->executionStatus >= 0);

    auto *allocationStorage = cmdQueue->getGpgpuCommandStreamReceiver().getInternalAllocationStorage();
    allocationStorage->cleanAllocationList(this->taskCount, TEMPORARY_ALLOCATION);

    return true;
}

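// Re-evaluates the event state machine: blocked events are kept at CL_QUEUED, queued commands are
// submitted (CL_QUEUED -> CL_SUBMITTED), and the event transitions to CL_COMPLETE once the command
// queue reports completion, executing callbacks and unblocking children at each transition.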
void Event::updateExecutionStatus() {
    if (taskLevel == CompletionStamp::levelNotReady) {
        return;
    }

    int32_t statusSnapshot = executionStatus;
    if (isStatusCompleted(statusSnapshot)) {
        executeCallbacks(statusSnapshot);
        return;
    }

    if (peekIsBlocked()) {
        transitionExecutionStatus(CL_QUEUED);
        executeCallbacks(CL_QUEUED);
        return;
    }

    if (statusSnapshot == CL_QUEUED) {
        bool abortBlockedTasks = isStatusCompletedByTermination(statusSnapshot);
        submitCommand(abortBlockedTasks);
        transitionExecutionStatus(CL_SUBMITTED);
        executeCallbacks(CL_SUBMITTED);
        unblockEventsBlockedByThis(CL_SUBMITTED);
        // Note : Intentional fallthrough (no return) to check for CL_COMPLETE
    }

    if ((cmdQueue != nullptr) && (cmdQueue->isCompleted(getCompletionStamp()))) {
        transitionExecutionStatus(CL_COMPLETE);
        executeCallbacks(CL_COMPLETE);
        unblockEventsBlockedByThis(CL_COMPLETE);
        auto *allocationStorage = cmdQueue->getGpgpuCommandStreamReceiver().getInternalAllocationStorage();
        allocationStorage->cleanAllocationList(this->taskCount, TEMPORARY_ALLOCATION);
        return;
    }

    transitionExecutionStatus(CL_SUBMITTED);
}

void Event::addChild(Event &childEvent) {
    childEvent.parentCount++;
    childEvent.incRefInternal();
    childEventsToNotify.pushRefFrontOne(childEvent);
    DBG_LOG(EventsDebugEnable, "addChild: Parent event:", this, "child:", &childEvent);
    if (DebugManager.flags.TrackParentEvents.get()) {
        childEvent.parentEvents.push_back(this);
    }
    if (executionStatus == CL_COMPLETE) {
        unblockEventsBlockedByThis(CL_COMPLETE);
    }
}

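// Notifies all child events that this blocker reached transitionStatus, propagating a task level
// (taken from the CSR when this event is at the top of the dependency tree, taskLevel + 1 otherwise)
// unless the transition is a termination.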
void Event::unblockEventsBlockedByThis(int32_t transitionStatus) {

    int32_t status = transitionStatus;
    (void)status;
    DEBUG_BREAK_IF(!(isStatusCompleted(status) || (peekIsSubmitted(status))));

    uint32_t taskLevelToPropagate = CompletionStamp::levelNotReady;

    if (isStatusCompletedByTermination(transitionStatus) == false) {
        // if this event is at the top of the tree, obtain the taskLevel from the CSR
        if (taskLevel == CompletionStamp::levelNotReady) {
            this->taskLevel = getTaskLevel(); // NOLINT(clang-analyzer-optin.cplusplus.VirtualCall)
            taskLevelToPropagate = this->taskLevel;
        } else {
            taskLevelToPropagate = taskLevel + 1;
        }
    }

    auto childEventRef = childEventsToNotify.detachNodes();
    while (childEventRef != nullptr) {
        auto childEvent = childEventRef->ref;

        childEvent->unblockEventBy(*this, taskLevelToPropagate, transitionStatus);

        childEvent->decRefInternal();
        auto next = childEventRef->next;
        delete childEventRef;
        childEventRef = next;
    }
}

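// Explicitly transitions the event to the given execution status. The change is rejected when the
// event is already completed, the status is unchanged, or the event is still blocked and the new
// status is not a termination; otherwise the pending command is submitted, children are unblocked
// and callbacks are executed.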
bool Event::setStatus(cl_int status) {
    int32_t prevStatus = executionStatus;

    DBG_LOG(EventsDebugEnable, "setStatus event", this, " new status", status, "previousStatus", prevStatus);

    if (isStatusCompleted(prevStatus)) {
        return false;
    }

    if (status == prevStatus) {
        return false;
    }

    if (peekIsBlocked() && (isStatusCompletedByTermination(status) == false)) {
        return false;
    }

    if ((status == CL_SUBMITTED) || (isStatusCompleted(status))) {
        bool abortBlockedTasks = isStatusCompletedByTermination(status);
        submitCommand(abortBlockedTasks);
    }

    this->incRefInternal();
    transitionExecutionStatus(status);
    if (isStatusCompleted(status) || (status == CL_SUBMITTED)) {
        unblockEventsBlockedByThis(status);
    }
    executeCallbacks(status);
    this->decRefInternal();
    return true;
}

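// Lowers executionStatus to newExecutionStatus with a CAS loop; statuses only move forward
// (numerically downwards, with negative values denoting abnormal termination), so a status that
// is already lower is never overwritten.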
void Event::transitionExecutionStatus(int32_t newExecutionStatus) const {
    int32_t prevStatus = executionStatus;
    DBG_LOG(EventsDebugEnable, "transitionExecutionStatus event", this, " new status", newExecutionStatus, "previousStatus", prevStatus);

    while (prevStatus > newExecutionStatus) {
        executionStatus.compare_exchange_weak(prevStatus, newExecutionStatus);
    }
    if (NEO::DebugManager.flags.EventsTrackerEnable.get()) {
        EventsTracker::getEventsTracker().notifyTransitionedExecutionStatus();
    }
}

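// Flushes the command attached to this event (if any) to the command stream receiver under its
// unique lock, making profiling/perf-counter tags resident and taking submit/start timestamps first,
// then records the resulting task count and flush stamp. Non-user events without a command that
// have no task count yet pick up the CSR's current task count instead.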
void Event::submitCommand(bool abortTasks) {
    std::unique_ptr<Command> cmdToProcess(cmdToSubmit.exchange(nullptr));
    if (cmdToProcess.get() != nullptr) {
        std::unique_lock<CommandStreamReceiver::MutexType> lockCSR;
        if (this->cmdQueue) {
            lockCSR = this->getCommandQueue()->getGpgpuCommandStreamReceiver().obtainUniqueOwnership();
        }
        if ((this->isProfilingEnabled()) && (this->cmdQueue != nullptr)) {
            if (timeStampNode) {
                this->cmdQueue->getGpgpuCommandStreamReceiver().makeResident(*timeStampNode->getBaseGraphicsAllocation());
                cmdToProcess->timestamp = timeStampNode;
            }
            if (profilingCpuPath) {
                setSubmitTimeStamp();
                setStartTimeStamp();
            } else {
                this->cmdQueue->getDevice().getOSTime()->getCpuGpuTime(&submitTimeStamp);
            }
            if (perfCountersEnabled && perfCounterNode) {
                this->cmdQueue->getGpgpuCommandStreamReceiver().makeResident(*perfCounterNode->getBaseGraphicsAllocation());
            }
        }
        auto &complStamp = cmdToProcess->submit(taskLevel, abortTasks);
        if (profilingCpuPath && this->isProfilingEnabled() && (this->cmdQueue != nullptr)) {
            setEndTimeStamp();
        }
        updateTaskCount(complStamp.taskCount);
        flushStamp->setStamp(complStamp.flushStamp);
        submittedCmd.exchange(cmdToProcess.release());
    } else if (profilingCpuPath && endTimeStamp == 0) {
        setEndTimeStamp();
    }
    if (this->taskCount == CompletionStamp::levelNotReady) {
        if (!this->isUserEvent() && this->eventWithoutCommand) {
            if (this->cmdQueue) {
                auto lockCSR = this->getCommandQueue()->getGpgpuCommandStreamReceiver().obtainUniqueOwnership();
                updateTaskCount(this->cmdQueue->getGpgpuCommandStreamReceiver().peekTaskCount());
            }
        }
        // make sure that task count is synchronized for events with kernels
        if (!this->eventWithoutCommand && !abortTasks) {
            this->synchronizeTaskCount();
        }
    }
}

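// Waits for all events in the list to complete: flushes the command queues of every unblocked
// event, then repeatedly sweeps the list with non-blocking waits until no event remains pending,
// returning CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if any event reports an error status.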
cl_int Event::waitForEvents(cl_uint numEvents,
                            const cl_event *eventList) {
    if (numEvents == 0) {
        return CL_SUCCESS;
    }

    // flush all command queues
    for (const cl_event *it = eventList, *end = eventList + numEvents; it != end; ++it) {
        Event *event = castToObjectOrAbort<Event>(*it);
        if (event->cmdQueue) {
            if (event->taskLevel != CompletionStamp::levelNotReady) {
                event->cmdQueue->flush();
            }
        }
    }

    using WorkerListT = StackVec<cl_event, 64>;
    WorkerListT workerList1(eventList, eventList + numEvents);
    WorkerListT workerList2;
    workerList2.reserve(numEvents);

    // pointers to workerLists - for fast swap operations
    WorkerListT *currentlyPendingEvents = &workerList1;
    WorkerListT *pendingEventsLeft = &workerList2;

    while (currentlyPendingEvents->size() > 0) {
        for (auto &e : *currentlyPendingEvents) {
            Event *event = castToObjectOrAbort<Event>(e);
            if (event->peekExecutionStatus() < CL_COMPLETE) {
                return CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST;
            }

            if (event->wait(false, false) == false) {
                pendingEventsLeft->push_back(event);
            }
        }

        std::swap(currentlyPendingEvents, pendingEventsLeft);
        pendingEventsLeft->clear();
    }

    return CL_SUCCESS;
}

uint32_t Event::getTaskLevel() {
    return taskLevel;
}

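// Called by a parent (blocking) event once it is submitted or terminated; decrements the parent
// count and, when no non-terminated blockers remain, adopts a task level, propagates CL_SUBMITTED
// (or the termination status) to this event and refreshes its execution status.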
inline void Event::unblockEventBy(Event &event, uint32_t taskLevel, int32_t transitionStatus) {
    int32_t numEventsBlockingThis = --parentCount;
    DEBUG_BREAK_IF(numEventsBlockingThis < 0);

    int32_t blockerStatus = transitionStatus;
    DEBUG_BREAK_IF(!(isStatusCompleted(blockerStatus) || peekIsSubmitted(blockerStatus)));

    if ((numEventsBlockingThis > 0) && (isStatusCompletedByTermination(blockerStatus) == false)) {
        return;
    }
    DBG_LOG(EventsDebugEnable, "Event", this, "is unblocked by", &event);

    if (this->taskLevel == CompletionStamp::levelNotReady) {
        this->taskLevel = std::max(cmdQueue->getGpgpuCommandStreamReceiver().peekTaskLevel(), taskLevel);
    } else {
        this->taskLevel = std::max(this->taskLevel.load(), taskLevel);
    }

    int32_t statusToPropagate = CL_SUBMITTED;
    if (isStatusCompletedByTermination(blockerStatus)) {
        statusToPropagate = blockerStatus;
    }
    setStatus(statusToPropagate);

    // the event may be completed after this operation; transition the state so it does not block others
    this->updateExecutionStatus();
}

bool Event::updateStatusAndCheckCompletion() {
    auto currentStatus = updateEventAndReturnCurrentStatus();
    return isStatusCompleted(currentStatus);
}

bool Event::isReadyForSubmission() {
    return taskLevel != CompletionStamp::levelNotReady ? true : false;
}

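// Registers a user callback for the given execution status. Each callback takes an extra internal
// reference so the event outlives pending callbacks; callbacks fire immediately if the event is
// already complete, and the async events handler is engaged when enabled.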
void Event::addCallback(Callback::ClbFuncT fn, cl_int type, void *data) {
    ECallbackTarget target = translateToCallbackTarget(type);
    if (target == ECallbackTarget::Invalid) {
        DEBUG_BREAK_IF(true);
        return;
    }
    incRefInternal();

    // Note from spec :
    //    "All callbacks registered for an event object must be called.
    //     All enqueued callbacks shall be called before the event object is destroyed."
    // That's why each registered callback increments the internal refcount
    incRefInternal();
    DBG_LOG(EventsDebugEnable, "event", this, "addCallback", "ECallbackTarget", (uint32_t)type);
    callbacks[(uint32_t)target].pushFrontOne(*new Callback(this, fn, type, data));

    // Callback added after event reached its "completed" state
    if (updateStatusAndCheckCompletion()) {
        int32_t status = executionStatus;
        DBG_LOG(EventsDebugEnable, "event", this, "addCallback executing callbacks with status", status);
        executeCallbacks(status);
    }

    if (peekHasCallbacks() && !isUserEvent() && DebugManager.flags.EnableAsyncEventsHandler.get()) {
        ctx->getAsyncEventsHandler().registerEvent(this);
    }

    decRefInternal();
}

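// Executes and destroys all callbacks registered for every target up to and including the one
// matching the given status; for terminated events every callback is fired with the termination status.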
void Event::executeCallbacks(int32_t executionStatusIn) {
    int32_t execStatus = executionStatusIn;
    bool terminated = isStatusCompletedByTermination(execStatus);
    ECallbackTarget target;
    if (terminated) {
        target = ECallbackTarget::Completed;
    } else {
        target = translateToCallbackTarget(execStatus);
        if (target == ECallbackTarget::Invalid) {
            DEBUG_BREAK_IF(true);
            return;
        }
    }

    // run through all needed callback targets and execute callbacks
    for (uint32_t i = 0; i <= (uint32_t)target; ++i) {
        auto cb = callbacks[i].detachNodes();
        auto curr = cb;
        while (curr != nullptr) {
            auto next = curr->next;
            if (terminated) {
                curr->overrideCallbackExecutionStatusTarget(execStatus);
            }
            DBG_LOG(EventsDebugEnable, "event", this, "executing callback", "ECallbackTarget", (uint32_t)target);
            curr->execute();
            decRefInternal();
            delete curr;
            curr = next;
        }
    }
}

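// Flushes batched submissions on the associated command stream receiver, but only when the event
// is not yet complete and is not blocked (i.e. it already has a task level assigned).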
void Event::tryFlushEvent() {
    // only if the event is not completed; a completed event has already been flushed
    if (cmdQueue && updateStatusAndCheckCompletion() == false) {
        // flush the command queue only if the event is not blocked
        if (taskLevel != CompletionStamp::levelNotReady) {
            cmdQueue->getGpgpuCommandStreamReceiver().flushBatchedSubmissions();
        }
    }
}

void Event::setQueueTimeStamp() {
    if (this->profilingEnabled && (this->cmdQueue != nullptr)) {
        this->cmdQueue->getDevice().getOSTime()->getCpuTime(&queueTimeStamp.CPUTimeinNS);
    }
}

void Event::setSubmitTimeStamp() {
    if (this->profilingEnabled && (this->cmdQueue != nullptr)) {
        this->cmdQueue->getDevice().getOSTime()->getCpuTime(&submitTimeStamp.CPUTimeinNS);
    }
}

void Event::setStartTimeStamp() {
    if (this->profilingEnabled && (this->cmdQueue != nullptr)) {
        this->cmdQueue->getDevice().getOSTime()->getCpuTime(&startTimeStamp);
    }
}

void Event::setEndTimeStamp() {
    if (this->profilingEnabled && (this->cmdQueue != nullptr)) {
        this->cmdQueue->getDevice().getOSTime()->getCpuTime(&endTimeStamp);
        completeTimeStamp = endTimeStamp;
    }
}

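// Lazily acquires a HW timestamp tag node from the CSR's event timestamp allocator;
// the node is returned to the allocator in the destructor.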
TagNode<HwTimeStamps> *Event::getHwTimeStampNode() {
    if (!timeStampNode) {
        timeStampNode = cmdQueue->getGpgpuCommandStreamReceiver().getEventTsAllocator()->getTag();
    }
    return timeStampNode;
}

TagNode<HwPerfCounter> *Event::getHwPerfCounterNode() {

    if (!perfCounterNode && cmdQueue->getPerfCounters()) {
        const uint32_t gpuReportSize = cmdQueue->getPerfCounters()->getGpuReportSize();
        perfCounterNode = cmdQueue->getGpgpuCommandStreamReceiver().getEventPerfCountAllocator(gpuReportSize)->getTag();
    }
    return perfCounterNode;
}

void Event::addTimestampPacketNodes(const TimestampPacketContainer &inputTimestampPacketContainer) {
    timestampPacketContainer->assignAndIncrementNodesRefCounts(inputTimestampPacketContainer);
}

TimestampPacketContainer *Event::getTimestampPacketNodes() const { return timestampPacketContainer.get(); }

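// Returns true when any event in the wait list is not yet ready for submission,
// i.e. its task level has not been resolved (typically because it is blocked by a user event).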
bool Event::checkUserEventDependencies(cl_uint numEventsInWaitList, const cl_event *eventWaitList) {
    bool userEventsDependencies = false;

    for (uint32_t i = 0; i < numEventsInWaitList; i++) {
        auto event = castToObjectOrAbort<Event>(eventWaitList[i]);
        if (!event->isReadyForSubmission()) {
            userEventsDependencies = true;
            break;
        }
    }
    return userEventsDependencies;
}

} // namespace NEO