trace: add audio tracing and plotting script
Change-Id: Ie5c8fe1d9cc4e5a38a623a1cd4755c00ad4f4904

Committed by: Sébastien Blin
Parent: cb3d3c3c85
Commit: 53f60a1aa4
@@ -33,13 +33,13 @@ Enabling tracepoints
--------------------

To enable tracepoints in Jami, you should configure the project using the
-``--enable-tracepoint`` feature. You also need ``lttng-ust >= 2.13.0`` and
+``--enable-tracepoints`` feature. You also need ``lttng-ust >= 2.13.0`` and
``liburcu >= 0.13.0``.

How to define a tracepoint
--------------------------

-To define a new tracepoint, you need to add the definition in src/jami/tracepoint.h
+To define a new tracepoint, you need to add the definition in src/jami/tracepoint-def.h

It's recommended to use the ``LTTNG_UST_TRACEPOINT_EVENT`` macro and avoid using
the others unless you know what you're doing.
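
For reference, a minimal build sketch for the option mentioned above; the surrounding steps are an assumption based on the daemon's usual autotools flow, and only the ``--enable-tracepoints`` switch plus the lttng-ust/liburcu requirements come from the documentation text itself:

    # assumed autotools flow; only --enable-tracepoints is taken from the docs above
    ./autogen.sh
    ./configure --enable-tracepoints
    make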
@@ -56,7 +56,7 @@ NOTE! As the documentation of LTTng says, the concatenation of provider name +
tracepoint name must **not exceed 254 characters** or you will get bitten.

For example, here's the definition of a tracepoint for the scheduled executor in
-src/jami/tracepoint.h::
+src/jami/tracepoint-def.h::

    LTTNG_UST_TRACEPOINT_EVENT(
        jami,
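
Once a tracepoint is defined and the daemon is built with tracepoints enabled, a quick way to check that it fires is to record a short userspace session with the stock LTTng tools. A minimal sketch, assuming an instrumented daemon is already running (the session name is arbitrary):

    lttng create jami-check
    lttng enable-event --userspace 'jami:*'
    lttng start
    # ...exercise the daemon: place a call, start a conference...
    lttng stop
    lttng destroy jami-check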
@@ -30,6 +30,7 @@
#include "sip/siptransport.h"

#include "client/videomanager.h"
+#include "tracepoint.h"
#ifdef ENABLE_VIDEO
#include "call.h"
#include "video/video_input.h"
@@ -255,6 +256,7 @@ Conference::Conference(const std::shared_ptr<Account>& account)

    parser_.onVoiceActivity(
        [&](const auto& streamId, bool state) { setVoiceActivity(streamId, state); });
+    jami_tracepoint(conference_begin, id_.c_str());
}

Conference::~Conference()
@@ -306,6 +308,7 @@ Conference::~Conference()
        confAVStreams.clear();
    }
#endif // ENABLE_PLUGIN
+    jami_tracepoint(conference_end, id_.c_str());
}

Conference::State
@@ -655,6 +658,10 @@ Conference::addParticipant(const std::string& participant_id)
{
    JAMI_DBG("Adding call %s to conference %s", participant_id.c_str(), id_.c_str());

+    jami_tracepoint(conference_add_participant,
+                    id_.c_str(),
+                    participant_id.c_str());
+
    {
        std::lock_guard<std::mutex> lk(participantsMtx_);
        if (!participants_.insert(participant_id).second)
@@ -143,6 +143,92 @@ LTTNG_UST_TRACEPOINT_EVENT(
    )
)

LTTNG_UST_TRACEPOINT_EVENT(
    jami,
    audio_input_read_from_device_end,
    LTTNG_UST_TP_ARGS(
        const char*, id
    ),
    LTTNG_UST_TP_FIELDS(
        lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
    )
)

LTTNG_UST_TRACEPOINT_EVENT(
    jami,
    audio_layer_put_recorded_end,
    LTTNG_UST_TP_ARGS(
    ),
    LTTNG_UST_TP_FIELDS(
    )
)

LTTNG_UST_TRACEPOINT_EVENT(
    jami,
    audio_layer_get_to_play_end,
    LTTNG_UST_TP_ARGS(
    ),
    LTTNG_UST_TP_FIELDS(
    )
)

LTTNG_UST_TRACEPOINT_EVENT(
    jami,
    call_start,
    LTTNG_UST_TP_ARGS(
        const char*, id
    ),
    LTTNG_UST_TP_FIELDS(
        lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
    )
)

LTTNG_UST_TRACEPOINT_EVENT(
    jami,
    call_end,
    LTTNG_UST_TP_ARGS(
        const char*, id
    ),
    LTTNG_UST_TP_FIELDS(
        lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
    )
)

LTTNG_UST_TRACEPOINT_EVENT(
    jami,
    conference_begin,
    LTTNG_UST_TP_ARGS(
        const char*, id
    ),
    LTTNG_UST_TP_FIELDS(
        lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
    )
)

LTTNG_UST_TRACEPOINT_EVENT(
    jami,
    conference_end,
    LTTNG_UST_TP_ARGS(
        const char*, id
    ),
    LTTNG_UST_TP_FIELDS(
        lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
    )
)

LTTNG_UST_TRACEPOINT_EVENT(
    jami,
    conference_add_participant,
    LTTNG_UST_TP_ARGS(
        const char*, conference_id,
        const char*, participant_id
    ),
    LTTNG_UST_TP_FIELDS(
        lttng_ust_field_integer(uint64_t, id, strtoull(conference_id, NULL, 16))
        lttng_ust_field_integer(uint64_t, participant_id, strtoull(participant_id, NULL, 16))
    )
)

#endif /* TRACEPOINT_DEF_H */

#include <lttng/tracepoint-event.h>
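
The definitions above record the daemon's hexadecimal call and conference ids as 64-bit integer fields via ``strtoull(..., 16)``. After a capture, the resulting events can be pretty-printed with babeltrace2; a sketch, assuming a trace recorded under lttng's default output directory:

    # pretty-print only the jami events from a recorded trace
    babeltrace2 ~/lttng-traces/jami-*/ | grep 'jami:'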
@@ -29,6 +29,7 @@
#include "ringbuffer.h"
#include "ringbufferpool.h"
#include "smartools.h"
+#include "tracepoint.h"

#include <future>
#include <memory>
@@ -136,6 +137,8 @@ AudioInput::readFromDevice()
    if (bufferPool.getInternalAudioFormat() != format_)
        audioFrame = resampler_->resample(std::move(audioFrame), format_);
    resizer_->enqueue(std::move(audioFrame));

+    jami_tracepoint(audio_input_read_from_device_end, id_.c_str());
}

void
@@ -29,6 +29,7 @@
#include "client/ring_signal.h"

#include "audio-processing/null_audio_processor.h"
+#include "tracepoint.h"
#if HAVE_WEBRTC_AP
#include "audio-processing/webrtc.h"
#endif
@@ -132,9 +133,9 @@ static inline bool
shouldUseAudioProcessorEchoCancel(bool hasNativeAEC, const std::string& echoCancellerPref)
{
    return
-        // user doesn't care which and there is a system AEC
+        // user doesn't care which and there is not a system AEC
        (echoCancellerPref == "auto" && !hasNativeAEC)
-        // use specifically wants audioProcessor
+        // user specifically wants audioProcessor
        or (echoCancellerPref == "audioProcessor");
}
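
(For reference, the predicate above selects the bundled audio processor's echo canceller either when the preference is explicitly "audioProcessor", or when it is "auto" and no native system AEC is available.)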
@@ -158,6 +159,8 @@ AudioLayer::createAudioProcessor()
    auto nb_channels = std::max(audioFormat_.nb_channels, audioInputFormat_.nb_channels);
    auto sample_rate = std::max(audioFormat_.sample_rate, audioInputFormat_.sample_rate);

    // round sample_rate to nearest multiple of 16000 and clamp it.
    sample_rate = 16000u * ((sample_rate / 16000u) + 1u);
    sample_rate = std::clamp(sample_rate, 16000u, 48000u);

    AudioFormat formatForProcessor {sample_rate, nb_channels};
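
(Worked example for the rounding above: the expression rounds the rate up to the next multiple of 16000 before clamping, so a 44100 Hz device rate becomes 16000 * (2 + 1) = 48000 Hz, 16000 Hz becomes 32000 Hz, and 48000 Hz first becomes 64000 Hz and is then clamped back down to 48000 Hz.)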
@@ -318,6 +321,8 @@ AudioLayer::getToPlay(AudioFormat format, size_t writableSamples)
        break;
    }

+    jami_tracepoint(audio_layer_get_to_play_end);

    return playbackBuf;
}

@@ -334,6 +339,8 @@ AudioLayer::putRecorded(std::shared_ptr<AudioFrame>&& frame)
    } else {
        mainRingBuffer_->put(std::move(frame));
    }

+    jami_tracepoint(audio_layer_put_recorded_end, );
}

} // namespace jami
@@ -64,6 +64,8 @@
#include <opendht/thread_pool.h>
#include <fmt/ranges.h>

+#include "tracepoint.h"

namespace jami {

using sip_utils::CONST_PJ_STR;
@@ -106,6 +108,8 @@ SIPCall::SIPCall(const std::shared_ptr<SIPAccountBase>& account,
    , enableIce_(account->isIceForMediaEnabled())
    , srtpEnabled_(account->isSrtpEnabled())
{
+    jami_tracepoint(call_start, callId.c_str());

    if (account->getUPnPActive())
        upnp_.reset(new upnp::Controller());
tools/trace/audio/.gitignore (new file, 1 line)
@@ -0,0 +1 @@
out.png
tools/trace/audio/audio.py (new file, 152 lines)
@@ -0,0 +1,152 @@
#!/usr/bin/env python3
"""
Quick-and-dirty script to plot lttng trace data of the jami audio system

Depends on `python3-bt2` package:
https://babeltrace.org/docs/v2.0/python/bt2/

If using a virtual environment, you need to link the system babeltrace2 python3
package to your virtual environment, something like:
`ln -s /usr/lib/python3/dist-packages/bt2 "$VIRTUAL_ENV"/lib/python3*/site-packages/`

"""

import re
import dataclasses
from typing import Dict, List
import sys

import bt2
import matplotlib.pyplot as plt


@dataclasses.dataclass
class Intervals:
    """Keep track of timestamps and the intervals between them"""

    intervals: List[int] = dataclasses.field(default_factory=list)
    times: List[int] = dataclasses.field(default_factory=list)
    last_timestamp: int = 0

    def add_event(self, timestamp: int):
        """Add a new timestamp and calculate the interval"""
        if self.last_timestamp == 0:
            self.intervals.append(0)
        else:
            self.intervals.append(timestamp - self.last_timestamp)

        self.times.append(timestamp)

        self.last_timestamp = timestamp

    def filter(self, earliest: int, latest: int):
        """Filter out entries that are not in between `earliest` and `latest` timestamps"""
        new_intervals = []
        new_times = []
        for i, val in enumerate(self.times):
            if val > earliest and val < latest:
                new_times.append(val)
                new_intervals.append(self.intervals[i])

        self.times = new_times
        self.intervals = new_intervals


def analyze(filename: str):
    """Process trace file"""
    # running_averages: Dict[str, RunningAverage] = {}
    intervals: Dict[str, Intervals] = {}
    desired_event_regex = re.compile("jami:(call|audio|conference)")

    for message in bt2.TraceCollectionMessageIterator(filename):
        # pylint: disable=protected-access
        if not isinstance(message, bt2._EventMessageConst):
            continue
        ns_from_origin = message.default_clock_snapshot.ns_from_origin
        name = message.event.name

        if not desired_event_regex.match(name):
            continue

        if "id" in message.event.payload_field and name != "jami:conference_add_participant":
            name = f"{name}({str(message.event.payload_field['id'])})"

        if not name in intervals:
            # running_averages[name] = RunningAverage()
            intervals[name] = Intervals()

        # running_averages[name].add_val(ns_from_origin)
        intervals[name].add_event(ns_from_origin)

        # print(f"{ns_from_origin / 10e9:.9f}: {name}")

    for key, val in intervals.items():
        print(
            f"event: {key}, average: {(sum(val.intervals) / len(val.intervals)) / 1e9:.9f}"
        )

    earliest_recorded = intervals["jami:audio_layer_put_recorded_end"].times[0]
    latest_recorded = intervals["jami:audio_layer_put_recorded_end"].times[-1]

    earliest_recorded += int(1e9)  # start graph 1 second later

    print(
        f"earliest: {earliest_recorded / 1e9 :.9f}, latest: {latest_recorded / 1e9:.9f}"
    )

    for _, interval_val in intervals.items():
        interval_val.filter(earliest_recorded, latest_recorded)

    # convert from unix timestamp to seconds since start
    for _, interval_val in intervals.items():
        for i, _ in enumerate(interval_val.times):
            interval_val.times[i] -= earliest_recorded

    plot(intervals, filename)


def plot(intervals: Dict[str, Intervals], filename: str):
    """Plot audio event data"""
    add_part = intervals["jami:conference_add_participant"]

    fig, axis = plt.subplots(figsize=(20, 9))

    axis.set_xlabel("seconds since start")
    axis.set_ylabel("interval between events (seconds)")
    axis.set_title("jami:audio*")

    # only plot audio events
    events_to_plot_regex = re.compile("jami:audio")

    # plot each desired interval
    for interval_key, interval_val in intervals.items():
        if not events_to_plot_regex.match(interval_key):
            continue

        axis.plot(
            [i / 1e9 for i in interval_val.times],
            [i / 1e9 for i in interval_val.intervals],
            label=interval_key,
        )

    # generate vertical line for each time a new participant is added to conference
    for time in add_part.times:
        axis.axvline(
            time / 1e9,
            color="black",
            ls="--",
        )

    axis.legend()

    # save it as out.png
    filename = "out.png"
    fig.savefig(filename, dpi=100)
    print(f"png saved as {filename}")


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("usage: python3 audio.py <lttng trace directory>", file=sys.stderr)
        sys.exit(-1)
    analyze(sys.argv[1])
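
A sketch of how the script is typically run; the trace path is only an example (any LTTng trace directory containing the jami events will do), and the bt2 symlink is needed only inside a virtual environment, as noted in the docstring:

    # expose the system bt2 bindings when running inside a virtualenv:
    # ln -s /usr/lib/python3/dist-packages/bt2 "$VIRTUAL_ENV"/lib/python3*/site-packages/

    # plot the recorded audio events; writes out.png to the current directory
    python3 tools/trace/audio/audio.py ~/lttng-traces/jami-20240101-120000/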
tools/trace/audio/lttng-setup.sh (new executable file, 65 lines)
@@ -0,0 +1,65 @@
#!/bin/sh

# this script sets up an lttng session and enables triggers for call start and conference end
# ideally, this script would be more configurable; right now it's hardcoded and hacky

if lttng status | grep -q "Recording session"; then
    echo "lttng session already active, exiting"
    exit 1
fi

SESSION="jami"

# Prepare a capture session

lttng create "$SESSION"

# Enable kernel and user channels

# lttng enable-channel -k --subbuf-size 4096K kernel
lttng enable-channel -u --subbuf-size 1024K user

# Enable all kernel syscalls tracing
# lttng enable-event --kernel --syscall --all --channel=kernel

# Enable sched_* (scheduler) functions tracing
# lttng enable-event --kernel --channel=kernel "$(lttng list --kernel | grep sched_ | awk '{ print $1 }' | xargs | sed -e 's/ /,/g')"

# Enable net_* functions tracing
# lttng enable-event --kernel --channel=kernel "$(lttng list --kernel | grep net_ | awk '{ print $1 }' | xargs | sed -e 's/ /,/g')"

# Userspace tracing
# Add support for tracef() traces
# You need to link application with @-llttng-ust@
# lttng enable-event --userspace 'lttng_ust_tracef:*' --channel=user

# Add context for libc (including malloc, etc)
export LD_PRELOAD="$LD_PRELOAD":liblttng-ust-libc-wrapper.so
lttng enable-event --userspace 'lttng_ust_libc:*' --channel=user
lttng add-context --userspace -t vtid -t procname

# Add context for pthread (including mutexes)
export LD_PRELOAD="$LD_PRELOAD":liblttng-ust-pthread-wrapper.so
lttng enable-event --userspace 'lttng_ust_pthread:*' --channel=user

# Add context for function profiling
export LD_PRELOAD="$LD_PRELOAD":liblttng-ust-cyg-profile.so
lttng enable-event --userspace --all --channel=user
lttng add-context --userspace -t vpid -t vtid -t procname

# loop over lines in the for-loop below, since trigger names might contain spaces
OLDIFS="$IFS"
IFS='
'

# remove all triggers
for trigger in $(lttng list-triggers | sed -nr 's/- name: (.*)$/\1/p'); do
    echo "removing trigger: $trigger"
    lttng remove-trigger "$trigger"
done

IFS=$OLDIFS

# add start and end trigger
lttng add-trigger --name "jami call start" --condition=event-rule-matches --type=user --name='jami:call_start' --action=start-session $SESSION
lttng add-trigger --name "jami conference end" --condition=event-rule-matches --type=user --name="jami:conference_end" --action=stop-session $SESSION
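
A sketch of the end-to-end capture workflow around this script; the ordering is an assumption based on the triggers above (recording starts at the first jami:call_start and stops at jami:conference_end), and the trace location assumes lttng's default output directory under ~/lttng-traces:

    # 1. create the session, channels, contexts and triggers
    ./tools/trace/audio/lttng-setup.sh

    # 2. run a daemon built with --enable-tracepoints, place a call and run a
    #    conference; the triggers start and stop the recording automatically

    # 3. tear down the session and plot the result
    lttng destroy jami
    python3 tools/trace/audio/audio.py ~/lttng-traces/jami-*/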