Remove some comments

Correct some sound issues.
Using a fixed FRAME_PER_BUFFER is better than 0 (paFramesPerBufferUnspecified).
This commit is contained in:
yanmorin
2005-10-27 18:37:26 +00:00
parent 1e4ded3cbd
commit 59659d29c7
9 changed files with 132 additions and 160 deletions
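
The buffer-size change described above swaps paFramesPerBufferUnspecified (0), which lets PortAudio pick a callback size that can vary from call to call, for the fixed FRAME_PER_BUFFER constant, so the ring-buffer code sees a predictable amount of data per callback. A minimal sketch of the stream setup as it reads after this commit, assuming the portaudiocpp types already used in AudioLayer::openDevice (inParams/outParams are the direction-specific parameters built earlier in that function, and the callback binding is assumed to match the original call):

portaudio::StreamParameters const params(inParams, outParams,
    SAMPLING_RATE,
    FRAME_PER_BUFFER,   // fixed callback size; was paFramesPerBufferUnspecified (0)
    paNoFlag);
_stream = new portaudio::MemFunCallbackStream<AudioLayer>(params,
    *this, &AudioLayer::audioCallback);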

View File

@@ -90,7 +90,7 @@ AudioLayer::openDevice (int index)
// we could put paFramesPerBufferUnspecified instead of FRAME_PER_BUFFER to be variable
portaudio::StreamParameters const params(inParams, outParams,
SAMPLING_RATE, paFramesPerBufferUnspecified, paNoFlag /*paPrimeOutputBuffersUsingStreamCallback | paNeverDropInput*/);
SAMPLING_RATE, FRAME_PER_BUFFER /*paFramesPerBufferUnspecified*/, paNoFlag /*paPrimeOutputBuffersUsingStreamCallback | paNeverDropInput*/);
// Create (and open) a new Stream, using the AudioLayer::audioCallback
_stream = new portaudio::MemFunCallbackStream<AudioLayer>(params,
@@ -103,7 +103,6 @@ AudioLayer::startStream(void)
{
ost::MutexLock guard(_mutex);
if (_stream && !_stream->isActive()) {
_debug("Thread: start audiolayer stream\n");
_stream->start();
}
}
@@ -113,9 +112,10 @@ AudioLayer::stopStream(void)
{
ost::MutexLock guard(_mutex);
if (_stream && !_stream->isStopped()) {
_debug("Thread: stop audiolayer stream\n");
_stream->stop();
_stream->stop();
_mainSndRingBuffer.flush();
_urgentRingBuffer.flush();
_micRingBuffer.flush();
}
}
@@ -225,13 +225,11 @@ AudioLayer::audioCallback (const void *inputBuffer, void *outputBuffer,
toGet = (normalAvail < (int)framesPerBuffer * NBCHARFORTWOINT16) ? normalAvail : framesPerBuffer * NBCHARFORTWOINT16;
if (toGet) {
_mainSndRingBuffer.Get(out, toGet, spkrVolume);
} else {
toGet = framesPerBuffer;
_mainSndRingBuffer.PutZero(toGet);
_mainSndRingBuffer.Get(out, toGet, 100);
}
_mainSndRingBuffer.Get(out, toGet, spkrVolume);
} else {
bzero(out, framesPerBuffer * NBCHARFORTWOINT16);
}
}
}
// Additionally handle the mic's audio stream
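
In short, the reworked playback branch pulls at most one callback's worth of bytes from the main ring buffer and, when nothing is queued, writes silence straight into PortAudio's output buffer instead of taking the removed PutZero() detour through the ring buffer. A sketch of that logic in isolation (RingBuffer, NBCHARFORTWOINT16 and bzero() from <strings.h> are what the project already uses; the helper name is hypothetical):

// Sketch of the speaker-side callback path after this commit (not verbatim).
static void fillSpeakerBuffer(RingBuffer& mainSndRingBuffer, void* out,
                              unsigned long framesPerBuffer, unsigned short spkrVolume)
{
    const int wanted = framesPerBuffer * NBCHARFORTWOINT16;  // bytes per callback
    int avail = mainSndRingBuffer.AvailForGet();
    int toGet = (avail < wanted) ? avail : wanted;
    if (toGet > 0) {
        mainSndRingBuffer.Get(out, toGet, spkrVolume);        // copy with volume scaling
    } else {
        bzero(out, wanted);                                   // underrun: output silence
    }
}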

View File

@@ -47,27 +47,16 @@ AudioRtp::~AudioRtp (void) {
int
AudioRtp::createNewSession (SipCall *ca) {
// Start RTP Send/Receive threads
ca->enable_audio = 1;
if (Manager::instance().getConfigInt(SIGNALISATION,SYMMETRIC)) {
_symmetric = true;
} else {
_symmetric = false;
}
// Start RTP Send/Receive threads
ca->enable_audio = 1;
_symmetric = Manager::instance().getConfigInt(SIGNALISATION,SYMMETRIC) ? true : false;
_RTXThread = new AudioRtpRTX (ca, Manager::instance().getAudioDriver(), _symmetric);
_RTXThread = new AudioRtpRTX (ca, Manager::instance().getAudioDriver(),
_symmetric);
// Start PortAudio
//Manager::instance().getAudioDriver()->flushMic();
//Manager::instance().getAudioDriver()->startStream();
//_debug("AudioRtp::createNewSession: starting RTX thread\n");
if (_RTXThread->start() != 0) {
return -1;
}
return 0;
if (_RTXThread->start() != 0) {
return -1;
}
return 0;
}
@@ -91,12 +80,13 @@ AudioRtpRTX::AudioRtpRTX (SipCall *sipcall, AudioLayer* driver, bool sym) : _cod
std::string localipConfig = _ca->getLocalIp();
ost::InetHostAddress local_ip(localipConfig.c_str());
_debug("AudioRtpRTX ctor : Local IP:port %s:%d\tsymmetric:%d\n", local_ip.getHostname(), _ca->getLocalAudioPort(), _sym);
if (!_sym) {
_sessionRecv = new ost::RTPSession (local_ip, _ca->getLocalAudioPort());
_sessionSend = new ost::RTPSession (local_ip);
_session = NULL;
} else {
_debug("Symmetric RTP Session on local: %s:%d\n", localipConfig.c_str(), _ca->getLocalAudioPort());
_session = new ost::SymmetricRTPSession (local_ip, _ca->getLocalAudioPort());
_sessionRecv = NULL;
_sessionSend = NULL;
@@ -124,42 +114,38 @@ AudioRtpRTX::~AudioRtpRTX () {
void
AudioRtpRTX::initAudioRtpSession (void)
{
ost::InetHostAddress remote_ip(_ca->getRemoteSdpAudioIp());
if (!remote_ip) {
_debug("RTP: Target IP address [%s] is not correct!\n", _ca->getRemoteSdpAudioIp());
return;
}
// Initialization
if (!_sym) {
//_sessionRecv->setSchedulingTimeout (10000);
_sessionRecv->setExpireTimeout(1000000);
_sessionSend->setSchedulingTimeout(10000);
_sessionSend->setExpireTimeout(1000000);
} else {
_session->setSchedulingTimeout(10000);
_session->setExpireTimeout(1000000);
}
ost::InetHostAddress remote_ip(_ca->getRemoteSdpAudioIp());
if (!remote_ip) {
_debug("RTP: Target IP address [%s] is not correct!\n", _ca->getRemoteSdpAudioIp());
return;
}
if (!_sym) {
if (!_sessionSend->addDestination (remote_ip,
(unsigned short) _ca->getRemoteSdpAudioPort())) {
_debug("RTX send: could not connect to port %d\n",
_ca->getRemoteSdpAudioPort());
return;
}
_debug("RTP(Send): Added sessionSend destination %s:%d\n",
remote_ip.getHostname(), (unsigned short) _ca->getRemoteSdpAudioPort());
// Initialization
if (!_sym) {
//_sessionRecv->setSchedulingTimeout (10000);
_sessionRecv->setExpireTimeout(1000000);
_sessionSend->setSchedulingTimeout(10000);
_sessionSend->setExpireTimeout(1000000);
} else {
_session->setSchedulingTimeout(10000);
_session->setExpireTimeout(1000000);
}
if (!_sym) {
if (!_sessionSend->addDestination (remote_ip, (unsigned short) _ca->getRemoteSdpAudioPort())) {
_debug("RTX send: could not connect to port %d\n", _ca->getRemoteSdpAudioPort());
return;
}
_debug("RTP(Send): Added sessionSend destination %s:%d\n", remote_ip.getHostname(), (unsigned short) _ca->getRemoteSdpAudioPort());
//setPayloadFormat(StaticPayloadFormat(sptPCMU));
//_debug("Payload Format: %d\n", _ca->payload);
_sessionRecv->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _ca->payload));
_sessionSend->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _ca->payload));
_sessionRecv->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _ca->payload));
_sessionSend->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _ca->payload));
setCancel(cancelImmediate);
_sessionSend->setMark(true);
setCancel(cancelImmediate);
} else {
@@ -173,42 +159,46 @@ AudioRtpRTX::initAudioRtpSession (void)
_session->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _ca->payload));
setCancel(cancelImmediate);
}
Manager::instance().getAudioDriver()->flushMic();
Manager::instance().getAudioDriver()->flushMain();
_debug("== AudioRtpRTX::initAudioRtpSession end == \n");
}
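// Sketch (not part of this diff): the initialization above, reduced to the
// symmetric-RTP case. Resolve the peer address, set the ccRTP timeouts,
// register the destination, apply the payload format negotiated in the SDP,
// and allow immediate thread cancellation; error handling and the
// non-symmetric branch are trimmed.
ost::InetHostAddress remote_ip(_ca->getRemoteSdpAudioIp());
if (!remote_ip) return;                                  // bad target address
_session->setSchedulingTimeout(10000);                   // microseconds
_session->setExpireTimeout(1000000);
if (!_session->addDestination(remote_ip, (unsigned short) _ca->getRemoteSdpAudioPort()))
    return;                                              // could not reach the peer port
_session->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _ca->payload));
setCancel(cancelImmediate);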
void
// AudioRtpRTX::sendSessionFromMic (unsigned char* data_to_send, int16* data_from_mic, int16* data_from_mic_tmp, int timestamp, int micVolume)
AudioRtpRTX::sendSessionFromMic (unsigned char* data_to_send, int16* data_from_mic, int16* data_from_mic_tmp, int timestamp)
AudioRtpRTX::sendSessionFromMic (unsigned char* data_to_send, int16* data_from_mic_stereo, int16* data_from_mic_mono, int timestamp)
{
int k;
int compSize;
// Control volume for micro
int availFromMic = Manager::instance().getAudioDriver()->micRingBuffer().AvailForGet();
int availBytesFromMic = Manager::instance().getAudioDriver()->micRingBuffer().AvailForGet();
int maxBytesToGet = RTP_FRAMES2SEND * 2 * 2; // * channels * int16/byte
int bytesAvail;
if (availFromMic < (int)RTP_FRAMES2SEND) {
bytesAvail = availFromMic;
// take the lower
if (availBytesFromMic < maxBytesToGet) {
bytesAvail = availBytesFromMic;
} else {
bytesAvail = (int)RTP_FRAMES2SEND;
bytesAvail = maxBytesToGet;
}
// Get bytes from micRingBuffer to data_from_mic
Manager::instance().getAudioDriver()->micRingBuffer().Get(data_from_mic, SAMPLES_SIZE(bytesAvail), 100);
Manager::instance().getAudioDriver()->startStream();
Manager::instance().getAudioDriver()->micRingBuffer().Get(data_from_mic_stereo, bytesAvail, 100);
// control volume and stereo->mono
for (int j = 0; j < RTP_FRAMES2SEND; j++) {
// the j is in int16 RTP_FRAMES2SEND
// data_from_mic_mono = 0 to RTP_FRAME2SEND [in int16]
for (int j = 0, k=0; j < bytesAvail/4; j++) {
k = j<<1;
data_from_mic_tmp[j] = (int16)(0.5f*(data_from_mic[k] + data_from_mic[k+1]));
//micVolume/100);
data_from_mic_mono[j] = (int16)(0.5f*(data_from_mic_stereo[k] + data_from_mic_stereo[k+1]));
}
if ( bytesAvail != maxBytesToGet ) {
// fill end with 0...
bzero(data_from_mic_mono + (bytesAvail/4), (maxBytesToGet-bytesAvail)/2);
}
if ( _ca != NULL ) {
// Encode acquired audio sample
AudioCodec* ac = _ca->getAudioCodec();
if ( ac != NULL ) {
compSize = ac->codecEncode (data_to_send, data_from_mic_tmp, RTP_FRAMES2SEND*2);
// for the mono: range = 0 to RTP_FRAME2SEND * sizeof(int16)
// codecEncode(char *dest, int16* src, size in bytes of the src)
int compSize = ac->codecEncode (data_to_send, data_from_mic_mono, RTP_FRAMES2SEND*2);
// encode divise by two
// Send encoded audio sample over the network
if (!_sym) {
_sessionSend->putData(timestamp, data_to_send, compSize);
@@ -220,21 +210,18 @@ AudioRtpRTX::sendSessionFromMic (unsigned char* data_to_send, int16* data_from_m
}
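// Sketch (not part of this diff): the send path above in one piece. The
// bytesAvail stereo bytes read from the mic ring buffer are averaged down to
// mono int16 samples, any shortfall is zero-filled, and the codec's output
// size (roughly half the input, per the "encode divise by two" note) is what
// actually goes on the wire. Names reuse the function above.
int monoSamples = bytesAvail / 4;                      // 2 channels * 2 bytes per sample
for (int j = 0; j < monoSamples; j++) {
    data_from_mic_mono[j] = (int16)(0.5f * (data_from_mic_stereo[2*j]
                                          + data_from_mic_stereo[2*j + 1]));
}
bzero(data_from_mic_mono + monoSamples,                // pad a short read with silence
      (RTP_FRAMES2SEND - monoSamples) * sizeof(int16));
int compSize = ac->codecEncode(data_to_send, data_from_mic_mono,
                               RTP_FRAMES2SEND * 2);   // src size given in bytes
_sessionSend->putData(timestamp, data_to_send, compSize);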
void
AudioRtpRTX::receiveSessionForSpkr (int16* data_for_speakers,
// int16* data_for_speakers_tmp, int spkrVolume, int& countTime)
int16* data_for_speakers_tmp, int& countTime)
AudioRtpRTX::receiveSessionForSpkr (int16* data_for_speakers_stereo, int16* data_for_speakers_recv, int& countTime)
{
int k;
const ost::AppDataUnit* adu = NULL;
const ost::AppDataUnit* adu = NULL;
// Get audio data stream
if (!_sym) {
adu = _sessionRecv->getData(_sessionRecv->getFirstTimestamp());
} else {
adu = _session->getData(_session->getFirstTimestamp());
}
if (adu == NULL) {
Manager::instance().getAudioDriver()->flushMain();
//Manager::instance().getAudioDriver()->flushMain();
return;
}
@@ -246,20 +233,25 @@ AudioRtpRTX::receiveSessionForSpkr (int16* data_for_speakers,
int expandedSize = 0;
AudioCodec* ac = _codecBuilder.alloc(payload, "");
if (ac != NULL) {
expandedSize = ac->codecDecode (data_for_speakers, data, size);
}
// codecDecode(int16 *dest, char* src, size in bytes of the src)
// decode multiply by two
// size shall be RTP_FRAME2SEND or lower
expandedSize = ac->codecDecode(data_for_speakers_recv, data, size);
}
ac = NULL;
// control volume for speakers and mono->stereo
for (int j = 0; j < expandedSize; j++) {
// expandedSize is in bytes for data_for_speakers_recv
// data_for_speakers_recv are in int16
for (int j = 0, k=0; j < expandedSize/2; j++) {
k = j<<1; // fast multiply by two
data_for_speakers_tmp[k] = data_for_speakers_tmp[k+1] = data_for_speakers[j];
// * spkrVolume/100;
data_for_speakers_stereo[k] = data_for_speakers_stereo[k+1] = data_for_speakers_recv[j];
}
// If the current call is the call which is answered
// Set decoded data to sound device
Manager::instance().getAudioDriver()->putMain(data_for_speakers_tmp, SAMPLES_SIZE(RTP_FRAMES2SEND));
// expandedSize is in mono/bytes, since we double in stereo, we send two time more
Manager::instance().getAudioDriver()->putMain(data_for_speakers_stereo, expandedSize*2);
//}
// Notify (with a beep) an incoming call when there is already a call
@@ -270,80 +262,69 @@ AudioRtpRTX::receiveSessionForSpkr (int16* data_for_speakers,
Manager::instance().notificationIncomingCall();
}
}
Manager::instance().getAudioDriver()->startStream();
delete adu; adu = NULL;
}
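// Sketch (not part of this diff): the matching receive path. One application
// data unit is pulled from the session, decoded into mono int16 samples
// (expandedSize is a byte count), duplicated into left and right channels,
// and handed to the main ring buffer; hence the *2 on the stereo write. The
// cast on the payload follows the codecDecode(int16*, char*, int) comment above.
const ost::AppDataUnit* adu = _session->getData(_session->getFirstTimestamp());
if (adu == NULL) return;                               // nothing received this cycle
int expandedSize = ac->codecDecode(data_for_speakers_recv,
                                   (char*) adu->getData(), adu->getSize());
for (int j = 0; j < expandedSize / 2; j++) {           // mono -> stereo duplication
    data_for_speakers_stereo[2*j]     = data_for_speakers_recv[j];
    data_for_speakers_stereo[2*j + 1] = data_for_speakers_recv[j];
}
Manager::instance().getAudioDriver()->putMain(data_for_speakers_stereo, expandedSize * 2);
delete adu;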
void
AudioRtpRTX::run (void) {
// int micVolume;
// int spkrVolume;
unsigned char *data_to_send;
int16 *data_from_mic;
int16 *data_from_mic_tmp;
int timestamp;
int16 *data_for_speakers = NULL;
int16 *data_for_speakers_tmp = NULL;
int countTime = 0;
data_from_mic = new int16[SIZEDATA];
data_from_mic_tmp = new int16[SIZEDATA];
data_to_send = new unsigned char[SIZEDATA];
data_for_speakers = new int16[SIZEDATA];
data_for_speakers_tmp = new int16[SIZEDATA*2];
//mic, we receive from soundcard in stereo, and we send encoded
//encoding before sending
int16 *data_from_mic_stereo = new int16[RTP_FRAMES2SEND*2];
int16 *data_from_mic_mono = new int16[RTP_FRAMES2SEND];
unsigned char *char_to_send = new unsigned char[RTP_FRAMES2SEND]; // two time more for codec
// Init the session
initAudioRtpSession();
timestamp = 0;
//spkr, we receive from rtp in mono and we send in stereo
//decoding after receiving
int16 *data_for_speakers_recv = new int16[RTP_FRAMES2SEND];
int16 *data_for_speakers_stereo = new int16[RTP_FRAMES2SEND*2];
// TODO: get frameSize from user config
int frameSize = 20; // 20ms frames
TimerPort::setTimer(frameSize);
// Init the session
initAudioRtpSession();
// flush stream:
ManagerImpl& manager = Manager::instance();
AudioLayer *audiolayer = manager.getAudioDriver();
audiolayer->urgentRingBuffer().flush();
// start running the packet queue scheduler.
// start running the packet queue scheduler.
//_debug("Thread: start session of AudioRtpRTX\n");
if (!_sym) {
_sessionRecv->startRunning();
_sessionSend->startRunning();
} else {
_session->startRunning();
}
if (!_sym) {
_sessionRecv->startRunning();
_sessionSend->startRunning();
} else {
_session->startRunning();
_debug("Session is now: %d active?\n", _session->isActive());
}
int timestamp = 0; // for mic
int countTime = 0; // for receive
// TODO: get frameSize from user config
int frameSize = 20; // 20ms frames
TimerPort::setTimer(frameSize);
audiolayer->flushMic();
while (!testCancel() && _ca != NULL && _ca->enable_audio != -1) {
//micVolume = manager.getMicVolume();
//spkrVolume = manager.getSpkrVolume();
////////////////////////////
// Send session
////////////////////////////
//sendSessionFromMic(data_to_send, data_from_mic, data_from_mic_tmp, timestamp, micVolume);
sendSessionFromMic(data_to_send, data_from_mic, data_from_mic_tmp, timestamp);
sendSessionFromMic(char_to_send, data_from_mic_stereo, data_from_mic_mono, timestamp);
timestamp += RTP_FRAMES2SEND;
////////////////////////////
// Recv session
////////////////////////////
//receiveSessionForSpkr(data_for_speakers, data_for_speakers_tmp, spkrVolume, countTime);
receiveSessionForSpkr(data_for_speakers, data_for_speakers_tmp, countTime);
receiveSessionForSpkr(data_for_speakers_stereo, data_for_speakers_recv, countTime);
// Let's wait for the next transmit cycle
Thread::sleep(TimerPort::getTimer());
TimerPort::incTimer(frameSize); // 'frameSize' ms
}
delete [] data_for_speakers_tmp; data_for_speakers_tmp = 0;
delete [] data_for_speakers; data_for_speakers = 0;
delete [] data_to_send; data_to_send = 0;
delete [] data_from_mic_tmp; data_from_mic_tmp = 0;
delete [] data_from_mic; data_from_mic = 0;
delete [] data_for_speakers_stereo; data_for_speakers_stereo = 0;
delete [] data_for_speakers_recv; data_for_speakers_recv = 0;
delete [] char_to_send; char_to_send = 0;
delete [] data_from_mic_mono; data_from_mic_mono = 0;
delete [] data_from_mic_stereo; data_from_mic_stereo = 0;
audiolayer->stopStream();
}
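
Worth spelling out from the reworked run(): the transmit cycle is paced with ost::TimerPort rather than a fixed sleep, so the time spent encoding and decoding inside the loop does not accumulate as drift. The timer is armed once, each pass sleeps only for whatever remains of the current 20 ms slot, and the deadline is then pushed forward by one frame. A condensed sketch of that skeleton (buffer allocation and teardown omitted):

// Pacing skeleton of AudioRtpRTX::run() (sketch, details omitted).
int frameSize = 20;                        // ms per RTP frame (TODO: from user config)
int timestamp = 0;
int countTime = 0;
TimerPort::setTimer(frameSize);            // arm the first deadline
while (!testCancel() && _ca != NULL && _ca->enable_audio != -1) {
    sendSessionFromMic(char_to_send, data_from_mic_stereo, data_from_mic_mono, timestamp);
    timestamp += RTP_FRAMES2SEND;          // 160 samples = 20 ms at 8 kHz
    receiveSessionForSpkr(data_for_speakers_stereo, data_for_speakers_recv, countTime);
    Thread::sleep(TimerPort::getTimer());  // sleep only for the rest of this slot
    TimerPort::incTimer(frameSize);        // push the deadline one frame ahead
}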

View File

@@ -29,7 +29,6 @@
#define RTP_FRAMES2SEND 160
#define SIZEDATA SAMPLES_SIZE(RTP_FRAMES2SEND)
class AudioLayer;
class SipCall;

View File

@@ -62,14 +62,6 @@ RingBuffer::AvailForPut() const {
return (mBufferSize-4) - Len();
}
void
RingBuffer::PutZero(int toZero)
{
unsigned char p[toZero];
bzero(p, toZero);
Put(p, toZero);
}
// This one puts some data inside the ring buffer.
// Change the volume if it's not 100
int
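
For context on the volume parameter that Put() and Get() take (and that the removed PutZero() never needed): it is a percentage applied sample by sample while copying, with 100 meaning pass-through. A simplified sketch of just that scaling step; the real methods also handle the circular indices and wrap-around, omitted here, and the helper name is hypothetical:

// Sketch of the volume scaling inside Put()/Get(); 'len' is a byte count,
// matching the rest of this class. memcpy() comes from <cstring>.
static void copyWithVolume(int16* dst, const int16* src, int len, unsigned short volume)
{
    int samples = len / sizeof(int16);
    if (volume == 100) {
        memcpy(dst, src, len);                            // fast path: no scaling
    } else {
        for (int i = 0; i < samples; i++)
            dst[i] = (int16)((int) src[i] * volume / 100);
    }
}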

View File

@@ -44,7 +44,6 @@ class RingBuffer {
//
int AvailForPut (void) const;
int Put (void*, int, unsigned short volume = 100);
void PutZero(int);
//
// For the reader only:

View File

@@ -49,6 +49,7 @@ public:
* @return the number of int16 sent (nb*2)
*/
int getNext(int16* output, int nb, short volume=100);
void reset() { _pos = 0; }
private:
/**

View File

@@ -123,6 +123,9 @@ void
TelephoneTone::setCurrentTone(Tone::TONEID toneId)
{
_currentTone = toneId;
if ( _currentTone != Tone::TONE_NULL ) {
_tone[_currentTone]->reset();
}
}
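// Sketch (not part of this diff): why the reset matters. getNext() keeps an
// internal position so successive calls stream consecutive samples; rewinding
// it whenever a tone is (re)selected makes the tone start from sample zero
// instead of resuming mid-waveform. 'tone' is an assumed Tone* and 160 an
// arbitrary request size for illustration.
tone->reset();                              // what setCurrentTone() now triggers
int16 block[160 * 2];                       // getNext() writes nb*2 int16
int written = tone->getNext(block, 160, 100 /* volume */);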
Tone*

View File

@@ -572,16 +572,14 @@ ManagerImpl::playDtmf(char code)
}
AudioLayer *audiolayer = getAudioDriver();
_toneMutex.enterMutex();
audiolayer->urgentRingBuffer().flush();
// Put buffer to urgentRingBuffer
// put the size in bytes...
// so size * CHANNELS * 2 (bytes for the int16)
int nbInt16InChar = sizeof(int16)/sizeof(char);
int toSend = audiolayer->urgentRingBuffer().AvailForPut();
if (toSend > size * CHANNELS * nbInt16InChar ) {
if (toSend > (size * CHANNELS * nbInt16InChar)) {
toSend = size * CHANNELS * nbInt16InChar;
}
audiolayer->urgentRingBuffer().Put(buf_ctrl_vol, toSend);
@@ -589,14 +587,13 @@ ManagerImpl::playDtmf(char code)
// We activate the stream if it's not active yet.
if (!audiolayer->isStreamActive()) {
audiolayer->startStream();
audiolayer->sleep(pulselen);
audiolayer->urgentRingBuffer().flush();
audiolayer->stopStream();
//TODO: Is this really what we want?
//audiolayer->sleep(pulselen);
//audiolayer->stopStream();
} else {
audiolayer->sleep(pulselen); // in milliseconds
}
_toneMutex.leaveMutex();
//setZonetone(false);
delete[] buf_ctrl_vol; buf_ctrl_vol = 0;
returnValue = true;
}
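
The byte accounting above is easy to misread: size is a sample count coming from the tone generator, so the amount to queue is size * CHANNELS * sizeof(int16) bytes, clamped to whatever the urgent ring buffer can still absorb. A restated sketch, reusing the names from the code above (CHANNELS is the project constant, stereo on this code path):

// Sketch of the clamp in playDtmf() (not part of this diff).
int nbInt16InChar = sizeof(int16) / sizeof(char);        // 2 bytes per sample
int wantedBytes   = size * CHANNELS * nbInt16InChar;     // samples -> bytes, all channels
int toSend = audiolayer->urgentRingBuffer().AvailForPut();
if (toSend > wantedBytes)
    toSend = wantedBytes;                                 // never overfill the ring buffer
audiolayer->urgentRingBuffer().Put(buf_ctrl_vol, toSend);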

View File

@@ -487,12 +487,13 @@ SipVoIPLink::onhold (CALLID id)
eXosip_lock ();
// Send request
_audiortp.closeRtpSession();
i = eXosip_call_send_request (did, invite);
eXosip_unlock ();
// Disable audio
sipcall->enable_audio = false;
_audiortp.closeRtpSession();
return i;
}
@@ -609,7 +610,8 @@ SipVoIPLink::refuse (CALLID id)
int
SipVoIPLink::getEvent (void)
{
eXosip_event_t* event = eXosip_event_wait (0, 1);
// wait for 0 s, 50 ms
eXosip_event_t* event = eXosip_event_wait (0, 50);
eXosip_lock();
eXosip_automatic_action();
eXosip_unlock();
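// Sketch (not part of this diff): shape of the polling cycle after the
// timeout change. eXosip_event_wait(0, 50) now blocks for up to 50 ms instead
// of returning almost immediately, which keeps getEvent() from spinning,
// while eXosip_automatic_action() still runs on every pass to handle
// retransmissions and authentication.
eXosip_event_t* event = eXosip_event_wait(0, 50);   // up to 0 s + 50 ms
eXosip_lock();
eXosip_automatic_action();
eXosip_unlock();
if (event != NULL) {
    // ... dispatch on event->type (EXOSIP_CALL_RELEASED, ...) then release it
    eXosip_event_free(event);
}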
@@ -786,11 +788,11 @@ SipVoIPLink::getEvent (void)
}
break;
case EXOSIP_CALL_RELEASED:
id = findCallId(event);
if (id!=0) {
Manager::instance().peerHungupCall(id);
deleteSipCall(id);
}
//id = findCallId(event);
//if (id!=0) {
//Manager::instance().peerHungupCall(id);
//deleteSipCall(id);
//}
break;
case EXOSIP_CALL_REQUESTFAILURE:
id = findCallId(event);