From 1d7dec739a4a7a06cfe70e4f76d85e577ae24f7f Mon Sep 17 00:00:00 2001 From: Hans Petter Selasky Date: Thu, 20 Aug 2020 18:34:34 +0200 Subject: [PATCH] Switch all Jamulus audio sample processing to use floats instead of a mix of double and int16_t . MacOS, Linux and Android already do this, and ASIO also supports it. Change recording format to be 24-bit WAV, to get the full precision of the mixed audio! This patch gives the user the full resolution of the audio device, both when receiving audio and transmitting audio. Cleanup LSB/MSB sample processing in ASIO driver while at it. LSB and MSB indicate little-endian and big-endian data format presumably. Signed-off-by: Hans Petter Selasky --- android/sound.cpp | 22 +- android/sound.h | 6 +- linux/sound.cpp | 15 +- linux/sound.h | 6 +- mac/sound.cpp | 24 +- mac/sound.h | 4 +- src/buffer.h | 20 +- src/channel.cpp | 46 +-- src/channel.h | 28 +- src/client.cpp | 106 ++--- src/client.h | 26 +- src/global.h | 2 - src/recorder/cwavestream.h | 4 +- src/recorder/jamcontroller.cpp | 2 +- src/recorder/jamcontroller.h | 4 +- src/recorder/jamrecorder.cpp | 10 +- src/recorder/jamrecorder.h | 6 +- src/server.cpp | 147 +++---- src/server.h | 22 +- src/soundbase.cpp | 4 +- src/soundbase.h | 8 +- src/util.cpp | 120 +++--- src/util.h | 72 ++-- src/vstmain.cpp | 8 +- src/vstsound.h | 8 +- windows/sound.cpp | 716 +++++++++++---------------------- windows/sound.h | 53 ++- 27 files changed, 642 insertions(+), 847 deletions(-) diff --git a/android/sound.cpp b/android/sound.cpp index bb7105df28..52825ad7af 100644 --- a/android/sound.cpp +++ b/android/sound.cpp @@ -27,7 +27,7 @@ /* Implementation *************************************************************/ -CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -193,7 +193,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) 
iOpenSLBufferSizeStereo = 2 * iOpenSLBufferSizeMono; // create memory for intermediate audio buffer - vecsTmpAudioSndCrdStereo.Init ( iOpenSLBufferSizeStereo ); + vecfTmpAudioSndCrdStereo.Init ( iOpenSLBufferSizeStereo ); // TEST #if ( SYSTEM_SAMPLE_RATE_HZ != 48000 ) @@ -205,7 +205,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) // 48 kHz / 16 kHz = factor 3 (note that the buffer size mono might // be divisible by three, therefore we will get a lot of drop outs) iModifiedInBufSize = iOpenSLBufferSizeMono / 3; -vecsTmpAudioInSndCrd.Init ( iModifiedInBufSize ); +vecfTmpAudioInSndCrd.Init ( iModifiedInBufSize ); return iOpenSLBufferSizeMono; } @@ -238,7 +238,7 @@ oboe::DataCallbackResult CSound::onAudioReady ( oboe::AudioStream* oboeStream, v memset ( audioData, 0, sizeof(float) * numFrames * oboeStream->getChannelCount() ); // Only copy data if we have data to copy, otherwise fill with silence - if ( !pSound->vecsTmpAudioSndCrdStereo.empty() ) + if ( !pSound->vecfTmpAudioSndCrdStereo.empty() ) { for ( int frmNum = 0; frmNum < numFrames; ++frmNum ) { @@ -246,11 +246,9 @@ oboe::DataCallbackResult CSound::onAudioReady ( oboe::AudioStream* oboeStream, v { // copy sample received from server into output buffer - // convert to 32 bit - const int32_t iCurSam = static_cast ( - pSound->vecsTmpAudioSndCrdStereo [frmNum * oboeStream->getChannelCount() + channelNum] ); - - floatData[frmNum * oboeStream->getChannelCount() + channelNum] = (float) iCurSam / _MAXSHORT; + const float fCurSam = + pSound->vecfTmpAudioSndCrdStereo [frmNum * oboeStream->getChannelCount() + channelNum]; + floatData[frmNum * oboeStream->getChannelCount() + channelNum] = fCurSam; } } } @@ -282,13 +280,13 @@ oboe::DataCallbackResult CSound::onAudioReady ( oboe::AudioStream* oboeStream, v { for ( int channelNum = 0; channelNum < oboeStream->getChannelCount(); channelNum++ ) { - pSound->vecsTmpAudioSndCrdStereo[frmNum * oboeStream->getChannelCount() + channelNum] = - (short) floatData[frmNum * 
oboeStream->getChannelCount() + channelNum] * _MAXSHORT; + pSound->vecfTmpAudioSndCrdStereo [frmNum * oboeStream->getChannelCount() + channelNum] = + floatData[frmNum * oboeStream->getChannelCount() + channelNum]; } } // Tell parent class that we've put some data ready to send to the server - pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo ); + pSound->ProcessCallback ( pSound->vecfTmpAudioSndCrdStereo ); } // locker.unlock(); diff --git a/android/sound.h b/android/sound.h index b97c3ffe4e..1caeea55d5 100644 --- a/android/sound.h +++ b/android/sound.h @@ -36,7 +36,7 @@ class CSound : public CSoundBase, public oboe::AudioStreamCallback//, public IRenderableAudio, public IRestartable { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -54,7 +54,7 @@ class CSound : public CSoundBase, public oboe::AudioStreamCallback//, public IRe // these variables should be protected but cannot since we want // to access them from the callback function - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; static void android_message_handler ( QtMsgType type, const QMessageLogContext& context, @@ -74,7 +74,7 @@ class CSound : public CSoundBase, public oboe::AudioStreamCallback//, public IRe }; // TEST -CVector vecsTmpAudioInSndCrd; +CVector vecfTmpAudioInSndCrd; int iModifiedInBufSize; int iOpenSLBufferSizeMono; diff --git a/linux/sound.cpp b/linux/sound.cpp index 2e54fe2c24..74e22162fa 100755 --- a/linux/sound.cpp +++ b/linux/sound.cpp @@ -228,7 +228,7 @@ int CSound::Init ( const int /* iNewPrefMonoBufferSize */ ) iJACKBufferSizeStero = 2 * iJACKBufferSizeMono; // create memory for intermediate audio buffer - vecsTmpAudioSndCrdStereo.Init ( iJACKBufferSizeStero ); + vecfTmpAudioSndCrdStereo.Init ( iJACKBufferSizeStero ); return iJACKBufferSizeMono; } @@ -259,16 +259,13 @@ int CSound::process ( 
jack_nframes_t nframes, void* arg ) { for ( i = 0; i < pSound->iJACKBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i] = - (short) ( in_left[i] * _MAXSHORT ); - - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = - (short) ( in_right[i] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = in_left[i]; + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = in_right[i]; } } // call processing callback function - pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo ); + pSound->ProcessCallback ( pSound->vecfTmpAudioSndCrdStereo ); // get output data pointer jack_default_audio_sample_t* out_left = @@ -285,10 +282,10 @@ int CSound::process ( jack_nframes_t nframes, void* arg ) for ( i = 0; i < pSound->iJACKBufferSizeMono; i++ ) { out_left[i] = (jack_default_audio_sample_t) - pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT; + pSound->vecfTmpAudioSndCrdStereo[2 * i]; out_right[i] = (jack_default_audio_sample_t) - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT; + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1]; } } } diff --git a/linux/sound.h b/linux/sound.h index c64397e2c9..2412d1c04b 100755 --- a/linux/sound.h +++ b/linux/sound.h @@ -60,7 +60,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool bNoAutoJackConnect, @@ -78,7 +78,7 @@ class CSound : public CSoundBase // these variables should be protected but cannot since we want // to access them from the callback function - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; int iJACKBufferSizeMono; int iJACKBufferSizeStero; bool bJackWasShutDown; @@ -111,7 +111,7 @@ class CSound : public CSoundBase Q_OBJECT public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), void* pParg, const int 
iCtrlMIDIChannel, const bool , diff --git a/mac/sound.cpp b/mac/sound.cpp index 1d92e8f056..1a4f946034 100755 --- a/mac/sound.cpp +++ b/mac/sound.cpp @@ -26,7 +26,7 @@ /* Implementation *************************************************************/ -CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -848,7 +848,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) iCoreAudioBufferSizeStereo = 2 * iCoreAudioBufferSizeMono; // create memory for intermediate audio buffer - vecsTmpAudioSndCrdStereo.Init ( iCoreAudioBufferSizeStereo ); + vecfTmpAudioSndCrdStereo.Init ( iCoreAudioBufferSizeStereo ); return iCoreAudioBufferSizeMono; } @@ -970,8 +970,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { // copy left and right channels separately - pSound->vecsTmpAudioSndCrdStereo[2 * i] = (short) ( pLeftData[iNumChanPerFrameLeft * i + iSelInInterlChLeft] * _MAXSHORT ); - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = (short) ( pRightData[iNumChanPerFrameRight * i + iSelInInterlChRight] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = pLeftData[iNumChanPerFrameLeft * i + iSelInInterlChLeft]; + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = pRightData[iNumChanPerFrameRight * i + iSelInInterlChRight]; } // add an additional optional channel @@ -982,8 +982,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i] = Double2Short ( - pSound->vecsTmpAudioSndCrdStereo[2 * i] + pLeftData[iNumChanPerFrameLeft * i + iSelAddInInterlChLeft] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = clipFloat ( + pSound->vecfTmpAudioSndCrdStereo[2 * i] + pLeftData[iNumChanPerFrameLeft * i + iSelAddInInterlChLeft] ); } } @@ -994,19 +994,19 @@ OSStatus 
CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = Double2Short ( - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] + pRightData[iNumChanPerFrameRight * i + iSelAddInInterlChRight] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = clipFloat ( + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] + pRightData[iNumChanPerFrameRight * i + iSelAddInInterlChRight] ); } } } else { // incompatible sizes, clear work buffer - pSound->vecsTmpAudioSndCrdStereo.Reset ( 0 ); + pSound->vecfTmpAudioSndCrdStereo.Reset ( 0 ); } // call processing callback function - pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo ); + pSound->ProcessCallback ( pSound->vecfTmpAudioSndCrdStereo ); } if ( ( inDevice == pSound->CurrentAudioOutputDeviceID ) && outOutputData ) @@ -1028,8 +1028,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { // copy left and right channels separately - pLeftData[iNumChanPerFrameLeft * i + iSelOutInterlChLeft] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT; - pRightData[iNumChanPerFrameRight * i + iSelOutInterlChRight] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT; + pLeftData[iNumChanPerFrameLeft * i + iSelOutInterlChLeft] = (Float32) pSound->vecfTmpAudioSndCrdStereo[2 * i]; + pRightData[iNumChanPerFrameRight * i + iSelOutInterlChRight] = (Float32) pSound->vecfTmpAudioSndCrdStereo[2 * i + 1]; } } } diff --git a/mac/sound.h b/mac/sound.h index ce2eda21c4..2b13ad9e88 100755 --- a/mac/sound.h +++ b/mac/sound.h @@ -36,7 +36,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -63,7 +63,7 @@ class CSound : public CSoundBase // these variables should be protected but cannot 
since we want // to access them from the callback function - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; int iCoreAudioBufferSizeMono; int iCoreAudioBufferSizeStereo; AudioDeviceID CurrentAudioInputDeviceID; diff --git a/src/buffer.h b/src/buffer.h index 3caeb18b9e..d06a1a25f9 100755 --- a/src/buffer.h +++ b/src/buffer.h @@ -500,16 +500,16 @@ template class CConvBuf } } - void PutAll ( const CVector& vecsData ) + void PutAll ( const CVector& vecfData ) { iGetPos = 0; - std::copy ( vecsData.begin(), - vecsData.begin() + iBufferSize, // note that input vector might be larger then memory size + std::copy ( vecfData.begin(), + vecfData.begin() + iBufferSize, // note that input vector might be larger then memory size vecMemory.begin() ); } - bool Put ( const CVector& vecsData, + bool Put ( const CVector& vecfData, const int iVecSize ) { // calculate the input size and the end position after copying @@ -519,8 +519,8 @@ template class CConvBuf if ( iEnd <= iBufferSize ) { // copy new data in internal buffer - std::copy ( vecsData.begin(), - vecsData.begin() + iVecSize, + std::copy ( vecfData.begin(), + vecfData.begin() + iVecSize, vecMemory.begin() + iPutPos ); // set buffer pointer one block further @@ -540,7 +540,7 @@ template class CConvBuf return vecMemory; } - void GetAll ( CVector& vecsData, + void GetAll ( CVector& vecfData, const int iVecSize ) { iPutPos = 0; @@ -548,10 +548,10 @@ template class CConvBuf // copy data from internal buffer in given buffer std::copy ( vecMemory.begin(), vecMemory.begin() + iVecSize, - vecsData.begin() ); + vecfData.begin() ); } - bool Get ( CVector& vecsData, + bool Get ( CVector& vecfData, const int iVecSize ) { // calculate the input size and the end position after copying @@ -563,7 +563,7 @@ template class CConvBuf // copy new data from internal buffer std::copy ( vecMemory.begin() + iGetPos, vecMemory.begin() + iGetPos + iVecSize, - vecsData.begin() ); + vecfData.begin() ); // set buffer pointer one 
block further iGetPos = iEnd; diff --git a/src/channel.cpp b/src/channel.cpp index 8e74f3f5e8..a9a0881dc4 100755 --- a/src/channel.cpp +++ b/src/channel.cpp @@ -27,8 +27,8 @@ // CChannel implementation ***************************************************** CChannel::CChannel ( const bool bNIsServer ) : - vecdGains ( MAX_NUM_CHANNELS, 1.0 ), - vecdPannings ( MAX_NUM_CHANNELS, 0.5 ), + vecfGains ( MAX_NUM_CHANNELS, 1.0f ), + vecfPannings ( MAX_NUM_CHANNELS, 0.5f ), iCurSockBufNumFrames ( INVALID_INDEX ), bDoAutoSockBufSize ( true ), iFadeInCnt ( 0 ), @@ -36,7 +36,7 @@ CChannel::CChannel ( const bool bNIsServer ) : bIsEnabled ( false ), bIsServer ( bNIsServer ), iAudioFrameSizeSamples ( DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES ), - SignalLevelMeter ( false, 0.5 ) // server mode with mono out and faster smoothing + SignalLevelMeter ( false, 0.5f ) // server mode with mono out and faster smoothing { // reset network transport properties ResetNetworkTransportProperties(); @@ -255,8 +255,8 @@ bool CChannel::SetSockBufNumFrames ( const int iNewNumFrames, return ReturnValue; // set error flag } -void CChannel::SetGain ( const int iChanID, - const double dNewGain ) +void CChannel::SetGain ( const int iChanID, + const float dNewGain ) { QMutexLocker locker ( &Mutex ); @@ -264,27 +264,27 @@ void CChannel::SetGain ( const int iChanID, if ( ( iChanID >= 0 ) && ( iChanID < MAX_NUM_CHANNELS ) ) { // signal mute change - if ( ( vecdGains[iChanID] == 0 ) && ( dNewGain > 0 ) ) + if ( ( vecfGains[iChanID] == 0 ) && ( dNewGain > 0 ) ) { emit MuteStateHasChanged ( iChanID, false ); } - if ( ( vecdGains[iChanID] > 0 ) && ( dNewGain == 0 ) ) + if ( ( vecfGains[iChanID] > 0 ) && ( dNewGain == 0 ) ) { emit MuteStateHasChanged ( iChanID, true ); } - vecdGains[iChanID] = dNewGain; + vecfGains[iChanID] = dNewGain; } } -double CChannel::GetGain ( const int iChanID ) +float CChannel::GetGain ( const int iChanID ) { QMutexLocker locker ( &Mutex ); // get value (make sure channel ID is in range) if ( ( 
iChanID >= 0 ) && ( iChanID < MAX_NUM_CHANNELS ) ) { - return vecdGains[iChanID]; + return vecfGains[iChanID]; } else { @@ -292,26 +292,26 @@ double CChannel::GetGain ( const int iChanID ) } } -void CChannel::SetPan ( const int iChanID, - const double dNewPan ) +void CChannel::SetPan ( const int iChanID, + const float dNewPan ) { QMutexLocker locker ( &Mutex ); // set value (make sure channel ID is in range) if ( ( iChanID >= 0 ) && ( iChanID < MAX_NUM_CHANNELS ) ) { - vecdPannings[iChanID] = dNewPan; + vecfPannings[iChanID] = dNewPan; } } -double CChannel::GetPan ( const int iChanID ) +float CChannel::GetPan ( const int iChanID ) { QMutexLocker locker ( &Mutex ); // get value (make sure channel ID is in range) if ( ( iChanID >= 0 ) && ( iChanID < MAX_NUM_CHANNELS ) ) { - return vecdPannings[iChanID]; + return vecfPannings[iChanID]; } else { @@ -379,14 +379,14 @@ void CChannel::OnJittBufSizeChange ( int iNewJitBufSize ) } } -void CChannel::OnChangeChanGain ( int iChanID, - double dNewGain ) +void CChannel::OnChangeChanGain ( int iChanID, + float dNewGain ) { SetGain ( iChanID, dNewGain ); } -void CChannel::OnChangeChanPan ( int iChanID, - double dNewPan ) +void CChannel::OnChangeChanPan ( int iChanID, + float dNewPan ) { SetPan ( iChanID, dNewPan ); } @@ -678,12 +678,12 @@ void CChannel::PrepAndSendPacket ( CHighPrioSocket* pSocket, } } -double CChannel::UpdateAndGetLevelForMeterdB ( const CVector& vecsAudio, - const int iInSize, - const bool bIsStereoIn ) +float CChannel::UpdateAndGetLevelForMeterdB ( const CVector& vecfAudio, + const int iInSize, + const bool bIsStereoIn ) { // update the signal level meter and immediately return the current value - SignalLevelMeter.Update ( vecsAudio, + SignalLevelMeter.Update ( vecfAudio, iInSize, bIsStereoIn ); diff --git a/src/channel.h b/src/channel.h index fca896b9f2..bd46e58e86 100755 --- a/src/channel.h +++ b/src/channel.h @@ -108,17 +108,17 @@ class CChannel : public QObject void CreateVersionAndOSMes() { 
Protocol.CreateVersionAndOSMes(); } void CreateMuteStateHasChangedMes ( const int iChanID, const bool bIsMuted ) { Protocol.CreateMuteStateHasChangedMes ( iChanID, bIsMuted ); } - void SetGain ( const int iChanID, const double dNewGain ); - double GetGain ( const int iChanID ); - double GetFadeInGain() { return static_cast ( iFadeInCnt ) / iFadeInCntMax; } + void SetGain ( const int iChanID, const float dNewGain ); + float GetGain ( const int iChanID ); + float GetFadeInGain() { return static_cast ( iFadeInCnt ) / iFadeInCntMax; } - void SetPan ( const int iChanID, const double dNewPan ); - double GetPan ( const int iChanID ); + void SetPan ( const int iChanID, const float dNewPan ); + float GetPan ( const int iChanID ); - void SetRemoteChanGain ( const int iId, const double dGain ) + void SetRemoteChanGain ( const int iId, const float dGain ) { Protocol.CreateChanGainMes ( iId, dGain ); } - void SetRemoteChanPan ( const int iId, const double dPan ) + void SetRemoteChanPan ( const int iId, const float dPan ) { Protocol.CreateChanPanMes ( iId, dPan ); } bool SetSockBufNumFrames ( const int iNewNumFrames, @@ -176,9 +176,9 @@ class CChannel : public QObject bool ChannelLevelsRequired() const { return bChannelLevelsRequired; } - double UpdateAndGetLevelForMeterdB ( const CVector& vecsAudio, - const int iInSize, - const bool bIsStereoIn ); + float UpdateAndGetLevelForMeterdB ( const CVector& vecfAudio, + const int iInSize, + const bool bIsStereoIn ); protected: bool ProtocolIsEnabled(); @@ -201,8 +201,8 @@ class CChannel : public QObject CChannelCoreInfo ChannelInfo; // mixer and effect settings - CVector vecdGains; - CVector vecdPannings; + CVector vecfGains; + CVector vecfPannings; // network jitter-buffer CNetBufWithStats SockBuf; @@ -241,8 +241,8 @@ class CChannel : public QObject public slots: void OnSendProtMessage ( CVector vecMessage ); void OnJittBufSizeChange ( int iNewJitBufSize ); - void OnChangeChanGain ( int iChanID, double dNewGain ); - void 
OnChangeChanPan ( int iChanID, double dNewPan ); + void OnChangeChanGain ( int iChanID, float dNewGain ); + void OnChangeChanPan ( int iChanID, float dNewPan ); void OnChangeChanInfo ( CChannelCoreInfo ChanInfo ); void OnNetTranspPropsReceived ( CNetworkTransportProps NetworkTransportProps ); void OnReqNetTranspProps(); diff --git a/src/client.cpp b/src/client.cpp index 4190645861..f77c6036d4 100755 --- a/src/client.cpp +++ b/src/client.cpp @@ -365,9 +365,9 @@ void CClient::SetDoAutoSockBufSize ( const bool bValue ) CreateServerJitterBufferMessage(); } -void CClient::SetRemoteChanGain ( const int iId, - const double dGain, - const bool bIsMyOwnFader ) +void CClient::SetRemoteChanGain ( const int iId, + const float dGain, + const bool bIsMyOwnFader ) { // if this gain is for my own channel, apply the value for the Mute Myself function if ( bIsMyOwnFader ) @@ -889,7 +889,7 @@ void CClient::Init() vecCeltData.Init ( iCeltNumCodedBytes ); vecZeros.Init ( iStereoBlockSizeSam, 0 ); - vecsStereoSndCrdMuteStream.Init ( iStereoBlockSizeSam ); + vecfStereoSndCrdMuteStream.Init ( iStereoBlockSizeSam ); dMuteOutStreamGain = 1.0; @@ -934,13 +934,13 @@ void CClient::Init() bIsInitializationPhase = true; } -void CClient::AudioCallback ( CVector& psData, void* arg ) +void CClient::AudioCallback ( CVector& vfData, void* arg ) { // get the pointer to the object CClient* pMyClientObj = static_cast ( arg ); // process audio data - pMyClientObj->ProcessSndCrdAudioData ( psData ); + pMyClientObj->ProcessSndCrdAudioData ( vfData ); /* // TEST do a soundcard jitter measurement @@ -949,13 +949,13 @@ JitterMeas.Measure(); */ } -void CClient::ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ) +void CClient::ProcessSndCrdAudioData ( CVector& vecfStereoSndCrd ) { // check if a conversion buffer is required or not if ( bSndCrdConversionBufferRequired ) { // add new sound card block in conversion buffer - SndCrdConversionBufferIn.Put ( vecsStereoSndCrd, vecsStereoSndCrd.Size() ); + 
SndCrdConversionBufferIn.Put ( vecfStereoSndCrd, vecfStereoSndCrd.Size() ); // process all available blocks of data while ( SndCrdConversionBufferIn.GetAvailData() >= iStereoBlockSizeSam ) @@ -970,17 +970,17 @@ void CClient::ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ) } // get processed sound card block out of the conversion buffer - SndCrdConversionBufferOut.Get ( vecsStereoSndCrd, vecsStereoSndCrd.Size() ); + SndCrdConversionBufferOut.Get ( vecfStereoSndCrd, vecfStereoSndCrd.Size() ); } else { // regular case: no conversion buffer required // process audio data - ProcessAudioDataIntern ( vecsStereoSndCrd ); + ProcessAudioDataIntern ( vecfStereoSndCrd ); } } -void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) +void CClient::ProcessAudioDataIntern ( CVector& vecfStereoSndCrd ) { int i, j, iUnused; unsigned char* pCurCodedData; @@ -989,7 +989,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // Transmit signal --------------------------------------------------------- // update stereo signal level meter (not needed in headless mode) #ifndef HEADLESS - SignalLevelMeter.Update ( vecsStereoSndCrd, + SignalLevelMeter.Update ( vecfStereoSndCrd, iMonoBlockSizeSam, true ); #endif @@ -997,43 +997,43 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // add reverberation effect if activated if ( iReverbLevel != 0 ) { - AudioReverb.Process ( vecsStereoSndCrd, + AudioReverb.Process ( vecfStereoSndCrd, bReverbOnLeftChan, - static_cast ( iReverbLevel ) / AUD_REVERB_MAX / 4 ); + static_cast ( iReverbLevel ) / AUD_REVERB_MAX / 4 ); } // apply pan (audio fader) and mix mono signals if ( !( ( iAudioInFader == AUD_FADER_IN_MIDDLE ) && ( eAudioChannelConf == CC_STEREO ) ) ) { // calculate pan gain in the range 0 to 1, where 0.5 is the middle position - const double dPan = static_cast ( iAudioInFader ) / AUD_FADER_IN_MAX; + const float dPan = static_cast ( iAudioInFader ) / AUD_FADER_IN_MAX; if ( eAudioChannelConf == 
CC_STEREO ) { // for stereo only apply pan attenuation on one channel (same as pan in the server) - const double dGainL = MathUtils::GetLeftPan ( dPan, false ); - const double dGainR = MathUtils::GetRightPan ( dPan, false ); + const float dGainL = MathUtils::GetLeftPan ( dPan, false ); + const float dGainR = MathUtils::GetRightPan ( dPan, false ); for ( i = 0, j = 0; i < iMonoBlockSizeSam; i++, j += 2 ) { // note that the gain is always <= 1, therefore a simple cast is // ok since we never can get an overload - vecsStereoSndCrd[j + 1] = static_cast ( dGainR * vecsStereoSndCrd[j + 1] ); - vecsStereoSndCrd[j] = static_cast ( dGainL * vecsStereoSndCrd[j] ); + vecfStereoSndCrd[j + 1] = dGainR * vecfStereoSndCrd[j + 1]; + vecfStereoSndCrd[j] = dGainL * vecfStereoSndCrd[j]; } } else { // for mono implement a cross-fade between channels and mix them, for // mono-in/stereo-out use no attenuation in pan center - const double dGainL = MathUtils::GetLeftPan ( dPan, eAudioChannelConf != CC_MONO_IN_STEREO_OUT ); - const double dGainR = MathUtils::GetRightPan ( dPan, eAudioChannelConf != CC_MONO_IN_STEREO_OUT ); + const float dGainL = MathUtils::GetLeftPan ( dPan, eAudioChannelConf != CC_MONO_IN_STEREO_OUT ); + const float dGainR = MathUtils::GetRightPan ( dPan, eAudioChannelConf != CC_MONO_IN_STEREO_OUT ); for ( i = 0, j = 0; i < iMonoBlockSizeSam; i++, j += 2 ) { - // note that we need the Double2Short for stereo pan mode - vecsStereoSndCrd[i] = Double2Short ( - dGainL * vecsStereoSndCrd[j] + dGainR * vecsStereoSndCrd[j + 1] ); + // clip samples for stereo pan mode + vecfStereoSndCrd[i] = clipFloat ( + dGainL * vecfStereoSndCrd[j] + dGainR * vecfStereoSndCrd[j + 1] ); } } } @@ -1049,7 +1049,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // overwrite input values) for ( i = iMonoBlockSizeSam - 1, j = iStereoBlockSizeSam - 2; i >= 0; i--, j -= 2 ) { - vecsStereoSndCrd[j] = vecsStereoSndCrd[j + 1] = vecsStereoSndCrd[i]; + vecfStereoSndCrd[j] = 
vecfStereoSndCrd[j + 1] = vecfStereoSndCrd[i]; } } @@ -1060,19 +1060,19 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) { if ( bMuteOutStream ) { - iUnused = opus_custom_encode ( CurOpusEncoder, - &vecZeros[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples, - &vecCeltData[0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( CurOpusEncoder, + &vecZeros[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples, + &vecCeltData[0], + iCeltNumCodedBytes ); } else { - iUnused = opus_custom_encode ( CurOpusEncoder, - &vecsStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples, - &vecCeltData[0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( CurOpusEncoder, + &vecfStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples, + &vecCeltData[0], + iCeltNumCodedBytes ); } } @@ -1087,7 +1087,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // in case of mute stream, store local data if ( bMuteOutStream ) { - vecsStereoSndCrdMuteStream = vecsStereoSndCrd; + vecfStereoSndCrdMuteStream = vecfStereoSndCrd; } for ( i = 0; i < iSndCrdFrameSizeFactor; i++ ) @@ -1116,11 +1116,11 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // OPUS decoding if ( CurOpusDecoder != nullptr ) { - iUnused = opus_custom_decode ( CurOpusDecoder, - pCurCodedData, - iCeltNumCodedBytes, - &vecsStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples ); + iUnused = opus_custom_decode_float ( CurOpusDecoder, + pCurCodedData, + iCeltNumCodedBytes, + &vecfStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples ); } } @@ -1129,8 +1129,8 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) { for ( i = 0; i < iStereoBlockSizeSam; i++ ) { - vecsStereoSndCrd[i] = Double2Short ( - vecsStereoSndCrd[i] + vecsStereoSndCrdMuteStream[i] * dMuteOutStreamGain ); + 
vecfStereoSndCrd[i] = clipFloat ( + vecfStereoSndCrd[i] + vecfStereoSndCrdMuteStream[i] * dMuteOutStreamGain ); } } @@ -1144,14 +1144,14 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // overwrite input values) for ( i = iMonoBlockSizeSam - 1, j = iStereoBlockSizeSam - 2; i >= 0; i--, j -= 2 ) { - vecsStereoSndCrd[j] = vecsStereoSndCrd[j + 1] = vecsStereoSndCrd[i]; + vecfStereoSndCrd[j] = vecfStereoSndCrd[j + 1] = vecfStereoSndCrd[i]; } } } else { // if not connected, clear data - vecsStereoSndCrd.Reset ( 0 ); + vecfStereoSndCrd.Reset ( 0 ); } // update socket buffer size @@ -1162,7 +1162,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) int CClient::EstimatedOverallDelay ( const int iPingTimeMs ) { - const double dSystemBlockDurationMs = static_cast ( iOPUSFrameSizeSamples ) / + const float dSystemBlockDurationMs = static_cast ( iOPUSFrameSizeSamples ) / SYSTEM_SAMPLE_RATE_HZ * 1000; // If the jitter buffers are set effectively, i.e. they are exactly the @@ -1170,18 +1170,18 @@ int CClient::EstimatedOverallDelay ( const int iPingTimeMs ) // length. Since that is usually not the case but the buffers are usually // a bit larger than necessary, we introduce some factor for compensation. // Consider the jitter buffer on the client and on the server side, too. 
- const double dTotalJitterBufferDelayMs = dSystemBlockDurationMs * - static_cast ( GetSockBufNumFrames() + + const float dTotalJitterBufferDelayMs = dSystemBlockDurationMs * + static_cast ( GetSockBufNumFrames() + GetServerSockBufNumFrames() ) * 0.7; // consider delay introduced by the sound card conversion buffer by using // "GetSndCrdConvBufAdditionalDelayMonoBlSize()" - double dTotalSoundCardDelayMs = GetSndCrdConvBufAdditionalDelayMonoBlSize() * + float dTotalSoundCardDelayMs = GetSndCrdConvBufAdditionalDelayMonoBlSize() * 1000 / SYSTEM_SAMPLE_RATE_HZ; // try to get the actual input/output sound card delay from the audio // interface, per definition it is not available if a 0 is returned - const double dSoundCardInputOutputLatencyMs = Sound.GetInOutLatencyMs(); + const float dSoundCardInputOutputLatencyMs = Sound.GetInOutLatencyMs(); if ( dSoundCardInputOutputLatencyMs == 0.0 ) { @@ -1203,13 +1203,13 @@ int CClient::EstimatedOverallDelay ( const int iPingTimeMs ) // network packets are of the same size as the audio packets per definition // if no sound card conversion buffer is used - const double dDelayToFillNetworkPacketsMs = + const float dDelayToFillNetworkPacketsMs = GetSystemMonoBlSize() * 1000.0 / SYSTEM_SAMPLE_RATE_HZ; // OPUS additional delay at small frame sizes is half a frame size - const double dAdditionalAudioCodecDelayMs = dSystemBlockDurationMs / 2; + const float dAdditionalAudioCodecDelayMs = dSystemBlockDurationMs / 2; - const double dTotalBufferDelayMs = + const float dTotalBufferDelayMs = dDelayToFillNetworkPacketsMs + dTotalJitterBufferDelayMs + dTotalSoundCardDelayMs + diff --git a/src/client.h b/src/client.h index d7d470dfb5..fc79f2afce 100755 --- a/src/client.h +++ b/src/client.h @@ -117,8 +117,8 @@ class CClient : public QObject bool IsRunning() { return Sound.IsRunning(); } bool SetServerAddr ( QString strNAddr ); - double GetLevelForMeterdBLeft() { return SignalLevelMeter.GetLevelForMeterdBLeftOrMono(); } - double 
GetLevelForMeterdBRight() { return SignalLevelMeter.GetLevelForMeterdBRight(); } + float GetLevelForMeterdBLeft() { return SignalLevelMeter.GetLevelForMeterdBLeftOrMono(); } + float GetLevelForMeterdBRight() { return SignalLevelMeter.GetLevelForMeterdBRight(); } bool GetAndResetbJitterBufferOKFlag(); @@ -243,9 +243,9 @@ class CClient : public QObject void SetMuteOutStream ( const bool bDoMute ) { bMuteOutStream = bDoMute; } - void SetRemoteChanGain ( const int iId, const double dGain, const bool bIsMyOwnFader ); + void SetRemoteChanGain ( const int iId, const float dGain, const bool bIsMyOwnFader ); - void SetRemoteChanPan ( const int iId, const double dPan ) + void SetRemoteChanPan ( const int iId, const float dPan ) { Channel.SetRemoteChanPan ( iId, dPan ); } void SetRemoteInfo() { Channel.SetRemoteInfo ( ChannelInfo ); } @@ -288,11 +288,11 @@ class CClient : public QObject protected: // callback function must be static, otherwise it does not work - static void AudioCallback ( CVector& psData, void* arg ); + static void AudioCallback ( CVector& vfData, void* arg ); void Init(); - void ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ); - void ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ); + void ProcessSndCrdAudioData ( CVector& vecfStereoSndCrd ); + void ProcessAudioDataIntern ( CVector& vecfStereoSndCrd ); int PreparePingMessage(); int EvaluatePingMessage ( const int iMs ); @@ -323,7 +323,7 @@ class CClient : public QObject int iNumAudioChannels; bool bIsInitializationPhase; bool bMuteOutStream; - double dMuteOutStreamGain; + float dMuteOutStreamGain; CVector vecCeltData; CHighPrioSocket Socket; @@ -342,11 +342,11 @@ class CClient : public QObject bool bSndCrdConversionBufferRequired; int iSndCardMonoBlockSizeSamConvBuff; - CBufferBase SndCrdConversionBufferIn; - CBufferBase SndCrdConversionBufferOut; - CVector vecDataConvBuf; - CVector vecsStereoSndCrdMuteStream; - CVector vecZeros; + CBufferBase SndCrdConversionBufferIn; + CBufferBase 
SndCrdConversionBufferOut; + CVector vecDataConvBuf; + CVector vecfStereoSndCrdMuteStream; + CVector vecZeros; bool bFraSiFactPrefSupported; bool bFraSiFactDefSupported; diff --git a/src/global.h b/src/global.h index 6e52c1a6dd..1a8f409ecf 100755 --- a/src/global.h +++ b/src/global.h @@ -244,8 +244,6 @@ LED bar: lbr // server welcome message title (do not change for compatibility!) #define WELCOME_MESSAGE_PREFIX "Server Welcome Message: " -#define _MAXSHORT 32767 -#define _MINSHORT ( -32768 ) #define INVALID_INDEX -1 // define invalid index as a negative value (a valid index must always be >= 0) #if HAVE_STDINT_H diff --git a/src/recorder/cwavestream.h b/src/recorder/cwavestream.h index 54b91633c6..061a72d3b6 100644 --- a/src/recorder/cwavestream.h +++ b/src/recorder/cwavestream.h @@ -31,7 +31,7 @@ namespace recorder { inline QString secondsAt48K( const qint64 frames, const int frameSize ) { - return QString::number( static_cast( frames * frameSize ) / 48000, 'f', 14 ); + return QString::number( static_cast( frames * frameSize ) / 48000, 'f', 14 ); } struct STrackItem @@ -79,7 +79,7 @@ class FmtSubChunk static const uint32_t sampleRate = 48000; // because it's Jamulus const uint32_t byteRate; // sampleRate * numChannels * bitsPerSample/8 const uint16_t blockAlign; // numChannels * bitsPerSample/8 - static const uint16_t bitsPerSample = 16; + static const uint16_t bitsPerSample = 24; }; class DataSubChunkHdr diff --git a/src/recorder/jamcontroller.cpp b/src/recorder/jamcontroller.cpp index 75e2b4568a..801f480a2e 100755 --- a/src/recorder/jamcontroller.cpp +++ b/src/recorder/jamcontroller.cpp @@ -150,7 +150,7 @@ void CJamController::SetRecordingDir ( QString newRecordingDir, QObject::connect( this, &CJamController::ClientDisconnected, pJamRecorder, &CJamRecorder::OnDisconnected ); - qRegisterMetaType> ( "CVector" ); + qRegisterMetaType > ( "CVector" ); QObject::connect( this, &CJamController::AudioFrame, pJamRecorder, &CJamRecorder::OnFrame ); diff --git 
a/src/recorder/jamcontroller.h b/src/recorder/jamcontroller.h index 82b714f430..cfa5e57a30 100755 --- a/src/recorder/jamcontroller.h +++ b/src/recorder/jamcontroller.h @@ -67,10 +67,10 @@ class CJamController : public QObject const QString stChName, const CHostAddress RecHostAddr, const int iNumAudChan, - const CVector vecsData ); + const CVector vecfData ); }; } -Q_DECLARE_METATYPE(int16_t) +Q_DECLARE_METATYPE(float) diff --git a/src/recorder/jamrecorder.cpp b/src/recorder/jamrecorder.cpp index 43f1343983..98892d4af0 100644 --- a/src/recorder/jamrecorder.cpp +++ b/src/recorder/jamrecorder.cpp @@ -71,13 +71,15 @@ CJamClient::CJamClient(const qint64 frame, const int _numChannels, const QString * @param _name The client's current name * @param pcm The PCM data */ -void CJamClient::Frame(const QString _name, const CVector& pcm, int iServerFrameSizeSamples) +void CJamClient::Frame(const QString _name, const CVector& pcm, int iServerFrameSizeSamples) { name = _name; for(int i = 0; i < numChannels * iServerFrameSizeSamples; i++) { - *out << pcm[i]; + /* samples must be stored in little endian order */ + const int sample24 = pcm[i] * ((1 << 23) - 1); + *out << ( uint8_t ) sample24 << ( uint8_t ) ( sample24 >> 8 ) << ( uint8_t )( sample24 >> 16 ); } frameCount++; @@ -166,7 +168,7 @@ void CJamSession::DisconnectClient(int iChID) * * Also manages the overall current frame counter for the session. */ -void CJamSession::Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples) +void CJamSession::Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples) { if ( iChID == chIdDisconnected ) { @@ -515,7 +517,7 @@ void CJamRecorder::OnDisconnected(int iChID) * * Ensures recording has started. 
*/ -void CJamRecorder::OnFrame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data) +void CJamRecorder::OnFrame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data) { // Make sure we are ready if ( !isRecording ) diff --git a/src/recorder/jamrecorder.h b/src/recorder/jamrecorder.h index d5c97c77d5..fa263292b1 100644 --- a/src/recorder/jamrecorder.h +++ b/src/recorder/jamrecorder.h @@ -71,7 +71,7 @@ class CJamClient : public QObject public: CJamClient(const qint64 frame, const int numChannels, const QString name, const CHostAddress address, const QDir recordBaseDir); - void Frame(const QString name, const CVector& pcm, int iServerFrameSizeSamples); + void Frame(const QString name, const CVector& pcm, int iServerFrameSizeSamples); void Disconnect(); @@ -107,7 +107,7 @@ class CJamSession : public QObject CJamSession(QDir recordBaseDir); - void Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples); + void Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples); void End(); @@ -198,7 +198,7 @@ public slots: /** * @brief Handle a frame of data to process */ - void OnFrame ( const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data ); + void OnFrame ( const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data ); }; } diff --git a/src/server.cpp b/src/server.cpp index a0808efac8..e5ca66abfa 100755 --- a/src/server.cpp +++ b/src/server.cpp @@ -338,11 +338,11 @@ CServer::CServer ( const int iNewMaxNumChan, // allocate worst case memory for the temporary vectors vecChanIDsCurConChan.Init ( iMaxNumChannels ); - vecvecdGains.Init ( iMaxNumChannels ); - vecvecdPannings.Init 
( iMaxNumChannels ); - vecvecsData.Init ( iMaxNumChannels ); - vecvecsSendData.Init ( iMaxNumChannels ); - vecvecsIntermediateProcBuf.Init ( iMaxNumChannels ); + vecvecfGains.Init ( iMaxNumChannels ); + vecvecfPannings.Init ( iMaxNumChannels ); + vecvecfData.Init ( iMaxNumChannels ); + vecvecfSendData.Init ( iMaxNumChannels ); + vecvecfIntermediateProcBuf.Init ( iMaxNumChannels ); vecvecbyCodedData.Init ( iMaxNumChannels ); vecNumAudioChannels.Init ( iMaxNumChannels ); vecNumFrameSizeConvBlocks.Init ( iMaxNumChannels ); @@ -352,18 +352,18 @@ CServer::CServer ( const int iNewMaxNumChan, for ( i = 0; i < iMaxNumChannels; i++ ) { // init vectors storing information of all channels - vecvecdGains[i].Init ( iMaxNumChannels ); - vecvecdPannings[i].Init ( iMaxNumChannels ); + vecvecfGains[i].Init ( iMaxNumChannels ); + vecvecfPannings[i].Init ( iMaxNumChannels ); // we always use stereo audio buffers (which is the worst case) - vecvecsData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); + vecvecfData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); // (note that we only allocate iMaxNumChannels buffers for the send // and coded data because of the OMP implementation) - vecvecsSendData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); + vecvecfSendData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); - // allocate worst case memory for intermediate processing buffers in double precision - vecvecsIntermediateProcBuf[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); + // allocate worst case memory for intermediate processing buffers in single precision + vecvecfIntermediateProcBuf[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); // allocate worst case memory for the coded data vecvecbyCodedData[i].Init ( 
MAX_SIZE_BYTES_NETW_BUF ); @@ -489,7 +489,7 @@ CServer::CServer ( const int iNewMaxNumChan, QObject::connect ( this, &CServer::ClientDisconnected, &JamController, &recorder::CJamController::ClientDisconnected ); - qRegisterMetaType> ( "CVector" ); + qRegisterMetaType > ( "CVector" ); QObject::connect ( this, &CServer::AudioFrame, &JamController, &recorder::CJamController::AudioFrame ); @@ -884,17 +884,17 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // get gains of all connected channels for ( int j = 0; j < iNumClients; j++ ) { - // The second index of "vecvecdGains" does not represent + // The second index of "vecvecfGains" does not represent // the channel ID! Therefore we have to use // "vecChanIDsCurConChan" to query the IDs of the currently // connected channels - vecvecdGains[i][j] = vecChannels[iCurChanID].GetGain ( vecChanIDsCurConChan[j] ); + vecvecfGains[i][j] = vecChannels[iCurChanID].GetGain ( vecChanIDsCurConChan[j] ); // consider audio fade-in - vecvecdGains[i][j] *= vecChannels[vecChanIDsCurConChan[j]].GetFadeInGain(); + vecvecfGains[i][j] *= vecChannels[vecChanIDsCurConChan[j]].GetFadeInGain(); // panning - vecvecdPannings[i][j] = vecChannels[iCurChanID].GetPan ( vecChanIDsCurConChan[j] ); + vecvecfPannings[i][j] = vecChannels[iCurChanID].GetPan ( vecChanIDsCurConChan[j] ); } // flag for updating channel levels (if at least one clients wants it) @@ -909,7 +909,7 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // is false and the Get() function is not called at all. Therefore if the buffer is not needed // we do not spend any time in the function but go directly inside the if condition. 
if ( ( vecUseDoubleSysFraSizeConvBuf[i] == 0 ) || - !DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecsData[i], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i] ) ) + !DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecfData[i], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i] ) ) { // get current number of OPUS coded bytes const int iCeltNumCodedBytes = vecChannels[iCurChanID].GetNetwFrameSize(); @@ -946,11 +946,11 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // OPUS decode received data stream if ( CurOpusDecoder != nullptr ) { - iUnused = opus_custom_decode ( CurOpusDecoder, - pCurCodedData, - iCeltNumCodedBytes, - &vecvecsData[i][iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i]], - iClientFrameSizeSamples ); + iUnused = opus_custom_decode_float ( CurOpusDecoder, + pCurCodedData, + iCeltNumCodedBytes, + &vecvecfData[i][iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i]], + iClientFrameSizeSamples ); } } @@ -958,8 +958,8 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // and read out the small frame size immediately for further processing if ( vecUseDoubleSysFraSizeConvBuf[i] != 0 ) { - DoubleFrameSizeConvBufIn[iCurChanID].PutAll ( vecvecsData[i] ); - DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecsData[i], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i] ); + DoubleFrameSizeConvBufIn[iCurChanID].PutAll ( vecvecfData[i] ); + DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecfData[i], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i] ); } } } @@ -984,7 +984,7 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE { bSendChannelLevels = CreateLevelsForAllConChannels ( iNumClients, vecNumAudioChannels, - vecvecsData, + vecvecfData, vecChannelLevels ); } @@ -1011,7 +1011,7 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE vecChannels[iCurChanID].GetName(), vecChannels[iCurChanID].GetAddress(), 
vecNumAudioChannels[iChanCnt], - vecvecsData[iChanCnt] ); + vecvecfData[iChanCnt] ); } // processing without multithreading @@ -1077,15 +1077,15 @@ void CServer::MixEncodeTransmitDataBlocks ( const int iStartChanCnt, void CServer::MixEncodeTransmitData ( const int iChanCnt, const int iNumClients ) { - int i, j, k, iUnused; - CVector& vecdIntermProcBuf = vecvecsIntermediateProcBuf[iChanCnt]; // use reference for faster access - CVector& vecsSendData = vecvecsSendData[iChanCnt]; // use reference for faster access + int i, j, k, iUnused; + CVector& vecfIntermProcBuf = vecvecfIntermediateProcBuf[iChanCnt]; // use reference for faster access + CVector& vecfSendData = vecvecfSendData[iChanCnt]; // use reference for faster access // get actual ID of current channel const int iCurChanID = vecChanIDsCurConChan[iChanCnt]; // init intermediate processing vector with zeros since we mix all channels on that vector - vecdIntermProcBuf.Reset ( 0 ); + vecfIntermProcBuf.Reset ( 0 ); // distinguish between stereo and mono mode if ( vecNumAudioChannels[iChanCnt] == 1 ) @@ -1094,18 +1094,18 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( j = 0; j < iNumClients; j++ ) { // get a reference to the audio data and gain of the current client - const CVector& vecsData = vecvecsData[j]; - const double dGain = vecvecdGains[iChanCnt][j]; + const CVector& vecfData = vecvecfData[j]; + const float dGain = vecvecfGains[iChanCnt][j]; // if channel gain is 1, avoid multiplication for speed optimization - if ( dGain == static_cast ( 1.0 ) ) + if ( dGain == 1.0f ) { if ( vecNumAudioChannels[j] == 1 ) { // mono for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecdIntermProcBuf[i] += vecsData[i]; + vecfIntermProcBuf[i] += vecfData[i]; } } else @@ -1113,8 +1113,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo: apply stereo-to-mono attenuation for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { - vecdIntermProcBuf[i] += - ( static_cast ( 
vecsData[k] ) + vecsData[k + 1] ) / 2; + vecfIntermProcBuf[i] += ( vecfData[k] + vecfData[k + 1] ) / 2; } } } @@ -1125,7 +1124,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // mono for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecdIntermProcBuf[i] += vecsData[i] * dGain; + vecfIntermProcBuf[i] += vecfData[i] * dGain; } } else @@ -1133,17 +1132,21 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo: apply stereo-to-mono attenuation for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { - vecdIntermProcBuf[i] += dGain * - ( static_cast ( vecsData[k] ) + vecsData[k + 1] ) / 2; + vecfIntermProcBuf[i] += dGain * + ( static_cast ( vecfData[k] ) + vecfData[k + 1] ) / 2; } } } } - // convert from double to short with clipping + // When adding multiple sound sources together + // the resulting signal level may exceed the maximum + // audio range which is from -1.0f to 1.0f inclusively. + // Clip the intermediate sound buffer to be within + // the expected range for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecsSendData[i] = Double2Short ( vecdIntermProcBuf[i] ); + vecfSendData[i] = clipFloat ( vecfIntermProcBuf[i] ); } } else @@ -1152,17 +1155,17 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // get a reference to the audio data and gain/pan of the current client - const CVector& vecsData = vecvecsData[j]; - const double dGain = vecvecdGains[iChanCnt][j]; - const double dPan = vecvecdPannings[iChanCnt][j]; + const CVector& vecfData = vecvecfData[j]; + const float dGain = vecvecfGains[iChanCnt][j]; + const float dPan = vecvecfPannings[iChanCnt][j]; // calculate combined gain/pan for each stereo channel where we define // the panning that center equals full gain for both channels - const double dGainL = MathUtils::GetLeftPan ( dPan, false ) * dGain; - const double dGainR = MathUtils::GetRightPan ( dPan, false ) * dGain; + const float dGainL = MathUtils::GetLeftPan ( 
dPan, false ) * dGain; + const float dGainR = MathUtils::GetRightPan ( dPan, false ) * dGain; // if channel gain is 1, avoid multiplication for speed optimization - if ( ( dGainL == static_cast ( 1.0 ) ) && ( dGainR == static_cast ( 1.0 ) ) ) + if ( ( dGainL == 1.0f ) && ( dGainR == 1.0f ) ) { if ( vecNumAudioChannels[j] == 1 ) { @@ -1170,8 +1173,8 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { // left/right channel - vecdIntermProcBuf[k] += vecsData[i]; - vecdIntermProcBuf[k + 1] += vecsData[i]; + vecfIntermProcBuf[k] += vecfData[i]; + vecfIntermProcBuf[k + 1] += vecfData[i]; } } else @@ -1179,7 +1182,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i++ ) { - vecdIntermProcBuf[i] += vecsData[i]; + vecfIntermProcBuf[i] += vecfData[i]; } } } @@ -1191,8 +1194,8 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { // left/right channel - vecdIntermProcBuf[k] += vecsData[i] * dGainL; - vecdIntermProcBuf[k + 1] += vecsData[i] * dGainR; + vecfIntermProcBuf[k] += vecfData[i] * dGainL; + vecfIntermProcBuf[k + 1] += vecfData[i] * dGainR; } } else @@ -1201,17 +1204,21 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i += 2 ) { // left/right channel - vecdIntermProcBuf[i] += vecsData[i] * dGainL; - vecdIntermProcBuf[i + 1] += vecsData[i + 1] * dGainR; + vecfIntermProcBuf[i] += vecfData[i] * dGainL; + vecfIntermProcBuf[i + 1] += vecfData[i + 1] * dGainR; } } } } - // convert from double to short with clipping + // When adding multiple sound sources together + // the resulting signal level may exceed the maximum + // audio range which is from -1.0f to 1.0f inclusively. 
+ // Clip the intermediate sound buffer to be within + // the expected range for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i++ ) { - vecsSendData[i] = Double2Short ( vecdIntermProcBuf[i] ); + vecfSendData[i] = clipFloat ( vecfIntermProcBuf[i] ); } } @@ -1255,12 +1262,12 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // is false and the Get() function is not called at all. Therefore if the buffer is not needed // we do not spend any time in the function but go directly inside the if condition. if ( ( vecUseDoubleSysFraSizeConvBuf[iChanCnt] == 0 ) || - DoubleFrameSizeConvBufOut[iCurChanID].Put ( vecsSendData, SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ) ) + DoubleFrameSizeConvBufOut[iCurChanID].Put ( vecfSendData, SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ) ) { if ( vecUseDoubleSysFraSizeConvBuf[iChanCnt] != 0 ) { // get the large frame from the conversion buffer - DoubleFrameSizeConvBufOut[iCurChanID].GetAll ( vecsSendData, DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ); + DoubleFrameSizeConvBufOut[iCurChanID].GetAll ( vecfSendData, DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ); } for ( int iB = 0; iB < vecNumFrameSizeConvBlocks[iChanCnt]; iB++ ) @@ -1272,11 +1279,11 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // optimization it would be better to set it only if the network frame size is changed opus_custom_encoder_ctl ( pCurOpusEncoder, OPUS_SET_BITRATE ( CalcBitRateBitsPerSecFromCodedBytes ( iCeltNumCodedBytes, iClientFrameSizeSamples ) ) ); - iUnused = opus_custom_encode ( pCurOpusEncoder, - &vecsSendData[iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt]], - iClientFrameSizeSamples, - &vecvecbyCodedData[iChanCnt][0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( pCurOpusEncoder, + &vecfSendData[iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt]], + iClientFrameSizeSamples, + &vecvecbyCodedData[iChanCnt][0], + 
iCeltNumCodedBytes ); } // send separate mix to current clients @@ -1665,10 +1672,10 @@ void CServer::customEvent ( QEvent* pEvent ) } /// @brief Compute frame peak level for each client -bool CServer::CreateLevelsForAllConChannels ( const int iNumClients, - const CVector& vecNumAudioChannels, - const CVector > vecvecsData, - CVector& vecLevelsOut ) +bool CServer::CreateLevelsForAllConChannels ( const int iNumClients, + const CVector& vecNumAudioChannels, + const CVector > vecvecfData, + CVector& vecLevelsOut ) { bool bLevelsWereUpdated = false; @@ -1681,13 +1688,13 @@ bool CServer::CreateLevelsForAllConChannels ( const int i for ( int j = 0; j < iNumClients; j++ ) { // update and get signal level for meter in dB for each channel - const double dCurSigLevelForMeterdB = vecChannels[vecChanIDsCurConChan[j]]. - UpdateAndGetLevelForMeterdB ( vecvecsData[j], + const float dCurSigLevelForMeterdB = vecChannels[vecChanIDsCurConChan[j]]. + UpdateAndGetLevelForMeterdB ( vecvecfData[j], iServerFrameSizeSamples, vecNumAudioChannels[j] > 1 ); // map value to integer for transmission via the protocol (4 bit available) - vecLevelsOut[j] = static_cast ( std::ceil ( dCurSigLevelForMeterdB ) ); + vecLevelsOut[j] = static_cast ( std::ceil ( dCurSigLevelForMeterdB ) ); } } diff --git a/src/server.h index e74b6e1bca..225bf7c4f5 100755 --- a/src/server.h +++ b/src/server.h @@ -327,7 +327,7 @@ class CServer : bool CreateLevelsForAllConChannels ( const int iNumClients, const CVector& vecNumAudioChannels, - const CVector > vecvecsData, + const CVector > vecvecfData, CVector& vecLevelsOut ); // do not use the vector class since CChannel does not have appropriate @@ -349,22 +349,22 @@ class CServer : OpusCustomDecoder* OpusDecoderMono[MAX_NUM_CHANNELS]; OpusCustomEncoder* OpusEncoderStereo[MAX_NUM_CHANNELS]; OpusCustomDecoder* OpusDecoderStereo[MAX_NUM_CHANNELS]; - CConvBuf DoubleFrameSizeConvBufIn[MAX_NUM_CHANNELS]; - CConvBuf DoubleFrameSizeConvBufOut[MAX_NUM_CHANNELS]; + 
CConvBuf DoubleFrameSizeConvBufIn[MAX_NUM_CHANNELS]; + CConvBuf DoubleFrameSizeConvBufOut[MAX_NUM_CHANNELS]; CVector vstrChatColors; CVector vecChanIDsCurConChan; - CVector > vecvecdGains; - CVector > vecvecdPannings; - CVector > vecvecsData; + CVector > vecvecfGains; + CVector > vecvecfPannings; + CVector > vecvecfData; CVector vecNumAudioChannels; CVector vecNumFrameSizeConvBlocks; CVector vecUseDoubleSysFraSizeConvBuf; CVector vecAudioComprType; - CVector > vecvecsSendData; - CVector > vecvecsIntermediateProcBuf; - CVector > vecvecbyCodedData; + CVector > vecvecfSendData; + CVector > vecvecfIntermediateProcBuf; + CVector> vecvecbyCodedData; // Channel levels CVector vecChannelLevels; @@ -411,7 +411,7 @@ class CServer : const QString stChName, const CHostAddress RecHostAddr, const int iNumAudChan, - const CVector vecsData ); + const CVector vecfData ); void CLVersionAndOSReceived ( CHostAddress InetAddr, COSUtil::EOpSystemType eOSType, @@ -509,4 +509,4 @@ public slots: void OnHandledSignal ( int sigNum ); }; -Q_DECLARE_METATYPE(CVector) +Q_DECLARE_METATYPE(CVector) diff --git a/src/soundbase.cpp b/src/soundbase.cpp index 51382dea2a..04ad4f9710 100755 --- a/src/soundbase.cpp +++ b/src/soundbase.cpp @@ -27,7 +27,7 @@ /* Implementation *************************************************************/ CSoundBase::CSoundBase ( const QString& strNewSystemDriverTechniqueName, - void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + void (*fpNewProcessCallback) ( CVector& vfData, void* pParg ), void* pParg, const int iNewCtrlMIDIChannel ) : fpProcessCallback ( fpNewProcessCallback ), @@ -249,7 +249,7 @@ printf ( "\n" ); { // we are assuming that the controller number is the same // as the audio fader index and the range is 0-127 - const int iFaderLevel = static_cast ( static_cast ( + const int iFaderLevel = static_cast ( static_cast ( qMin ( vMIDIPaketBytes[2], uint8_t ( 127 ) ) ) / 127 * AUD_MIX_FADER_MAX ); // Behringer X-TOUCH: offset of 0x46 diff --git 
a/src/soundbase.h b/src/soundbase.h index 8182a37239..dc92cb75d8 100755 --- a/src/soundbase.h +++ b/src/soundbase.h @@ -51,7 +51,7 @@ class CSoundBase : public QThread public: CSoundBase ( const QString& strNewSystemDriverTechniqueName, - void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + void (*fpNewProcessCallback) ( CVector& vfData, void* pParg ), void* pParg, const int iNewCtrlMIDIChannel ); @@ -121,13 +121,13 @@ class CSoundBase : public QThread } // function pointer to callback function - void (*fpProcessCallback) ( CVector& psData, void* arg ); + void (*fpProcessCallback) ( CVector& vfData, void* arg ); void* pProcessCallbackArg; // callback function call for derived classes - void ProcessCallback ( CVector& psData ) + void ProcessCallback ( CVector& vfData ) { - (*fpProcessCallback) ( psData, pProcessCallbackArg ); + (*fpProcessCallback) ( vfData, pProcessCallbackArg ); } void ParseMIDIMessage ( const CVector& vMIDIPaketBytes ); diff --git a/src/util.cpp b/src/util.cpp index 5b513fc94f..03c0686acd 100755 --- a/src/util.cpp +++ b/src/util.cpp @@ -28,7 +28,7 @@ /* Implementation *************************************************************/ // Input level meter implementation -------------------------------------------- -void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, +void CStereoSignalLevelMeter::Update ( const CVector& vecfAudio, const int iMonoBlockSizeSam, const bool bIsStereoIn ) { @@ -36,14 +36,14 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, // // Speed optimization: // - we only make use of the negative values and ignore the positive ones (since - // int16 has range {-32768, 32767}) -> we do not need to call the fabs() function + // float has the range {-1, 1}) -> we do not need to call the fabsf() function // - we only evaluate every third sample // // With these speed optimizations we might loose some information in // special cases but for the average music signals the following code // should 
give good results. - short sMinLOrMono = 0; - short sMinR = 0; + float fMinLOrMono = 0; + float fMinR = 0; if ( bIsStereoIn ) { @@ -51,14 +51,14 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, for ( int i = 0; i < 2 * iMonoBlockSizeSam; i += 6 ) // 2 * 3 = 6 -> stereo { // left (or mono) and right channel - sMinLOrMono = std::min ( sMinLOrMono, vecsAudio[i] ); - sMinR = std::min ( sMinR, vecsAudio[i + 1] ); + fMinLOrMono = fminf ( fMinLOrMono, vecfAudio[i] ); + fMinR = fminf ( fMinR, vecfAudio[i + 1] ); } // in case of mono out use minimum of both channels if ( !bIsStereoOut ) { - sMinLOrMono = std::min ( sMinLOrMono, sMinR ); + fMinLOrMono = fminf ( fMinLOrMono, fMinR ); } } else @@ -66,66 +66,66 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, // mono in for ( int i = 0; i < iMonoBlockSizeSam; i += 3 ) { - sMinLOrMono = std::min ( sMinLOrMono, vecsAudio[i] ); + fMinLOrMono = fminf ( fMinLOrMono, vecfAudio[i] ); } } // apply smoothing, if in stereo out mode, do this for two channels - dCurLevelLOrMono = UpdateCurLevel ( dCurLevelLOrMono, -sMinLOrMono ); + fCurLevelLOrMono = UpdateCurLevel ( fCurLevelLOrMono, -fMinLOrMono ); if ( bIsStereoOut ) { - dCurLevelR = UpdateCurLevel ( dCurLevelR, -sMinR ); + fCurLevelR = UpdateCurLevel ( fCurLevelR, -fMinR ); } } -double CStereoSignalLevelMeter::UpdateCurLevel ( double dCurLevel, - const double dMax ) +float CStereoSignalLevelMeter::UpdateCurLevel ( float fCurLevel, + const float fMax ) { // decrease max with time - if ( dCurLevel >= METER_FLY_BACK ) + if ( fCurLevel >= METER_FLY_BACK ) { - dCurLevel *= dSmoothingFactor; + fCurLevel *= fSmoothingFactor; } else { - dCurLevel = 0; + fCurLevel = 0; } // update current level -> only use maximum - if ( dMax > dCurLevel ) + if ( fMax > fCurLevel ) { - return dMax; + return fMax; } else { - return dCurLevel; + return fCurLevel; } } -double CStereoSignalLevelMeter::CalcLogResultForMeter ( const double& dLinearLevel ) +float 
CStereoSignalLevelMeter::CalcLogResultForMeter ( const float& fLinearLevel ) { - const double dNormLevel = dLinearLevel / _MAXSHORT; + const float fNormLevel = fLinearLevel; // logarithmic measure - double dLevelForMeterdB = -100000.0; // large negative value + float fLevelForMeterdB = -100000.0; // large negative value - if ( dNormLevel > 0 ) + if ( fNormLevel > 0 ) { - dLevelForMeterdB = 20.0 * log10 ( dNormLevel ); + fLevelForMeterdB = 20.0f * log10f ( fNormLevel ); } // map to signal level meter (linear transformation of the input // level range to the level meter range) - dLevelForMeterdB -= LOW_BOUND_SIG_METER; - dLevelForMeterdB *= NUM_STEPS_LED_BAR / ( UPPER_BOUND_SIG_METER - LOW_BOUND_SIG_METER ); + fLevelForMeterdB -= LOW_BOUND_SIG_METER; + fLevelForMeterdB *= NUM_STEPS_LED_BAR / ( UPPER_BOUND_SIG_METER - LOW_BOUND_SIG_METER ); - if ( dLevelForMeterdB < 0 ) + if ( fLevelForMeterdB < 0 ) { - dLevelForMeterdB = 0; + fLevelForMeterdB = 0; } - return dLevelForMeterdB; + return fLevelForMeterdB; } @@ -194,21 +194,21 @@ uint32_t CCRC::GetCRC() void CAudioReverb::Init ( const EAudChanConf eNAudioChannelConf, const int iNStereoBlockSizeSam, const int iSampleRate, - const double rT60 ) + const float rT60 ) { // store parameters eAudioChannelConf = eNAudioChannelConf; iStereoBlockSizeSam = iNStereoBlockSizeSam; // delay lengths for 44100 Hz sample rate - int lengths[9] = { 1116, 1356, 1422, 1617, 225, 341, 441, 211, 179 }; - const double scaler = static_cast ( iSampleRate ) / 44100.0; + int lengths[9] = { 1116, 1356, 1422, 1617, 225, 341, 441, 211, 179 }; + const float scaler = static_cast ( iSampleRate ) / 44100.0; if ( scaler != 1.0 ) { for ( int i = 0; i < 9; i++ ) { - int delay = static_cast ( floor ( scaler * lengths[i] ) ); + int delay = static_cast ( floorf ( scaler * lengths[i] ) ); if ( ( delay & 1 ) == 0 ) { @@ -255,7 +255,9 @@ bool CAudioReverb::isPrime ( const int number ) if ( number & 1 ) { - for ( int i = 3; i < static_cast ( sqrt ( static_cast ( 
number ) ) ) + 1; i += 2 ) + const int max = static_cast ( sqrtf ( static_cast ( number ) ) ) + 1; + + for ( int i = 3; i < max; i += 2 ) { if ( ( number % i ) == 0 ) { @@ -289,37 +291,37 @@ void CAudioReverb::Clear() outLeftDelay.Reset ( 0 ); } -void CAudioReverb::setT60 ( const double rT60, - const int iSampleRate ) +void CAudioReverb::setT60 ( const float rT60, + const int iSampleRate ) { // set the reverberation T60 decay time for ( int i = 0; i < 4; i++ ) { - combCoefficient[i] = pow ( 10.0, static_cast ( -3.0 * + combCoefficient[i] = powf ( 10.0f, static_cast ( -3.0f * combDelays[i].Size() / ( rT60 * iSampleRate ) ) ); } } -void CAudioReverb::COnePole::setPole ( const double dPole ) +void CAudioReverb::COnePole::setPole ( const float dPole ) { // calculate IIR filter coefficients based on the pole value dA = -dPole; - dB = 1.0 - dPole; + dB = 1.0f - dPole; } -double CAudioReverb::COnePole::Calc ( const double dIn ) +float CAudioReverb::COnePole::Calc ( const float fIn ) { // calculate IIR filter - dLastSample = dB * dIn - dA * dLastSample; + dLastSample = dB * fIn - dA * dLastSample; return dLastSample; } -void CAudioReverb::Process ( CVector& vecsStereoInOut, - const bool bReverbOnLeftChan, - const double dAttenuation ) +void CAudioReverb::Process ( CVector& vecfStereoInOut, + const bool bReverbOnLeftChan, + const float fAttenuation ) { - double dMixedInput, temp, temp0, temp1, temp2; + float fMixedInput, temp, temp0, temp1, temp2; for ( int i = 0; i < iStereoBlockSizeSam; i += 2 ) { @@ -327,23 +329,23 @@ void CAudioReverb::Process ( CVector& vecsStereoInOut, // shall be input for the right channel) if ( eAudioChannelConf == CC_STEREO ) { - dMixedInput = 0.5 * ( vecsStereoInOut[i] + vecsStereoInOut[i + 1] ); + fMixedInput = 0.5f * ( vecfStereoInOut[i] + vecfStereoInOut[i + 1] ); } else { if ( bReverbOnLeftChan ) { - dMixedInput = vecsStereoInOut[i]; + fMixedInput = vecfStereoInOut[i]; } else { - dMixedInput = vecsStereoInOut[i + 1]; + fMixedInput = 
vecfStereoInOut[i + 1]; } } temp = allpassDelays[0].Get(); temp0 = allpassCoefficient * temp; - temp0 += dMixedInput; + temp0 += fMixedInput; allpassDelays[0].Add ( temp0 ); temp0 = - ( allpassCoefficient * temp0 ) + temp; @@ -359,17 +361,17 @@ void CAudioReverb::Process ( CVector& vecsStereoInOut, allpassDelays[2].Add ( temp2 ); temp2 = - ( allpassCoefficient * temp2 ) + temp; - const double temp3 = temp2 + combFilters[0].Calc ( combCoefficient[0] * combDelays[0].Get() ); - const double temp4 = temp2 + combFilters[1].Calc ( combCoefficient[1] * combDelays[1].Get() ); - const double temp5 = temp2 + combFilters[2].Calc ( combCoefficient[2] * combDelays[2].Get() ); - const double temp6 = temp2 + combFilters[3].Calc ( combCoefficient[3] * combDelays[3].Get() ); + const float temp3 = temp2 + combFilters[0].Calc ( combCoefficient[0] * combDelays[0].Get() ); + const float temp4 = temp2 + combFilters[1].Calc ( combCoefficient[1] * combDelays[1].Get() ); + const float temp5 = temp2 + combFilters[2].Calc ( combCoefficient[2] * combDelays[2].Get() ); + const float temp6 = temp2 + combFilters[3].Calc ( combCoefficient[3] * combDelays[3].Get() ); combDelays[0].Add ( temp3 ); combDelays[1].Add ( temp4 ); combDelays[2].Add ( temp5 ); combDelays[3].Add ( temp6 ); - const double filtout = temp3 + temp4 + temp5 + temp6; + const float filtout = temp3 + temp4 + temp5 + temp6; outLeftDelay.Add ( filtout ); outRightDelay.Add ( filtout ); @@ -378,16 +380,16 @@ void CAudioReverb::Process ( CVector& vecsStereoInOut, // reverberation effect on both channels) if ( ( eAudioChannelConf == CC_STEREO ) || bReverbOnLeftChan ) { - vecsStereoInOut[i] = Double2Short ( - ( 1.0 - dAttenuation ) * vecsStereoInOut[i] + - 0.5 * dAttenuation * outLeftDelay.Get() ); + vecfStereoInOut[i] = clipFloat ( + ( 1.0f - fAttenuation ) * vecfStereoInOut[i] + + 0.5f * fAttenuation * outLeftDelay.Get() ); } if ( ( eAudioChannelConf == CC_STEREO ) || !bReverbOnLeftChan ) { - vecsStereoInOut[i + 1] = Double2Short ( - ( 
1.0 - dAttenuation ) * vecsStereoInOut[i + 1] + - 0.5 * dAttenuation * outRightDelay.Get() ); + vecfStereoInOut[i + 1] = clipFloat ( + ( 1.0f - fAttenuation ) * vecfStereoInOut[i + 1] + + 0.5f * fAttenuation * outRightDelay.Get() ); } } } diff --git a/src/util.h b/src/util.h index f90264ffb8..4f98be1a0f 100755 --- a/src/util.h +++ b/src/util.h @@ -74,22 +74,10 @@ class CClient; // forward declaration of CClient /* Global functions ***********************************************************/ -// converting double to short -inline short Double2Short ( const double dInput ) +// range check audio samples +static inline float clipFloat ( const float fInput ) { - // lower bound - if ( dInput < _MINSHORT ) - { - return _MINSHORT; - } - - // upper bound - if ( dInput > _MAXSHORT ) - { - return _MAXSHORT; - } - - return static_cast ( dInput ); + return qBound ( -1.0f, fInput, 1.0f ); } // debug error handling @@ -739,32 +727,32 @@ class CStereoSignalLevelMeter // TODO Calculate smoothing factor from sample rate and frame size (64 or 128 samples frame size). // But tests with 128 and 64 samples frame size have shown that the meter fly back // is ok for both numbers of samples frame size with a factor of 0.97. 
- CStereoSignalLevelMeter ( const bool bNIsStereoOut = true, - const double dNSmoothingFactor = 0.97 ) : - dSmoothingFactor ( dNSmoothingFactor ), bIsStereoOut ( bNIsStereoOut ) { Reset(); } + CStereoSignalLevelMeter ( const bool bNIsStereoOut = true, + const float fNSmoothingFactor = 0.97f ) : + fSmoothingFactor ( fNSmoothingFactor ), bIsStereoOut ( bNIsStereoOut ) { Reset(); } - void Update ( const CVector& vecsAudio, + void Update ( const CVector& vecfAudio, const int iInSize, const bool bIsStereoIn ); - double GetLevelForMeterdBLeftOrMono() { return CalcLogResultForMeter ( dCurLevelLOrMono ); } - double GetLevelForMeterdBRight() { return CalcLogResultForMeter ( dCurLevelR ); } - static double CalcLogResultForMeter ( const double& dLinearLevel ); + float GetLevelForMeterdBLeftOrMono() { return CalcLogResultForMeter ( fCurLevelLOrMono ); } + float GetLevelForMeterdBRight() { return CalcLogResultForMeter ( fCurLevelR ); } + static float CalcLogResultForMeter ( const float& fLinearLevel ); void Reset() { - dCurLevelLOrMono = 0.0; - dCurLevelR = 0.0; + fCurLevelLOrMono = 0.0f; + fCurLevelR = 0.0f; } protected: - double UpdateCurLevel ( double dCurLevel, - const double dMax ); + float UpdateCurLevel ( float fCurLevel, + const float fMax ); - double dCurLevelLOrMono; - double dCurLevelR; - double dSmoothingFactor; - bool bIsStereoOut; + float fCurLevelLOrMono; + float fCurLevelR; + float fSmoothingFactor; + bool bIsStereoOut; }; @@ -1171,35 +1159,35 @@ class CAudioReverb void Init ( const EAudChanConf eNAudioChannelConf, const int iNStereoBlockSizeSam, const int iSampleRate, - const double rT60 = 1.1 ); + const float rT60 = 1.1f ); void Clear(); - void Process ( CVector& vecsStereoInOut, - const bool bReverbOnLeftChan, - const double dAttenuation ); + void Process ( CVector& vecfStereoInOut, + const bool bReverbOnLeftChan, + const float fAttenuation ); protected: - void setT60 ( const double rT60, const int iSampleRate ); + void setT60 ( const float rT60, const int 
iSampleRate ); bool isPrime ( const int number ); class COnePole { public: COnePole() : dA ( 0 ), dB ( 0 ) { Reset(); } - void setPole ( const double dPole ); - double Calc ( const double dIn ); + void setPole ( const float dPole ); + float Calc ( const float dIn ); void Reset() { dLastSample = 0; } protected: - double dA; - double dB; - double dLastSample; + float dA; + float dB; + float dLastSample; }; EAudChanConf eAudioChannelConf; int iStereoBlockSizeSam; - CFIFO allpassDelays[3]; - CFIFO combDelays[4]; + CFIFO allpassDelays[3]; + CFIFO combDelays[4]; COnePole combFilters[4]; CFIFO outLeftDelay; CFIFO outRightDelay; diff --git a/src/vstmain.cpp b/src/vstmain.cpp index 6044c909f5..0fd55aff4b 100755 --- a/src/vstmain.cpp +++ b/src/vstmain.cpp @@ -101,8 +101,8 @@ void CLlconVST::processReplacing ( float** pvIn, // copy input data for ( i = 0, j = 0; i < iNumSamples; i++, j += 2 ) { - Client.GetSound()->vecsTmpAudioSndCrdStereo[j] = pfIn0[i]; - Client.GetSound()->vecsTmpAudioSndCrdStereo[j + 1] = pfIn1[i]; + Client.GetSound()->vecfTmpAudioSndCrdStereo[j] = pfIn0[i]; + Client.GetSound()->vecfTmpAudioSndCrdStereo[j + 1] = pfIn1[i]; } // call processing callback function @@ -111,7 +111,7 @@ void CLlconVST::processReplacing ( float** pvIn, // copy output data for ( i = 0, j = 0; i < iNumSamples; i++, j += 2 ) { - pfOut0[i] = Client.GetSound()->vecsTmpAudioSndCrdStereo[j]; - pfOut1[i] = Client.GetSound()->vecsTmpAudioSndCrdStereo[j + 1]; + pfOut0[i] = Client.GetSound()->vecfTmpAudioSndCrdStereo[j]; + pfOut1[i] = Client.GetSound()->vecfTmpAudioSndCrdStereo[j + 1]; } } diff --git a/src/vstsound.h b/src/vstsound.h index 04ea94a675..7709cf3e92 100755 --- a/src/vstsound.h +++ b/src/vstsound.h @@ -34,26 +34,26 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), void* arg ) : + CSound ( void (*fpNewCallback) ( CVector& vfData, void* arg ), void* arg ) : CSoundBase ( true, fpNewCallback, arg ), iVSTMonoBufferSize ( 0 ) 
{} // special VST functions void SetMonoBufferSize ( const int iNVBS ) { iVSTMonoBufferSize = iNVBS; } void VSTProcessCallback() { - CSoundBase::ProcessCallback ( vecsTmpAudioSndCrdStereo ); + CSoundBase::ProcessCallback ( vecfTmpAudioSndCrdStereo ); } virtual int Init ( const int ) { // init base class CSoundBase::Init ( iVSTMonoBufferSize ); - vecsTmpAudioSndCrdStereo.Init ( 2 * iVSTMonoBufferSize /* stereo */); + vecfTmpAudioSndCrdStereo.Init ( 2 * iVSTMonoBufferSize /* stereo */); return iVSTMonoBufferSize; } // this vector must be accessible from the outside (quick hack solution) - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; protected: int iVSTMonoBufferSize; diff --git a/windows/sound.cpp b/windows/sound.cpp index f660afd63f..efbf43f8b9 100755 --- a/windows/sound.cpp +++ b/windows/sound.cpp @@ -149,9 +149,6 @@ QString CSound::CheckDeviceCapabilities() lNumOutChan = MAX_NUM_IN_OUT_CHANNELS; } - // query channel infos for all available input channels - bool bInputChMixingSupported = true; - for ( int i = 0; i < lNumInChan; i++ ) { // setup for input channels @@ -174,11 +171,6 @@ QString CSound::CheckDeviceCapabilities() // store the name of the channel and check if channel mixing is supported channelInputName[i] = channelInfosInput[i].name; - - if ( !CheckSampleTypeSupportedForCHMixing ( channelInfosInput[i].type ) ) - { - bInputChMixingSupported = false; - } } // query channel infos for all available output channels @@ -204,7 +196,7 @@ QString CSound::CheckDeviceCapabilities() } // special case with 4 input channels: support adding channels - if ( ( lNumInChan == 4 ) && bInputChMixingSupported ) + if ( lNumInChan == 4 ) { // add four mixed channels (i.e. 
4 normal, 4 mixed channels) lNumInChanPlusAddChan = 8; @@ -404,7 +396,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) ASIOSetSampleRate ( SYSTEM_SAMPLE_RATE_HZ ); // create memory for intermediate audio buffer - vecsMultChanAudioSndCrd.Init ( iASIOBufferSizeStereo ); + vecfMultChanAudioSndCrd.Init ( iASIOBufferSizeStereo ); // create and activate ASIO buffers (buffer size in samples), // dispose old buffers (if any) @@ -483,7 +475,7 @@ void CSound::Stop() } } -CSound::CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -588,515 +580,343 @@ bool CSound::CheckSampleTypeSupported ( const ASIOSampleType SamType ) ( SamType == ASIOSTInt32MSB24 ) ); } -bool CSound::CheckSampleTypeSupportedForCHMixing ( const ASIOSampleType SamType ) -{ - // check for supported sample types for audio channel mixing (see bufferSwitch) - return ( ( SamType == ASIOSTInt16LSB ) || - ( SamType == ASIOSTInt24LSB ) || - ( SamType == ASIOSTInt32LSB ) ); -} +static constexpr double FACTOR16 = 32767.0; +static constexpr double FACTOR16_INV = 1.0 / 32767.0; -void CSound::bufferSwitch ( long index, ASIOBool ) -{ - int iCurSample; +struct sample16LSB { + int16_t data[1]; + float get() const { + return (data[0] * FACTOR16_INV); + } + void put(const float value) { + data[0] = (int16_t)(value * FACTOR16); + } +}; + +struct sample16MSB { + uint8_t data[2]; + float get() const { + const int16_t temp = data[1] | (data[0] << 8); + return (temp * FACTOR16_INV); + } + void put(const float value) { + const int16_t temp = (int16_t) (value * FACTOR16); + data[0] = (uint8_t)(temp >> 8); + data[1] = (uint8_t)(temp); + } +}; + +static constexpr double FACTOR24 = 2147483647.0; +static constexpr double FACTOR24_INV = 1.0 / 2147483647.0; + +struct sample24LSB { + uint8_t data[3]; + float get() const { + const int32_t temp = (data[0] << 8) | (data[1] << 16) | (data[2] << 
24); + return (temp * FACTOR24_INV); + } + void put(const float value) { + const int32_t temp = (int32_t) (value * FACTOR24); + data[0] = (uint8_t)(temp >> 8); + data[1] = (uint8_t)(temp >> 16); + data[2] = (uint8_t)(temp >> 24); + } +}; + +struct sample24MSB { + uint8_t data[3]; + float get() const { + const int32_t temp = (data[2] << 8) | (data[1] << 16) | (data[0] << 24); + return (temp * FACTOR24_INV); + } + void put(const float value) { + const int32_t temp = (int32_t) (value * FACTOR24); + data[0] = (uint8_t)(temp >> 24); + data[1] = (uint8_t)(temp >> 16); + data[2] = (uint8_t)(temp >> 8); + } +}; + +static constexpr double FACTOR32 = 2147483647.0; +static constexpr double FACTOR32_INV = 1.0 / 2147483647.0; + +struct sample32LSB { + int32_t data[1]; + float get() const { + return (data[0] * FACTOR32_INV); + } + void put(const float value) { + data[0] = (int32_t) (value * FACTOR32); + } +}; + +struct sample32MSB { + uint8_t data[4]; + float get() const { + const int32_t temp = (data[3] << 0) | (data[2] << 8) | + (data[1] << 16) | (data[0] << 24); + return (temp * FACTOR32_INV); + } + void put(const float value) { + const int32_t temp = (int32_t) (value * FACTOR32); + data[0] = (uint8_t)(temp >> 24); + data[1] = (uint8_t)(temp >> 16); + data[2] = (uint8_t)(temp >> 8); + data[3] = (uint8_t)(temp >> 0); + } +}; - // get references to class members - int& iASIOBufferSizeMono = pSound->iASIOBufferSizeMono; - CVector& vecsMultChanAudioSndCrd = pSound->vecsMultChanAudioSndCrd; +union sampleFloat32Data { + uint8_t data[4]; + float value; +}; +struct sampleFloat32LSB { + float data[1]; + float get() const { + return (data[0]); + } + void put(const float value) { + data[0] = value; + } +}; + +struct sampleFloat32MSB { + uint8_t data[4]; + float get() const { + sampleFloat32Data temp; + temp.data[0] = data[3]; + temp.data[1] = data[2]; + temp.data[2] = data[1]; + temp.data[3] = data[0]; + return (temp.value); + } + void put(const float value) { + sampleFloat32Data temp;
+ temp.value = value; + data[0] = temp.data[3]; + data[1] = temp.data[2]; + data[2] = temp.data[1]; + data[3] = temp.data[0]; + } +}; + +union sampleFloat64Data { + uint8_t data[8]; + double value; +}; + +struct sampleFloat64LSB { + double data[1]; + float get() const { + return (data[0]); + } + void put(const float value) { + data[0] = value; + } +}; + +struct sampleFloat64MSB { + uint8_t data[8]; + float get() const { + sampleFloat64Data temp; + temp.data[0] = data[7]; + temp.data[1] = data[6]; + temp.data[2] = data[5]; + temp.data[3] = data[4]; + temp.data[4] = data[3]; + temp.data[5] = data[2]; + temp.data[6] = data[1]; + temp.data[7] = data[0]; + return (temp.value); + } + void put(const float value) { + sampleFloat64Data temp; + temp.value = value; + data[0] = temp.data[7]; + data[1] = temp.data[6]; + data[2] = temp.data[5]; + data[3] = temp.data[4]; + data[4] = temp.data[3]; + data[5] = temp.data[2]; + data[6] = temp.data[1]; + data[7] = temp.data[0]; + } +}; + +void CSound::bufferSwitch ( long index, ASIOBool ) +{ // perform the processing for input and output pSound->ASIOMutex.lock(); // get mutex lock { // CAPTURE ------------------------------------------------------------- for ( int i = 0; i < NUM_IN_OUT_CHANNELS; i++ ) { - int iSelCH, iSelAddCH; + int iSelAddCH; + int iSelCH; - GetSelCHAndAddCH ( pSound->vSelectedInputChannels[i], pSound->lNumInChan, - iSelCH, iSelAddCH ); + GetSelCHAndAddCH ( pSound->vSelectedInputChannels[i], pSound->lNumInChan, + iSelCH, iSelAddCH ); // copy new captured block in thread transfer buffer (copy // mono data interleaved in stereo buffer) switch ( pSound->channelInfosInput[iSelCH].type ) { case ASIOSTInt16LSB: - { - // no type conversion required, just copy operation - int16_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = pASIOBuf[iCurSample]; - } - - if ( iSelAddCH >= 0 
) - { - // mix input channels case: - int16_t* pASIOBufAdd = static_cast ( pSound->bufferInfos[iSelAddCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Double2Short ( (double) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (double) pASIOBufAdd[iCurSample] ); - } - } + pSound->bufferSwitchImport ( 1, index, i ); break; - } - - case ASIOSTInt24LSB: - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, 3 ); - iCurSam >>= 8; - vecsMultChanAudioSndCrd[2 * iCurSample + i] = static_cast ( iCurSam ); - } - - if ( iSelAddCH >= 0 ) - { - // mix input channels case: - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelAddCH].buffers[index] ) + iCurSample * 3, 3 ); - iCurSam >>= 8; - - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Double2Short ( (double) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (double) static_cast ( iCurSam ) ); - } - } + case ASIOSTInt16MSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB: - { - int32_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( pASIOBuf[iCurSample] >> 16 ); - } - - if ( iSelAddCH >= 0 ) - { - // mix input channels case: - int32_t* pASIOBufAdd = static_cast ( pSound->bufferInfos[iSelAddCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Double2Short ( (double) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (double) static_cast ( pASIOBufAdd[iCurSample] >> 16 ) ); - } - } + case ASIOSTInt24LSB: + 
pSound->bufferSwitchImport ( 1, index, i ); break; - } - case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] * _MAXSHORT ); - } + case ASIOSTInt24MSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTFloat64LSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] * _MAXSHORT ); - } + case ASIOSTInt32LSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFF ); - } + case ASIOSTInt32MSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0x3FFFF ) >> 2 ); - } + case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( static_cast ( - 
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFFF ) >> 4 ); - } + case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFFFF ) >> 8 ); - } + case ASIOSTFloat64LSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt16MSB: -// NOT YET TESTED - // flip bits - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Flip16Bits ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] ) )[iCurSample] ); - } + case ASIOSTFloat64MSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt24MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // because the bits are flipped, we do not have to perform the - // shift by 8 bits - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, 3 ); - - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Flip16Bits ( static_cast ( iCurSam ) ); - } + case ASIOSTInt32LSB16: + pSound->bufferSwitchImport ( 1<<16, index, i ); break; - case ASIOSTInt32MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // flip bits and convert to 16 bit - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) >> 16 ); - } + case ASIOSTInt32MSB16: + pSound->bufferSwitchImport ( 1<<16, index, i ); 
break; - case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) ) * _MAXSHORT ); - } + case ASIOSTInt32LSB18: + pSound->bufferSwitchImport ( 1<<14, index, i ); break; - case ASIOSTFloat64MSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - Flip64Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) ) * _MAXSHORT ); - } + case ASIOSTInt32MSB18: + pSound->bufferSwitchImport ( 1<<14, index, i ); break; - case ASIOSTInt32MSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFF ); - } + case ASIOSTInt32LSB20: + pSound->bufferSwitchImport ( 1<<12, index, i ); break; - case ASIOSTInt32MSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0x3FFFF ) >> 2 ); - } + case ASIOSTInt32MSB20: + pSound->bufferSwitchImport ( 1<<12, index, i ); break; - case ASIOSTInt32MSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - 
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFFF ) >> 4 ); - } + case ASIOSTInt32LSB24: + pSound->bufferSwitchImport ( 1<<8, index, i ); break; - case ASIOSTInt32MSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFFFF ) >> 8 ); - } + case ASIOSTInt32MSB24: + pSound->bufferSwitchImport ( 1<<8, index, i ); break; } } // call processing callback function - pSound->ProcessCallback ( vecsMultChanAudioSndCrd ); + pSound->ProcessCallback ( pSound->vecfMultChanAudioSndCrd ); // PLAYBACK ------------------------------------------------------------ for ( int i = 0; i < NUM_IN_OUT_CHANNELS; i++ ) { - const int iSelCH = pSound->lNumInChan + pSound->vSelectedOutputChannels[i]; - // copy data from sound card in output buffer (copy // interleaved stereo data in mono sound card buffer) switch ( pSound->channelInfosOutput[pSound->vSelectedOutputChannels[i]].type ) { case ASIOSTInt16LSB: - { - // no type conversion required, just copy operation - int16_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); + pSound->bufferSwitchExport ( 1, index, i ); + break; - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - pASIOBuf[iCurSample] = vecsMultChanAudioSndCrd[2 * iCurSample + i]; - } + case ASIOSTInt16MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - } case ASIOSTInt24LSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert current sample in 24 bit format - int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - iCurSam <<= 8; + pSound->bufferSwitchExport ( 1, index, i ); + break; - memcpy ( ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, &iCurSam, 3 ); - } 
+ case ASIOSTInt24MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; case ASIOSTInt32LSB: - { - int32_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); + pSound->bufferSwitchExport ( 1, index, i ); + break; - pASIOBuf[iCurSample] = ( iCurSam << 16 ); - } + case ASIOSTInt32MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - } case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const float fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - fCurSam / _MAXSHORT; - } + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTFloat64LSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const double fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - fCurSam / _MAXSHORT; - } + case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - iCurSam; - } + case ASIOSTFloat64LSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); 
break; - case ASIOSTInt32LSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 2 ); - } + case ASIOSTFloat64MSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 4 ); - } + case ASIOSTInt32LSB16: + pSound->bufferSwitchExport ( 1<<16, index, i ); break; - case ASIOSTInt32LSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 8 ); - } + case ASIOSTInt32MSB16: + pSound->bufferSwitchExport ( 1<<16, index, i ); break; - case ASIOSTInt16MSB: -// NOT YET TESTED - // flip bits - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - ( (int16_t*) pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip16Bits ( vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - } + case ASIOSTInt32LSB18: + pSound->bufferSwitchExport ( 1<<14, index, i ); break; - case ASIOSTInt24MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // because the bits are flipped, we do not have to perform the - // shift by 8 bits - int32_t 
iCurSam = static_cast ( Flip16Bits ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ) ); - - memcpy ( ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, &iCurSam, 3 ); - } + case ASIOSTInt32MSB18: + pSound->bufferSwitchExport ( 1<<14, index, i ); break; - case ASIOSTInt32MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit and flip bits - int iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 16 ); - } + case ASIOSTInt32LSB20: + pSound->bufferSwitchExport ( 1<<12, index, i ); break; - case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const float fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - static_cast ( Flip32Bits ( static_cast ( - fCurSam / _MAXSHORT ) ) ); - } + case ASIOSTInt32MSB20: + pSound->bufferSwitchExport ( 1<<12, index, i ); break; - case ASIOSTFloat64MSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const double fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - static_cast ( Flip64Bits ( static_cast ( - fCurSam / _MAXSHORT ) ) ); - } + case ASIOSTInt32LSB24: + pSound->bufferSwitchExport ( 1<<8, index, i ); break; - case ASIOSTInt32MSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - 
static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam ); - } - break; - - case ASIOSTInt32MSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 2 ); - } - break; - - case ASIOSTInt32MSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 4 ); - } - break; - - case ASIOSTInt32MSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 8 ); - } + case ASIOSTInt32MSB24: + pSound->bufferSwitchExport ( 1<<8, index, i ); break; } } @@ -1139,57 +959,3 @@ long CSound::asioMessages ( long selector, return ret; } - -int16_t CSound::Flip16Bits ( const int16_t iIn ) -{ - uint16_t iMask = ( 1 << 15 ); - int16_t iOut = 0; - - for ( unsigned int i = 0; i < 16; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 
1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} - -int32_t CSound::Flip32Bits ( const int32_t iIn ) -{ - uint32_t iMask = ( static_cast ( 1 ) << 31 ); - int32_t iOut = 0; - - for ( unsigned int i = 0; i < 32; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} - -int64_t CSound::Flip64Bits ( const int64_t iIn ) -{ - uint64_t iMask = ( static_cast ( 1 ) << 63 ); - int64_t iOut = 0; - - for ( unsigned int i = 0; i < 64; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} diff --git a/windows/sound.h b/windows/sound.h index a73e99199e..0133fae101 100755 --- a/windows/sound.h +++ b/windows/sound.h @@ -46,7 +46,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -84,7 +84,6 @@ class CSound : public CSoundBase int GetActualBufferSize ( const int iDesiredBufferSizeMono ); QString CheckDeviceCapabilities(); bool CheckSampleTypeSupported ( const ASIOSampleType SamType ); - bool CheckSampleTypeSupportedForCHMixing ( const ASIOSampleType SamType ); void ResetChannelMapping(); int iASIOBufferSizeMono; @@ -97,15 +96,10 @@ class CSound : public CSoundBase CVector vSelectedInputChannels; CVector vSelectedOutputChannels; - CVector vecsMultChanAudioSndCrd; + CVector vecfMultChanAudioSndCrd; QMutex ASIOMutex; - // utility functions - static int16_t Flip16Bits ( const int16_t iIn ); - static int32_t Flip32Bits ( const int32_t iIn ); - static int64_t Flip64Bits ( const int64_t iIn ); - // audio hardware buffer info struct sHWBufferInfo { @@ -124,6 +118,49 @@ class CSound : public CSoundBase bool 
bASIOPostOutput; ASIOCallbacks asioCallbacks; + // templates + template void bufferSwitchImport ( const int iGain, + const long index, const int iCH) + { + int iSelAddCH; + int iSelCH; + + GetSelCHAndAddCH ( vSelectedInputChannels[iCH], lNumInChan, iSelCH, iSelAddCH ); + + const T *pASIOBuf = static_cast ( bufferInfos[iSelCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) + { + vecfMultChanAudioSndCrd[2 * iCurSample + iCH] = pASIOBuf[iCurSample].get() * iGain; + } + + if ( iSelAddCH >= 0 ) + { + // mix input channels case + const T *pASIOBufAdd = + static_cast ( bufferInfos[iSelAddCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) + { + vecfMultChanAudioSndCrd[2 * iCurSample + iCH] = + clipFloat ( vecfMultChanAudioSndCrd[2 * iCurSample + iCH] + + pASIOBufAdd[iCurSample].get() * iGain ); + } + } + } + + template void bufferSwitchExport(const int iGain, + const long index, const int iCH) + { + const int iSelCH = lNumInChan + vSelectedOutputChannels[iCH]; + T *pASIOBuf = static_cast ( bufferInfos[iSelCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) + { + pASIOBuf[iCurSample].put( vecfMultChanAudioSndCrd[2 * iCurSample + iCH] / iGain ); + } + } + // callbacks static void bufferSwitch ( long index, ASIOBool processNow ); static ASIOTime* bufferSwitchTimeInfo ( ASIOTime* timeInfo, long index, ASIOBool processNow );