diff --git a/ChangeLog b/ChangeLog index bfb13d9291..98448e0097 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,8 @@ 3.5.12git <- NOTE: the release version number will be 3.5.13 +- switched all audio sample processing to use floats, coded by hselasky (#544) + - bug fix: reduced server list is displayed instead of the normal list (#657) diff --git a/android/sound.cpp b/android/sound.cpp index bb7105df28..e6588b7bc9 100644 --- a/android/sound.cpp +++ b/android/sound.cpp @@ -27,7 +27,7 @@ /* Implementation *************************************************************/ -CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -193,7 +193,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) iOpenSLBufferSizeStereo = 2 * iOpenSLBufferSizeMono; // create memory for intermediate audio buffer - vecsTmpAudioSndCrdStereo.Init ( iOpenSLBufferSizeStereo ); + vecfTmpAudioSndCrdStereo.Init ( iOpenSLBufferSizeStereo ); // TEST #if ( SYSTEM_SAMPLE_RATE_HZ != 48000 ) @@ -205,7 +205,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) // 48 kHz / 16 kHz = factor 3 (note that the buffer size mono might // be divisible by three, therefore we will get a lot of drop outs) iModifiedInBufSize = iOpenSLBufferSizeMono / 3; -vecsTmpAudioInSndCrd.Init ( iModifiedInBufSize ); +vecfTmpAudioInSndCrd.Init ( iModifiedInBufSize ); return iOpenSLBufferSizeMono; } @@ -238,19 +238,17 @@ oboe::DataCallbackResult CSound::onAudioReady ( oboe::AudioStream* oboeStream, v memset ( audioData, 0, sizeof(float) * numFrames * oboeStream->getChannelCount() ); // Only copy data if we have data to copy, otherwise fill with silence - if ( !pSound->vecsTmpAudioSndCrdStereo.empty() ) + if ( !pSound->vecfTmpAudioSndCrdStereo.empty() ) { for ( int frmNum = 0; frmNum < numFrames; ++frmNum ) { for ( int channelNum = 0; channelNum < 
oboeStream->getChannelCount(); channelNum++ ) { // copy sample received from server into output buffer + const float fCurSam = + pSound->vecfTmpAudioSndCrdStereo[frmNum * oboeStream->getChannelCount() + channelNum]; - // convert to 32 bit - const int32_t iCurSam = static_cast ( - pSound->vecsTmpAudioSndCrdStereo [frmNum * oboeStream->getChannelCount() + channelNum] ); - - floatData[frmNum * oboeStream->getChannelCount() + channelNum] = (float) iCurSam / _MAXSHORT; + floatData[frmNum * oboeStream->getChannelCount() + channelNum] = fCurSam; } } } @@ -282,13 +280,13 @@ oboe::DataCallbackResult CSound::onAudioReady ( oboe::AudioStream* oboeStream, v { for ( int channelNum = 0; channelNum < oboeStream->getChannelCount(); channelNum++ ) { - pSound->vecsTmpAudioSndCrdStereo[frmNum * oboeStream->getChannelCount() + channelNum] = - (short) floatData[frmNum * oboeStream->getChannelCount() + channelNum] * _MAXSHORT; + pSound->vecfTmpAudioSndCrdStereo [frmNum * oboeStream->getChannelCount() + channelNum] = + floatData[frmNum * oboeStream->getChannelCount() + channelNum]; } } // Tell parent class that we've put some data ready to send to the server - pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo ); + pSound->ProcessCallback ( pSound->vecfTmpAudioSndCrdStereo ); } // locker.unlock(); diff --git a/android/sound.h b/android/sound.h index b97c3ffe4e..5094e20402 100644 --- a/android/sound.h +++ b/android/sound.h @@ -36,7 +36,7 @@ class CSound : public CSoundBase, public oboe::AudioStreamCallback//, public IRenderableAudio, public IRestartable { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -54,7 +54,7 @@ class CSound : public CSoundBase, public oboe::AudioStreamCallback//, public IRe // these variables should be protected but cannot since we want // to access them from the callback function - CVector 
vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; static void android_message_handler ( QtMsgType type, const QMessageLogContext& context, @@ -74,7 +74,7 @@ class CSound : public CSoundBase, public oboe::AudioStreamCallback//, public IRe }; // TEST -CVector vecsTmpAudioInSndCrd; +CVector vecfTmpAudioInSndCrd; int iModifiedInBufSize; int iOpenSLBufferSizeMono; diff --git a/linux/sound.cpp b/linux/sound.cpp index 2e54fe2c24..6e5d190024 100755 --- a/linux/sound.cpp +++ b/linux/sound.cpp @@ -228,7 +228,7 @@ int CSound::Init ( const int /* iNewPrefMonoBufferSize */ ) iJACKBufferSizeStero = 2 * iJACKBufferSizeMono; // create memory for intermediate audio buffer - vecsTmpAudioSndCrdStereo.Init ( iJACKBufferSizeStero ); + vecfTmpAudioSndCrdStereo.Init ( iJACKBufferSizeStero ); return iJACKBufferSizeMono; } @@ -259,16 +259,13 @@ int CSound::process ( jack_nframes_t nframes, void* arg ) { for ( i = 0; i < pSound->iJACKBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i] = - (short) ( in_left[i] * _MAXSHORT ); - - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = - (short) ( in_right[i] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = in_left[i]; + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = in_right[i]; } } // call processing callback function - pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo ); + pSound->ProcessCallback ( pSound->vecfTmpAudioSndCrdStereo ); // get output data pointer jack_default_audio_sample_t* out_left = @@ -285,10 +282,10 @@ int CSound::process ( jack_nframes_t nframes, void* arg ) for ( i = 0; i < pSound->iJACKBufferSizeMono; i++ ) { out_left[i] = (jack_default_audio_sample_t) - pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT; + pSound->vecfTmpAudioSndCrdStereo[2 * i]; out_right[i] = (jack_default_audio_sample_t) - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT; + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1]; } } } diff --git a/linux/sound.h b/linux/sound.h index c64397e2c9..0181fa17bd 100755 --- 
a/linux/sound.h +++ b/linux/sound.h @@ -60,7 +60,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool bNoAutoJackConnect, @@ -78,7 +78,7 @@ class CSound : public CSoundBase // these variables should be protected but cannot since we want // to access them from the callback function - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; int iJACKBufferSizeMono; int iJACKBufferSizeStero; bool bJackWasShutDown; @@ -111,7 +111,7 @@ class CSound : public CSoundBase Q_OBJECT public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* pParg ), void* pParg, const int iCtrlMIDIChannel, const bool , @@ -122,12 +122,12 @@ class CSound : public CSoundBase this, &CSound::OnTimer ); } virtual ~CSound() {} virtual int Init ( const int iNewPrefMonoBufferSize ) { CSoundBase::Init ( iNewPrefMonoBufferSize ); - vecsTemp.Init ( 2 * iNewPrefMonoBufferSize ); + vecfTemp.Init ( 2 * iNewPrefMonoBufferSize ); return iNewPrefMonoBufferSize; } CHighPrecisionTimer HighPrecisionTimer; - CVector vecsTemp; + CVector vecfTemp; public slots: - void OnTimer() { vecsTemp.Reset ( 0 ); if ( IsRunning() ) { ProcessCallback ( vecsTemp ); } } + void OnTimer() { vecfTemp.Reset ( 0 ); if ( IsRunning() ) { ProcessCallback ( vecfTemp ); } } }; #endif // WITH_SOUND diff --git a/mac/sound.cpp b/mac/sound.cpp index 1d92e8f056..e314d0f0ed 100755 --- a/mac/sound.cpp +++ b/mac/sound.cpp @@ -26,7 +26,7 @@ /* Implementation *************************************************************/ -CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -848,7 +848,7 @@ int CSound::Init ( 
const int iNewPrefMonoBufferSize ) iCoreAudioBufferSizeStereo = 2 * iCoreAudioBufferSizeMono; // create memory for intermediate audio buffer - vecsTmpAudioSndCrdStereo.Init ( iCoreAudioBufferSizeStereo ); + vecfTmpAudioSndCrdStereo.Init ( iCoreAudioBufferSizeStereo ); return iCoreAudioBufferSizeMono; } @@ -970,8 +970,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { // copy left and right channels separately - pSound->vecsTmpAudioSndCrdStereo[2 * i] = (short) ( pLeftData[iNumChanPerFrameLeft * i + iSelInInterlChLeft] * _MAXSHORT ); - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = (short) ( pRightData[iNumChanPerFrameRight * i + iSelInInterlChRight] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = pLeftData[iNumChanPerFrameLeft * i + iSelInInterlChLeft]; + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = pRightData[iNumChanPerFrameRight * i + iSelInInterlChRight]; } // add an additional optional channel @@ -982,8 +982,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i] = Double2Short ( - pSound->vecsTmpAudioSndCrdStereo[2 * i] + pLeftData[iNumChanPerFrameLeft * i + iSelAddInInterlChLeft] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = ClipFloat ( + pSound->vecfTmpAudioSndCrdStereo[2 * i] + pLeftData[iNumChanPerFrameLeft * i + iSelAddInInterlChLeft] ); } } @@ -994,19 +994,19 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = Double2Short ( - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] + pRightData[iNumChanPerFrameRight * i + iSelAddInInterlChRight] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = ClipFloat ( + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] + pRightData[iNumChanPerFrameRight * i + iSelAddInInterlChRight] ); } } } else { // incompatible sizes, clear 
work buffer - pSound->vecsTmpAudioSndCrdStereo.Reset ( 0 ); + pSound->vecfTmpAudioSndCrdStereo.Reset ( 0 ); } // call processing callback function - pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo ); + pSound->ProcessCallback ( pSound->vecfTmpAudioSndCrdStereo ); } if ( ( inDevice == pSound->CurrentAudioOutputDeviceID ) && outOutputData ) @@ -1028,8 +1028,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { // copy left and right channels separately - pLeftData[iNumChanPerFrameLeft * i + iSelOutInterlChLeft] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT; - pRightData[iNumChanPerFrameRight * i + iSelOutInterlChRight] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT; + pLeftData[iNumChanPerFrameLeft * i + iSelOutInterlChLeft] = (Float32) pSound->vecfTmpAudioSndCrdStereo[2 * i]; + pRightData[iNumChanPerFrameRight * i + iSelOutInterlChRight] = (Float32) pSound->vecfTmpAudioSndCrdStereo[2 * i + 1]; } } } diff --git a/mac/sound.h b/mac/sound.h index ce2eda21c4..c34547b6b1 100755 --- a/mac/sound.h +++ b/mac/sound.h @@ -36,7 +36,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -63,7 +63,7 @@ class CSound : public CSoundBase // these variables should be protected but cannot since we want // to access them from the callback function - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; int iCoreAudioBufferSizeMono; int iCoreAudioBufferSizeStereo; AudioDeviceID CurrentAudioInputDeviceID; diff --git a/src/buffer.h b/src/buffer.h index 3caeb18b9e..f75268c3a3 100755 --- a/src/buffer.h +++ b/src/buffer.h @@ -500,16 +500,16 @@ template class CConvBuf } } - void PutAll ( const CVector& vecsData ) + void PutAll ( const CVector& vecData ) { iGetPos = 0; - std::copy 
( vecsData.begin(), - vecsData.begin() + iBufferSize, // note that input vector might be larger then memory size + std::copy ( vecData.begin(), + vecData.begin() + iBufferSize, // note that input vector might be larger than memory size vecMemory.begin() ); } - bool Put ( const CVector& vecsData, + bool Put ( const CVector& vecData, const int iVecSize ) { // calculate the input size and the end position after copying @@ -519,8 +519,8 @@ template class CConvBuf if ( iEnd <= iBufferSize ) { // copy new data in internal buffer - std::copy ( vecsData.begin(), - vecsData.begin() + iVecSize, + std::copy ( vecData.begin(), + vecData.begin() + iVecSize, vecMemory.begin() + iPutPos ); // set buffer pointer one block further @@ -540,7 +540,7 @@ template class CConvBuf return vecMemory; } - void GetAll ( CVector& vecsData, + void GetAll ( CVector& vecData, const int iVecSize ) { iPutPos = 0; @@ -548,10 +548,10 @@ template class CConvBuf // copy data from internal buffer in given buffer std::copy ( vecMemory.begin(), vecMemory.begin() + iVecSize, - vecsData.begin() ); + vecData.begin() ); } - bool Get ( CVector& vecsData, + bool Get ( CVector& vecData, const int iVecSize ) { // calculate the input size and the end position after copying @@ -563,7 +563,7 @@ template class CConvBuf // copy new data from internal buffer std::copy ( vecMemory.begin() + iGetPos, vecMemory.begin() + iGetPos + iVecSize, - vecsData.begin() ); + vecData.begin() ); // set buffer pointer one block further iGetPos = iEnd; diff --git a/src/channel.cpp b/src/channel.cpp index 8e74f3f5e8..51a42ce344 100755 --- a/src/channel.cpp +++ b/src/channel.cpp @@ -27,8 +27,8 @@ // CChannel implementation ***************************************************** CChannel::CChannel ( const bool bNIsServer ) : - vecdGains ( MAX_NUM_CHANNELS, 1.0 ), - vecdPannings ( MAX_NUM_CHANNELS, 0.5 ), + vecfGains ( MAX_NUM_CHANNELS, 1.0f ), + vecfPannings ( MAX_NUM_CHANNELS, 0.5f ), iCurSockBufNumFrames ( INVALID_INDEX ),
bDoAutoSockBufSize ( true ), iFadeInCnt ( 0 ), @@ -36,7 +36,7 @@ CChannel::CChannel ( const bool bNIsServer ) : bIsEnabled ( false ), bIsServer ( bNIsServer ), iAudioFrameSizeSamples ( DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES ), - SignalLevelMeter ( false, 0.5 ) // server mode with mono out and faster smoothing + SignalLevelMeter ( false, 0.5f ) // server mode with mono out and faster smoothing { // reset network transport properties ResetNetworkTransportProperties(); @@ -255,8 +255,8 @@ bool CChannel::SetSockBufNumFrames ( const int iNewNumFrames, return ReturnValue; // set error flag } -void CChannel::SetGain ( const int iChanID, - const double dNewGain ) +void CChannel::SetGain ( const int iChanID, + const float fNewGain ) { QMutexLocker locker ( &Mutex ); @@ -264,27 +264,27 @@ void CChannel::SetGain ( const int iChanID, if ( ( iChanID >= 0 ) && ( iChanID < MAX_NUM_CHANNELS ) ) { // signal mute change - if ( ( vecdGains[iChanID] == 0 ) && ( dNewGain > 0 ) ) + if ( ( vecfGains[iChanID] == 0 ) && ( fNewGain > 0 ) ) { emit MuteStateHasChanged ( iChanID, false ); } - if ( ( vecdGains[iChanID] > 0 ) && ( dNewGain == 0 ) ) + if ( ( vecfGains[iChanID] > 0 ) && ( fNewGain == 0 ) ) { emit MuteStateHasChanged ( iChanID, true ); } - vecdGains[iChanID] = dNewGain; + vecfGains[iChanID] = fNewGain; } } -double CChannel::GetGain ( const int iChanID ) +float CChannel::GetGain ( const int iChanID ) { QMutexLocker locker ( &Mutex ); // get value (make sure channel ID is in range) if ( ( iChanID >= 0 ) && ( iChanID < MAX_NUM_CHANNELS ) ) { - return vecdGains[iChanID]; + return vecfGains[iChanID]; } else { @@ -292,26 +292,26 @@ double CChannel::GetGain ( const int iChanID ) } } -void CChannel::SetPan ( const int iChanID, - const double dNewPan ) +void CChannel::SetPan ( const int iChanID, + const float fNewPan ) { QMutexLocker locker ( &Mutex ); // set value (make sure channel ID is in range) if ( ( iChanID >= 0 ) && ( iChanID < MAX_NUM_CHANNELS ) ) { - vecdPannings[iChanID] = dNewPan; + 
vecfPannings[iChanID] = fNewPan; } } -double CChannel::GetPan ( const int iChanID ) +float CChannel::GetPan ( const int iChanID ) { QMutexLocker locker ( &Mutex ); // get value (make sure channel ID is in range) if ( ( iChanID >= 0 ) && ( iChanID < MAX_NUM_CHANNELS ) ) { - return vecdPannings[iChanID]; + return vecfPannings[iChanID]; } else { @@ -379,16 +379,16 @@ void CChannel::OnJittBufSizeChange ( int iNewJitBufSize ) } } -void CChannel::OnChangeChanGain ( int iChanID, - double dNewGain ) +void CChannel::OnChangeChanGain ( int iChanID, + float fNewGain ) { - SetGain ( iChanID, dNewGain ); + SetGain ( iChanID, fNewGain ); } -void CChannel::OnChangeChanPan ( int iChanID, - double dNewPan ) +void CChannel::OnChangeChanPan ( int iChanID, + float fNewPan ) { - SetPan ( iChanID, dNewPan ); + SetPan ( iChanID, fNewPan ); } void CChannel::OnChangeChanInfo ( CChannelCoreInfo ChanInfo ) @@ -678,12 +678,12 @@ void CChannel::PrepAndSendPacket ( CHighPrioSocket* pSocket, } } -double CChannel::UpdateAndGetLevelForMeterdB ( const CVector& vecsAudio, - const int iInSize, - const bool bIsStereoIn ) +float CChannel::UpdateAndGetLevelForMeterdB ( const CVector& vecfAudio, + const int iInSize, + const bool bIsStereoIn ) { // update the signal level meter and immediately return the current value - SignalLevelMeter.Update ( vecsAudio, + SignalLevelMeter.Update ( vecfAudio, iInSize, bIsStereoIn ); diff --git a/src/channel.h b/src/channel.h index fca896b9f2..8ac6705882 100755 --- a/src/channel.h +++ b/src/channel.h @@ -108,18 +108,18 @@ class CChannel : public QObject void CreateVersionAndOSMes() { Protocol.CreateVersionAndOSMes(); } void CreateMuteStateHasChangedMes ( const int iChanID, const bool bIsMuted ) { Protocol.CreateMuteStateHasChangedMes ( iChanID, bIsMuted ); } - void SetGain ( const int iChanID, const double dNewGain ); - double GetGain ( const int iChanID ); - double GetFadeInGain() { return static_cast ( iFadeInCnt ) / iFadeInCntMax; } + void SetGain ( const int iChanID, 
const float fNewGain ); + float GetGain ( const int iChanID ); + float GetFadeInGain() { return static_cast ( iFadeInCnt ) / iFadeInCntMax; } - void SetPan ( const int iChanID, const double dNewPan ); - double GetPan ( const int iChanID ); + void SetPan ( const int iChanID, const float fNewPan ); + float GetPan ( const int iChanID ); - void SetRemoteChanGain ( const int iId, const double dGain ) - { Protocol.CreateChanGainMes ( iId, dGain ); } + void SetRemoteChanGain ( const int iId, const float fGain ) + { Protocol.CreateChanGainMes ( iId, fGain ); } - void SetRemoteChanPan ( const int iId, const double dPan ) - { Protocol.CreateChanPanMes ( iId, dPan ); } + void SetRemoteChanPan ( const int iId, const float fPan ) + { Protocol.CreateChanPanMes ( iId, fPan ); } bool SetSockBufNumFrames ( const int iNewNumFrames, const bool bPreserve = false ); @@ -176,9 +176,9 @@ class CChannel : public QObject bool ChannelLevelsRequired() const { return bChannelLevelsRequired; } - double UpdateAndGetLevelForMeterdB ( const CVector& vecsAudio, - const int iInSize, - const bool bIsStereoIn ); + float UpdateAndGetLevelForMeterdB ( const CVector& vecfAudio, + const int iInSize, + const bool bIsStereoIn ); protected: bool ProtocolIsEnabled(); @@ -201,8 +201,8 @@ class CChannel : public QObject CChannelCoreInfo ChannelInfo; // mixer and effect settings - CVector vecdGains; - CVector vecdPannings; + CVector vecfGains; + CVector vecfPannings; // network jitter-buffer CNetBufWithStats SockBuf; @@ -241,8 +241,8 @@ class CChannel : public QObject public slots: void OnSendProtMessage ( CVector vecMessage ); void OnJittBufSizeChange ( int iNewJitBufSize ); - void OnChangeChanGain ( int iChanID, double dNewGain ); - void OnChangeChanPan ( int iChanID, double dNewPan ); + void OnChangeChanGain ( int iChanID, float fNewGain ); + void OnChangeChanPan ( int iChanID, float fNewPan ); void OnChangeChanInfo ( CChannelCoreInfo ChanInfo ); void OnNetTranspPropsReceived ( CNetworkTransportProps 
NetworkTransportProps ); void OnReqNetTranspProps(); diff --git a/src/client.cpp b/src/client.cpp index c0e83e99b6..20cc430817 100755 --- a/src/client.cpp +++ b/src/client.cpp @@ -44,7 +44,7 @@ CClient::CClient ( const quint16 iPortNumber, iNumAudioChannels ( 1 ), bIsInitializationPhase ( true ), bMuteOutStream ( false ), - dMuteOutStreamGain ( 1.0 ), + fMuteOutStreamGain ( 1.0f ), Socket ( &Channel, iPortNumber ), Sound ( AudioCallback, this, iCtrlMIDIChannel, bNoAutoJackConnect, strNClientName ), iAudioInFader ( AUD_FADER_IN_MIDDLE ), @@ -368,17 +368,17 @@ void CClient::SetDoAutoSockBufSize ( const bool bValue ) CreateServerJitterBufferMessage(); } -void CClient::SetRemoteChanGain ( const int iId, - const double dGain, - const bool bIsMyOwnFader ) +void CClient::SetRemoteChanGain ( const int iId, + const float fGain, + const bool bIsMyOwnFader ) { // if this gain is for my own channel, apply the value for the Mute Myself function if ( bIsMyOwnFader ) { - dMuteOutStreamGain = dGain; + fMuteOutStreamGain = fGain; } - Channel.SetRemoteChanGain ( iId, dGain ); + Channel.SetRemoteChanGain ( iId, fGain ); } bool CClient::SetServerAddr ( QString strNAddr ) @@ -819,7 +819,6 @@ void CClient::Init() // since we use double size frame size for OPUS, we have to adjust the frame size factor iSndCrdFrameSizeFactor /= 2; eAudioCompressionType = CT_OPUS; - } } @@ -892,9 +891,9 @@ void CClient::Init() vecCeltData.Init ( iCeltNumCodedBytes ); vecZeros.Init ( iStereoBlockSizeSam, 0 ); - vecsStereoSndCrdMuteStream.Init ( iStereoBlockSizeSam ); + vecfStereoSndCrdMuteStream.Init ( iStereoBlockSizeSam ); - dMuteOutStreamGain = 1.0; + fMuteOutStreamGain = 1.0f; opus_custom_encoder_ctl ( CurOpusEncoder, OPUS_SET_BITRATE ( @@ -937,13 +936,13 @@ void CClient::Init() bIsInitializationPhase = true; } -void CClient::AudioCallback ( CVector& psData, void* arg ) +void CClient::AudioCallback ( CVector& vfData, void* arg ) { // get the pointer to the object CClient* pMyClientObj = static_cast ( 
arg ); // process audio data - pMyClientObj->ProcessSndCrdAudioData ( psData ); + pMyClientObj->ProcessSndCrdAudioData ( vfData ); /* // TEST do a soundcard jitter measurement @@ -952,13 +951,13 @@ JitterMeas.Measure(); */ } -void CClient::ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ) +void CClient::ProcessSndCrdAudioData ( CVector& vecfStereoSndCrd ) { // check if a conversion buffer is required or not if ( bSndCrdConversionBufferRequired ) { // add new sound card block in conversion buffer - SndCrdConversionBufferIn.Put ( vecsStereoSndCrd, vecsStereoSndCrd.Size() ); + SndCrdConversionBufferIn.Put ( vecfStereoSndCrd, vecfStereoSndCrd.Size() ); // process all available blocks of data while ( SndCrdConversionBufferIn.GetAvailData() >= iStereoBlockSizeSam ) @@ -973,17 +972,17 @@ void CClient::ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ) } // get processed sound card block out of the conversion buffer - SndCrdConversionBufferOut.Get ( vecsStereoSndCrd, vecsStereoSndCrd.Size() ); + SndCrdConversionBufferOut.Get ( vecfStereoSndCrd, vecfStereoSndCrd.Size() ); } else { // regular case: no conversion buffer required // process audio data - ProcessAudioDataIntern ( vecsStereoSndCrd ); + ProcessAudioDataIntern ( vecfStereoSndCrd ); } } -void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) +void CClient::ProcessAudioDataIntern ( CVector& vecfStereoSndCrd ) { int i, j, iUnused; unsigned char* pCurCodedData; @@ -992,7 +991,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // Transmit signal --------------------------------------------------------- // update stereo signal level meter (not needed in headless mode) #ifndef HEADLESS - SignalLevelMeter.Update ( vecsStereoSndCrd, + SignalLevelMeter.Update ( vecfStereoSndCrd, iMonoBlockSizeSam, true ); #endif @@ -1000,43 +999,43 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // add reverberation effect if activated if ( iReverbLevel != 0 ) { - 
AudioReverb.Process ( vecsStereoSndCrd, + AudioReverb.Process ( vecfStereoSndCrd, bReverbOnLeftChan, - static_cast ( iReverbLevel ) / AUD_REVERB_MAX / 4 ); + static_cast ( iReverbLevel ) / AUD_REVERB_MAX / 4 ); } // apply pan (audio fader) and mix mono signals if ( !( ( iAudioInFader == AUD_FADER_IN_MIDDLE ) && ( eAudioChannelConf == CC_STEREO ) ) ) { // calculate pan gain in the range 0 to 1, where 0.5 is the middle position - const double dPan = static_cast ( iAudioInFader ) / AUD_FADER_IN_MAX; + const float fPan = static_cast ( iAudioInFader ) / AUD_FADER_IN_MAX; if ( eAudioChannelConf == CC_STEREO ) { // for stereo only apply pan attenuation on one channel (same as pan in the server) - const double dGainL = MathUtils::GetLeftPan ( dPan, false ); - const double dGainR = MathUtils::GetRightPan ( dPan, false ); + const float fGainL = MathUtils::GetLeftPan ( fPan, false ); + const float fGainR = MathUtils::GetRightPan ( fPan, false ); for ( i = 0, j = 0; i < iMonoBlockSizeSam; i++, j += 2 ) { // note that the gain is always <= 1, therefore a simple cast is // ok since we never can get an overload - vecsStereoSndCrd[j + 1] = static_cast ( dGainR * vecsStereoSndCrd[j + 1] ); - vecsStereoSndCrd[j] = static_cast ( dGainL * vecsStereoSndCrd[j] ); + vecfStereoSndCrd[j + 1] = fGainR * vecfStereoSndCrd[j + 1]; + vecfStereoSndCrd[j] = fGainL * vecfStereoSndCrd[j]; } } else { // for mono implement a cross-fade between channels and mix them, for // mono-in/stereo-out use no attenuation in pan center - const double dGainL = MathUtils::GetLeftPan ( dPan, eAudioChannelConf != CC_MONO_IN_STEREO_OUT ); - const double dGainR = MathUtils::GetRightPan ( dPan, eAudioChannelConf != CC_MONO_IN_STEREO_OUT ); + const float fGainL = MathUtils::GetLeftPan ( fPan, eAudioChannelConf != CC_MONO_IN_STEREO_OUT ); + const float fGainR = MathUtils::GetRightPan ( fPan, eAudioChannelConf != CC_MONO_IN_STEREO_OUT ); for ( i = 0, j = 0; i < iMonoBlockSizeSam; i++, j += 2 ) { - // note that we need the 
Double2Short for stereo pan mode - vecsStereoSndCrd[i] = Double2Short ( - dGainL * vecsStereoSndCrd[j] + dGainR * vecsStereoSndCrd[j + 1] ); + // clip samples for stereo pan mode + vecfStereoSndCrd[i] = ClipFloat ( + fGainL * vecfStereoSndCrd[j] + fGainR * vecfStereoSndCrd[j + 1] ); } } } @@ -1052,7 +1051,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // overwrite input values) for ( i = iMonoBlockSizeSam - 1, j = iStereoBlockSizeSam - 2; i >= 0; i--, j -= 2 ) { - vecsStereoSndCrd[j] = vecsStereoSndCrd[j + 1] = vecsStereoSndCrd[i]; + vecfStereoSndCrd[j] = vecfStereoSndCrd[j + 1] = vecfStereoSndCrd[i]; } } @@ -1063,19 +1062,19 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) { if ( bMuteOutStream ) { - iUnused = opus_custom_encode ( CurOpusEncoder, - &vecZeros[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples, - &vecCeltData[0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( CurOpusEncoder, + &vecZeros[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples, + &vecCeltData[0], + iCeltNumCodedBytes ); } else { - iUnused = opus_custom_encode ( CurOpusEncoder, - &vecsStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples, - &vecCeltData[0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( CurOpusEncoder, + &vecfStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples, + &vecCeltData[0], + iCeltNumCodedBytes ); } } @@ -1090,7 +1089,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // in case of mute stream, store local data if ( bMuteOutStream ) { - vecsStereoSndCrdMuteStream = vecsStereoSndCrd; + vecfStereoSndCrdMuteStream = vecfStereoSndCrd; } for ( i = 0; i < iSndCrdFrameSizeFactor; i++ ) @@ -1119,11 +1118,11 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // OPUS decoding if ( CurOpusDecoder != nullptr ) { - iUnused = opus_custom_decode ( CurOpusDecoder, 
- pCurCodedData, - iCeltNumCodedBytes, - &vecsStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples ); + iUnused = opus_custom_decode_float ( CurOpusDecoder, + pCurCodedData, + iCeltNumCodedBytes, + &vecfStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples ); } } @@ -1132,8 +1131,8 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) { for ( i = 0; i < iStereoBlockSizeSam; i++ ) { - vecsStereoSndCrd[i] = Double2Short ( - vecsStereoSndCrd[i] + vecsStereoSndCrdMuteStream[i] * dMuteOutStreamGain ); + vecfStereoSndCrd[i] = ClipFloat ( + vecfStereoSndCrd[i] + vecfStereoSndCrdMuteStream[i] * fMuteOutStreamGain ); } } @@ -1147,14 +1146,14 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // overwrite input values) for ( i = iMonoBlockSizeSam - 1, j = iStereoBlockSizeSam - 2; i >= 0; i--, j -= 2 ) { - vecsStereoSndCrd[j] = vecsStereoSndCrd[j + 1] = vecsStereoSndCrd[i]; + vecfStereoSndCrd[j] = vecfStereoSndCrd[j + 1] = vecfStereoSndCrd[i]; } } } else { // if not connected, clear data - vecsStereoSndCrd.Reset ( 0 ); + vecfStereoSndCrd.Reset ( 0 ); } // update socket buffer size @@ -1165,7 +1164,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) int CClient::EstimatedOverallDelay ( const int iPingTimeMs ) { - const double dSystemBlockDurationMs = static_cast ( iOPUSFrameSizeSamples ) / + const float fSystemBlockDurationMs = static_cast ( iOPUSFrameSizeSamples ) / SYSTEM_SAMPLE_RATE_HZ * 1000; // If the jitter buffers are set effectively, i.e. they are exactly the @@ -1173,20 +1172,20 @@ int CClient::EstimatedOverallDelay ( const int iPingTimeMs ) // length. Since that is usually not the case but the buffers are usually // a bit larger than necessary, we introduce some factor for compensation. // Consider the jitter buffer on the client and on the server side, too. 
- const double dTotalJitterBufferDelayMs = dSystemBlockDurationMs * - static_cast ( GetSockBufNumFrames() + + const float fTotalJitterBufferDelayMs = fSystemBlockDurationMs * + static_cast ( GetSockBufNumFrames() + GetServerSockBufNumFrames() ) * 0.7; // consider delay introduced by the sound card conversion buffer by using // "GetSndCrdConvBufAdditionalDelayMonoBlSize()" - double dTotalSoundCardDelayMs = GetSndCrdConvBufAdditionalDelayMonoBlSize() * + float fTotalSoundCardDelayMs = GetSndCrdConvBufAdditionalDelayMonoBlSize() * 1000 / SYSTEM_SAMPLE_RATE_HZ; // try to get the actual input/output sound card delay from the audio // interface, per definition it is not available if a 0 is returned - const double dSoundCardInputOutputLatencyMs = Sound.GetInOutLatencyMs(); + const float fSoundCardInputOutputLatencyMs = Sound.GetInOutLatencyMs(); - if ( dSoundCardInputOutputLatencyMs == 0.0 ) + if ( fSoundCardInputOutputLatencyMs == 0.0 ) { // use an alternative approach for estimating the sound card delay: // @@ -1194,29 +1193,29 @@ int CClient::EstimatedOverallDelay ( const int iPingTimeMs ) // output, therefore we have "3 *" instead of "2 *" (for input and output) // the actual sound card buffer size // "GetSndCrdConvBufAdditionalDelayMonoBlSize" - dTotalSoundCardDelayMs += + fTotalSoundCardDelayMs += ( 3 * GetSndCrdActualMonoBlSize() ) * 1000 / SYSTEM_SAMPLE_RATE_HZ; } else { // add the actual sound card latency in ms - dTotalSoundCardDelayMs += dSoundCardInputOutputLatencyMs; + fTotalSoundCardDelayMs += fSoundCardInputOutputLatencyMs; } // network packets are of the same size as the audio packets per definition // if no sound card conversion buffer is used - const double dDelayToFillNetworkPacketsMs = + const float fDelayToFillNetworkPacketsMs = GetSystemMonoBlSize() * 1000.0 / SYSTEM_SAMPLE_RATE_HZ; // OPUS additional delay at small frame sizes is half a frame size - const double dAdditionalAudioCodecDelayMs = dSystemBlockDurationMs / 2; + const float 
fAdditionalAudioCodecDelayMs = fSystemBlockDurationMs / 2; - const double dTotalBufferDelayMs = - dDelayToFillNetworkPacketsMs + - dTotalJitterBufferDelayMs + - dTotalSoundCardDelayMs + - dAdditionalAudioCodecDelayMs; + const float fTotalBufferDelayMs = + fDelayToFillNetworkPacketsMs + + fTotalJitterBufferDelayMs + + fTotalSoundCardDelayMs + + fAdditionalAudioCodecDelayMs; - return MathUtils::round ( dTotalBufferDelayMs + iPingTimeMs ); + return MathUtils::round ( fTotalBufferDelayMs + iPingTimeMs ); } diff --git a/src/client.h b/src/client.h index b2dd419d71..c1fa7a3fb2 100755 --- a/src/client.h +++ b/src/client.h @@ -117,8 +117,8 @@ class CClient : public QObject bool IsRunning() { return Sound.IsRunning(); } bool SetServerAddr ( QString strNAddr ); - double GetLevelForMeterdBLeft() { return SignalLevelMeter.GetLevelForMeterdBLeftOrMono(); } - double GetLevelForMeterdBRight() { return SignalLevelMeter.GetLevelForMeterdBRight(); } + float GetLevelForMeterdBLeft() { return SignalLevelMeter.GetLevelForMeterdBLeftOrMono(); } + float GetLevelForMeterdBRight() { return SignalLevelMeter.GetLevelForMeterdBRight(); } bool GetAndResetbJitterBufferOKFlag(); @@ -243,10 +243,10 @@ class CClient : public QObject void SetMuteOutStream ( const bool bDoMute ) { bMuteOutStream = bDoMute; } - void SetRemoteChanGain ( const int iId, const double dGain, const bool bIsMyOwnFader ); + void SetRemoteChanGain ( const int iId, const float fGain, const bool bIsMyOwnFader ); - void SetRemoteChanPan ( const int iId, const double dPan ) - { Channel.SetRemoteChanPan ( iId, dPan ); } + void SetRemoteChanPan ( const int iId, const float fPan ) + { Channel.SetRemoteChanPan ( iId, fPan ); } void SetRemoteInfo() { Channel.SetRemoteInfo ( ChannelInfo ); } @@ -288,11 +288,11 @@ class CClient : public QObject protected: // callback function must be static, otherwise it does not work - static void AudioCallback ( CVector& psData, void* arg ); + static void AudioCallback ( CVector& vfData, void* arg ); 
void Init(); - void ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ); - void ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ); + void ProcessSndCrdAudioData ( CVector& vecfStereoSndCrd ); + void ProcessAudioDataIntern ( CVector& vecfStereoSndCrd ); int PreparePingMessage(); int EvaluatePingMessage ( const int iMs ); @@ -323,7 +323,7 @@ class CClient : public QObject int iNumAudioChannels; bool bIsInitializationPhase; bool bMuteOutStream; - double dMuteOutStreamGain; + float fMuteOutStreamGain; CVector vecCeltData; CHighPrioSocket Socket; @@ -342,11 +342,11 @@ class CClient : public QObject bool bSndCrdConversionBufferRequired; int iSndCardMonoBlockSizeSamConvBuff; - CBufferBase SndCrdConversionBufferIn; - CBufferBase SndCrdConversionBufferOut; - CVector vecDataConvBuf; - CVector vecsStereoSndCrdMuteStream; - CVector vecZeros; + CBufferBase SndCrdConversionBufferIn; + CBufferBase SndCrdConversionBufferOut; + CVector vecDataConvBuf; + CVector vecfStereoSndCrdMuteStream; + CVector vecZeros; bool bFraSiFactPrefSupported; bool bFraSiFactDefSupported; diff --git a/src/global.h b/src/global.h index 6e52c1a6dd..e46f501727 100755 --- a/src/global.h +++ b/src/global.h @@ -168,8 +168,8 @@ LED bar: lbr #define MAX_NUM_STORED_FADER_SETTINGS 250 // range for signal level meter -#define LOW_BOUND_SIG_METER ( -50.0 ) // dB -#define UPPER_BOUND_SIG_METER ( 0.0 ) // dB +#define LOW_BOUND_SIG_METER ( -50.0f ) // dB +#define UPPER_BOUND_SIG_METER ( 0.0f ) // dB // defines for LED level meter CLevelMeter #define NUM_STEPS_LED_BAR 8 @@ -244,8 +244,6 @@ LED bar: lbr // server welcome message title (do not change for compatibility!) 
#define WELCOME_MESSAGE_PREFIX "Server Welcome Message: " -#define _MAXSHORT 32767 -#define _MINSHORT ( -32768 ) #define INVALID_INDEX -1 // define invalid index as a negative value (a valid index must always be >= 0) #if HAVE_STDINT_H diff --git a/src/recorder/cwavestream.h b/src/recorder/cwavestream.h index 54b91633c6..061a72d3b6 100644 --- a/src/recorder/cwavestream.h +++ b/src/recorder/cwavestream.h @@ -31,7 +31,7 @@ namespace recorder { inline QString secondsAt48K( const qint64 frames, const int frameSize ) { - return QString::number( static_cast( frames * frameSize ) / 48000, 'f', 14 ); + return QString::number( static_cast( frames * frameSize ) / 48000, 'f', 14 ); } struct STrackItem @@ -79,7 +79,7 @@ class FmtSubChunk static const uint32_t sampleRate = 48000; // because it's Jamulus const uint32_t byteRate; // sampleRate * numChannels * bitsPerSample/8 const uint16_t blockAlign; // numChannels * bitsPerSample/8 - static const uint16_t bitsPerSample = 16; + static const uint16_t bitsPerSample = 24; }; class DataSubChunkHdr diff --git a/src/recorder/jamcontroller.cpp b/src/recorder/jamcontroller.cpp index 75e2b4568a..801f480a2e 100755 --- a/src/recorder/jamcontroller.cpp +++ b/src/recorder/jamcontroller.cpp @@ -150,7 +150,7 @@ void CJamController::SetRecordingDir ( QString newRecordingDir, QObject::connect( this, &CJamController::ClientDisconnected, pJamRecorder, &CJamRecorder::OnDisconnected ); - qRegisterMetaType> ( "CVector" ); + qRegisterMetaType > ( "CVector" ); QObject::connect( this, &CJamController::AudioFrame, pJamRecorder, &CJamRecorder::OnFrame ); diff --git a/src/recorder/jamcontroller.h b/src/recorder/jamcontroller.h index 82b714f430..cfa5e57a30 100755 --- a/src/recorder/jamcontroller.h +++ b/src/recorder/jamcontroller.h @@ -67,10 +67,10 @@ class CJamController : public QObject const QString stChName, const CHostAddress RecHostAddr, const int iNumAudChan, - const CVector vecsData ); + const CVector vecfData ); }; } -Q_DECLARE_METATYPE(int16_t) 
+Q_DECLARE_METATYPE(float) diff --git a/src/recorder/jamrecorder.cpp b/src/recorder/jamrecorder.cpp index 06577f3f72..8b1b686f67 100755 --- a/src/recorder/jamrecorder.cpp +++ b/src/recorder/jamrecorder.cpp @@ -71,13 +71,15 @@ CJamClient::CJamClient(const qint64 frame, const int _numChannels, const QString * @param _name The client's current name * @param pcm The PCM data */ -void CJamClient::Frame(const QString _name, const CVector& pcm, int iServerFrameSizeSamples) +void CJamClient::Frame(const QString _name, const CVector& pcm, int iServerFrameSizeSamples) { name = _name; for(int i = 0; i < numChannels * iServerFrameSizeSamples; i++) { - *out << pcm[i]; + /* samples must be stored in little endian order */ + const int sample24 = pcm[i] * ((1 << 23) - 1); + *out << ( uint8_t ) sample24 << ( uint8_t ) ( sample24 >> 8 ) << ( uint8_t )( sample24 >> 16 ); } frameCount++; @@ -166,7 +168,7 @@ void CJamSession::DisconnectClient(int iChID) * * Also manages the overall current frame counter for the session. */ -void CJamSession::Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples) +void CJamSession::Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples) { if ( iChID == chIdDisconnected ) { @@ -529,7 +531,7 @@ void CJamRecorder::OnDisconnected(int iChID) * * Ensures recording has started. 
*/ -void CJamRecorder::OnFrame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data) +void CJamRecorder::OnFrame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data) { // Make sure we are ready if ( !isRecording ) diff --git a/src/recorder/jamrecorder.h b/src/recorder/jamrecorder.h index a42fe0c457..b00d29817d 100755 --- a/src/recorder/jamrecorder.h +++ b/src/recorder/jamrecorder.h @@ -72,7 +72,7 @@ class CJamClient : public QObject public: CJamClient(const qint64 frame, const int numChannels, const QString name, const CHostAddress address, const QDir recordBaseDir); - void Frame(const QString name, const CVector& pcm, int iServerFrameSizeSamples); + void Frame(const QString name, const CVector& pcm, int iServerFrameSizeSamples); void Disconnect(); @@ -108,7 +108,7 @@ class CJamSession : public QObject CJamSession(QDir recordBaseDir); - void Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples); + void Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples); void End(); @@ -200,7 +200,7 @@ public slots: /** * @brief Handle a frame of data to process */ - void OnFrame ( const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data ); + void OnFrame ( const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data ); }; } diff --git a/src/server.cpp b/src/server.cpp index 8613a18803..48af6f9e57 100755 --- a/src/server.cpp +++ b/src/server.cpp @@ -338,11 +338,11 @@ CServer::CServer ( const int iNewMaxNumChan, // allocate worst case memory for the temporary vectors vecChanIDsCurConChan.Init ( iMaxNumChannels ); - vecvecdGains.Init ( iMaxNumChannels ); - vecvecdPannings.Init 
( iMaxNumChannels ); - vecvecsData.Init ( iMaxNumChannels ); - vecvecsSendData.Init ( iMaxNumChannels ); - vecvecsIntermediateProcBuf.Init ( iMaxNumChannels ); + vecvecfGains.Init ( iMaxNumChannels ); + vecvecfPannings.Init ( iMaxNumChannels ); + vecvecfData.Init ( iMaxNumChannels ); + vecvecfSendData.Init ( iMaxNumChannels ); + vecvecfIntermediateProcBuf.Init ( iMaxNumChannels ); vecvecbyCodedData.Init ( iMaxNumChannels ); vecNumAudioChannels.Init ( iMaxNumChannels ); vecNumFrameSizeConvBlocks.Init ( iMaxNumChannels ); @@ -352,18 +352,18 @@ CServer::CServer ( const int iNewMaxNumChan, for ( i = 0; i < iMaxNumChannels; i++ ) { // init vectors storing information of all channels - vecvecdGains[i].Init ( iMaxNumChannels ); - vecvecdPannings[i].Init ( iMaxNumChannels ); + vecvecfGains[i].Init ( iMaxNumChannels ); + vecvecfPannings[i].Init ( iMaxNumChannels ); // we always use stereo audio buffers (which is the worst case) - vecvecsData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); + vecvecfData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); // (note that we only allocate iMaxNumChannels buffers for the send // and coded data because of the OMP implementation) - vecvecsSendData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); + vecvecfSendData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); - // allocate worst case memory for intermediate processing buffers in double precision - vecvecsIntermediateProcBuf[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); + // allocate worst case memory for intermediate processing buffers in single precision + vecvecfIntermediateProcBuf[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); // allocate worst case memory for the coded data vecvecbyCodedData[i].Init ( 
MAX_SIZE_BYTES_NETW_BUF ); @@ -489,7 +489,7 @@ CServer::CServer ( const int iNewMaxNumChan, QObject::connect ( this, &CServer::ClientDisconnected, &JamController, &recorder::CJamController::ClientDisconnected ); - qRegisterMetaType> ( "CVector" ); + qRegisterMetaType > ( "CVector" ); QObject::connect ( this, &CServer::AudioFrame, &JamController, &recorder::CJamController::AudioFrame ); @@ -884,24 +884,24 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // get gains of all connected channels for ( int j = 0; j < iNumClients; j++ ) { - // The second index of "vecvecdGains" does not represent + // The second index of "vecvecfGains" does not represent // the channel ID! Therefore we have to use // "vecChanIDsCurConChan" to query the IDs of the currently // connected channels - vecvecdGains[i][j] = vecChannels[iCurChanID].GetGain ( vecChanIDsCurConChan[j] ); + vecvecfGains[i][j] = vecChannels[iCurChanID].GetGain ( vecChanIDsCurConChan[j] ); // consider audio fade-in - vecvecdGains[i][j] *= vecChannels[vecChanIDsCurConChan[j]].GetFadeInGain(); + vecvecfGains[i][j] *= vecChannels[vecChanIDsCurConChan[j]].GetFadeInGain(); // use the fade in of the current channel for all other connected clients // as well to avoid the client volumes are at 100% when joining a server (#628) if ( j != i ) { - vecvecdGains[i][j] *= vecChannels[iCurChanID].GetFadeInGain(); + vecvecfGains[i][j] *= vecChannels[iCurChanID].GetFadeInGain(); } // panning - vecvecdPannings[i][j] = vecChannels[iCurChanID].GetPan ( vecChanIDsCurConChan[j] ); + vecvecfPannings[i][j] = vecChannels[iCurChanID].GetPan ( vecChanIDsCurConChan[j] ); } // flag for updating channel levels (if at least one clients wants it) @@ -916,7 +916,7 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // is false and the Get() function is not called at all. 
Therefore if the buffer is not needed // we do not spend any time in the function but go directly inside the if condition. if ( ( vecUseDoubleSysFraSizeConvBuf[i] == 0 ) || - !DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecsData[i], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i] ) ) + !DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecfData[i], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i] ) ) { // get current number of OPUS coded bytes const int iCeltNumCodedBytes = vecChannels[iCurChanID].GetNetwFrameSize(); @@ -953,11 +953,11 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // OPUS decode received data stream if ( CurOpusDecoder != nullptr ) { - iUnused = opus_custom_decode ( CurOpusDecoder, - pCurCodedData, - iCeltNumCodedBytes, - &vecvecsData[i][iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i]], - iClientFrameSizeSamples ); + iUnused = opus_custom_decode_float ( CurOpusDecoder, + pCurCodedData, + iCeltNumCodedBytes, + &vecvecfData[i][iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i]], + iClientFrameSizeSamples ); } } @@ -965,8 +965,8 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // and read out the small frame size immediately for further processing if ( vecUseDoubleSysFraSizeConvBuf[i] != 0 ) { - DoubleFrameSizeConvBufIn[iCurChanID].PutAll ( vecvecsData[i] ); - DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecsData[i], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i] ); + DoubleFrameSizeConvBufIn[iCurChanID].PutAll ( vecvecfData[i] ); + DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecfData[i], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[i] ); } } } @@ -991,7 +991,7 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE { bSendChannelLevels = CreateLevelsForAllConChannels ( iNumClients, vecNumAudioChannels, - vecvecsData, + vecvecfData, vecChannelLevels ); } @@ -1018,7 +1018,7 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" 
); JitterMeas.Measure(); // TE vecChannels[iCurChanID].GetName(), vecChannels[iCurChanID].GetAddress(), vecNumAudioChannels[iChanCnt], - vecvecsData[iChanCnt] ); + vecvecfData[iChanCnt] ); } // processing without multithreading @@ -1084,15 +1084,15 @@ void CServer::MixEncodeTransmitDataBlocks ( const int iStartChanCnt, void CServer::MixEncodeTransmitData ( const int iChanCnt, const int iNumClients ) { - int i, j, k, iUnused; - CVector& vecdIntermProcBuf = vecvecsIntermediateProcBuf[iChanCnt]; // use reference for faster access - CVector& vecsSendData = vecvecsSendData[iChanCnt]; // use reference for faster access + int i, j, k, iUnused; + CVector& vecfIntermProcBuf = vecvecfIntermediateProcBuf[iChanCnt]; // use reference for faster access + CVector& vecfSendData = vecvecfSendData[iChanCnt]; // use reference for faster access // get actual ID of current channel const int iCurChanID = vecChanIDsCurConChan[iChanCnt]; // init intermediate processing vector with zeros since we mix all channels on that vector - vecdIntermProcBuf.Reset ( 0 ); + vecfIntermProcBuf.Reset ( 0 ); // distinguish between stereo and mono mode if ( vecNumAudioChannels[iChanCnt] == 1 ) @@ -1101,18 +1101,18 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( j = 0; j < iNumClients; j++ ) { // get a reference to the audio data and gain of the current client - const CVector& vecsData = vecvecsData[j]; - const double dGain = vecvecdGains[iChanCnt][j]; + const CVector& vecfData = vecvecfData[j]; + const float fGain = vecvecfGains[iChanCnt][j]; // if channel gain is 1, avoid multiplication for speed optimization - if ( dGain == static_cast ( 1.0 ) ) + if ( fGain == 1.0f ) { if ( vecNumAudioChannels[j] == 1 ) { // mono for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecdIntermProcBuf[i] += vecsData[i]; + vecfIntermProcBuf[i] += vecfData[i]; } } else @@ -1120,8 +1120,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo: apply stereo-to-mono attenuation for ( i = 0, k 
= 0; i < iServerFrameSizeSamples; i++, k += 2 ) { - vecdIntermProcBuf[i] += - ( static_cast ( vecsData[k] ) + vecsData[k + 1] ) / 2; + vecfIntermProcBuf[i] += ( vecfData[k] + vecfData[k + 1] ) / 2; } } } @@ -1132,7 +1131,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // mono for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecdIntermProcBuf[i] += vecsData[i] * dGain; + vecfIntermProcBuf[i] += vecfData[i] * fGain; } } else @@ -1140,17 +1139,21 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo: apply stereo-to-mono attenuation for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { - vecdIntermProcBuf[i] += dGain * - ( static_cast ( vecsData[k] ) + vecsData[k + 1] ) / 2; + vecfIntermProcBuf[i] += fGain * + ( vecfData[k] + vecfData[k + 1] ) / 2; } } } } - // convert from double to short with clipping + // When adding multiple sound sources together + // the resulting signal level may exceed the maximum + // audio range which is from -1.0f to 1.0f inclusivly. 
+ // Clip the intermediate sound buffer to be within + // the expected range for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecsSendData[i] = Double2Short ( vecdIntermProcBuf[i] ); + vecfSendData[i] = ClipFloat ( vecfIntermProcBuf[i] ); } } else @@ -1159,17 +1162,17 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( j = 0; j < iNumClients; j++ ) { // get a reference to the audio data and gain/pan of the current client - const CVector& vecsData = vecvecsData[j]; - const double dGain = vecvecdGains[iChanCnt][j]; - const double dPan = vecvecdPannings[iChanCnt][j]; + const CVector& vecfData = vecvecfData[j]; + const float fGain = vecvecfGains[iChanCnt][j]; + const float fPan = vecvecfPannings[iChanCnt][j]; // calculate combined gain/pan for each stereo channel where we define // the panning that center equals full gain for both channels - const double dGainL = MathUtils::GetLeftPan ( dPan, false ) * dGain; - const double dGainR = MathUtils::GetRightPan ( dPan, false ) * dGain; + const float fGainL = MathUtils::GetLeftPan ( fPan, false ) * fGain; + const float fGainR = MathUtils::GetRightPan ( fPan, false ) * fGain; // if channel gain is 1, avoid multiplication for speed optimization - if ( ( dGainL == static_cast ( 1.0 ) ) && ( dGainR == static_cast ( 1.0 ) ) ) + if ( ( fGainL == 1.0f ) && ( fGainR == 1.0f ) ) { if ( vecNumAudioChannels[j] == 1 ) { @@ -1177,8 +1180,8 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { // left/right channel - vecdIntermProcBuf[k] += vecsData[i]; - vecdIntermProcBuf[k + 1] += vecsData[i]; + vecfIntermProcBuf[k] += vecfData[i]; + vecfIntermProcBuf[k + 1] += vecfData[i]; } } else @@ -1186,7 +1189,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i++ ) { - vecdIntermProcBuf[i] += vecsData[i]; + vecfIntermProcBuf[i] += vecfData[i]; } } } @@ -1198,8 +1201,8 @@ void 
CServer::MixEncodeTransmitData ( const int iChanCnt, for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { // left/right channel - vecdIntermProcBuf[k] += vecsData[i] * dGainL; - vecdIntermProcBuf[k + 1] += vecsData[i] * dGainR; + vecfIntermProcBuf[k] += vecfData[i] * fGainL; + vecfIntermProcBuf[k + 1] += vecfData[i] * fGainR; } } else @@ -1208,17 +1211,21 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i += 2 ) { // left/right channel - vecdIntermProcBuf[i] += vecsData[i] * dGainL; - vecdIntermProcBuf[i + 1] += vecsData[i + 1] * dGainR; + vecfIntermProcBuf[i] += vecfData[i] * fGainL; + vecfIntermProcBuf[i + 1] += vecfData[i + 1] * fGainR; } } } } - // convert from double to short with clipping + // When adding multiple sound sources together + // the resulting signal level may exceed the maximum + // audio range which is from -1.0f to 1.0f inclusivly. + // Clip the intermediate sound buffer to be within + // the expected range for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i++ ) { - vecsSendData[i] = Double2Short ( vecdIntermProcBuf[i] ); + vecfSendData[i] = ClipFloat ( vecfIntermProcBuf[i] ); } } @@ -1262,12 +1269,12 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // is false and the Get() function is not called at all. Therefore if the buffer is not needed // we do not spend any time in the function but go directly inside the if condition. 
if ( ( vecUseDoubleSysFraSizeConvBuf[iChanCnt] == 0 ) || - DoubleFrameSizeConvBufOut[iCurChanID].Put ( vecsSendData, SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ) ) + DoubleFrameSizeConvBufOut[iCurChanID].Put ( vecfSendData, SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ) ) { if ( vecUseDoubleSysFraSizeConvBuf[iChanCnt] != 0 ) { // get the large frame from the conversion buffer - DoubleFrameSizeConvBufOut[iCurChanID].GetAll ( vecsSendData, DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ); + DoubleFrameSizeConvBufOut[iCurChanID].GetAll ( vecfSendData, DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ); } for ( int iB = 0; iB < vecNumFrameSizeConvBlocks[iChanCnt]; iB++ ) @@ -1279,11 +1286,11 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // optimization it would be better to set it only if the network frame size is changed opus_custom_encoder_ctl ( pCurOpusEncoder, OPUS_SET_BITRATE ( CalcBitRateBitsPerSecFromCodedBytes ( iCeltNumCodedBytes, iClientFrameSizeSamples ) ) ); - iUnused = opus_custom_encode ( pCurOpusEncoder, - &vecsSendData[iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt]], - iClientFrameSizeSamples, - &vecvecbyCodedData[iChanCnt][0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( pCurOpusEncoder, + &vecfSendData[iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt]], + iClientFrameSizeSamples, + &vecvecbyCodedData[iChanCnt][0], + iCeltNumCodedBytes ); } // send separate mix to current clients @@ -1522,13 +1529,13 @@ bool CServer::PutAudioData ( const CVector& vecbyRecBuf, // time reset gains/pans of this channel ID for all other channels for ( int i = 0; i < iMaxNumChannels; i++ ) { - vecChannels[iCurChanID].SetGain ( i, 1.0 ); - vecChannels[iCurChanID].SetPan ( i, 0.5 ); + vecChannels[iCurChanID].SetGain ( i, 1.0f ); + vecChannels[iCurChanID].SetPan ( i, 0.5f ); // other channels (we do not distinguish the case if // i == iCurChanID for 
simplicity) - vecChannels[i].SetGain ( iCurChanID, 1.0 ); - vecChannels[i].SetPan ( iCurChanID, 0.5 ); + vecChannels[i].SetGain ( iCurChanID, 1.0f ); + vecChannels[i].SetPan ( iCurChanID, 0.5f ); } } else @@ -1672,10 +1679,10 @@ void CServer::customEvent ( QEvent* pEvent ) } /// @brief Compute frame peak level for each client -bool CServer::CreateLevelsForAllConChannels ( const int iNumClients, - const CVector& vecNumAudioChannels, - const CVector > vecvecsData, - CVector& vecLevelsOut ) +bool CServer::CreateLevelsForAllConChannels ( const int iNumClients, + const CVector& vecNumAudioChannels, + const CVector > vecvecfData, + CVector& vecLevelsOut ) { bool bLevelsWereUpdated = false; @@ -1688,13 +1695,13 @@ bool CServer::CreateLevelsForAllConChannels ( const int i for ( int j = 0; j < iNumClients; j++ ) { // update and get signal level for meter in dB for each channel - const double dCurSigLevelForMeterdB = vecChannels[vecChanIDsCurConChan[j]]. - UpdateAndGetLevelForMeterdB ( vecvecsData[j], + const float fCurSigLevelForMeterdB = vecChannels[vecChanIDsCurConChan[j]]. 
+ UpdateAndGetLevelForMeterdB ( vecvecfData[j], iServerFrameSizeSamples, vecNumAudioChannels[j] > 1 ); // map value to integer for transmission via the protocol (4 bit available) - vecLevelsOut[j] = static_cast ( std::ceil ( dCurSigLevelForMeterdB ) ); + vecLevelsOut[j] = static_cast ( ceilf ( fCurSigLevelForMeterdB ) ); } } diff --git a/src/server.h b/src/server.h index 1217bff9a9..0c92069e52 100755 --- a/src/server.h +++ b/src/server.h @@ -322,10 +322,10 @@ class CServer : bool bUseMultithreading; QFutureSynchronizer FutureSynchronizer; - bool CreateLevelsForAllConChannels ( const int iNumClients, - const CVector& vecNumAudioChannels, - const CVector > vecvecsData, - CVector& vecLevelsOut ); + bool CreateLevelsForAllConChannels ( const int iNumClients, + const CVector& vecNumAudioChannels, + const CVector > vecvecfData, + CVector& vecLevelsOut ); // do not use the vector class since CChannel does not have appropriate // copy constructor/operator @@ -346,21 +346,21 @@ class CServer : OpusCustomDecoder* OpusDecoderMono[MAX_NUM_CHANNELS]; OpusCustomEncoder* OpusEncoderStereo[MAX_NUM_CHANNELS]; OpusCustomDecoder* OpusDecoderStereo[MAX_NUM_CHANNELS]; - CConvBuf DoubleFrameSizeConvBufIn[MAX_NUM_CHANNELS]; - CConvBuf DoubleFrameSizeConvBufOut[MAX_NUM_CHANNELS]; + CConvBuf DoubleFrameSizeConvBufIn[MAX_NUM_CHANNELS]; + CConvBuf DoubleFrameSizeConvBufOut[MAX_NUM_CHANNELS]; CVector vstrChatColors; CVector vecChanIDsCurConChan; - CVector > vecvecdGains; - CVector > vecvecdPannings; - CVector > vecvecsData; + CVector > vecvecfGains; + CVector > vecvecfPannings; + CVector > vecvecfData; CVector vecNumAudioChannels; CVector vecNumFrameSizeConvBlocks; CVector vecUseDoubleSysFraSizeConvBuf; CVector vecAudioComprType; - CVector > vecvecsSendData; - CVector > vecvecsIntermediateProcBuf; + CVector > vecvecfSendData; + CVector > vecvecfIntermediateProcBuf; CVector > vecvecbyCodedData; // Channel levels @@ -404,11 +404,11 @@ class CServer : void Stopped(); void ClientDisconnected ( 
const int iChID ); void SvrRegStatusChanged(); - void AudioFrame ( const int iChID, - const QString stChName, - const CHostAddress RecHostAddr, - const int iNumAudChan, - const CVector vecsData ); + void AudioFrame ( const int iChID, + const QString stChName, + const CHostAddress RecHostAddr, + const int iNumAudChan, + const CVector vecfData ); void CLVersionAndOSReceived ( CHostAddress InetAddr, COSUtil::EOpSystemType eOSType, @@ -506,4 +506,4 @@ public slots: void OnHandledSignal ( int sigNum ); }; -Q_DECLARE_METATYPE(CVector) +Q_DECLARE_METATYPE ( CVector ) diff --git a/src/soundbase.cpp b/src/soundbase.cpp index 51382dea2a..04ad4f9710 100755 --- a/src/soundbase.cpp +++ b/src/soundbase.cpp @@ -27,7 +27,7 @@ /* Implementation *************************************************************/ CSoundBase::CSoundBase ( const QString& strNewSystemDriverTechniqueName, - void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + void (*fpNewProcessCallback) ( CVector& vfData, void* pParg ), void* pParg, const int iNewCtrlMIDIChannel ) : fpProcessCallback ( fpNewProcessCallback ), @@ -249,7 +249,7 @@ printf ( "\n" ); { // we are assuming that the controller number is the same // as the audio fader index and the range is 0-127 - const int iFaderLevel = static_cast ( static_cast ( + const int iFaderLevel = static_cast ( static_cast ( qMin ( vMIDIPaketBytes[2], uint8_t ( 127 ) ) ) / 127 * AUD_MIX_FADER_MAX ); // Behringer X-TOUCH: offset of 0x46 diff --git a/src/soundbase.h b/src/soundbase.h index 8182a37239..dc92cb75d8 100755 --- a/src/soundbase.h +++ b/src/soundbase.h @@ -51,7 +51,7 @@ class CSoundBase : public QThread public: CSoundBase ( const QString& strNewSystemDriverTechniqueName, - void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + void (*fpNewProcessCallback) ( CVector& vfData, void* pParg ), void* pParg, const int iNewCtrlMIDIChannel ); @@ -121,13 +121,13 @@ class CSoundBase : public QThread } // function pointer to callback function - void 
(*fpProcessCallback) ( CVector& psData, void* arg ); + void (*fpProcessCallback) ( CVector& vfData, void* arg ); void* pProcessCallbackArg; // callback function call for derived classes - void ProcessCallback ( CVector& psData ) + void ProcessCallback ( CVector& vfData ) { - (*fpProcessCallback) ( psData, pProcessCallbackArg ); + (*fpProcessCallback) ( vfData, pProcessCallbackArg ); } void ParseMIDIMessage ( const CVector& vMIDIPaketBytes ); diff --git a/src/util.cpp b/src/util.cpp index 9627388959..ed266f5ecb 100755 --- a/src/util.cpp +++ b/src/util.cpp @@ -28,7 +28,7 @@ /* Implementation *************************************************************/ // Input level meter implementation -------------------------------------------- -void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, +void CStereoSignalLevelMeter::Update ( const CVector& vecfAudio, const int iMonoBlockSizeSam, const bool bIsStereoIn ) { @@ -36,14 +36,14 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, // // Speed optimization: // - we only make use of the negative values and ignore the positive ones (since - // int16 has range {-32768, 32767}) -> we do not need to call the fabs() function + // float has the range {-1, 1}) -> we do not need to call the fabsf() function // - we only evaluate every third sample // // With these speed optimizations we might loose some information in // special cases but for the average music signals the following code // should give good results. 
- short sMinLOrMono = 0; - short sMinR = 0; + float fMinLOrMono = 0; + float fMinR = 0; if ( bIsStereoIn ) { @@ -51,14 +51,14 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, for ( int i = 0; i < 2 * iMonoBlockSizeSam; i += 6 ) // 2 * 3 = 6 -> stereo { // left (or mono) and right channel - sMinLOrMono = std::min ( sMinLOrMono, vecsAudio[i] ); - sMinR = std::min ( sMinR, vecsAudio[i + 1] ); + fMinLOrMono = fminf ( fMinLOrMono, vecfAudio[i] ); + fMinR = fminf ( fMinR, vecfAudio[i + 1] ); } // in case of mono out use minimum of both channels if ( !bIsStereoOut ) { - sMinLOrMono = std::min ( sMinLOrMono, sMinR ); + fMinLOrMono = fminf ( fMinLOrMono, fMinR ); } } else @@ -66,66 +66,71 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, // mono in for ( int i = 0; i < iMonoBlockSizeSam; i += 3 ) { - sMinLOrMono = std::min ( sMinLOrMono, vecsAudio[i] ); + fMinLOrMono = fminf ( fMinLOrMono, vecfAudio[i] ); } } // apply smoothing, if in stereo out mode, do this for two channels - dCurLevelLOrMono = UpdateCurLevel ( dCurLevelLOrMono, -sMinLOrMono ); + fCurLevelLOrMono = UpdateCurLevel ( fCurLevelLOrMono, -fMinLOrMono ); if ( bIsStereoOut ) { - dCurLevelR = UpdateCurLevel ( dCurLevelR, -sMinR ); + fCurLevelR = UpdateCurLevel ( fCurLevelR, -fMinR ); } } -double CStereoSignalLevelMeter::UpdateCurLevel ( double dCurLevel, - const double dMax ) +float CStereoSignalLevelMeter::UpdateCurLevel ( float fCurLevel, + const float fMax ) { // decrease max with time - if ( dCurLevel >= METER_FLY_BACK ) + if ( fCurLevel >= METER_FLY_BACK ) { - dCurLevel *= dSmoothingFactor; + fCurLevel *= fSmoothingFactor; } else { - dCurLevel = 0; + fCurLevel = 0; } // update current level -> only use maximum - if ( dMax > dCurLevel ) + if ( fMax > fCurLevel ) { - return dMax; + return fMax; } else { - return dCurLevel; + return fCurLevel; } } -double CStereoSignalLevelMeter::CalcLogResultForMeter ( const double& dLinearLevel ) +float 
CStereoSignalLevelMeter::CalcLogResultForMeter ( const float& fLinearLevel ) { - const double dNormLevel = dLinearLevel / _MAXSHORT; + // With #544 by hselasky the signal processing was changed from short to float. + // With the code using short we defined the clipping to be if a value of 32768 + // was detected. We did this by normalizing by 32767 and the clipping was then + // a bit larger than 1. To get the same behavior also with the new float type, + // we have to multiply with a factor 32768 / 32767 = 1.000030518509476. + const float fNormLevel = fLinearLevel * 1.000030518509476f; // logarithmic measure - double dLevelForMeterdB = -100000.0; // large negative value + float fLevelForMeterdB = -100000.0f; // large negative value - if ( dNormLevel > 0 ) + if ( fNormLevel > 0 ) { - dLevelForMeterdB = 20.0 * log10 ( dNormLevel ); + fLevelForMeterdB = 20.0f * log10f ( fNormLevel ); } // map to signal level meter (linear transformation of the input // level range to the level meter range) - dLevelForMeterdB -= LOW_BOUND_SIG_METER; - dLevelForMeterdB *= NUM_STEPS_LED_BAR / ( UPPER_BOUND_SIG_METER - LOW_BOUND_SIG_METER ); + fLevelForMeterdB -= LOW_BOUND_SIG_METER; + fLevelForMeterdB *= NUM_STEPS_LED_BAR / ( UPPER_BOUND_SIG_METER - LOW_BOUND_SIG_METER ); - if ( dLevelForMeterdB < 0 ) + if ( fLevelForMeterdB < 0 ) { - dLevelForMeterdB = 0; + fLevelForMeterdB = 0; } - return dLevelForMeterdB; + return fLevelForMeterdB; } @@ -194,21 +199,21 @@ uint32_t CCRC::GetCRC() void CAudioReverb::Init ( const EAudChanConf eNAudioChannelConf, const int iNStereoBlockSizeSam, const int iSampleRate, - const double rT60 ) + const float rT60 ) { // store parameters eAudioChannelConf = eNAudioChannelConf; iStereoBlockSizeSam = iNStereoBlockSizeSam; // delay lengths for 44100 Hz sample rate - int lengths[9] = { 1116, 1356, 1422, 1617, 225, 341, 441, 211, 179 }; - const double scaler = static_cast ( iSampleRate ) / 44100.0; + int lengths[9] = { 1116, 1356, 1422, 1617, 225, 341, 441, 211, 179 
}; + const float scaler = static_cast ( iSampleRate ) / 44100.0f; if ( scaler != 1.0 ) { for ( int i = 0; i < 9; i++ ) { - int delay = static_cast ( floor ( scaler * lengths[i] ) ); + int delay = static_cast ( floorf ( scaler * lengths[i] ) ); if ( ( delay & 1 ) == 0 ) { @@ -232,7 +237,7 @@ void CAudioReverb::Init ( const EAudChanConf eNAudioChannelConf, for ( int i = 0; i < 4; i++ ) { combDelays[i].Init ( lengths[i] ); - combFilters[i].setPole ( 0.2 ); + combFilters[i].setPole ( 0.2f ); } setT60 ( rT60, iSampleRate ); @@ -255,7 +260,9 @@ bool CAudioReverb::isPrime ( const int number ) if ( number & 1 ) { - for ( int i = 3; i < static_cast ( sqrt ( static_cast ( number ) ) ) + 1; i += 2 ) + const int iMax = static_cast ( sqrtf ( static_cast ( number ) ) ) + 1; + + for ( int i = 3; i < iMax; i += 2 ) { if ( ( number % i ) == 0 ) { @@ -289,37 +296,37 @@ void CAudioReverb::Clear() outLeftDelay.Reset ( 0 ); } -void CAudioReverb::setT60 ( const double rT60, - const int iSampleRate ) +void CAudioReverb::setT60 ( const float rT60, + const int iSampleRate ) { // set the reverberation T60 decay time for ( int i = 0; i < 4; i++ ) { - combCoefficient[i] = pow ( 10.0, static_cast ( -3.0 * + combCoefficient[i] = powf ( 10.0f, static_cast ( -3.0f * combDelays[i].Size() / ( rT60 * iSampleRate ) ) ); } } -void CAudioReverb::COnePole::setPole ( const double dPole ) +void CAudioReverb::COnePole::setPole ( const float dPole ) { // calculate IIR filter coefficients based on the pole value dA = -dPole; - dB = 1.0 - dPole; + dB = 1.0f - dPole; } -double CAudioReverb::COnePole::Calc ( const double dIn ) +float CAudioReverb::COnePole::Calc ( const float fIn ) { // calculate IIR filter - dLastSample = dB * dIn - dA * dLastSample; + dLastSample = dB * fIn - dA * dLastSample; return dLastSample; } -void CAudioReverb::Process ( CVector& vecsStereoInOut, - const bool bReverbOnLeftChan, - const double dAttenuation ) +void CAudioReverb::Process ( CVector& vecfStereoInOut, + const bool 
bReverbOnLeftChan, + const float fAttenuation ) { - double dMixedInput, temp, temp0, temp1, temp2; + float fMixedInput, temp, temp0, temp1, temp2; for ( int i = 0; i < iStereoBlockSizeSam; i += 2 ) { @@ -327,23 +334,23 @@ void CAudioReverb::Process ( CVector& vecsStereoInOut, // shall be input for the right channel) if ( eAudioChannelConf == CC_STEREO ) { - dMixedInput = 0.5 * ( vecsStereoInOut[i] + vecsStereoInOut[i + 1] ); + fMixedInput = 0.5f * ( vecfStereoInOut[i] + vecfStereoInOut[i + 1] ); } else { if ( bReverbOnLeftChan ) { - dMixedInput = vecsStereoInOut[i]; + fMixedInput = vecfStereoInOut[i]; } else { - dMixedInput = vecsStereoInOut[i + 1]; + fMixedInput = vecfStereoInOut[i + 1]; } } temp = allpassDelays[0].Get(); temp0 = allpassCoefficient * temp; - temp0 += dMixedInput; + temp0 += fMixedInput; allpassDelays[0].Add ( temp0 ); temp0 = - ( allpassCoefficient * temp0 ) + temp; @@ -359,17 +366,17 @@ void CAudioReverb::Process ( CVector& vecsStereoInOut, allpassDelays[2].Add ( temp2 ); temp2 = - ( allpassCoefficient * temp2 ) + temp; - const double temp3 = temp2 + combFilters[0].Calc ( combCoefficient[0] * combDelays[0].Get() ); - const double temp4 = temp2 + combFilters[1].Calc ( combCoefficient[1] * combDelays[1].Get() ); - const double temp5 = temp2 + combFilters[2].Calc ( combCoefficient[2] * combDelays[2].Get() ); - const double temp6 = temp2 + combFilters[3].Calc ( combCoefficient[3] * combDelays[3].Get() ); + const float temp3 = temp2 + combFilters[0].Calc ( combCoefficient[0] * combDelays[0].Get() ); + const float temp4 = temp2 + combFilters[1].Calc ( combCoefficient[1] * combDelays[1].Get() ); + const float temp5 = temp2 + combFilters[2].Calc ( combCoefficient[2] * combDelays[2].Get() ); + const float temp6 = temp2 + combFilters[3].Calc ( combCoefficient[3] * combDelays[3].Get() ); combDelays[0].Add ( temp3 ); combDelays[1].Add ( temp4 ); combDelays[2].Add ( temp5 ); combDelays[3].Add ( temp6 ); - const double filtout = temp3 + temp4 + temp5 + temp6; 
+ const float filtout = temp3 + temp4 + temp5 + temp6; outLeftDelay.Add ( filtout ); outRightDelay.Add ( filtout ); @@ -378,16 +385,16 @@ void CAudioReverb::Process ( CVector& vecsStereoInOut, // reverberation effect on both channels) if ( ( eAudioChannelConf == CC_STEREO ) || bReverbOnLeftChan ) { - vecsStereoInOut[i] = Double2Short ( - ( 1.0 - dAttenuation ) * vecsStereoInOut[i] + - 0.5 * dAttenuation * outLeftDelay.Get() ); + vecfStereoInOut[i] = ClipFloat ( + ( 1.0f - fAttenuation ) * vecfStereoInOut[i] + + 0.5f * fAttenuation * outLeftDelay.Get() ); } if ( ( eAudioChannelConf == CC_STEREO ) || !bReverbOnLeftChan ) { - vecsStereoInOut[i + 1] = Double2Short ( - ( 1.0 - dAttenuation ) * vecsStereoInOut[i + 1] + - 0.5 * dAttenuation * outRightDelay.Get() ); + vecfStereoInOut[i + 1] = ClipFloat ( + ( 1.0f - fAttenuation ) * vecfStereoInOut[i + 1] + + 0.5f * fAttenuation * outRightDelay.Get() ); } } } diff --git a/src/util.h b/src/util.h old mode 100755 new mode 100644 index dcf9d56875..029c7d5ecc --- a/src/util.h +++ b/src/util.h @@ -69,27 +69,15 @@ class CClient; // forward declaration of CClient /* Definitions ****************************************************************/ -#define METER_FLY_BACK 2 +#define METER_FLY_BACK 0.00006103515625f // 2 / 32768 #define INVALID_MIDI_CH -1 // invalid MIDI channel definition /* Global functions ***********************************************************/ -// converting double to short -inline short Double2Short ( const double dInput ) +// range check audio samples +static inline float ClipFloat ( const float fInput ) { - // lower bound - if ( dInput < _MINSHORT ) - { - return _MINSHORT; - } - - // upper bound - if ( dInput > _MAXSHORT ) - { - return _MAXSHORT; - } - - return static_cast ( dInput ); + return qBound ( -1.0f, fInput, 1.0f ); } // debug error handling @@ -739,32 +727,32 @@ class CStereoSignalLevelMeter // TODO Calculate smoothing factor from sample rate and frame size (64 or 128 samples frame size). 
// But tests with 128 and 64 samples frame size have shown that the meter fly back // is ok for both numbers of samples frame size with a factor of 0.99. - CStereoSignalLevelMeter ( const bool bNIsStereoOut = true, - const double dNSmoothingFactor = 0.99 ) : - dSmoothingFactor ( dNSmoothingFactor ), bIsStereoOut ( bNIsStereoOut ) { Reset(); } + CStereoSignalLevelMeter ( const bool bNIsStereoOut = true, + const float fNSmoothingFactor = 0.99f ) : + fSmoothingFactor ( fNSmoothingFactor ), bIsStereoOut ( bNIsStereoOut ) { Reset(); } - void Update ( const CVector& vecsAudio, + void Update ( const CVector& vecfAudio, const int iInSize, const bool bIsStereoIn ); - double GetLevelForMeterdBLeftOrMono() { return CalcLogResultForMeter ( dCurLevelLOrMono ); } - double GetLevelForMeterdBRight() { return CalcLogResultForMeter ( dCurLevelR ); } - static double CalcLogResultForMeter ( const double& dLinearLevel ); + float GetLevelForMeterdBLeftOrMono() { return CalcLogResultForMeter ( fCurLevelLOrMono ); } + float GetLevelForMeterdBRight() { return CalcLogResultForMeter ( fCurLevelR ); } + static float CalcLogResultForMeter ( const float& fLinearLevel ); void Reset() { - dCurLevelLOrMono = 0.0; - dCurLevelR = 0.0; + fCurLevelLOrMono = 0.0f; + fCurLevelR = 0.0f; } protected: - double UpdateCurLevel ( double dCurLevel, - const double dMax ); + float UpdateCurLevel ( float fCurLevel, + const float fMax ); - double dCurLevelLOrMono; - double dCurLevelR; - double dSmoothingFactor; - bool bIsStereoOut; + float fCurLevelLOrMono; + float fCurLevelR; + float fSmoothingFactor; + bool bIsStereoOut; }; @@ -1171,35 +1159,35 @@ class CAudioReverb void Init ( const EAudChanConf eNAudioChannelConf, const int iNStereoBlockSizeSam, const int iSampleRate, - const double rT60 = 1.1 ); + const float rT60 = 1.1f ); void Clear(); - void Process ( CVector& vecsStereoInOut, - const bool bReverbOnLeftChan, - const double dAttenuation ); + void Process ( CVector& vecfStereoInOut, + const bool 
bReverbOnLeftChan, + const float fAttenuation ); protected: - void setT60 ( const double rT60, const int iSampleRate ); + void setT60 ( const float rT60, const int iSampleRate ); bool isPrime ( const int number ); class COnePole { public: COnePole() : dA ( 0 ), dB ( 0 ) { Reset(); } - void setPole ( const double dPole ); - double Calc ( const double dIn ); - void Reset() { dLastSample = 0; } + void setPole ( const float dPole ); + float Calc ( const float dIn ); + void Reset() { dLastSample = 0; } protected: - double dA; - double dB; - double dLastSample; + float dA; + float dB; + float dLastSample; }; EAudChanConf eAudioChannelConf; int iStereoBlockSizeSam; - CFIFO allpassDelays[3]; - CFIFO combDelays[4]; + CFIFO allpassDelays[3]; + CFIFO combDelays[4]; COnePole combFilters[4]; CFIFO outLeftDelay; CFIFO outRightDelay; diff --git a/src/vstmain.cpp b/src/vstmain.cpp index 6044c909f5..0fd55aff4b 100755 --- a/src/vstmain.cpp +++ b/src/vstmain.cpp @@ -101,8 +101,8 @@ void CLlconVST::processReplacing ( float** pvIn, // copy input data for ( i = 0, j = 0; i < iNumSamples; i++, j += 2 ) { - Client.GetSound()->vecsTmpAudioSndCrdStereo[j] = pfIn0[i]; - Client.GetSound()->vecsTmpAudioSndCrdStereo[j + 1] = pfIn1[i]; + Client.GetSound()->vecfTmpAudioSndCrdStereo[j] = pfIn0[i]; + Client.GetSound()->vecfTmpAudioSndCrdStereo[j + 1] = pfIn1[i]; } // call processing callback function @@ -111,7 +111,7 @@ void CLlconVST::processReplacing ( float** pvIn, // copy output data for ( i = 0, j = 0; i < iNumSamples; i++, j += 2 ) { - pfOut0[i] = Client.GetSound()->vecsTmpAudioSndCrdStereo[j]; - pfOut1[i] = Client.GetSound()->vecsTmpAudioSndCrdStereo[j + 1]; + pfOut0[i] = Client.GetSound()->vecfTmpAudioSndCrdStereo[j]; + pfOut1[i] = Client.GetSound()->vecfTmpAudioSndCrdStereo[j + 1]; } } diff --git a/src/vstsound.h b/src/vstsound.h index 04ea94a675..7709cf3e92 100755 --- a/src/vstsound.h +++ b/src/vstsound.h @@ -34,26 +34,26 @@ class CSound : public CSoundBase { public: - CSound ( void 
(*fpNewCallback) ( CVector& psData, void* arg ), void* arg ) : + CSound ( void (*fpNewCallback) ( CVector& vfData, void* arg ), void* arg ) : CSoundBase ( true, fpNewCallback, arg ), iVSTMonoBufferSize ( 0 ) {} // special VST functions void SetMonoBufferSize ( const int iNVBS ) { iVSTMonoBufferSize = iNVBS; } void VSTProcessCallback() { - CSoundBase::ProcessCallback ( vecsTmpAudioSndCrdStereo ); + CSoundBase::ProcessCallback ( vecfTmpAudioSndCrdStereo ); } virtual int Init ( const int ) { // init base class CSoundBase::Init ( iVSTMonoBufferSize ); - vecsTmpAudioSndCrdStereo.Init ( 2 * iVSTMonoBufferSize /* stereo */); + vecfTmpAudioSndCrdStereo.Init ( 2 * iVSTMonoBufferSize /* stereo */); return iVSTMonoBufferSize; } // this vector must be accessible from the outside (quick hack solution) - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; protected: int iVSTMonoBufferSize; diff --git a/windows/sound.cpp b/windows/sound.cpp index f660afd63f..230f208745 100755 --- a/windows/sound.cpp +++ b/windows/sound.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2004-2020 * * Author(s): - * Volker Fischer + * Volker Fischer, hselasky * * Description: * Sound card interface for Windows operating systems @@ -149,9 +149,6 @@ QString CSound::CheckDeviceCapabilities() lNumOutChan = MAX_NUM_IN_OUT_CHANNELS; } - // query channel infos for all available input channels - bool bInputChMixingSupported = true; - for ( int i = 0; i < lNumInChan; i++ ) { // setup for input channels @@ -174,11 +171,6 @@ QString CSound::CheckDeviceCapabilities() // store the name of the channel and check if channel mixing is supported channelInputName[i] = channelInfosInput[i].name; - - if ( !CheckSampleTypeSupportedForCHMixing ( channelInfosInput[i].type ) ) - { - bInputChMixingSupported = false; - } } // query channel infos for all available output channels @@ -204,7 +196,7 @@ QString CSound::CheckDeviceCapabilities() } // special case with 4 input channels: support adding channels - if ( ( 
lNumInChan == 4 ) && bInputChMixingSupported ) + if ( lNumInChan == 4 ) { // add four mixed channels (i.e. 4 normal, 4 mixed channels) lNumInChanPlusAddChan = 8; @@ -404,7 +396,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) ASIOSetSampleRate ( SYSTEM_SAMPLE_RATE_HZ ); // create memory for intermediate audio buffer - vecsMultChanAudioSndCrd.Init ( iASIOBufferSizeStereo ); + vecfMultChanAudioSndCrd.Init ( iASIOBufferSizeStereo ); // create and activate ASIO buffers (buffer size in samples), // dispose old buffers (if any) @@ -483,7 +475,7 @@ void CSound::Stop() } } -CSound::CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -588,519 +580,421 @@ bool CSound::CheckSampleTypeSupported ( const ASIOSampleType SamType ) ( SamType == ASIOSTInt32MSB24 ) ); } -bool CSound::CheckSampleTypeSupportedForCHMixing ( const ASIOSampleType SamType ) +// Might want to comment that use of double in factors below avoids nasty conversion surprises +static constexpr double FACTOR16 = 32767.0; +static constexpr double FACTOR16_INV = 1.0 / 32768.0; // Notice diff in last digit from other factor, ala Port Audio + +struct sample16LSB { - // check for supported sample types for audio channel mixing (see bufferSwitch) - return ( ( SamType == ASIOSTInt16LSB ) || - ( SamType == ASIOSTInt24LSB ) || - ( SamType == ASIOSTInt32LSB ) ); -} + int16_t data[1]; -void CSound::bufferSwitch ( long index, ASIOBool ) + float Get() const + { + return data[0] * FACTOR16_INV; + } + + void Put ( const float value ) + { + data[0] = static_cast ( value * FACTOR16 ); + } +}; + +struct sample16MSB +{ + uint8_t data[2]; + + float Get() const + { + const int16_t temp = data[1] | ( data[0] << 8 ); + + return temp * FACTOR16_INV; + } + + void Put ( const float value ) + { + const int16_t temp = static_cast ( value * FACTOR16 ); + + data[0] = static_cast ( temp >> 8 ); + 
data[1] = static_cast ( temp ); + } +}; + +// Might want to comment that use of double in factors below avoids nasty conversion surprises +static constexpr double FACTOR24 = 2147483647.0; +static constexpr double FACTOR24_INV = 1.0 / 2147483648.0; // Notice diff in last digit from other factor, ala Port Audio + +struct sample24LSB +{ + uint8_t data[3]; + + float Get() const + { + const int32_t temp = ( data[0] << 8 ) | ( data[1] << 16 ) | ( data[2] << 24 ); + + return temp * FACTOR24_INV; + } + + void Put ( const float value ) + { + const int32_t temp = static_cast ( value * FACTOR24 ); + + data[0] = static_cast ( temp >> 8 ); + data[1] = static_cast ( temp >> 16 ); + data[2] = static_cast ( temp >> 24 ); + } +}; + +struct sample24MSB +{ + uint8_t data[3]; + + float Get() const + { + const int32_t temp = ( data[2] << 8 ) | ( data[1] << 16 ) | ( data[0] << 24 ); + + return temp * FACTOR24_INV; + } + + void Put ( const float value ) + { + const int32_t temp = static_cast ( value * FACTOR24 ); + + data[0] = static_cast ( temp >> 24 ); + data[1] = static_cast ( temp >> 16 ); + data[2] = static_cast ( temp >> 8 ); + } +}; + +// Might want to comment that use of double in factors below avoids nasty conversion surprises. 
+static constexpr double FACTOR32 = 2147483647.0; +static constexpr double FACTOR32_INV = 1.0 / 2147483648.0; // Notice diff in last digit from other factor, ala Port Audio + +struct sample32LSB +{ + int32_t data[1]; + + float Get() const + { + return ( data[0] * FACTOR32_INV ); + } + + void Put ( const float value ) + { + data[0] = static_cast ( value * FACTOR32 ); + } +}; + +struct sample32MSB +{ + uint8_t data[4]; + + float Get() const + { + const int32_t temp = ( data[3] << 0 ) | ( data[2] << 8 ) | + ( data[1] << 16 ) | ( data[0] << 24 ); + + return temp * FACTOR32_INV; + } + + void Put ( const float value ) + { + const int32_t temp = static_cast ( value * FACTOR32 ); + + data[0] = static_cast ( temp >> 24 ); + data[1] = static_cast ( temp >> 16 ); + data[2] = static_cast ( temp >> 8 ); + data[3] = static_cast ( temp >> 0 ); + } +}; + +union sampleFloat32Data +{ + uint8_t data[4]; + float value; +}; + +struct sampleFloat32LSB +{ + float data[1]; + + float Get() const + { + return data[0]; + } + + void Put ( const float value ) + { + data[0] = value; + } +}; + +struct sampleFloat32MSB +{ + uint8_t data[4]; + + float Get() const + { + sampleFloat32Data temp; + + temp.data[0] = data[3]; + temp.data[1] = data[2]; + temp.data[2] = data[1]; + temp.data[3] = data[0]; + + return temp.value; + } + + void Put ( const float value ) + { + sampleFloat32Data temp; + + temp.value = value; + + data[0] = temp.data[3]; + data[1] = temp.data[2]; + data[2] = temp.data[1]; + data[3] = temp.data[0]; + } +}; + +union sampleFloat64Data { - int iCurSample; + uint8_t data[8]; + double value; +}; - // get references to class members - int& iASIOBufferSizeMono = pSound->iASIOBufferSizeMono; - CVector& vecsMultChanAudioSndCrd = pSound->vecsMultChanAudioSndCrd; +struct sampleFloat64LSB +{ + double data[1]; + float Get() const + { + return data[0]; + } + + void Put ( const float value ) + { + data[0] = value; + } +}; + +struct sampleFloat64MSB +{ + uint8_t data[8]; + + float Get() const + { 
+ sampleFloat64Data temp; + + temp.data[0] = data[7]; + temp.data[1] = data[6]; + temp.data[2] = data[5]; + temp.data[3] = data[4]; + temp.data[4] = data[3]; + temp.data[5] = data[2]; + temp.data[6] = data[1]; + temp.data[7] = data[0]; + + return temp.value; + } + + void Put ( const float value ) + { + sampleFloat64Data temp; + + temp.value = value; + + data[0] = temp.data[7]; + data[1] = temp.data[6]; + data[2] = temp.data[5]; + data[3] = temp.data[4]; + data[4] = temp.data[3]; + data[5] = temp.data[2]; + data[6] = temp.data[1]; + data[7] = temp.data[0]; + } +}; + +void CSound::bufferSwitch ( long index, ASIOBool ) +{ // perform the processing for input and output pSound->ASIOMutex.lock(); // get mutex lock { // CAPTURE ------------------------------------------------------------- for ( int i = 0; i < NUM_IN_OUT_CHANNELS; i++ ) { - int iSelCH, iSelAddCH; + int iSelAddCH; + int iSelCH; - GetSelCHAndAddCH ( pSound->vSelectedInputChannels[i], pSound->lNumInChan, - iSelCH, iSelAddCH ); + GetSelCHAndAddCH ( pSound->vSelectedInputChannels[i], + pSound->lNumInChan, + iSelCH, + iSelAddCH ); // copy new captured block in thread transfer buffer (copy // mono data interleaved in stereo buffer) switch ( pSound->channelInfosInput[iSelCH].type ) { case ASIOSTInt16LSB: - { - // no type conversion required, just copy operation - int16_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = pASIOBuf[iCurSample]; - } - - if ( iSelAddCH >= 0 ) - { - // mix input channels case: - int16_t* pASIOBufAdd = static_cast ( pSound->bufferInfos[iSelAddCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Double2Short ( (double) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (double) pASIOBufAdd[iCurSample] ); - } - } + pSound->bufferSwitchImport ( 1, 
index, i ); break; - } - - case ASIOSTInt24LSB: - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, 3 ); - iCurSam >>= 8; - vecsMultChanAudioSndCrd[2 * iCurSample + i] = static_cast ( iCurSam ); - } - - if ( iSelAddCH >= 0 ) - { - // mix input channels case: - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelAddCH].buffers[index] ) + iCurSample * 3, 3 ); - iCurSam >>= 8; - - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Double2Short ( (double) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (double) static_cast ( iCurSam ) ); - } - } + case ASIOSTInt16MSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB: - { - int32_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( pASIOBuf[iCurSample] >> 16 ); - } - - if ( iSelAddCH >= 0 ) - { - // mix input channels case: - int32_t* pASIOBufAdd = static_cast ( pSound->bufferInfos[iSelAddCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Double2Short ( (double) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (double) static_cast ( pASIOBufAdd[iCurSample] >> 16 ) ); - } - } + case ASIOSTInt24LSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - } - case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] * _MAXSHORT ); - } + case ASIOSTInt24MSB: + 
pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTFloat64LSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] * _MAXSHORT ); - } + case ASIOSTInt32LSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFF ); - } + case ASIOSTInt32MSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0x3FFFF ) >> 2 ); - } + case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFFF ) >> 4 ); - } + case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast 
( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFFFF ) >> 8 ); - } + case ASIOSTFloat64LSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt16MSB: -// NOT YET TESTED - // flip bits - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Flip16Bits ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] ) )[iCurSample] ); - } + case ASIOSTFloat64MSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt24MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // because the bits are flipped, we do not have to perform the - // shift by 8 bits - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, 3 ); - - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Flip16Bits ( static_cast ( iCurSam ) ); - } + case ASIOSTInt32LSB16: + pSound->bufferSwitchImport ( 1 << 16, index, i ); break; - case ASIOSTInt32MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // flip bits and convert to 16 bit - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) >> 16 ); - } + case ASIOSTInt32MSB16: + pSound->bufferSwitchImport ( 1 << 16, index, i ); break; - case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) ) * _MAXSHORT ); - } + case ASIOSTInt32LSB18: + pSound->bufferSwitchImport ( 1 
<< 14, index, i ); break; - case ASIOSTFloat64MSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - Flip64Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) ) * _MAXSHORT ); - } + case ASIOSTInt32MSB18: + pSound->bufferSwitchImport ( 1 << 14, index, i ); break; - case ASIOSTInt32MSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFF ); - } + case ASIOSTInt32LSB20: + pSound->bufferSwitchImport ( 1 << 12, index, i ); break; - case ASIOSTInt32MSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0x3FFFF ) >> 2 ); - } + case ASIOSTInt32MSB20: + pSound->bufferSwitchImport ( 1 << 12, index, i ); break; - case ASIOSTInt32MSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFFF ) >> 4 ); - } + case ASIOSTInt32LSB24: + pSound->bufferSwitchImport ( 1 << 8, index, i ); break; - case ASIOSTInt32MSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - 
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFFFF ) >> 8 ); - } + case ASIOSTInt32MSB24: + pSound->bufferSwitchImport ( 1 << 8, index, i ); break; } } // call processing callback function - pSound->ProcessCallback ( vecsMultChanAudioSndCrd ); + pSound->ProcessCallback ( pSound->vecfMultChanAudioSndCrd ); // PLAYBACK ------------------------------------------------------------ for ( int i = 0; i < NUM_IN_OUT_CHANNELS; i++ ) { - const int iSelCH = pSound->lNumInChan + pSound->vSelectedOutputChannels[i]; - // copy data from sound card in output buffer (copy // interleaved stereo data in mono sound card buffer) switch ( pSound->channelInfosOutput[pSound->vSelectedOutputChannels[i]].type ) { case ASIOSTInt16LSB: - { - // no type conversion required, just copy operation - int16_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - pASIOBuf[iCurSample] = vecsMultChanAudioSndCrd[2 * iCurSample + i]; - } + pSound->bufferSwitchExport ( 1, index, i ); break; - } - case ASIOSTInt24LSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert current sample in 24 bit format - int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - iCurSam <<= 8; - - memcpy ( ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, &iCurSam, 3 ); - } + case ASIOSTInt16MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB: - { - int32_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - pASIOBuf[iCurSample] = ( iCurSam << 16 ); - } + case ASIOSTInt24LSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - } - case ASIOSTFloat32LSB: // IEEE 
754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const float fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - fCurSam / _MAXSHORT; - } + case ASIOSTInt24MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTFloat64LSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const double fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - fCurSam / _MAXSHORT; - } + case ASIOSTInt32LSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - iCurSam; - } + case ASIOSTInt32MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 2 ); - } + case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 
bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 4 ); - } + case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 8 ); - } + case ASIOSTFloat64LSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt16MSB: -// NOT YET TESTED - // flip bits - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - ( (int16_t*) pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip16Bits ( vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - } + case ASIOSTFloat64MSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt24MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // because the bits are flipped, we do not have to perform the - // shift by 8 bits - int32_t iCurSam = static_cast ( Flip16Bits ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ) ); - - memcpy ( ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, &iCurSam, 3 ); - } + case ASIOSTInt32LSB16: + pSound->bufferSwitchExport ( 1 << 16, index, i ); break; - case ASIOSTInt32MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit and flip bits - int iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast 
( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 16 ); - } + case ASIOSTInt32MSB16: + pSound->bufferSwitchExport ( 1 << 16, index, i ); break; - case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const float fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - static_cast ( Flip32Bits ( static_cast ( - fCurSam / _MAXSHORT ) ) ); - } + case ASIOSTInt32LSB18: + pSound->bufferSwitchExport ( 1 << 14, index, i ); break; - case ASIOSTFloat64MSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const double fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - static_cast ( Flip64Bits ( static_cast ( - fCurSam / _MAXSHORT ) ) ); - } + case ASIOSTInt32MSB18: + pSound->bufferSwitchExport ( 1 << 14, index, i ); break; - case ASIOSTInt32MSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam ); - } + case ASIOSTInt32LSB20: + pSound->bufferSwitchExport ( 1 << 12, index, i ); break; - case ASIOSTInt32MSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] 
)[iCurSample] = - Flip32Bits ( iCurSam << 2 ); - } + case ASIOSTInt32MSB20: + pSound->bufferSwitchExport ( 1 << 12, index, i ); break; - case ASIOSTInt32MSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 4 ); - } + case ASIOSTInt32LSB24: + pSound->bufferSwitchExport ( 1 << 8, index, i ); break; - case ASIOSTInt32MSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 8 ); - } + case ASIOSTInt32MSB24: + pSound->bufferSwitchExport ( 1 << 8, index, i ); break; } } + // Finally if the driver supports the ASIOOutputReady() optimization, // do it here, all data are in place ----------------------------------- if ( pSound->bASIOPostOutput ) @@ -1139,57 +1033,3 @@ long CSound::asioMessages ( long selector, return ret; } - -int16_t CSound::Flip16Bits ( const int16_t iIn ) -{ - uint16_t iMask = ( 1 << 15 ); - int16_t iOut = 0; - - for ( unsigned int i = 0; i < 16; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} - -int32_t CSound::Flip32Bits ( const int32_t iIn ) -{ - uint32_t iMask = ( static_cast ( 1 ) << 31 ); - int32_t iOut = 0; - - for ( unsigned int i = 0; i < 32; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 
1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} - -int64_t CSound::Flip64Bits ( const int64_t iIn ) -{ - uint64_t iMask = ( static_cast ( 1 ) << 63 ); - int64_t iOut = 0; - - for ( unsigned int i = 0; i < 64; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} diff --git a/windows/sound.h b/windows/sound.h index a73e99199e..43fbef85d2 100755 --- a/windows/sound.h +++ b/windows/sound.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2020 * * Author(s): - * Volker Fischer + * Volker Fischer, hselasky * ****************************************************************************** * @@ -46,7 +46,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -78,33 +78,27 @@ class CSound : public CSoundBase virtual double GetInOutLatencyMs() { return dInOutLatencyMs; } protected: - virtual QString LoadAndInitializeDriver ( int iIdx, - bool bOpenDriverSetup ); - virtual void UnloadCurrentDriver(); - int GetActualBufferSize ( const int iDesiredBufferSizeMono ); - QString CheckDeviceCapabilities(); - bool CheckSampleTypeSupported ( const ASIOSampleType SamType ); - bool CheckSampleTypeSupportedForCHMixing ( const ASIOSampleType SamType ); - void ResetChannelMapping(); - - int iASIOBufferSizeMono; - int iASIOBufferSizeStereo; - - long lNumInChan; - long lNumInChanPlusAddChan; // includes additional "added" channels - long lNumOutChan; - double dInOutLatencyMs; - CVector vSelectedInputChannels; - CVector vSelectedOutputChannels; - - CVector vecsMultChanAudioSndCrd; - - QMutex ASIOMutex; - - // utility functions - static int16_t Flip16Bits ( const int16_t iIn ); - static int32_t Flip32Bits ( const int32_t iIn ); - static int64_t 
Flip64Bits ( const int64_t iIn ); + virtual QString LoadAndInitializeDriver ( int iIdx, + bool bOpenDriverSetup ); + virtual void UnloadCurrentDriver(); + int GetActualBufferSize ( const int iDesiredBufferSizeMono ); + QString CheckDeviceCapabilities(); + bool CheckSampleTypeSupported ( const ASIOSampleType SamType ); + void ResetChannelMapping(); + + int iASIOBufferSizeMono; + int iASIOBufferSizeStereo; + + long lNumInChan; + long lNumInChanPlusAddChan; // includes additional "added" channels + long lNumOutChan; + double dInOutLatencyMs; + CVector vSelectedInputChannels; + CVector vSelectedOutputChannels; + + CVector vecfMultChanAudioSndCrd; + + QMutex ASIOMutex; // audio hardware buffer info struct sHWBufferInfo @@ -124,6 +118,51 @@ class CSound : public CSoundBase bool bASIOPostOutput; ASIOCallbacks asioCallbacks; + // templates + template void bufferSwitchImport ( const int iGain, + const long index, + const int iCH) + { + int iSelAddCH; + int iSelCH; + + GetSelCHAndAddCH ( vSelectedInputChannels[iCH], lNumInChan, iSelCH, iSelAddCH ); + + const T* pASIOBuf = static_cast ( bufferInfos[iSelCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) + { + vecfMultChanAudioSndCrd[2 * iCurSample + iCH] = pASIOBuf[iCurSample].Get() * iGain; + } + + if ( iSelAddCH >= 0 ) + { + // mix input channels case + const T* pASIOBufAdd = static_cast ( bufferInfos[iSelAddCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) + { + vecfMultChanAudioSndCrd[2 * iCurSample + iCH] = + ClipFloat ( vecfMultChanAudioSndCrd[2 * iCurSample + iCH] + + pASIOBufAdd[iCurSample].Get() * iGain ); + } + } + } + + template void bufferSwitchExport ( const int iGain, + const long index, + const int iCH ) + { + const int iSelCH = lNumInChan + vSelectedOutputChannels[iCH]; + + T* pASIOBuf = static_cast ( bufferInfos[iSelCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < iASIOBufferSizeMono; 
iCurSample++ ) + { + pASIOBuf[iCurSample].Put ( vecfMultChanAudioSndCrd[2 * iCurSample + iCH] / iGain ); + } + } + // callbacks static void bufferSwitch ( long index, ASIOBool processNow ); static ASIOTime* bufferSwitchTimeInfo ( ASIOTime* timeInfo, long index, ASIOBool processNow );