diff --git a/ChangeLog b/ChangeLog index 272802dc79..b96b7fb341 100644 --- a/ChangeLog +++ b/ChangeLog @@ -21,6 +21,9 @@ - bug fix: reduced server list is displayed instead of the normal list (#657) +TODO BUG FIX: the new Windows audio interface gives ugly clipping (which the old one does not)!!! + +TODO switched all remaining audio sample processing to use floats, coded by hselasky (#544) diff --git a/android/sound.cpp b/android/sound.cpp index 19bb79c41f..d082680ff2 100644 --- a/android/sound.cpp +++ b/android/sound.cpp @@ -27,7 +27,7 @@ /* Implementation *************************************************************/ -CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , diff --git a/android/sound.h b/android/sound.h index 2ef2af74f1..7d932636e8 100644 --- a/android/sound.h +++ b/android/sound.h @@ -34,7 +34,7 @@ class CSound : public CSoundBase, public oboe::AudioStreamCallback { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , diff --git a/libs/oboe b/libs/oboe index 10bb6fa3e3..d25993c5db 160000 --- a/libs/oboe +++ b/libs/oboe @@ -1 +1 @@ -Subproject commit 10bb6fa3e3dc5dbe225df0b6aa7d3e3794b6b2e2 +Subproject commit d25993c5dbe4750cd8d51a3729b6def6f9d20d77 diff --git a/linux/sound.cpp b/linux/sound.cpp index 995381feef..cfc30a8502 100755 --- a/linux/sound.cpp +++ b/linux/sound.cpp @@ -228,7 +228,7 @@ int CSound::Init ( const int /* iNewPrefMonoBufferSize */ ) iJACKBufferSizeStero = 2 * iJACKBufferSizeMono; // create memory for intermediate audio buffer - vecsTmpAudioSndCrdStereo.Init ( iJACKBufferSizeStero ); + vecfTmpAudioSndCrdStereo.Init ( iJACKBufferSizeStero ); return iJACKBufferSizeMono; } @@ -259,16 +259,13 @@ int CSound::process ( jack_nframes_t 
nframes, void* arg ) { for ( i = 0; i < pSound->iJACKBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i] = - (short) ( in_left[i] * _MAXSHORT ); - - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = - (short) ( in_right[i] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = in_left[i]; + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = in_right[i]; } } // call processing callback function - pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo ); + pSound->ProcessCallback ( pSound->vecfTmpAudioSndCrdStereo ); // get output data pointer jack_default_audio_sample_t* out_left = @@ -285,10 +282,10 @@ int CSound::process ( jack_nframes_t nframes, void* arg ) for ( i = 0; i < pSound->iJACKBufferSizeMono; i++ ) { out_left[i] = (jack_default_audio_sample_t) - pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT; + pSound->vecfTmpAudioSndCrdStereo[2 * i]; out_right[i] = (jack_default_audio_sample_t) - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT; + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1]; } } } diff --git a/linux/sound.h b/linux/sound.h index 2336b07f26..4de54295bb 100755 --- a/linux/sound.h +++ b/linux/sound.h @@ -60,7 +60,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool bNoAutoJackConnect, @@ -78,7 +78,7 @@ class CSound : public CSoundBase // these variables should be protected but cannot since we want // to access them from the callback function - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; int iJACKBufferSizeMono; int iJACKBufferSizeStero; bool bJackWasShutDown; @@ -111,7 +111,7 @@ class CSound : public CSoundBase Q_OBJECT public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* pParg ), void* pParg, const int iCtrlMIDIChannel, const 
bool , @@ -122,12 +122,12 @@ class CSound : public CSoundBase this, &CSound::OnTimer ); } virtual ~CSound() {} virtual int Init ( const int iNewPrefMonoBufferSize ) { CSoundBase::Init ( iNewPrefMonoBufferSize ); - vecsTemp.Init ( 2 * iNewPrefMonoBufferSize ); + vecfTemp.Init ( 2 * iNewPrefMonoBufferSize ); return iNewPrefMonoBufferSize; } CHighPrecisionTimer HighPrecisionTimer; - CVector vecsTemp; + CVector vecfTemp; public slots: - void OnTimer() { vecsTemp.Reset ( 0 ); if ( IsRunning() ) { ProcessCallback ( vecsTemp ); } } + void OnTimer() { vecfTemp.Reset ( 0 ); if ( IsRunning() ) { ProcessCallback ( vecfTemp ); } } }; #endif // WITH_SOUND diff --git a/mac/sound.cpp b/mac/sound.cpp old mode 100755 new mode 100644 index eeba9a6d97..e314d0f0ed --- a/mac/sound.cpp +++ b/mac/sound.cpp @@ -26,7 +26,7 @@ /* Implementation *************************************************************/ -CSound::CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -848,7 +848,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) iCoreAudioBufferSizeStereo = 2 * iCoreAudioBufferSizeMono; // create memory for intermediate audio buffer - vecsTmpAudioSndCrdStereo.Init ( iCoreAudioBufferSizeStereo ); + vecfTmpAudioSndCrdStereo.Init ( iCoreAudioBufferSizeStereo ); return iCoreAudioBufferSizeMono; } @@ -970,8 +970,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { // copy left and right channels separately - pSound->vecsTmpAudioSndCrdStereo[2 * i] = (short) ( pLeftData[iNumChanPerFrameLeft * i + iSelInInterlChLeft] * _MAXSHORT ); - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = (short) ( pRightData[iNumChanPerFrameRight * i + iSelInInterlChRight] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = pLeftData[iNumChanPerFrameLeft * i + iSelInInterlChLeft]; + 
pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = pRightData[iNumChanPerFrameRight * i + iSelInInterlChRight]; } // add an additional optional channel @@ -982,8 +982,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i] = Float2Short ( - pSound->vecsTmpAudioSndCrdStereo[2 * i] + pLeftData[iNumChanPerFrameLeft * i + iSelAddInInterlChLeft] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i] = ClipFloat ( + pSound->vecfTmpAudioSndCrdStereo[2 * i] + pLeftData[iNumChanPerFrameLeft * i + iSelAddInInterlChLeft] ); } } @@ -994,19 +994,19 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] = Float2Short ( - pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] + pRightData[iNumChanPerFrameRight * i + iSelAddInInterlChRight] * _MAXSHORT ); + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] = ClipFloat ( + pSound->vecfTmpAudioSndCrdStereo[2 * i + 1] + pRightData[iNumChanPerFrameRight * i + iSelAddInInterlChRight] ); } } } else { // incompatible sizes, clear work buffer - pSound->vecsTmpAudioSndCrdStereo.Reset ( 0 ); + pSound->vecfTmpAudioSndCrdStereo.Reset ( 0 ); } // call processing callback function - pSound->ProcessCallback ( pSound->vecsTmpAudioSndCrdStereo ); + pSound->ProcessCallback ( pSound->vecfTmpAudioSndCrdStereo ); } if ( ( inDevice == pSound->CurrentAudioOutputDeviceID ) && outOutputData ) @@ -1028,8 +1028,8 @@ OSStatus CSound::callbackIO ( AudioDeviceID inDevice, for ( int i = 0; i < iCoreAudioBufferSizeMono; i++ ) { // copy left and right channels separately - pLeftData[iNumChanPerFrameLeft * i + iSelOutInterlChLeft] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i] / _MAXSHORT; - pRightData[iNumChanPerFrameRight * i + iSelOutInterlChRight] = (Float32) pSound->vecsTmpAudioSndCrdStereo[2 * i + 1] / _MAXSHORT; + pLeftData[iNumChanPerFrameLeft * i + iSelOutInterlChLeft] = 
(Float32) pSound->vecfTmpAudioSndCrdStereo[2 * i]; + pRightData[iNumChanPerFrameRight * i + iSelOutInterlChRight] = (Float32) pSound->vecfTmpAudioSndCrdStereo[2 * i + 1]; } } } diff --git a/mac/sound.h b/mac/sound.h index ce2eda21c4..c34547b6b1 100755 --- a/mac/sound.h +++ b/mac/sound.h @@ -36,7 +36,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewProcessCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewProcessCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -63,7 +63,7 @@ class CSound : public CSoundBase // these variables should be protected but cannot since we want // to access them from the callback function - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; int iCoreAudioBufferSizeMono; int iCoreAudioBufferSizeStereo; AudioDeviceID CurrentAudioInputDeviceID; diff --git a/src/buffer.h b/src/buffer.h index 3caeb18b9e..f75268c3a3 100755 --- a/src/buffer.h +++ b/src/buffer.h @@ -500,16 +500,16 @@ template class CConvBuf } } - void PutAll ( const CVector& vecsData ) + void PutAll ( const CVector& vecData ) { iGetPos = 0; - std::copy ( vecsData.begin(), - vecsData.begin() + iBufferSize, // note that input vector might be larger then memory size + std::copy ( vecData.begin(), + vecData.begin() + iBufferSize, // note that input vector might be larger then memory size vecMemory.begin() ); } - bool Put ( const CVector& vecsData, + bool Put ( const CVector& vecData, const int iVecSize ) { // calculate the input size and the end position after copying @@ -519,8 +519,8 @@ template class CConvBuf if ( iEnd <= iBufferSize ) { // copy new data in internal buffer - std::copy ( vecsData.begin(), - vecsData.begin() + iVecSize, + std::copy ( vecData.begin(), + vecData.begin() + iVecSize, vecMemory.begin() + iPutPos ); // set buffer pointer one block further @@ -540,7 +540,7 @@ template class CConvBuf return vecMemory; } - void GetAll ( CVector& vecsData, + void GetAll ( 
CVector& vecData, const int iVecSize ) { iPutPos = 0; @@ -548,10 +548,10 @@ template class CConvBuf // copy data from internal buffer in given buffer std::copy ( vecMemory.begin(), vecMemory.begin() + iVecSize, - vecsData.begin() ); + vecData.begin() ); } - bool Get ( CVector& vecsData, + bool Get ( CVector& vecData, const int iVecSize ) { // calculate the input size and the end position after copying @@ -563,7 +563,7 @@ template class CConvBuf // copy new data from internal buffer std::copy ( vecMemory.begin() + iGetPos, vecMemory.begin() + iGetPos + iVecSize, - vecsData.begin() ); + vecData.begin() ); // set buffer pointer one block further iGetPos = iEnd; diff --git a/src/channel.cpp b/src/channel.cpp index 677ac492e7..0259d83284 100755 --- a/src/channel.cpp +++ b/src/channel.cpp @@ -36,7 +36,7 @@ CChannel::CChannel ( const bool bNIsServer ) : bIsEnabled ( false ), bIsServer ( bNIsServer ), iAudioFrameSizeSamples ( DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES ), - SignalLevelMeter ( false, 0.5 ) // server mode with mono out and faster smoothing + SignalLevelMeter ( false, 0.5f ) // server mode with mono out and faster smoothing { // reset network transport properties ResetNetworkTransportProperties(); @@ -675,12 +675,12 @@ void CChannel::PrepAndSendPacket ( CHighPrioSocket* pSocket, } } -double CChannel::UpdateAndGetLevelForMeterdB ( const CVector& vecsAudio, - const int iInSize, - const bool bIsStereoIn ) +float CChannel::UpdateAndGetLevelForMeterdB ( const CVector& vecfAudio, + const int iInSize, + const bool bIsStereoIn ) { // update the signal level meter and immediately return the current value - SignalLevelMeter.Update ( vecsAudio, + SignalLevelMeter.Update ( vecfAudio, iInSize, bIsStereoIn ); diff --git a/src/channel.h b/src/channel.h old mode 100755 new mode 100644 index 530eb08981..918d4ae127 --- a/src/channel.h +++ b/src/channel.h @@ -176,9 +176,9 @@ void CreateReqChannelLevelListMes() { Protocol.CreateReqChannelLevelListMes(); } CNetworkTransportProps 
GetNetworkTransportPropsFromCurrentSettings(); - double UpdateAndGetLevelForMeterdB ( const CVector& vecsAudio, - const int iInSize, - const bool bIsStereoIn ); + float UpdateAndGetLevelForMeterdB ( const CVector& vecfAudio, + const int iInSize, + const bool bIsStereoIn ); protected: bool ProtocolIsEnabled(); diff --git a/src/client.cpp b/src/client.cpp old mode 100755 new mode 100644 index 9a3c2f8815..273bb2a98b --- a/src/client.cpp +++ b/src/client.cpp @@ -810,7 +810,6 @@ void CClient::Init() // since we use double size frame size for OPUS, we have to adjust the frame size factor iSndCrdFrameSizeFactor /= 2; eAudioCompressionType = CT_OPUS; - } } @@ -883,7 +882,7 @@ void CClient::Init() vecCeltData.Init ( iCeltNumCodedBytes ); vecZeros.Init ( iStereoBlockSizeSam, 0 ); - vecsStereoSndCrdMuteStream.Init ( iStereoBlockSizeSam ); + vecfStereoSndCrdMuteStream.Init ( iStereoBlockSizeSam ); fMuteOutStreamGain = 1.0f; @@ -928,13 +927,13 @@ void CClient::Init() bIsInitializationPhase = true; } -void CClient::AudioCallback ( CVector& psData, void* arg ) +void CClient::AudioCallback ( CVector& vfData, void* arg ) { // get the pointer to the object CClient* pMyClientObj = static_cast ( arg ); // process audio data - pMyClientObj->ProcessSndCrdAudioData ( psData ); + pMyClientObj->ProcessSndCrdAudioData ( vfData ); /* // TEST do a soundcard jitter measurement @@ -943,13 +942,13 @@ JitterMeas.Measure(); */ } -void CClient::ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ) +void CClient::ProcessSndCrdAudioData ( CVector& vecfStereoSndCrd ) { // check if a conversion buffer is required or not if ( bSndCrdConversionBufferRequired ) { // add new sound card block in conversion buffer - SndCrdConversionBufferIn.Put ( vecsStereoSndCrd, vecsStereoSndCrd.Size() ); + SndCrdConversionBufferIn.Put ( vecfStereoSndCrd, vecfStereoSndCrd.Size() ); // process all available blocks of data while ( SndCrdConversionBufferIn.GetAvailData() >= iStereoBlockSizeSam ) @@ -964,17 +963,17 @@ void 
CClient::ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ) } // get processed sound card block out of the conversion buffer - SndCrdConversionBufferOut.Get ( vecsStereoSndCrd, vecsStereoSndCrd.Size() ); + SndCrdConversionBufferOut.Get ( vecfStereoSndCrd, vecfStereoSndCrd.Size() ); } else { // regular case: no conversion buffer required // process audio data - ProcessAudioDataIntern ( vecsStereoSndCrd ); + ProcessAudioDataIntern ( vecfStereoSndCrd ); } } -void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) +void CClient::ProcessAudioDataIntern ( CVector& vecfStereoSndCrd ) { int i, j, iUnused; unsigned char* pCurCodedData; @@ -983,7 +982,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // Transmit signal --------------------------------------------------------- // update stereo signal level meter (not needed in headless mode) #ifndef HEADLESS - SignalLevelMeter.Update ( vecsStereoSndCrd, + SignalLevelMeter.Update ( vecfStereoSndCrd, iMonoBlockSizeSam, true ); #endif @@ -991,7 +990,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // add reverberation effect if activated if ( iReverbLevel != 0 ) { - AudioReverb.Process ( vecsStereoSndCrd, + AudioReverb.Process ( vecfStereoSndCrd, bReverbOnLeftChan, static_cast ( iReverbLevel ) / AUD_REVERB_MAX / 4 ); } @@ -1012,8 +1011,8 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) { // note that the gain is always <= 1, therefore a simple cast is // ok since we never can get an overload - vecsStereoSndCrd[j + 1] = static_cast ( fGainR * vecsStereoSndCrd[j + 1] ); - vecsStereoSndCrd[j] = static_cast ( fGainL * vecsStereoSndCrd[j] ); + vecfStereoSndCrd[j + 1] = fGainR * vecfStereoSndCrd[j + 1]; + vecfStereoSndCrd[j] = fGainL * vecfStereoSndCrd[j]; } } else @@ -1025,9 +1024,9 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) for ( i = 0, j = 0; i < iMonoBlockSizeSam; i++, j += 2 ) { - // note that we need the Float2Short for 
stereo pan mode - vecsStereoSndCrd[i] = Float2Short ( - fGainL * vecsStereoSndCrd[j] + fGainR * vecsStereoSndCrd[j + 1] ); + // clip samples for stereo pan mode + vecfStereoSndCrd[i] = ClipFloat ( + fGainL * vecfStereoSndCrd[j] + fGainR * vecfStereoSndCrd[j + 1] ); } } } @@ -1043,7 +1042,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // overwrite input values) for ( i = iMonoBlockSizeSam - 1, j = iStereoBlockSizeSam - 2; i >= 0; i--, j -= 2 ) { - vecsStereoSndCrd[j] = vecsStereoSndCrd[j + 1] = vecsStereoSndCrd[i]; + vecfStereoSndCrd[j] = vecfStereoSndCrd[j + 1] = vecfStereoSndCrd[i]; } } @@ -1054,19 +1053,19 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) { if ( bMuteOutStream ) { - iUnused = opus_custom_encode ( CurOpusEncoder, - &vecZeros[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples, - &vecCeltData[0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( CurOpusEncoder, + &vecZeros[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples, + &vecCeltData[0], + iCeltNumCodedBytes ); } else { - iUnused = opus_custom_encode ( CurOpusEncoder, - &vecsStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples, - &vecCeltData[0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( CurOpusEncoder, + &vecfStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples, + &vecCeltData[0], + iCeltNumCodedBytes ); } } @@ -1081,7 +1080,7 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // in case of mute stream, store local data if ( bMuteOutStream ) { - vecsStereoSndCrdMuteStream = vecsStereoSndCrd; + vecfStereoSndCrdMuteStream = vecfStereoSndCrd; } for ( i = 0; i < iSndCrdFrameSizeFactor; i++ ) @@ -1110,11 +1109,11 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // OPUS decoding if ( CurOpusDecoder != nullptr ) { - iUnused = opus_custom_decode ( CurOpusDecoder, - pCurCodedData, 
- iCeltNumCodedBytes, - &vecsStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], - iOPUSFrameSizeSamples ); + iUnused = opus_custom_decode_float ( CurOpusDecoder, + pCurCodedData, + iCeltNumCodedBytes, + &vecfStereoSndCrd[i * iNumAudioChannels * iOPUSFrameSizeSamples], + iOPUSFrameSizeSamples ); } } @@ -1123,8 +1122,8 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) { for ( i = 0; i < iStereoBlockSizeSam; i++ ) { - vecsStereoSndCrd[i] = Float2Short ( - vecsStereoSndCrd[i] + vecsStereoSndCrdMuteStream[i] * fMuteOutStreamGain ); + vecfStereoSndCrd[i] = ClipFloat ( + vecfStereoSndCrd[i] + vecfStereoSndCrdMuteStream[i] * fMuteOutStreamGain ); } } @@ -1138,14 +1137,14 @@ void CClient::ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ) // overwrite input values) for ( i = iMonoBlockSizeSam - 1, j = iStereoBlockSizeSam - 2; i >= 0; i--, j -= 2 ) { - vecsStereoSndCrd[j] = vecsStereoSndCrd[j + 1] = vecsStereoSndCrd[i]; + vecfStereoSndCrd[j] = vecfStereoSndCrd[j + 1] = vecfStereoSndCrd[i]; } } } else { // if not connected, clear data - vecsStereoSndCrd.Reset ( 0 ); + vecfStereoSndCrd.Reset ( 0 ); } // update socket buffer size diff --git a/src/client.h b/src/client.h index b07539b37c..ede8e9b90f 100755 --- a/src/client.h +++ b/src/client.h @@ -117,8 +117,8 @@ class CClient : public QObject bool IsRunning() { return Sound.IsRunning(); } bool SetServerAddr ( QString strNAddr ); - double GetLevelForMeterdBLeft() { return SignalLevelMeter.GetLevelForMeterdBLeftOrMono(); } - double GetLevelForMeterdBRight() { return SignalLevelMeter.GetLevelForMeterdBRight(); } + float GetLevelForMeterdBLeft() { return SignalLevelMeter.GetLevelForMeterdBLeftOrMono(); } + float GetLevelForMeterdBRight() { return SignalLevelMeter.GetLevelForMeterdBRight(); } bool GetAndResetbJitterBufferOKFlag(); @@ -285,11 +285,11 @@ class CClient : public QObject protected: // callback function must be static, otherwise it does not work - static void AudioCallback ( CVector& 
psData, void* arg ); + static void AudioCallback ( CVector& vfData, void* arg ); void Init(); - void ProcessSndCrdAudioData ( CVector& vecsStereoSndCrd ); - void ProcessAudioDataIntern ( CVector& vecsStereoSndCrd ); + void ProcessSndCrdAudioData ( CVector& vecfStereoSndCrd ); + void ProcessAudioDataIntern ( CVector& vecfStereoSndCrd ); int PreparePingMessage(); int EvaluatePingMessage ( const int iMs ); @@ -339,11 +339,11 @@ class CClient : public QObject bool bSndCrdConversionBufferRequired; int iSndCardMonoBlockSizeSamConvBuff; - CBufferBase SndCrdConversionBufferIn; - CBufferBase SndCrdConversionBufferOut; - CVector vecDataConvBuf; - CVector vecsStereoSndCrdMuteStream; - CVector vecZeros; + CBufferBase SndCrdConversionBufferIn; + CBufferBase SndCrdConversionBufferOut; + CVector vecDataConvBuf; + CVector vecfStereoSndCrdMuteStream; + CVector vecZeros; bool bFraSiFactPrefSupported; bool bFraSiFactDefSupported; diff --git a/src/global.h b/src/global.h index 45e3e48273..14887fcda3 100755 --- a/src/global.h +++ b/src/global.h @@ -168,8 +168,8 @@ LED bar: lbr #define MAX_NUM_STORED_FADER_SETTINGS 250 // range for signal level meter -#define LOW_BOUND_SIG_METER ( -50.0 ) // dB -#define UPPER_BOUND_SIG_METER ( 0.0 ) // dB +#define LOW_BOUND_SIG_METER ( -50.0f ) // dB +#define UPPER_BOUND_SIG_METER ( 0.0f ) // dB // defines for LED level meter CLevelMeter #define NUM_STEPS_LED_BAR 8 @@ -244,8 +244,6 @@ LED bar: lbr // server welcome message title (do not change for compatibility!) 
#define WELCOME_MESSAGE_PREFIX "Server Welcome Message: " -#define _MAXSHORT 32767 -#define _MINSHORT ( -32768 ) #define INVALID_INDEX -1 // define invalid index as a negative value (a valid index must always be >= 0) #if HAVE_STDINT_H diff --git a/src/recorder/cwavestream.h b/src/recorder/cwavestream.h index 54b91633c6..061a72d3b6 100644 --- a/src/recorder/cwavestream.h +++ b/src/recorder/cwavestream.h @@ -31,7 +31,7 @@ namespace recorder { inline QString secondsAt48K( const qint64 frames, const int frameSize ) { - return QString::number( static_cast( frames * frameSize ) / 48000, 'f', 14 ); + return QString::number( static_cast( frames * frameSize ) / 48000, 'f', 14 ); } struct STrackItem @@ -79,7 +79,7 @@ class FmtSubChunk static const uint32_t sampleRate = 48000; // because it's Jamulus const uint32_t byteRate; // sampleRate * numChannels * bitsPerSample/8 const uint16_t blockAlign; // numChannels * bitsPerSample/8 - static const uint16_t bitsPerSample = 16; + static const uint16_t bitsPerSample = 24; }; class DataSubChunkHdr diff --git a/src/recorder/jamcontroller.cpp b/src/recorder/jamcontroller.cpp index 75e2b4568a..801f480a2e 100755 --- a/src/recorder/jamcontroller.cpp +++ b/src/recorder/jamcontroller.cpp @@ -150,7 +150,7 @@ void CJamController::SetRecordingDir ( QString newRecordingDir, QObject::connect( this, &CJamController::ClientDisconnected, pJamRecorder, &CJamRecorder::OnDisconnected ); - qRegisterMetaType> ( "CVector" ); + qRegisterMetaType > ( "CVector" ); QObject::connect( this, &CJamController::AudioFrame, pJamRecorder, &CJamRecorder::OnFrame ); diff --git a/src/recorder/jamcontroller.h b/src/recorder/jamcontroller.h index 82b714f430..cfa5e57a30 100755 --- a/src/recorder/jamcontroller.h +++ b/src/recorder/jamcontroller.h @@ -67,10 +67,10 @@ class CJamController : public QObject const QString stChName, const CHostAddress RecHostAddr, const int iNumAudChan, - const CVector vecsData ); + const CVector vecfData ); }; } -Q_DECLARE_METATYPE(int16_t) 
+Q_DECLARE_METATYPE(float) diff --git a/src/recorder/jamrecorder.cpp b/src/recorder/jamrecorder.cpp index 06577f3f72..8b1b686f67 100755 --- a/src/recorder/jamrecorder.cpp +++ b/src/recorder/jamrecorder.cpp @@ -71,13 +71,15 @@ CJamClient::CJamClient(const qint64 frame, const int _numChannels, const QString * @param _name The client's current name * @param pcm The PCM data */ -void CJamClient::Frame(const QString _name, const CVector& pcm, int iServerFrameSizeSamples) +void CJamClient::Frame(const QString _name, const CVector& pcm, int iServerFrameSizeSamples) { name = _name; for(int i = 0; i < numChannels * iServerFrameSizeSamples; i++) { - *out << pcm[i]; + /* samples must be stored in little endian order */ + const int sample24 = pcm[i] * ((1 << 23) - 1); + *out << ( uint8_t ) sample24 << ( uint8_t ) ( sample24 >> 8 ) << ( uint8_t )( sample24 >> 16 ); } frameCount++; @@ -166,7 +168,7 @@ void CJamSession::DisconnectClient(int iChID) * * Also manages the overall current frame counter for the session. */ -void CJamSession::Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples) +void CJamSession::Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples) { if ( iChID == chIdDisconnected ) { @@ -529,7 +531,7 @@ void CJamRecorder::OnDisconnected(int iChID) * * Ensures recording has started. 
*/ -void CJamRecorder::OnFrame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data) +void CJamRecorder::OnFrame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data) { // Make sure we are ready if ( !isRecording ) diff --git a/src/recorder/jamrecorder.h b/src/recorder/jamrecorder.h index a42fe0c457..b00d29817d 100755 --- a/src/recorder/jamrecorder.h +++ b/src/recorder/jamrecorder.h @@ -72,7 +72,7 @@ class CJamClient : public QObject public: CJamClient(const qint64 frame, const int numChannels, const QString name, const CHostAddress address, const QDir recordBaseDir); - void Frame(const QString name, const CVector& pcm, int iServerFrameSizeSamples); + void Frame(const QString name, const CVector& pcm, int iServerFrameSizeSamples); void Disconnect(); @@ -108,7 +108,7 @@ class CJamSession : public QObject CJamSession(QDir recordBaseDir); - void Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples); + void Frame(const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data, int iServerFrameSizeSamples); void End(); @@ -200,7 +200,7 @@ public slots: /** * @brief Handle a frame of data to process */ - void OnFrame ( const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data ); + void OnFrame ( const int iChID, const QString name, const CHostAddress address, const int numAudioChannels, const CVector data ); }; } diff --git a/src/server.cpp b/src/server.cpp old mode 100755 new mode 100644 index 71144285e3..efd514b08d --- a/src/server.cpp +++ b/src/server.cpp @@ -338,8 +338,8 @@ CServer::CServer ( const int iNewMaxNumChan, vecChanIDsCurConChan.Init ( iMaxNumChannels ); vecvecfGains.Init ( iMaxNumChannels ); vecvecfPannings.Init ( iMaxNumChannels ); - 
vecvecsData.Init ( iMaxNumChannels ); - vecvecsSendData.Init ( iMaxNumChannels ); + vecvecfData.Init ( iMaxNumChannels ); + vecvecfSendData.Init ( iMaxNumChannels ); vecvecfIntermediateProcBuf.Init ( iMaxNumChannels ); vecvecbyCodedData.Init ( iMaxNumChannels ); vecNumAudioChannels.Init ( iMaxNumChannels ); @@ -354,11 +354,11 @@ CServer::CServer ( const int iNewMaxNumChan, vecvecfPannings[i].Init ( iMaxNumChannels ); // we always use stereo audio buffers (which is the worst case) - vecvecsData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); + vecvecfData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); // (note that we only allocate iMaxNumChannels buffers for the send // and coded data because of the OMP implementation) - vecvecsSendData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); + vecvecfSendData[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); // allocate worst case memory for intermediate processing buffers in float precision vecvecfIntermediateProcBuf[i].Init ( 2 /* stereo */ * DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES /* worst case buffer size */ ); @@ -483,7 +483,7 @@ CServer::CServer ( const int iNewMaxNumChan, QObject::connect ( this, &CServer::ClientDisconnected, &JamController, &recorder::CJamController::ClientDisconnected ); - qRegisterMetaType> ( "CVector" ); + qRegisterMetaType > ( "CVector" ); QObject::connect ( this, &CServer::AudioFrame, &JamController, &recorder::CJamController::AudioFrame ); @@ -860,7 +860,7 @@ static CTimingMeas JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE // calculate levels for all connected clients const bool bSendChannelLevels = CreateLevelsForAllConChannels ( iNumClients, vecNumAudioChannels, - vecvecsData, + vecvecfData, vecChannelLevels ); for ( int iChanCnt = 0; iChanCnt < iNumClients; iChanCnt++ ) @@ -886,7 +886,7 @@ static CTimingMeas 
JitterMeas ( 1000, "test2.dat" ); JitterMeas.Measure(); // TE vecChannels[iCurChanID].GetName(), vecChannels[iCurChanID].GetAddress(), vecNumAudioChannels[iChanCnt], - vecvecsData[iChanCnt] ); + vecvecfData[iChanCnt] ); } // processing without multithreading @@ -1028,7 +1028,7 @@ void CServer::DecodeReceiveData ( const int iChanCnt, // get gains of all connected channels for ( int j = 0; j < iNumClients; j++ ) { - // The second index of "vecvecdGains" does not represent + // The second index of "vecvecfGains" does not represent // the channel ID! Therefore we have to use // "vecChanIDsCurConChan" to query the IDs of the currently // connected channels @@ -1054,7 +1054,7 @@ void CServer::DecodeReceiveData ( const int iChanCnt, // is false and the Get() function is not called at all. Therefore if the buffer is not needed // we do not spend any time in the function but go directly inside the if condition. if ( ( vecUseDoubleSysFraSizeConvBuf[iChanCnt] == 0 ) || - !DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecsData[iChanCnt], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ) ) + !DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecfData[iChanCnt], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ) ) { // get current number of OPUS coded bytes const int iCeltNumCodedBytes = vecChannels[iCurChanID].GetNetwFrameSize(); @@ -1094,11 +1094,11 @@ void CServer::DecodeReceiveData ( const int iChanCnt, // OPUS decode received data stream if ( CurOpusDecoder != nullptr ) { - iUnused = opus_custom_decode ( CurOpusDecoder, - pCurCodedData, - iCeltNumCodedBytes, - &vecvecsData[iChanCnt][iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt]], - iClientFrameSizeSamples ); + iUnused = opus_custom_decode_float ( CurOpusDecoder, + pCurCodedData, + iCeltNumCodedBytes, + &vecvecfData[iChanCnt][iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt]], + iClientFrameSizeSamples ); } } @@ -1106,8 +1106,8 @@ void CServer::DecodeReceiveData ( const int 
iChanCnt, // and read out the small frame size immediately for further processing if ( vecUseDoubleSysFraSizeConvBuf[iChanCnt] != 0 ) { - DoubleFrameSizeConvBufIn[iCurChanID].PutAll ( vecvecsData[iChanCnt] ); - DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecsData[iChanCnt], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ); + DoubleFrameSizeConvBufIn[iCurChanID].PutAll ( vecvecfData[iChanCnt] ); + DoubleFrameSizeConvBufIn[iCurChanID].Get ( vecvecfData[iChanCnt], SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ); } } @@ -1118,9 +1118,9 @@ void CServer::DecodeReceiveData ( const int iChanCnt, void CServer::MixEncodeTransmitData ( const int iChanCnt, const int iNumClients ) { - int i, j, k, iUnused; - CVector& vecfIntermProcBuf = vecvecfIntermediateProcBuf[iChanCnt]; // use reference for faster access - CVector& vecsSendData = vecvecsSendData[iChanCnt]; // use reference for faster access + int i, j, k, iUnused; + CVector& vecfIntermProcBuf = vecvecfIntermediateProcBuf[iChanCnt]; // use reference for faster access + CVector& vecfSendData = vecvecfSendData[iChanCnt]; // use reference for faster access // get actual ID of current channel const int iCurChanID = vecChanIDsCurConChan[iChanCnt]; @@ -1135,8 +1135,8 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( j = 0; j < iNumClients; j++ ) { // get a reference to the audio data and gain of the current client - const CVector& vecsData = vecvecsData[j]; - const float fGain = vecvecfGains[iChanCnt][j]; + const CVector& vecfData = vecvecfData[j]; + const float fGain = vecvecfGains[iChanCnt][j]; // if channel gain is 1, avoid multiplication for speed optimization if ( fGain == 1.0f ) @@ -1146,7 +1146,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // mono for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecfIntermProcBuf[i] += vecsData[i]; + vecfIntermProcBuf[i] += vecfData[i]; } } else @@ -1154,8 +1154,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // 
stereo: apply stereo-to-mono attenuation for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { - vecfIntermProcBuf[i] += - ( static_cast ( vecsData[k] ) + vecsData[k + 1] ) / 2; + vecfIntermProcBuf[i] += ( vecfData[k] + vecfData[k + 1] ) / 2; } } } @@ -1166,7 +1165,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // mono for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecfIntermProcBuf[i] += vecsData[i] * fGain; + vecfIntermProcBuf[i] += vecfData[i] * fGain; } } else @@ -1175,16 +1174,20 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { vecfIntermProcBuf[i] += fGain * - ( static_cast ( vecsData[k] ) + vecsData[k + 1] ) / 2; + ( vecfData[k] + vecfData[k + 1] ) / 2; } } } } - // convert from double to short with clipping + // When adding multiple sound sources together + // the resulting signal level may exceed the maximum + // audio range which is from -1.0f to 1.0f inclusively. + // Clip the intermediate sound buffer to be within + // the expected range for ( i = 0; i < iServerFrameSizeSamples; i++ ) { - vecsSendData[i] = Float2Short ( vecfIntermProcBuf[i] ); + vecfSendData[i] = ClipFloat ( vecfIntermProcBuf[i] ); } } else @@ -1193,9 +1196,9 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( j = 0; j < iNumClients; j++ ) { // get a reference to the audio data and gain/pan of the current client - const CVector& vecsData = vecvecsData[j]; - const float fGain = vecvecfGains[iChanCnt][j]; - const float fPan = vecvecfPannings[iChanCnt][j]; + const CVector& vecfData = vecvecfData[j]; + const float fGain = vecvecfGains[iChanCnt][j]; + const float fPan = vecvecfPannings[iChanCnt][j]; // calculate combined gain/pan for each stereo channel where we define // the panning that center equals full gain for both channels @@ -1211,8 +1214,8 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) 
{ // left/right channel - vecfIntermProcBuf[k] += vecsData[i]; - vecfIntermProcBuf[k + 1] += vecsData[i]; + vecfIntermProcBuf[k] += vecfData[i]; + vecfIntermProcBuf[k + 1] += vecfData[i]; } } else @@ -1220,7 +1223,7 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i++ ) { - vecfIntermProcBuf[i] += vecsData[i]; + vecfIntermProcBuf[i] += vecfData[i]; } } } @@ -1232,8 +1235,8 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, for ( i = 0, k = 0; i < iServerFrameSizeSamples; i++, k += 2 ) { // left/right channel - vecfIntermProcBuf[k] += vecsData[i] * fGainL; - vecfIntermProcBuf[k + 1] += vecsData[i] * fGainR; + vecfIntermProcBuf[k] += vecfData[i] * fGainL; + vecfIntermProcBuf[k + 1] += vecfData[i] * fGainR; } } else @@ -1242,17 +1245,21 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // stereo for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i += 2 ) { // left/right channel - vecfIntermProcBuf[i] += vecsData[i] * fGainL; - vecfIntermProcBuf[i + 1] += vecsData[i + 1] * fGainR; + vecfIntermProcBuf[i] += vecfData[i] * fGainL; + vecfIntermProcBuf[i + 1] += vecfData[i + 1] * fGainR; } } } } - // convert from double to short with clipping + // When adding multiple sound sources together + // the resulting signal level may exceed the maximum + // audio range which is from -1.0f to 1.0f inclusively. + // Clip the intermediate sound buffer to be within + // the expected range for ( i = 0; i < ( 2 * iServerFrameSizeSamples ); i++ ) { - vecsSendData[i] = Float2Short ( vecfIntermProcBuf[i] ); + vecfSendData[i] = ClipFloat ( vecfIntermProcBuf[i] ); } } @@ -1296,12 +1303,12 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // is false and the Get() function is not called at all. Therefore if the buffer is not needed // we do not spend any time in the function but go directly inside the if condition. 
if ( ( vecUseDoubleSysFraSizeConvBuf[iChanCnt] == 0 ) || - DoubleFrameSizeConvBufOut[iCurChanID].Put ( vecsSendData, SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ) ) + DoubleFrameSizeConvBufOut[iCurChanID].Put ( vecfSendData, SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ) ) { if ( vecUseDoubleSysFraSizeConvBuf[iChanCnt] != 0 ) { // get the large frame from the conversion buffer - DoubleFrameSizeConvBufOut[iCurChanID].GetAll ( vecsSendData, DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ); + DoubleFrameSizeConvBufOut[iCurChanID].GetAll ( vecfSendData, DOUBLE_SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt] ); } for ( int iB = 0; iB < vecNumFrameSizeConvBlocks[iChanCnt]; iB++ ) @@ -1313,11 +1320,11 @@ void CServer::MixEncodeTransmitData ( const int iChanCnt, // optimization it would be better to set it only if the network frame size is changed opus_custom_encoder_ctl ( pCurOpusEncoder, OPUS_SET_BITRATE ( CalcBitRateBitsPerSecFromCodedBytes ( iCeltNumCodedBytes, iClientFrameSizeSamples ) ) ); - iUnused = opus_custom_encode ( pCurOpusEncoder, - &vecsSendData[iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt]], - iClientFrameSizeSamples, - &vecvecbyCodedData[iChanCnt][0], - iCeltNumCodedBytes ); + iUnused = opus_custom_encode_float ( pCurOpusEncoder, + &vecfSendData[iB * SYSTEM_FRAME_SIZE_SAMPLES * vecNumAudioChannels[iChanCnt]], + iClientFrameSizeSamples, + &vecvecbyCodedData[iChanCnt][0], + iCeltNumCodedBytes ); } // send separate mix to current clients @@ -1556,13 +1563,13 @@ bool CServer::PutAudioData ( const CVector& vecbyRecBuf, // time reset gains/pans of this channel ID for all other channels for ( int i = 0; i < iMaxNumChannels; i++ ) { - vecChannels[iCurChanID].SetGain ( i, 1.0 ); - vecChannels[iCurChanID].SetPan ( i, 0.5 ); + vecChannels[iCurChanID].SetGain ( i, 1.0f ); + vecChannels[iCurChanID].SetPan ( i, 0.5f ); // other channels (we do not distinguish the case if // i == iCurChanID for 
simplicity) - vecChannels[i].SetGain ( iCurChanID, 1.0 ); - vecChannels[i].SetPan ( iCurChanID, 0.5 ); + vecChannels[i].SetGain ( iCurChanID, 1.0f ); + vecChannels[i].SetPan ( iCurChanID, 0.5f ); } } else @@ -1690,10 +1697,10 @@ void CServer::customEvent ( QEvent* pEvent ) } /// @brief Compute frame peak level for each client -bool CServer::CreateLevelsForAllConChannels ( const int iNumClients, - const CVector& vecNumAudioChannels, - const CVector > vecvecsData, - CVector& vecLevelsOut ) +bool CServer::CreateLevelsForAllConChannels ( const int iNumClients, + const CVector& vecNumAudioChannels, + const CVector > vecvecfData, + CVector& vecLevelsOut ) { bool bLevelsWereUpdated = false; @@ -1706,13 +1713,13 @@ bool CServer::CreateLevelsForAllConChannels ( const int i for ( int j = 0; j < iNumClients; j++ ) { // update and get signal level for meter in dB for each channel - const double dCurSigLevelForMeterdB = vecChannels[vecChanIDsCurConChan[j]]. - UpdateAndGetLevelForMeterdB ( vecvecsData[j], + const float fCurSigLevelForMeterdB = vecChannels[vecChanIDsCurConChan[j]]. 
+ UpdateAndGetLevelForMeterdB ( vecvecfData[j], iServerFrameSizeSamples, vecNumAudioChannels[j] > 1 ); // map value to integer for transmission via the protocol (4 bit available) - vecLevelsOut[j] = static_cast ( std::ceil ( dCurSigLevelForMeterdB ) ); + vecLevelsOut[j] = static_cast ( ceilf ( fCurSigLevelForMeterdB ) ); } } diff --git a/src/server.h b/src/server.h old mode 100755 new mode 100644 index 76776b516d..d4d6c7c1c2 --- a/src/server.h +++ b/src/server.h @@ -323,10 +323,10 @@ class CServer : bool bUseMultithreading; QFutureSynchronizer FutureSynchronizer; - bool CreateLevelsForAllConChannels ( const int iNumClients, - const CVector& vecNumAudioChannels, - const CVector > vecvecsData, - CVector& vecLevelsOut ); + bool CreateLevelsForAllConChannels ( const int iNumClients, + const CVector& vecNumAudioChannels, + const CVector > vecvecfData, + CVector& vecLevelsOut ); // do not use the vector class since CChannel does not have appropriate // copy constructor/operator @@ -348,20 +348,20 @@ class CServer : OpusCustomDecoder* OpusDecoderMono[MAX_NUM_CHANNELS]; OpusCustomEncoder* OpusEncoderStereo[MAX_NUM_CHANNELS]; OpusCustomDecoder* OpusDecoderStereo[MAX_NUM_CHANNELS]; - CConvBuf DoubleFrameSizeConvBufIn[MAX_NUM_CHANNELS]; - CConvBuf DoubleFrameSizeConvBufOut[MAX_NUM_CHANNELS]; + CConvBuf DoubleFrameSizeConvBufIn[MAX_NUM_CHANNELS]; + CConvBuf DoubleFrameSizeConvBufOut[MAX_NUM_CHANNELS]; CVector vstrChatColors; CVector vecChanIDsCurConChan; CVector > vecvecfGains; CVector > vecvecfPannings; - CVector > vecvecsData; + CVector > vecvecfData; CVector vecNumAudioChannels; CVector vecNumFrameSizeConvBlocks; CVector vecUseDoubleSysFraSizeConvBuf; CVector vecAudioComprType; - CVector > vecvecsSendData; + CVector > vecvecfSendData; CVector > vecvecfIntermediateProcBuf; CVector > vecvecbyCodedData; @@ -405,11 +405,11 @@ class CServer : void Stopped(); void ClientDisconnected ( const int iChID ); void SvrRegStatusChanged(); - void AudioFrame ( const int iChID, - const 
QString stChName, - const CHostAddress RecHostAddr, - const int iNumAudChan, - const CVector vecsData ); + void AudioFrame ( const int iChID, + const QString stChName, + const CHostAddress RecHostAddr, + const int iNumAudChan, + const CVector vecfData ); void CLVersionAndOSReceived ( CHostAddress InetAddr, COSUtil::EOpSystemType eOSType, @@ -507,4 +507,4 @@ public slots: void OnHandledSignal ( int sigNum ); }; -Q_DECLARE_METATYPE(CVector) +Q_DECLARE_METATYPE ( CVector ) diff --git a/src/soundbase.cpp b/src/soundbase.cpp index 51382dea2a..04ad4f9710 100755 --- a/src/soundbase.cpp +++ b/src/soundbase.cpp @@ -27,7 +27,7 @@ /* Implementation *************************************************************/ CSoundBase::CSoundBase ( const QString& strNewSystemDriverTechniqueName, - void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + void (*fpNewProcessCallback) ( CVector& vfData, void* pParg ), void* pParg, const int iNewCtrlMIDIChannel ) : fpProcessCallback ( fpNewProcessCallback ), @@ -249,7 +249,7 @@ printf ( "\n" ); { // we are assuming that the controller number is the same // as the audio fader index and the range is 0-127 - const int iFaderLevel = static_cast ( static_cast ( + const int iFaderLevel = static_cast ( static_cast ( qMin ( vMIDIPaketBytes[2], uint8_t ( 127 ) ) ) / 127 * AUD_MIX_FADER_MAX ); // Behringer X-TOUCH: offset of 0x46 diff --git a/src/soundbase.h b/src/soundbase.h index ca3adb9eec..db1039e9db 100755 --- a/src/soundbase.h +++ b/src/soundbase.h @@ -51,7 +51,7 @@ class CSoundBase : public QThread public: CSoundBase ( const QString& strNewSystemDriverTechniqueName, - void (*fpNewProcessCallback) ( CVector& psData, void* pParg ), + void (*fpNewProcessCallback) ( CVector& vfData, void* pParg ), void* pParg, const int iNewCtrlMIDIChannel ); @@ -121,13 +121,13 @@ class CSoundBase : public QThread } // function pointer to callback function - void (*fpProcessCallback) ( CVector& psData, void* arg ); + void (*fpProcessCallback) ( CVector& 
vfData, void* arg ); void* pProcessCallbackArg; // callback function call for derived classes - void ProcessCallback ( CVector& psData ) + void ProcessCallback ( CVector& vfData ) { - (*fpProcessCallback) ( psData, pProcessCallbackArg ); + (*fpProcessCallback) ( vfData, pProcessCallbackArg ); } void ParseMIDIMessage ( const CVector& vMIDIPaketBytes ); diff --git a/src/util.cpp b/src/util.cpp old mode 100755 new mode 100644 index ea637558fc..7e04570d07 --- a/src/util.cpp +++ b/src/util.cpp @@ -28,7 +28,7 @@ /* Implementation *************************************************************/ // Input level meter implementation -------------------------------------------- -void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, +void CStereoSignalLevelMeter::Update ( const CVector& vecfAudio, const int iMonoBlockSizeSam, const bool bIsStereoIn ) { @@ -36,14 +36,14 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, // // Speed optimization: // - we only make use of the negative values and ignore the positive ones (since - // int16 has range {-32768, 32767}) -> we do not need to call the fabs() function + // float has the range {-1, 1}) -> we do not need to call the fabsf() function // - we only evaluate every third sample // // With these speed optimizations we might loose some information in // special cases but for the average music signals the following code // should give good results. 
- short sMinLOrMono = 0; - short sMinR = 0; + float fMinLOrMono = 0; + float fMinR = 0; if ( bIsStereoIn ) { @@ -51,14 +51,14 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, for ( int i = 0; i < 2 * iMonoBlockSizeSam; i += 6 ) // 2 * 3 = 6 -> stereo { // left (or mono) and right channel - sMinLOrMono = std::min ( sMinLOrMono, vecsAudio[i] ); - sMinR = std::min ( sMinR, vecsAudio[i + 1] ); + fMinLOrMono = fminf ( fMinLOrMono, vecfAudio[i] ); + fMinR = fminf ( fMinR, vecfAudio[i + 1] ); } // in case of mono out use minimum of both channels if ( !bIsStereoOut ) { - sMinLOrMono = std::min ( sMinLOrMono, sMinR ); + fMinLOrMono = fminf ( fMinLOrMono, fMinR ); } } else @@ -66,66 +66,71 @@ void CStereoSignalLevelMeter::Update ( const CVector& vecsAudio, // mono in for ( int i = 0; i < iMonoBlockSizeSam; i += 3 ) { - sMinLOrMono = std::min ( sMinLOrMono, vecsAudio[i] ); + fMinLOrMono = fminf ( fMinLOrMono, vecfAudio[i] ); } } // apply smoothing, if in stereo out mode, do this for two channels - dCurLevelLOrMono = UpdateCurLevel ( dCurLevelLOrMono, -sMinLOrMono ); + fCurLevelLOrMono = UpdateCurLevel ( fCurLevelLOrMono, -fMinLOrMono ); if ( bIsStereoOut ) { - dCurLevelR = UpdateCurLevel ( dCurLevelR, -sMinR ); + fCurLevelR = UpdateCurLevel ( fCurLevelR, -fMinR ); } } -double CStereoSignalLevelMeter::UpdateCurLevel ( double dCurLevel, - const double dMax ) +float CStereoSignalLevelMeter::UpdateCurLevel ( float fCurLevel, + const float fMax ) { // decrease max with time - if ( dCurLevel >= METER_FLY_BACK ) + if ( fCurLevel >= METER_FLY_BACK ) { - dCurLevel *= dSmoothingFactor; + fCurLevel *= fSmoothingFactor; } else { - dCurLevel = 0; + fCurLevel = 0; } // update current level -> only use maximum - if ( dMax > dCurLevel ) + if ( fMax > fCurLevel ) { - return dMax; + return fMax; } else { - return dCurLevel; + return fCurLevel; } } -double CStereoSignalLevelMeter::CalcLogResultForMeter ( const double& dLinearLevel ) +float 
CStereoSignalLevelMeter::CalcLogResultForMeter ( const float& fLinearLevel ) { - const double dNormLevel = dLinearLevel / _MAXSHORT; + // With #544 by hselasky the signal processing was changed from short to float. + // With the code using short we defined the clipping to be if a value of 32768 + // was detected. We did this by normalizing by 32767 and the clipping was then + // a bit larger than 1. To get the same behavior also with the new float type, + // we have to multiply with a factor 32768 / 32767 = 1.000030518509476. + const float fNormLevel = fLinearLevel * 1.000030518509476f; // logarithmic measure - double dLevelForMeterdB = -100000.0; // large negative value + float fLevelForMeterdB = -100000.0f; // large negative value - if ( dNormLevel > 0 ) + if ( fNormLevel > 0 ) { - dLevelForMeterdB = 20.0 * log10 ( dNormLevel ); + fLevelForMeterdB = 20.0f * log10f ( fNormLevel ); } // map to signal level meter (linear transformation of the input // level range to the level meter range) - dLevelForMeterdB -= LOW_BOUND_SIG_METER; - dLevelForMeterdB *= NUM_STEPS_LED_BAR / ( UPPER_BOUND_SIG_METER - LOW_BOUND_SIG_METER ); + fLevelForMeterdB -= LOW_BOUND_SIG_METER; + fLevelForMeterdB *= NUM_STEPS_LED_BAR / ( UPPER_BOUND_SIG_METER - LOW_BOUND_SIG_METER ); - if ( dLevelForMeterdB < 0 ) + if ( fLevelForMeterdB < 0 ) { - dLevelForMeterdB = 0; + fLevelForMeterdB = 0; } - return dLevelForMeterdB; + return fLevelForMeterdB; } @@ -315,9 +320,9 @@ float CAudioReverb::COnePole::Calc ( const float fIn ) return fLastSample; } -void CAudioReverb::Process ( CVector& vecsStereoInOut, - const bool bReverbOnLeftChan, - const float fAttenuation ) +void CAudioReverb::Process ( CVector& vecfStereoInOut, + const bool bReverbOnLeftChan, + const float fAttenuation ) { float fMixedInput, temp, temp0, temp1, temp2; @@ -327,22 +332,22 @@ void CAudioReverb::Process ( CVector& vecsStereoInOut, // shall be input for the right channel) if ( eAudioChannelConf == CC_STEREO ) { - fMixedInput = 0.5f * 
( vecsStereoInOut[i] + vecsStereoInOut[i + 1] ); + fMixedInput = 0.5f * ( vecfStereoInOut[i] + vecfStereoInOut[i + 1] ); } else { if ( bReverbOnLeftChan ) { - fMixedInput = vecsStereoInOut[i]; + fMixedInput = vecfStereoInOut[i]; } else { - fMixedInput = vecsStereoInOut[i + 1]; + fMixedInput = vecfStereoInOut[i + 1]; } } - temp = allpassDelays[0].Get(); - temp0 = allpassCoefficient * temp; + temp = allpassDelays[0].Get(); + temp0 = allpassCoefficient * temp; temp0 += fMixedInput; allpassDelays[0].Add ( temp0 ); temp0 = - ( allpassCoefficient * temp0 ) + temp; @@ -378,15 +383,15 @@ void CAudioReverb::Process ( CVector& vecsStereoInOut, // reverberation effect on both channels) if ( ( eAudioChannelConf == CC_STEREO ) || bReverbOnLeftChan ) { - vecsStereoInOut[i] = Float2Short ( - ( 1.0f - fAttenuation ) * vecsStereoInOut[i] + + vecfStereoInOut[i] = ClipFloat ( + ( 1.0f - fAttenuation ) * vecfStereoInOut[i] + 0.5f * fAttenuation * outLeftDelay.Get() ); } if ( ( eAudioChannelConf == CC_STEREO ) || !bReverbOnLeftChan ) { - vecsStereoInOut[i + 1] = Float2Short ( - ( 1.0f - fAttenuation ) * vecsStereoInOut[i + 1] + + vecfStereoInOut[i + 1] = ClipFloat ( + ( 1.0f - fAttenuation ) * vecfStereoInOut[i + 1] + 0.5f * fAttenuation * outRightDelay.Get() ); } } diff --git a/src/util.h b/src/util.h old mode 100755 new mode 100644 index 5737aaddd9..1f65fdea31 --- a/src/util.h +++ b/src/util.h @@ -69,27 +69,15 @@ class CClient; // forward declaration of CClient /* Definitions ****************************************************************/ -#define METER_FLY_BACK 2 +#define METER_FLY_BACK 0.00006103515625f // 2 / 32768 #define INVALID_MIDI_CH -1 // invalid MIDI channel definition /* Global functions ***********************************************************/ -// converting float to short -inline short Float2Short ( const float fInput ) +// range check audio samples +static inline float ClipFloat ( const float fInput ) { - // lower bound - if ( fInput < _MINSHORT ) - { - return 
_MINSHORT; - } - - // upper bound - if ( fInput > _MAXSHORT ) - { - return _MAXSHORT; - } - - return static_cast ( fInput ); + return qBound ( -1.0f, fInput, 1.0f ); } // debug error handling @@ -739,32 +727,32 @@ class CStereoSignalLevelMeter // TODO Calculate smoothing factor from sample rate and frame size (64 or 128 samples frame size). // But tests with 128 and 64 samples frame size have shown that the meter fly back // is ok for both numbers of samples frame size with a factor of 0.99. - CStereoSignalLevelMeter ( const bool bNIsStereoOut = true, - const double dNSmoothingFactor = 0.99 ) : - dSmoothingFactor ( dNSmoothingFactor ), bIsStereoOut ( bNIsStereoOut ) { Reset(); } + CStereoSignalLevelMeter ( const bool bNIsStereoOut = true, + const float fNSmoothingFactor = 0.99f ) : + fSmoothingFactor ( fNSmoothingFactor ), bIsStereoOut ( bNIsStereoOut ) { Reset(); } - void Update ( const CVector& vecsAudio, + void Update ( const CVector& vecfAudio, const int iInSize, const bool bIsStereoIn ); - double GetLevelForMeterdBLeftOrMono() { return CalcLogResultForMeter ( dCurLevelLOrMono ); } - double GetLevelForMeterdBRight() { return CalcLogResultForMeter ( dCurLevelR ); } - static double CalcLogResultForMeter ( const double& dLinearLevel ); + float GetLevelForMeterdBLeftOrMono() { return CalcLogResultForMeter ( fCurLevelLOrMono ); } + float GetLevelForMeterdBRight() { return CalcLogResultForMeter ( fCurLevelR ); } + static float CalcLogResultForMeter ( const float& fLinearLevel ); void Reset() { - dCurLevelLOrMono = 0.0; - dCurLevelR = 0.0; + fCurLevelLOrMono = 0.0f; + fCurLevelR = 0.0f; } protected: - double UpdateCurLevel ( double dCurLevel, - const double dMax ); + float UpdateCurLevel ( float fCurLevel, + const float fMax ); - double dCurLevelLOrMono; - double dCurLevelR; - double dSmoothingFactor; - bool bIsStereoOut; + float fCurLevelLOrMono; + float fCurLevelR; + float fSmoothingFactor; + bool bIsStereoOut; }; @@ -1174,9 +1162,9 @@ class CAudioReverb const float 
fT60 = 1.1f ); void Clear(); - void Process ( CVector& vecsStereoInOut, - const bool bReverbOnLeftChan, - const float fAttenuation ); + void Process ( CVector& vecfStereoInOut, + const bool bReverbOnLeftChan, + const float fAttenuation ); protected: void setT60 ( const float fT60, const int iSampleRate ); diff --git a/src/vstmain.cpp b/src/vstmain.cpp index 6044c909f5..0fd55aff4b 100755 --- a/src/vstmain.cpp +++ b/src/vstmain.cpp @@ -101,8 +101,8 @@ void CLlconVST::processReplacing ( float** pvIn, // copy input data for ( i = 0, j = 0; i < iNumSamples; i++, j += 2 ) { - Client.GetSound()->vecsTmpAudioSndCrdStereo[j] = pfIn0[i]; - Client.GetSound()->vecsTmpAudioSndCrdStereo[j + 1] = pfIn1[i]; + Client.GetSound()->vecfTmpAudioSndCrdStereo[j] = pfIn0[i]; + Client.GetSound()->vecfTmpAudioSndCrdStereo[j + 1] = pfIn1[i]; } // call processing callback function @@ -111,7 +111,7 @@ void CLlconVST::processReplacing ( float** pvIn, // copy output data for ( i = 0, j = 0; i < iNumSamples; i++, j += 2 ) { - pfOut0[i] = Client.GetSound()->vecsTmpAudioSndCrdStereo[j]; - pfOut1[i] = Client.GetSound()->vecsTmpAudioSndCrdStereo[j + 1]; + pfOut0[i] = Client.GetSound()->vecfTmpAudioSndCrdStereo[j]; + pfOut1[i] = Client.GetSound()->vecfTmpAudioSndCrdStereo[j + 1]; } } diff --git a/src/vstsound.h b/src/vstsound.h index 04ea94a675..7709cf3e92 100755 --- a/src/vstsound.h +++ b/src/vstsound.h @@ -34,26 +34,26 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), void* arg ) : + CSound ( void (*fpNewCallback) ( CVector& vfData, void* arg ), void* arg ) : CSoundBase ( true, fpNewCallback, arg ), iVSTMonoBufferSize ( 0 ) {} // special VST functions void SetMonoBufferSize ( const int iNVBS ) { iVSTMonoBufferSize = iNVBS; } void VSTProcessCallback() { - CSoundBase::ProcessCallback ( vecsTmpAudioSndCrdStereo ); + CSoundBase::ProcessCallback ( vecfTmpAudioSndCrdStereo ); } virtual int Init ( const int ) { // init base class CSoundBase::Init 
( iVSTMonoBufferSize ); - vecsTmpAudioSndCrdStereo.Init ( 2 * iVSTMonoBufferSize /* stereo */); + vecfTmpAudioSndCrdStereo.Init ( 2 * iVSTMonoBufferSize /* stereo */); return iVSTMonoBufferSize; } // this vector must be accessible from the outside (quick hack solution) - CVector vecsTmpAudioSndCrdStereo; + CVector vecfTmpAudioSndCrdStereo; protected: int iVSTMonoBufferSize; diff --git a/windows/sound.cpp b/windows/sound.cpp old mode 100755 new mode 100644 index b6439b24da..1e9a66a438 --- a/windows/sound.cpp +++ b/windows/sound.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2004-2020 * * Author(s): - * Volker Fischer + * Volker Fischer, hselasky * * Description: * Sound card interface for Windows operating systems @@ -149,9 +149,6 @@ QString CSound::CheckDeviceCapabilities() lNumOutChan = MAX_NUM_IN_OUT_CHANNELS; } - // query channel infos for all available input channels - bool bInputChMixingSupported = true; - for ( int i = 0; i < lNumInChan; i++ ) { // setup for input channels @@ -174,11 +171,6 @@ QString CSound::CheckDeviceCapabilities() // store the name of the channel and check if channel mixing is supported channelInputName[i] = channelInfosInput[i].name; - - if ( !CheckSampleTypeSupportedForCHMixing ( channelInfosInput[i].type ) ) - { - bInputChMixingSupported = false; - } } // query channel infos for all available output channels @@ -204,7 +196,7 @@ QString CSound::CheckDeviceCapabilities() } // special case with 4 input channels: support adding channels - if ( ( lNumInChan == 4 ) && bInputChMixingSupported ) + if ( lNumInChan == 4 ) { // add four mixed channels (i.e. 
4 normal, 4 mixed channels) lNumInChanPlusAddChan = 8; @@ -404,7 +396,7 @@ int CSound::Init ( const int iNewPrefMonoBufferSize ) ASIOSetSampleRate ( SYSTEM_SAMPLE_RATE_HZ ); // create memory for intermediate audio buffer - vecsMultChanAudioSndCrd.Init ( iASIOBufferSizeStereo ); + vecfMultChanAudioSndCrd.Init ( iASIOBufferSizeStereo ); // create and activate ASIO buffers (buffer size in samples), // dispose old buffers (if any) @@ -483,7 +475,7 @@ void CSound::Stop() } } -CSound::CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), +CSound::CSound ( void (*fpNewCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -588,519 +580,421 @@ bool CSound::CheckSampleTypeSupported ( const ASIOSampleType SamType ) ( SamType == ASIOSTInt32MSB24 ) ); } -bool CSound::CheckSampleTypeSupportedForCHMixing ( const ASIOSampleType SamType ) +// Might want to comment that use of double in factors below avoids nasty conversion surprises +static constexpr double FACTOR16 = 32767.0; +static constexpr double FACTOR16_INV = 1.0 / 32768.0; // Notice diff in last digit from other factor, ala Port Audio + +struct sample16LSB { - // check for supported sample types for audio channel mixing (see bufferSwitch) - return ( ( SamType == ASIOSTInt16LSB ) || - ( SamType == ASIOSTInt24LSB ) || - ( SamType == ASIOSTInt32LSB ) ); -} + int16_t data[1]; -void CSound::bufferSwitch ( long index, ASIOBool ) + float Get() const + { + return data[0] * FACTOR16_INV; + } + + void Put ( const float value ) + { + data[0] = static_cast ( value * FACTOR16 ); + } +}; + +struct sample16MSB +{ + uint8_t data[2]; + + float Get() const + { + const int16_t temp = data[1] | ( data[0] << 8 ); + + return temp * FACTOR16_INV; + } + + void Put ( const float value ) + { + const int16_t temp = static_cast ( value * FACTOR16 ); + + data[0] = static_cast ( temp >> 8 ); + data[1] = static_cast ( temp ); + } +}; + +// Might want to comment that use of double in factors below avoids 
nasty conversion surprises +static constexpr double FACTOR24 = 2147483647.0; +static constexpr double FACTOR24_INV = 1.0 / 2147483648.0; // Notice diff in last digit from other factor, ala Port Audio + +struct sample24LSB +{ + uint8_t data[3]; + + float Get() const + { + const int32_t temp = ( data[0] << 8 ) | ( data[1] << 16 ) | ( data[2] << 24 ); + + return temp * FACTOR24_INV; + } + + void Put ( const float value ) + { + const int32_t temp = static_cast ( value * FACTOR24 ); + + data[0] = static_cast ( temp >> 8 ); + data[1] = static_cast ( temp >> 16 ); + data[2] = static_cast ( temp >> 24 ); + } +}; + +struct sample24MSB +{ + uint8_t data[3]; + + float Get() const + { + const int32_t temp = ( data[2] << 8 ) | ( data[1] << 16 ) | ( data[0] << 24 ); + + return temp * FACTOR24_INV; + } + + void Put ( const float value ) + { + const int32_t temp = static_cast ( value * FACTOR24 ); + + data[0] = static_cast ( temp >> 24 ); + data[1] = static_cast ( temp >> 16 ); + data[2] = static_cast ( temp >> 8 ); + } +}; + +// Might want to comment that use of double in factors below avoids nasty conversion surprises. 
+static constexpr double FACTOR32 = 2147483647.0; +static constexpr double FACTOR32_INV = 1.0 / 2147483648.0; // Notice diff in last digit from other factor, ala Port Audio + +struct sample32LSB +{ + int32_t data[1]; + + float Get() const + { + return ( data[0] * FACTOR32_INV ); + } + + void Put ( const float value ) + { + data[0] = static_cast ( value * FACTOR32 ); + } +}; + +struct sample32MSB +{ + uint8_t data[4]; + + float Get() const + { + const int32_t temp = ( data[3] << 0 ) | ( data[2] << 8 ) | + ( data[1] << 16 ) | ( data[0] << 24 ); + + return temp * FACTOR32_INV; + } + + void Put ( const float value ) + { + const int32_t temp = static_cast ( value * FACTOR32 ); + + data[0] = static_cast ( temp >> 24 ); + data[1] = static_cast ( temp >> 16 ); + data[2] = static_cast ( temp >> 8 ); + data[3] = static_cast ( temp >> 0 ); + } +}; + +union sampleFloat32Data +{ + uint8_t data[4]; + float value; +}; + +struct sampleFloat32LSB +{ + float data[1]; + + float Get() const + { + return data[0]; + } + + void Put ( const float value ) + { + data[0] = value; + } +}; + +struct sampleFloat32MSB +{ + uint8_t data[4]; + + float Get() const + { + sampleFloat32Data temp; + + temp.data[0] = data[3]; + temp.data[1] = data[2]; + temp.data[2] = data[1]; + temp.data[3] = data[0]; + + return temp.value; + } + + void Put ( const float value ) + { + sampleFloat32Data temp; + + temp.value = value; + + data[0] = temp.data[3]; + data[1] = temp.data[2]; + data[2] = temp.data[1]; + data[3] = temp.data[0]; + } +}; + +union sampleFloat64Data { - int iCurSample; + uint8_t data[8]; + double value; +}; - // get references to class members - int& iASIOBufferSizeMono = pSound->iASIOBufferSizeMono; - CVector& vecsMultChanAudioSndCrd = pSound->vecsMultChanAudioSndCrd; +struct sampleFloat64LSB +{ + double data[1]; + float Get() const + { + return data[0]; + } + + void Put ( const float value ) + { + data[0] = value; + } +}; + +struct sampleFloat64MSB +{ + uint8_t data[8]; + + float Get() const + { 
+ sampleFloat64Data temp; + + temp.data[0] = data[7]; + temp.data[1] = data[6]; + temp.data[2] = data[5]; + temp.data[3] = data[4]; + temp.data[4] = data[3]; + temp.data[5] = data[2]; + temp.data[6] = data[1]; + temp.data[7] = data[0]; + + return temp.value; + } + + void Put ( const float value ) + { + sampleFloat64Data temp; + + temp.value = value; + + data[0] = temp.data[7]; + data[1] = temp.data[6]; + data[2] = temp.data[5]; + data[3] = temp.data[4]; + data[4] = temp.data[3]; + data[5] = temp.data[2]; + data[6] = temp.data[1]; + data[7] = temp.data[0]; + } +}; + +void CSound::bufferSwitch ( long index, ASIOBool ) +{ // perform the processing for input and output pSound->ASIOMutex.lock(); // get mutex lock { // CAPTURE ------------------------------------------------------------- for ( int i = 0; i < NUM_IN_OUT_CHANNELS; i++ ) { - int iSelCH, iSelAddCH; + int iSelAddCH; + int iSelCH; - GetSelCHAndAddCH ( pSound->vSelectedInputChannels[i], pSound->lNumInChan, - iSelCH, iSelAddCH ); + GetSelCHAndAddCH ( pSound->vSelectedInputChannels[i], + pSound->lNumInChan, + iSelCH, + iSelAddCH ); // copy new captured block in thread transfer buffer (copy // mono data interleaved in stereo buffer) switch ( pSound->channelInfosInput[iSelCH].type ) { case ASIOSTInt16LSB: - { - // no type conversion required, just copy operation - int16_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = pASIOBuf[iCurSample]; - } - - if ( iSelAddCH >= 0 ) - { - // mix input channels case: - int16_t* pASIOBufAdd = static_cast ( pSound->bufferInfos[iSelAddCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Float2Short ( (float) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (float) pASIOBufAdd[iCurSample] ); - } - } + pSound->bufferSwitchImport ( 1, index, 
i ); break; - } - - case ASIOSTInt24LSB: - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, 3 ); - iCurSam >>= 8; - vecsMultChanAudioSndCrd[2 * iCurSample + i] = static_cast ( iCurSam ); - } - - if ( iSelAddCH >= 0 ) - { - // mix input channels case: - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelAddCH].buffers[index] ) + iCurSample * 3, 3 ); - iCurSam >>= 8; - - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Float2Short ( (float) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (float) static_cast ( iCurSam ) ); - } - } + case ASIOSTInt16MSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB: - { - int32_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( pASIOBuf[iCurSample] >> 16 ); - } - - if ( iSelAddCH >= 0 ) - { - // mix input channels case: - int32_t* pASIOBufAdd = static_cast ( pSound->bufferInfos[iSelAddCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Float2Short ( (float) vecsMultChanAudioSndCrd[2 * iCurSample + i] + - (float) static_cast ( pASIOBufAdd[iCurSample] >> 16 ) ); - } - } + case ASIOSTInt24LSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - } - case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] * _MAXSHORT ); - } + case ASIOSTInt24MSB: + 
pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTFloat64LSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] * _MAXSHORT ); - } + case ASIOSTInt32LSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFF ); - } + case ASIOSTInt32MSB: + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0x3FFFF ) >> 2 ); - } + case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFFF ) >> 4 ); - } + case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt32LSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast 
( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] & 0xFFFFFF ) >> 8 ); - } + case ASIOSTFloat64LSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt16MSB: -// NOT YET TESTED - // flip bits - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Flip16Bits ( ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] ) )[iCurSample] ); - } + case ASIOSTFloat64MSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchImport ( 1, index, i ); break; - case ASIOSTInt24MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // because the bits are flipped, we do not have to perform the - // shift by 8 bits - int iCurSam = 0; - memcpy ( &iCurSam, ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, 3 ); - - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - Flip16Bits ( static_cast ( iCurSam ) ); - } + case ASIOSTInt32LSB16: + pSound->bufferSwitchImport ( 1 << 16, index, i ); break; - case ASIOSTInt32MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // flip bits and convert to 16 bit - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) >> 16 ); - } + case ASIOSTInt32MSB16: + pSound->bufferSwitchImport ( 1 << 16, index, i ); break; - case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) ) * _MAXSHORT ); - } + case ASIOSTInt32LSB18: + pSound->bufferSwitchImport ( 1 
<< 14, index, i ); break; - case ASIOSTFloat64MSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( static_cast ( - Flip64Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) ) * _MAXSHORT ); - } + case ASIOSTInt32MSB18: + pSound->bufferSwitchImport ( 1 << 14, index, i ); break; - case ASIOSTInt32MSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFF ); - } + case ASIOSTInt32LSB20: + pSound->bufferSwitchImport ( 1 << 12, index, i ); break; - case ASIOSTInt32MSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0x3FFFF ) >> 2 ); - } + case ASIOSTInt32MSB20: + pSound->bufferSwitchImport ( 1 << 12, index, i ); break; - case ASIOSTInt32MSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFFF ) >> 4 ); - } + case ASIOSTInt32LSB24: + pSound->bufferSwitchImport ( 1 << 8, index, i ); break; - case ASIOSTInt32MSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - vecsMultChanAudioSndCrd[2 * iCurSample + i] = - static_cast ( ( Flip32Bits ( static_cast ( - 
pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] ) & 0xFFFFFF ) >> 8 ); - } + case ASIOSTInt32MSB24: + pSound->bufferSwitchImport ( 1 << 8, index, i ); break; } } // call processing callback function - pSound->ProcessCallback ( vecsMultChanAudioSndCrd ); + pSound->ProcessCallback ( pSound->vecfMultChanAudioSndCrd ); // PLAYBACK ------------------------------------------------------------ for ( int i = 0; i < NUM_IN_OUT_CHANNELS; i++ ) { - const int iSelCH = pSound->lNumInChan + pSound->vSelectedOutputChannels[i]; - // copy data from sound card in output buffer (copy // interleaved stereo data in mono sound card buffer) switch ( pSound->channelInfosOutput[pSound->vSelectedOutputChannels[i]].type ) { case ASIOSTInt16LSB: - { - // no type conversion required, just copy operation - int16_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - pASIOBuf[iCurSample] = vecsMultChanAudioSndCrd[2 * iCurSample + i]; - } + pSound->bufferSwitchExport ( 1, index, i ); break; - } - case ASIOSTInt24LSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert current sample in 24 bit format - int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - iCurSam <<= 8; - - memcpy ( ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, &iCurSam, 3 ); - } + case ASIOSTInt16MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB: - { - int32_t* pASIOBuf = static_cast ( pSound->bufferInfos[iSelCH].buffers[index] ); - - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - pASIOBuf[iCurSample] = ( iCurSam << 16 ); - } + case ASIOSTInt24LSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - } - case ASIOSTFloat32LSB: // IEEE 
754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const float fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - fCurSam / _MAXSHORT; - } + case ASIOSTInt24MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTFloat64LSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const double fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - fCurSam / _MAXSHORT; - } + case ASIOSTInt32LSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - iCurSam; - } + case ASIOSTInt32MSB: + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 2 ); - } + case ASIOSTFloat32LSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 
bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 4 ); - } + case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt32LSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - ( iCurSam << 8 ); - } + case ASIOSTFloat64LSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt16MSB: -// NOT YET TESTED - // flip bits - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - ( (int16_t*) pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip16Bits ( vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - } + case ASIOSTFloat64MSB: // IEEE 754 64 bit float, as found on Intel x86 architecture + pSound->bufferSwitchExport ( 1, index, i ); break; - case ASIOSTInt24MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // because the bits are flipped, we do not have to perform the - // shift by 8 bits - int32_t iCurSam = static_cast ( Flip16Bits ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ) ); - - memcpy ( ( (char*) pSound->bufferInfos[iSelCH].buffers[index] ) + iCurSample * 3, &iCurSam, 3 ); - } + case ASIOSTInt32LSB16: + pSound->bufferSwitchExport ( 1 << 16, index, i ); break; - case ASIOSTInt32MSB: -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit and flip bits - int iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast 
( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 16 ); - } + case ASIOSTInt32MSB16: + pSound->bufferSwitchExport ( 1 << 16, index, i ); break; - case ASIOSTFloat32MSB: // IEEE 754 32 bit float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const float fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - static_cast ( Flip32Bits ( static_cast ( - fCurSam / _MAXSHORT ) ) ); - } + case ASIOSTInt32LSB18: + pSound->bufferSwitchExport ( 1 << 14, index, i ); break; - case ASIOSTFloat64MSB: // IEEE 754 64 bit double float, as found on Intel x86 architecture -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - const double fCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - static_cast ( Flip64Bits ( static_cast ( - fCurSam / _MAXSHORT ) ) ); - } + case ASIOSTInt32MSB18: + pSound->bufferSwitchExport ( 1 << 14, index, i ); break; - case ASIOSTInt32MSB16: // 32 bit data with 16 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam ); - } + case ASIOSTInt32LSB20: + pSound->bufferSwitchExport ( 1 << 12, index, i ); break; - case ASIOSTInt32MSB18: // 32 bit data with 18 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] 
)[iCurSample] = - Flip32Bits ( iCurSam << 2 ); - } + case ASIOSTInt32MSB20: + pSound->bufferSwitchExport ( 1 << 12, index, i ); break; - case ASIOSTInt32MSB20: // 32 bit data with 20 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 4 ); - } + case ASIOSTInt32LSB24: + pSound->bufferSwitchExport ( 1 << 8, index, i ); break; - case ASIOSTInt32MSB24: // 32 bit data with 24 bit alignment -// NOT YET TESTED - for ( iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) - { - // convert to 32 bit - const int32_t iCurSam = static_cast ( - vecsMultChanAudioSndCrd[2 * iCurSample + i] ); - - static_cast ( pSound->bufferInfos[iSelCH].buffers[index] )[iCurSample] = - Flip32Bits ( iCurSam << 8 ); - } + case ASIOSTInt32MSB24: + pSound->bufferSwitchExport ( 1 << 8, index, i ); break; } } + // Finally if the driver supports the ASIOOutputReady() optimization, // do it here, all data are in place ----------------------------------- if ( pSound->bASIOPostOutput ) @@ -1139,57 +1033,3 @@ long CSound::asioMessages ( long selector, return ret; } - -int16_t CSound::Flip16Bits ( const int16_t iIn ) -{ - uint16_t iMask = ( 1 << 15 ); - int16_t iOut = 0; - - for ( unsigned int i = 0; i < 16; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} - -int32_t CSound::Flip32Bits ( const int32_t iIn ) -{ - uint32_t iMask = ( static_cast ( 1 ) << 31 ); - int32_t iOut = 0; - - for ( unsigned int i = 0; i < 32; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 
1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} - -int64_t CSound::Flip64Bits ( const int64_t iIn ) -{ - uint64_t iMask = ( static_cast ( 1 ) << 63 ); - int64_t iOut = 0; - - for ( unsigned int i = 0; i < 64; i++ ) - { - // copy current bit to correct position - iOut |= ( iIn & iMask ) ? 1 : 0; - - // shift out value and mask by one bit - iOut <<= 1; - iMask >>= 1; - } - - return iOut; -} diff --git a/windows/sound.h b/windows/sound.h old mode 100755 new mode 100644 index b04ff9d048..646511babc --- a/windows/sound.h +++ b/windows/sound.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2020 * * Author(s): - * Volker Fischer + * Volker Fischer, hselasky * ****************************************************************************** * @@ -46,7 +46,7 @@ class CSound : public CSoundBase { public: - CSound ( void (*fpNewCallback) ( CVector& psData, void* arg ), + CSound ( void (*fpNewCallback) ( CVector& pfData, void* arg ), void* arg, const int iCtrlMIDIChannel, const bool , @@ -78,33 +78,27 @@ class CSound : public CSoundBase virtual float GetInOutLatencyMs() { return fInOutLatencyMs; } protected: - virtual QString LoadAndInitializeDriver ( int iIdx, - bool bOpenDriverSetup ); - virtual void UnloadCurrentDriver(); - int GetActualBufferSize ( const int iDesiredBufferSizeMono ); - QString CheckDeviceCapabilities(); - bool CheckSampleTypeSupported ( const ASIOSampleType SamType ); - bool CheckSampleTypeSupportedForCHMixing ( const ASIOSampleType SamType ); - void ResetChannelMapping(); - - int iASIOBufferSizeMono; - int iASIOBufferSizeStereo; - - long lNumInChan; - long lNumInChanPlusAddChan; // includes additional "added" channels - long lNumOutChan; - float fInOutLatencyMs; - CVector vSelectedInputChannels; - CVector vSelectedOutputChannels; - - CVector vecsMultChanAudioSndCrd; - - QMutex ASIOMutex; - - // utility functions - static int16_t Flip16Bits ( const int16_t iIn ); - static int32_t Flip32Bits ( const int32_t iIn ); - 
static int64_t Flip64Bits ( const int64_t iIn ); + virtual QString LoadAndInitializeDriver ( int iIdx, + bool bOpenDriverSetup ); + virtual void UnloadCurrentDriver(); + int GetActualBufferSize ( const int iDesiredBufferSizeMono ); + QString CheckDeviceCapabilities(); + bool CheckSampleTypeSupported ( const ASIOSampleType SamType ); + void ResetChannelMapping(); + + int iASIOBufferSizeMono; + int iASIOBufferSizeStereo; + + long lNumInChan; + long lNumInChanPlusAddChan; // includes additional "added" channels + long lNumOutChan; + float fInOutLatencyMs; + CVector vSelectedInputChannels; + CVector vSelectedOutputChannels; + + CVector vecfMultChanAudioSndCrd; + + QMutex ASIOMutex; // audio hardware buffer info struct sHWBufferInfo @@ -124,6 +118,51 @@ class CSound : public CSoundBase bool bASIOPostOutput; ASIOCallbacks asioCallbacks; + // templates + template void bufferSwitchImport ( const int iGain, + const long index, + const int iCH) + { + int iSelAddCH; + int iSelCH; + + GetSelCHAndAddCH ( vSelectedInputChannels[iCH], lNumInChan, iSelCH, iSelAddCH ); + + const T* pASIOBuf = static_cast ( bufferInfos[iSelCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) + { + vecfMultChanAudioSndCrd[2 * iCurSample + iCH] = pASIOBuf[iCurSample].Get() * iGain; + } + + if ( iSelAddCH >= 0 ) + { + // mix input channels case + const T* pASIOBufAdd = static_cast ( bufferInfos[iSelAddCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < iASIOBufferSizeMono; iCurSample++ ) + { + vecfMultChanAudioSndCrd[2 * iCurSample + iCH] = + ClipFloat ( vecfMultChanAudioSndCrd[2 * iCurSample + iCH] + + pASIOBufAdd[iCurSample].Get() * iGain ); + } + } + } + + template void bufferSwitchExport ( const int iGain, + const long index, + const int iCH ) + { + const int iSelCH = lNumInChan + vSelectedOutputChannels[iCH]; + + T* pASIOBuf = static_cast ( bufferInfos[iSelCH].buffers[index] ); + + for ( int iCurSample = 0; iCurSample < 
iASIOBufferSizeMono; iCurSample++ ) + { + pASIOBuf[iCurSample].Put ( vecfMultChanAudioSndCrd[2 * iCurSample + iCH] / iGain ); + } + } + // callbacks static void bufferSwitch ( long index, ASIOBool processNow ); static ASIOTime* bufferSwitchTimeInfo ( ASIOTime* timeInfo, long index, ASIOBool processNow );