////////////////////////////////////////////////////////////////////////////////
///
/// SSE optimized routines for Pentium-III, Athlon-XP and later CPUs. All SSE
/// optimized functions have been gathered into this single source
/// code file, regardless to their class or original source code file, in order
/// to ease porting the library to other compiler and processor platforms.
///
/// The SSE-optimizations are programmed using SSE compiler intrinsics that
/// are supported both by Microsoft Visual C++ and GCC compilers, so this file
/// should compile with both toolsets.
///
/// NOTICE: If using Visual Studio 6.0, you'll need to install the "Visual C++
/// 6.0 processor pack" update to support SSE instruction set. The update is
/// available for download at Microsoft Developers Network, see here:
/// http://msdn.microsoft.com/en-us/vstudio/aa718349.aspx
///
/// If the above URL is expired or removed, go to "http://msdn.microsoft.com" and
/// perform a search with keywords "processor pack".
///
/// Author        : Copyright (c) Olli Parviainen
/// Author e-mail : oparviai 'at' iki.fi
/// SoundTouch WWW: http://www.surina.net/soundtouch
///
////////////////////////////////////////////////////////////////////////////////
//
// Last changed  : $Date: 2014-01-07 12:25:40 -0600 (Tue, 07 Jan 2014) $
// File revision : $Revision: 4 $
//
// $Id: sse_optimized.cpp 184 2014-01-07 18:25:40Z oparviai $
//
////////////////////////////////////////////////////////////////////////////////
//
// License :
//
//  SoundTouch audio processing library
//  Copyright (c) Olli Parviainen
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2.1 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
michael@0: // michael@0: // You should have received a copy of the GNU Lesser General Public michael@0: // License along with this library; if not, write to the Free Software michael@0: // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA michael@0: // michael@0: //////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: #include "cpu_detect.h" michael@0: #include "STTypes.h" michael@0: michael@0: using namespace soundtouch; michael@0: michael@0: #ifdef SOUNDTOUCH_ALLOW_SSE michael@0: michael@0: // SSE routines available only with float sample type michael@0: michael@0: ////////////////////////////////////////////////////////////////////////////// michael@0: // michael@0: // implementation of SSE optimized functions of class 'TDStretchSSE' michael@0: // michael@0: ////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: #include "TDStretch.h" michael@0: #include michael@0: #include michael@0: michael@0: // Calculates cross correlation of two buffers michael@0: double TDStretchSSE::calcCrossCorr(const float *pV1, const float *pV2, double &norm) const michael@0: { michael@0: int i; michael@0: const float *pVec1; michael@0: const __m128 *pVec2; michael@0: __m128 vSum, vNorm; michael@0: michael@0: // Note. It means a major slow-down if the routine needs to tolerate michael@0: // unaligned __m128 memory accesses. It's way faster if we can skip michael@0: // unaligned slots and use _mm_load_ps instruction instead of _mm_loadu_ps. michael@0: // This can mean up to ~ 10-fold difference (incl. part of which is michael@0: // due to skipping every second round for stereo sound though). michael@0: // michael@0: // Compile-time define SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION is provided michael@0: // for choosing if this little cheating is allowed. 
michael@0: michael@0: #ifdef SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION michael@0: // Little cheating allowed, return valid correlation only for michael@0: // aligned locations, meaning every second round for stereo sound. michael@0: michael@0: #define _MM_LOAD _mm_load_ps michael@0: michael@0: if (((ulongptr)pV1) & 15) return -1e50; // skip unaligned locations michael@0: michael@0: #else michael@0: // No cheating allowed, use unaligned load & take the resulting michael@0: // performance hit. michael@0: #define _MM_LOAD _mm_loadu_ps michael@0: #endif michael@0: michael@0: // ensure overlapLength is divisible by 8 michael@0: assert((overlapLength % 8) == 0); michael@0: michael@0: // Calculates the cross-correlation value between 'pV1' and 'pV2' vectors michael@0: // Note: pV2 _must_ be aligned to 16-bit boundary, pV1 need not. michael@0: pVec1 = (const float*)pV1; michael@0: pVec2 = (const __m128*)pV2; michael@0: vSum = vNorm = _mm_setzero_ps(); michael@0: michael@0: // Unroll the loop by factor of 4 * 4 operations. Use same routine for michael@0: // stereo & mono, for mono it just means twice the amount of unrolling. 
michael@0: for (i = 0; i < channels * overlapLength / 16; i ++) michael@0: { michael@0: __m128 vTemp; michael@0: // vSum += pV1[0..3] * pV2[0..3] michael@0: vTemp = _MM_LOAD(pVec1); michael@0: vSum = _mm_add_ps(vSum, _mm_mul_ps(vTemp ,pVec2[0])); michael@0: vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp ,vTemp)); michael@0: michael@0: // vSum += pV1[4..7] * pV2[4..7] michael@0: vTemp = _MM_LOAD(pVec1 + 4); michael@0: vSum = _mm_add_ps(vSum, _mm_mul_ps(vTemp, pVec2[1])); michael@0: vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp ,vTemp)); michael@0: michael@0: // vSum += pV1[8..11] * pV2[8..11] michael@0: vTemp = _MM_LOAD(pVec1 + 8); michael@0: vSum = _mm_add_ps(vSum, _mm_mul_ps(vTemp, pVec2[2])); michael@0: vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp ,vTemp)); michael@0: michael@0: // vSum += pV1[12..15] * pV2[12..15] michael@0: vTemp = _MM_LOAD(pVec1 + 12); michael@0: vSum = _mm_add_ps(vSum, _mm_mul_ps(vTemp, pVec2[3])); michael@0: vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp ,vTemp)); michael@0: michael@0: pVec1 += 16; michael@0: pVec2 += 4; michael@0: } michael@0: michael@0: // return value = vSum[0] + vSum[1] + vSum[2] + vSum[3] michael@0: float *pvNorm = (float*)&vNorm; michael@0: norm = (pvNorm[0] + pvNorm[1] + pvNorm[2] + pvNorm[3]); michael@0: michael@0: float *pvSum = (float*)&vSum; michael@0: return (double)(pvSum[0] + pvSum[1] + pvSum[2] + pvSum[3]) / sqrt(norm < 1e-9 ? 
1.0 : norm); michael@0: michael@0: /* This is approximately corresponding routine in C-language yet without normalization: michael@0: double corr, norm; michael@0: uint i; michael@0: michael@0: // Calculates the cross-correlation value between 'pV1' and 'pV2' vectors michael@0: corr = norm = 0.0; michael@0: for (i = 0; i < channels * overlapLength / 16; i ++) michael@0: { michael@0: corr += pV1[0] * pV2[0] + michael@0: pV1[1] * pV2[1] + michael@0: pV1[2] * pV2[2] + michael@0: pV1[3] * pV2[3] + michael@0: pV1[4] * pV2[4] + michael@0: pV1[5] * pV2[5] + michael@0: pV1[6] * pV2[6] + michael@0: pV1[7] * pV2[7] + michael@0: pV1[8] * pV2[8] + michael@0: pV1[9] * pV2[9] + michael@0: pV1[10] * pV2[10] + michael@0: pV1[11] * pV2[11] + michael@0: pV1[12] * pV2[12] + michael@0: pV1[13] * pV2[13] + michael@0: pV1[14] * pV2[14] + michael@0: pV1[15] * pV2[15]; michael@0: michael@0: for (j = 0; j < 15; j ++) norm += pV1[j] * pV1[j]; michael@0: michael@0: pV1 += 16; michael@0: pV2 += 16; michael@0: } michael@0: return corr / sqrt(norm); michael@0: */ michael@0: } michael@0: michael@0: michael@0: michael@0: double TDStretchSSE::calcCrossCorrAccumulate(const float *pV1, const float *pV2, double &norm) const michael@0: { michael@0: // call usual calcCrossCorr function because SSE does not show big benefit of michael@0: // accumulating "norm" value, and also the "norm" rolling algorithm would get michael@0: // complicated due to SSE-specific alignment-vs-nonexact correlation rules. 
michael@0: return calcCrossCorr(pV1, pV2, norm); michael@0: } michael@0: michael@0: michael@0: ////////////////////////////////////////////////////////////////////////////// michael@0: // michael@0: // implementation of SSE optimized functions of class 'FIRFilter' michael@0: // michael@0: ////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: #include "FIRFilter.h" michael@0: michael@0: FIRFilterSSE::FIRFilterSSE() : FIRFilter() michael@0: { michael@0: filterCoeffsAlign = NULL; michael@0: filterCoeffsUnalign = NULL; michael@0: } michael@0: michael@0: michael@0: FIRFilterSSE::~FIRFilterSSE() michael@0: { michael@0: delete[] filterCoeffsUnalign; michael@0: filterCoeffsAlign = NULL; michael@0: filterCoeffsUnalign = NULL; michael@0: } michael@0: michael@0: michael@0: // (overloaded) Calculates filter coefficients for SSE routine michael@0: void FIRFilterSSE::setCoefficients(const float *coeffs, uint newLength, uint uResultDivFactor) michael@0: { michael@0: uint i; michael@0: float fDivider; michael@0: michael@0: FIRFilter::setCoefficients(coeffs, newLength, uResultDivFactor); michael@0: michael@0: // Scale the filter coefficients so that it won't be necessary to scale the filtering result michael@0: // also rearrange coefficients suitably for SSE michael@0: // Ensure that filter coeffs array is aligned to 16-byte boundary michael@0: delete[] filterCoeffsUnalign; michael@0: filterCoeffsUnalign = new float[2 * newLength + 4]; michael@0: filterCoeffsAlign = (float *)SOUNDTOUCH_ALIGN_POINTER_16(filterCoeffsUnalign); michael@0: michael@0: fDivider = (float)resultDivider; michael@0: michael@0: // rearrange the filter coefficients for mmx routines michael@0: for (i = 0; i < newLength; i ++) michael@0: { michael@0: filterCoeffsAlign[2 * i + 0] = michael@0: filterCoeffsAlign[2 * i + 1] = coeffs[i + 0] / fDivider; michael@0: } michael@0: } michael@0: michael@0: michael@0: michael@0: // SSE-optimized version of the filter routine for 
stereo sound michael@0: uint FIRFilterSSE::evaluateFilterStereo(float *dest, const float *source, uint numSamples) const michael@0: { michael@0: int count = (int)((numSamples - length) & (uint)-2); michael@0: int j; michael@0: michael@0: assert(count % 2 == 0); michael@0: michael@0: if (count < 2) return 0; michael@0: michael@0: assert(source != NULL); michael@0: assert(dest != NULL); michael@0: assert((length % 8) == 0); michael@0: assert(filterCoeffsAlign != NULL); michael@0: assert(((ulongptr)filterCoeffsAlign) % 16 == 0); michael@0: michael@0: // filter is evaluated for two stereo samples with each iteration, thus use of 'j += 2' michael@0: for (j = 0; j < count; j += 2) michael@0: { michael@0: const float *pSrc; michael@0: const __m128 *pFil; michael@0: __m128 sum1, sum2; michael@0: uint i; michael@0: michael@0: pSrc = (const float*)source; // source audio data michael@0: pFil = (const __m128*)filterCoeffsAlign; // filter coefficients. NOTE: Assumes coefficients michael@0: // are aligned to 16-byte boundary michael@0: sum1 = sum2 = _mm_setzero_ps(); michael@0: michael@0: for (i = 0; i < length / 8; i ++) michael@0: { michael@0: // Unroll loop for efficiency & calculate filter for 2*2 stereo samples michael@0: // at each pass michael@0: michael@0: // sum1 is accu for 2*2 filtered stereo sound data at the primary sound data offset michael@0: // sum2 is accu for 2*2 filtered stereo sound data for the next sound sample offset. 
michael@0: michael@0: sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc) , pFil[0])); michael@0: sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 2), pFil[0])); michael@0: michael@0: sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 4), pFil[1])); michael@0: sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 6), pFil[1])); michael@0: michael@0: sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 8) , pFil[2])); michael@0: sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 10), pFil[2])); michael@0: michael@0: sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 12), pFil[3])); michael@0: sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 14), pFil[3])); michael@0: michael@0: pSrc += 16; michael@0: pFil += 4; michael@0: } michael@0: michael@0: // Now sum1 and sum2 both have a filtered 2-channel sample each, but we still need michael@0: // to sum the two hi- and lo-floats of these registers together. michael@0: michael@0: // post-shuffle & add the filtered values and store to dest. michael@0: _mm_storeu_ps(dest, _mm_add_ps( michael@0: _mm_shuffle_ps(sum1, sum2, _MM_SHUFFLE(1,0,3,2)), // s2_1 s2_0 s1_3 s1_2 michael@0: _mm_shuffle_ps(sum1, sum2, _MM_SHUFFLE(3,2,1,0)) // s2_3 s2_2 s1_1 s1_0 michael@0: )); michael@0: source += 4; michael@0: dest += 4; michael@0: } michael@0: michael@0: // Ideas for further improvement: michael@0: // 1. If it could be guaranteed that 'source' were always aligned to 16-byte michael@0: // boundary, a faster aligned '_mm_load_ps' instruction could be used. michael@0: // 2. If it could be guaranteed that 'dest' were always aligned to 16-byte michael@0: // boundary, a faster '_mm_store_ps' instruction could be used. michael@0: michael@0: return (uint)count; michael@0: michael@0: /* original routine in C-language. please notice the C-version has differently michael@0: organized coefficients though. 
michael@0: double suml1, suml2; michael@0: double sumr1, sumr2; michael@0: uint i, j; michael@0: michael@0: for (j = 0; j < count; j += 2) michael@0: { michael@0: const float *ptr; michael@0: const float *pFil; michael@0: michael@0: suml1 = sumr1 = 0.0; michael@0: suml2 = sumr2 = 0.0; michael@0: ptr = src; michael@0: pFil = filterCoeffs; michael@0: for (i = 0; i < lengthLocal; i ++) michael@0: { michael@0: // unroll loop for efficiency. michael@0: michael@0: suml1 += ptr[0] * pFil[0] + michael@0: ptr[2] * pFil[2] + michael@0: ptr[4] * pFil[4] + michael@0: ptr[6] * pFil[6]; michael@0: michael@0: sumr1 += ptr[1] * pFil[1] + michael@0: ptr[3] * pFil[3] + michael@0: ptr[5] * pFil[5] + michael@0: ptr[7] * pFil[7]; michael@0: michael@0: suml2 += ptr[8] * pFil[0] + michael@0: ptr[10] * pFil[2] + michael@0: ptr[12] * pFil[4] + michael@0: ptr[14] * pFil[6]; michael@0: michael@0: sumr2 += ptr[9] * pFil[1] + michael@0: ptr[11] * pFil[3] + michael@0: ptr[13] * pFil[5] + michael@0: ptr[15] * pFil[7]; michael@0: michael@0: ptr += 16; michael@0: pFil += 8; michael@0: } michael@0: dest[0] = (float)suml1; michael@0: dest[1] = (float)sumr1; michael@0: dest[2] = (float)suml2; michael@0: dest[3] = (float)sumr2; michael@0: michael@0: src += 4; michael@0: dest += 4; michael@0: } michael@0: */ michael@0: } michael@0: michael@0: #endif // SOUNDTOUCH_ALLOW_SSE