/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2009 Xiph.Org Foundation
   Written by Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "mathops.h"
#include "cwrs.h"
#include "vq.h"
#include "arch.h"
#include "os_support.h"
#include "bands.h"
#include "rate.h"

static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
{
   int i;
   celt_norm *Xptr;
   Xptr = X;
   for (i=0;i<len-stride;i++)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(SHR32(MULT16_16(c,x2) + MULT16_16(s,x1), 15));
      *Xptr++ = EXTRACT16(SHR32(MULT16_16(c,x1) - MULT16_16(s,x2), 15));
   }
   Xptr = &X[len-2*stride-1];
   for (i=len-2*stride-1;i>=0;i--)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(SHR32(MULT16_16(c,x2) + MULT16_16(s,x1), 15));
      *Xptr-- = EXTRACT16(SHR32(MULT16_16(c,x1) - MULT16_16(s,x2), 15));
   }
}
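
/* Ignoring the Q15 scaling, one step of the kernel above is roughly the
   float computation

      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = c*x2 + s*x1;
      Xptr[0]      = c*x1 - s*x2;

   i.e. a Givens rotation of the pair (Xptr[0], Xptr[stride]) through the
   angle theta, with c = cos(theta) and s = sin(theta). A rotation by theta
   is undone by a rotation by -theta, which is why exp_rotation() below
   flips the sign of the sine term (and reverses the order of its two
   passes) to invert the transform on the decoder side. */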

static void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread)
{
   static const int SPREAD_FACTOR[3]={15,10,5};
   int i;
   opus_val16 c, s;
   opus_val16 gain, theta;
   int stride2=0;
   int factor;

   if (2*K>=len || spread==SPREAD_NONE)
      return;
   factor = SPREAD_FACTOR[spread-1];

   gain = celt_div((opus_val32)MULT16_16(Q15_ONE,len),(opus_val32)(len+factor*K));
   theta = HALF16(MULT16_16_Q15(gain,gain));

   c = celt_cos_norm(EXTEND32(theta));
   s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /* sin(theta) */

   if (len>=8*stride)
   {
      stride2 = 1;
      /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
         It's basically incrementing as long as (stride2+0.5)^2 < len/stride. */
      while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
         stride2++;
   }
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
      extract_collapse_mask().*/
   len /= stride;
   for (i=0;i<stride;i++)
   {
      if (dir < 0)
      {
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, c);
         exp_rotation1(X+i*len, len, 1, c, s);
      } else {
         exp_rotation1(X+i*len, len, 1, c, -s);
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, -c);
      }
   }
}

/** Takes the pitch vector and the decoded residual vector, computes the gain
    that will give ||p+g*y||=1 and mixes the residual with the pitch. */
static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X,
      int N, opus_val32 Ryy, opus_val16 gain)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 t;
   opus_val16 g;

#ifdef FIXED_POINT
   k = celt_ilog2(Ryy)>>1;
#endif
   t = VSHR32(Ryy, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   i=0;
   do
      X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
   while (++i < N);
}

static unsigned extract_collapse_mask(int *iy, int N, int B)
{
   unsigned collapse_mask;
   int N0;
   int i;
   if (B<=1)
      return 1;
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
      exp_rotation().*/
   N0 = N/B;
   collapse_mask = 0;
   i=0; do {
      int j;
      j=0; do {
         collapse_mask |= (iy[i*N0+j]!=0)<<i;
      } while (++j<N0);
   } while (++i<B);
   return collapse_mask;
}

unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc
#ifdef RESYNTH
   , opus_val16 gain
#endif
   )
{
   VARDECL(celt_norm, y);
   VARDECL(int, iy);
   VARDECL(opus_val16, signx);
   int i, j;
   opus_val16 s;
   int pulsesLeft;
   opus_val32 sum;
   opus_val32 xy;
   opus_val16 yy;
   unsigned collapse_mask;
   SAVE_STACK;

   celt_assert2(K>0, "alg_quant() needs at least one pulse");
   celt_assert2(N>1, "alg_quant() needs at least two dimensions");

   ALLOC(y, N, celt_norm);
   ALLOC(iy, N, int);
   ALLOC(signx, N, opus_val16);

   exp_rotation(X, N, 1, B, K, spread);

   /* Get rid of the sign */
   sum = 0;
   j=0; do {
      if (X[j]>0)
         signx[j]=1;
      else {
         signx[j]=-1;
         X[j]=-X[j];
      }
      iy[j] = 0;
      y[j] = 0;
   } while (++j<N);

   xy = yy = 0;

   pulsesLeft = K;

   /* Do a pre-search by projecting on the pyramid */
   if (K > (N>>1))
   {
      opus_val16 rcp;
      j=0; do {
         sum += X[j];
      }  while (++j<N);

      /* If X is too small, just replace it with a pulse at 0 */
#ifdef FIXED_POINT
      if (sum <= K)
#else
      /* Prevents infinities and NaNs from causing too many pulses
         to be allocated. 64 is an approximation of infinity here. */
      if (!(sum > EPSILON && sum < 64))
#endif
      {
         X[0] = QCONST16(1.f,14);
         j=1; do
            X[j]=0;
         while (++j<N);
         sum = QCONST16(1.f,14);
      }
      rcp = EXTRACT16(MULT16_32_Q16(K-1, celt_rcp(sum)));
      j=0; do {
#ifdef FIXED_POINT
         /* It's really important to round *towards zero* here */
         iy[j] = MULT16_16_Q15(X[j],rcp);
#else
         iy[j] = (int)floor(rcp*X[j]);
#endif
         y[j] = (celt_norm)iy[j];
         yy = MAC16_16(yy, y[j],y[j]);
         xy = MAC16_16(xy, X[j],y[j]);
         y[j] *= 2;
         pulsesLeft -= iy[j];
      }  while (++j<N);
   }
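
   /* When K > N/2, the pre-search above amounts to the float computation

         iy[j] = floor( (K-1) * X[j] / (X[0]+X[1]+...+X[N-1]) );

      i.e. the (now all-positive) input is projected onto the pyramid
      sum(iy) ~= K-1, rounding towards zero. Truncation guarantees the
      quick pass never allocates more than K pulses, so the greedy search
      below only has to place the few that remain. */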
   celt_assert2(pulsesLeft>=1, "Allocated too many pulses in the quick pass");

   /* This should never happen, but just in case it does (e.g. on silence)
      we fill the first bin with pulses. */
#ifdef FIXED_POINT_DEBUG
   celt_assert2(pulsesLeft<=N+3, "Not enough pulses in the quick pass");
#endif
   if (pulsesLeft > N+3)
   {
      opus_val16 tmp = (opus_val16)pulsesLeft;
      yy = MAC16_16(yy, tmp, tmp);
      yy = MAC16_16(yy, tmp, y[0]);
      iy[0] += pulsesLeft;
      pulsesLeft=0;
   }

   s = 1;
   for (i=0;i<pulsesLeft;i++)
   {
      int best_id;
      opus_val16 best_num = -VERY_LARGE16;
      opus_val16 best_den = 0;
#ifdef FIXED_POINT
      int rshift;
#endif
#ifdef FIXED_POINT
      rshift = 1+celt_ilog2(K-pulsesLeft+i+1);
#endif
      best_id = 0;
      /* The squared magnitude term gets added anyway, so we might as well
         add it outside the loop */
      yy = ADD16(yy, 1);
      j=0;
      do {
         opus_val16 Rxy, Ryy;
         /* Temporary sums of the new pulse(s) */
         Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[j])),rshift));
         /* We're multiplying y[j] by two so we don't have to do it here */
         Ryy = ADD16(yy, y[j]);

         /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
            Rxy is positive because the sign is pre-computed) */
         Rxy = MULT16_16_Q15(Rxy,Rxy);
         /* The idea is to check for num/den >= best_num/best_den, but that way
            we can do it without any division */
         /* OPT: Make sure to use conditional moves here */
         if (MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num))
         {
            best_den = Ryy;
            best_num = Rxy;
            best_id = j;
         }
      } while (++j<N);

      /* Updating the sums of the new pulse(s) */
      xy = ADD32(xy, EXTEND32(X[best_id]));
      /* We're multiplying y[j] by two so we don't have to do it here */
      yy = ADD16(yy, y[best_id]);

      /* Only now that we've made the final choice, update y/iy */
      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
      y[best_id] += 2*s;
      iy[best_id]++;
   }

   /* Put the original sign back */
   j=0;
   do {
      X[j] = MULT16_16(signx[j],X[j]);
      if (signx[j] < 0)
         iy[j] = -iy[j];
   } while (++j<N);
   encode_pulses(iy, N, K, enc);

#ifdef RESYNTH
   normalise_residual(iy, X, N, yy, gain);
   exp_rotation(X, N, -1, B, K, spread);
#endif

   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

/** Decode pulse vector and combine the result with the pitch vector to produce
    the final normalised signal in the current band. */
unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B,
      ec_dec *dec, opus_val16 gain)
{
   int i;
   opus_val32 Ryy;
   unsigned collapse_mask;
   VARDECL(int, iy);
   SAVE_STACK;

   celt_assert2(K>0, "alg_unquant() needs at least one pulse");
   celt_assert2(N>1, "alg_unquant() needs at least two dimensions");
   ALLOC(iy, N, int);
   decode_pulses(iy, N, K, dec);
   Ryy = 0;
   i=0;
   do {
      Ryy = MAC16_16(Ryy, iy[i], iy[i]);
   } while (++i < N);
   normalise_residual(iy, X, N, Ryy, gain);
   exp_rotation(X, N, -1, B, K, spread);
   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

void renormalise_vector(celt_norm *X, int N, opus_val16 gain)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 E = EPSILON;
   opus_val16 g;
   opus_val32 t;
   celt_norm *xptr = X;
   for (i=0;i<N;i++)
   {
      E = MAC16_16(E, *xptr, *xptr);
      xptr++;
   }
#ifdef FIXED_POINT
   k = celt_ilog2(E)>>1;
#endif
   t = VSHR32(E, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   xptr = X;
   for (i=0;i<N;i++)
   {
      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
      xptr++;
   }
}
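
/* Ignoring the fixed-point shift bookkeeping, renormalise_vector() above is
   roughly the float computation

      E = EPSILON;
      for (i=0;i<N;i++)
         E += X[i]*X[i];
      g = gain/sqrt(E);
      for (i=0;i<N;i++)
         X[i] *= g;

   i.e. X is rescaled so that its L2 norm is (approximately) gain, with
   EPSILON keeping the square root well-defined on an all-zero input. */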