diff -r 000000000000 -r 6474c204b198 media/libopus/celt/vq.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/media/libopus/celt/vq.c	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,415 @@
+/* Copyright (c) 2007-2008 CSIRO
+   Copyright (c) 2007-2009 Xiph.Org Foundation
+   Written by Jean-Marc Valin */
+/*
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   - Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "mathops.h"
+#include "cwrs.h"
+#include "vq.h"
+#include "arch.h"
+#include "os_support.h"
+#include "bands.h"
+#include "rate.h"
+
+static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
+{
+   int i;
+   celt_norm *Xptr;
+   Xptr = X;
+   for (i=0;i<len-stride;i++)
+   {
+      celt_norm x1, x2;
+      x1 = Xptr[0];
+      x2 = Xptr[stride];
+      Xptr[stride] = EXTRACT16(SHR32(MULT16_16(c,x2) + MULT16_16(s,x1), 15));
+      *Xptr++      = EXTRACT16(SHR32(MULT16_16(c,x1) - MULT16_16(s,x2), 15));
+   }
+   Xptr = &X[len-2*stride-1];
+   for (i=len-2*stride-1;i>=0;i--)
+   {
+      celt_norm x1, x2;
+      x1 = Xptr[0];
+      x2 = Xptr[stride];
+      Xptr[stride] = EXTRACT16(SHR32(MULT16_16(c,x2) + MULT16_16(s,x1), 15));
+      *Xptr--      = EXTRACT16(SHR32(MULT16_16(c,x1) - MULT16_16(s,x2), 15));
+   }
+}
+
+static void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread)
+{
+   static const int SPREAD_FACTOR[3]={15,10,5};
+   int i;
+   opus_val16 c, s;
+   opus_val16 gain, theta;
+   int stride2=0;
+   int factor;
+
+   if (2*K>=len || spread==SPREAD_NONE)
+      return;
+   factor = SPREAD_FACTOR[spread-1];
+
+   gain = celt_div((opus_val32)MULT16_16(Q15_ONE,len),(opus_val32)(len+factor*K));
+   theta = HALF16(MULT16_16_Q15(gain,gain));
+
+   c = celt_cos_norm(EXTEND32(theta));
+   s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /*  sin(theta) */
+
+   if (len>=8*stride)
+   {
+      stride2 = 1;
+      /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
+         It's basically incrementing as long as (stride2+0.5)^2 < len/stride. */
+      while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
+         stride2++;
+   }
+   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
+      extract_collapse_mask().*/
+   len /= stride;
+   for (i=0;i<stride;i++)
+   {
+      if (dir < 0)
+      {
+         if (stride2)
+            exp_rotation1(X+i*len, len, stride2, s, c);
+         exp_rotation1(X+i*len, len, 1, c, s);
+      } else {
+         exp_rotation1(X+i*len, len, 1, c, -s);
+         if (stride2)
+            exp_rotation1(X+i*len, len, stride2, s, -c);
+      }
+   }
+}
+
+/** Takes the pitch vector and the decoded residual vector, computes the gain
+    that will give ||p+g*y||=1 and mixes the residual with the pitch. */
+static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X,
+      int N, opus_val32 Ryy, opus_val16 gain)
+{
+   int i;
+#ifdef FIXED_POINT
+   int k;
+#endif
+   opus_val32 t;
+   opus_val16 g;
+
+#ifdef FIXED_POINT
+   k = celt_ilog2(Ryy)>>1;
+#endif
+   t = VSHR32(Ryy, 2*(k-7));
+   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);
+
+   i=0;
+   do
+      X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
+   while (++i < N);
+}
+
+static unsigned extract_collapse_mask(int *iy, int N, int B)
+{
+   unsigned collapse_mask;
+   int N0;
+   int i;
+   if (B<=1)
+      return 1;
+   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
+      exp_rotation().*/
+   N0 = N/B;
+   collapse_mask = 0;
+   i=0; do {
+      int j;
+      j=0; do {
+         collapse_mask |= (iy[i*N0+j]!=0)<<i;
+      } while (++j<N0);
+   } while (++i<B);
+   return collapse_mask;
+}
+
+unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc
+#ifdef RESYNTH
+   , opus_val16 gain
+#endif
+   )
+{
+   VARDECL(celt_norm, y);
+   VARDECL(int, iy);
+   VARDECL(opus_val16, signx);
+   int i, j;
+   opus_val16 s;
+   int pulsesLeft;
+   opus_val32 sum;
+   opus_val32 xy;
+   opus_val16 yy;
+   unsigned collapse_mask;
+   SAVE_STACK;
+
+   celt_assert2(K>0, "alg_quant() needs at least one pulse");
+   celt_assert2(N>1, "alg_quant() needs at least two dimensions");
+
+   ALLOC(y, N, celt_norm);
+   ALLOC(iy, N, int);
+   ALLOC(signx, N, opus_val16);
+
+   exp_rotation(X, N, 1, B, K, spread);
+
+   /* Get rid of the sign */
+   sum = 0;
+   j=0; do {
+      if (X[j]>0)
+         signx[j]=1;
+      else {
+         signx[j]=-1;
+         X[j]=-X[j];
+      }
+      iy[j] = 0;
+      y[j] = 0;
+   } while (++j<N);
+
+   xy = yy = 0;
+
+   pulsesLeft = K;
+
+   /* Do a pre-search by projecting on the pyramid */
+   if (K > (N>>1))
+   {
+      opus_val16 rcp;
+      j=0; do {
+         sum += X[j];
+      } while (++j<N);
+
+      /* If X is too small, just replace it with a pulse at 0 */
+#ifdef FIXED_POINT
+      if (sum <= K)
+#else
+      /* Prevents infinities and NaNs from causing too many pulses
+         to be allocated. 64 is an approximation of infinity here. */
+      if (!(sum > EPSILON && sum < 64))
+#endif
+      {
+         X[0] = QCONST16(1.f,14);
+         j=1; do
+            X[j]=0;
+         while (++j<N);
+         sum = QCONST16(1.f,14);
+      }
+      rcp = EXTRACT16(MULT16_32_Q16(K-1, celt_rcp(sum)));
+      j=0; do {
+#ifdef FIXED_POINT
+         /* It's really important to round *towards zero* here */
+         iy[j] = MULT16_16_Q15(X[j],rcp);
+#else
+         iy[j] = (int)floor(rcp*X[j]);
+#endif
+         y[j] = (celt_norm)iy[j];
+         yy = MAC16_16(yy, y[j],y[j]);
+         xy = MAC16_16(xy, X[j],y[j]);
+         y[j] *= 2;
+         pulsesLeft -= iy[j];
+      } while (++j<N);
+   }
+   celt_assert2(pulsesLeft>=1, "Allocated too many pulses in the quick pass");
+
+   /* This should never happen, but just in case it does (e.g. on silence)
+      we fill the first bin with pulses. */
+#ifdef FIXED_POINT_DEBUG
+   celt_assert2(pulsesLeft<=N+3, "Not enough pulses in the quick pass");
+#endif
+   if (pulsesLeft > N+3)
+   {
+      opus_val16 tmp = (opus_val16)pulsesLeft;
+      yy = MAC16_16(yy, tmp, tmp);
+      yy = MAC16_16(yy, tmp, y[0]);
+      iy[0] += pulsesLeft;
+      pulsesLeft=0;
+   }
+
+   s = 1;
+   for (i=0;i<pulsesLeft;i++)
+   {
+      int best_id;
+      opus_val32 best_num = -VERY_LARGE16;
+      opus_val16 best_den = 0;
+#ifdef FIXED_POINT
+      int rshift;
+#endif
+#ifdef FIXED_POINT
+      /* Ensures we can never overflow on the fixed-point multiply */
+      rshift = 1+celt_ilog2(K-pulsesLeft+i+1);
+#endif
+      best_id = 0;
+      /* The squared magnitude term gets added anyway, so we might as well
+         add it outside the loop */
+      yy = ADD16(yy, 1);
+      j=0;
+      do {
+         opus_val16 Rxy, Ryy;
+         /* Temporary sums of the new pulse(s) */
+         Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[j])),rshift));
+         /* We're multiplying y[j] by two so we don't have to do it here */
+         Ryy = ADD16(yy, y[j]);
+
+         /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
+            Rxy is positive because the sign is pre-computed) */
+         Rxy = MULT16_16_Q15(Rxy,Rxy);
+         /* The idea is to check for num/den >= best_num/best_den, but that way
+            we can do it without any division */
+         /* OPT: Make sure to use conditional moves here */
+         if (MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num))
+         {
+            best_den = Ryy;
+            best_num = Rxy;
+            best_id = j;
+         }
+      } while (++j<N);
+
+      /* Updating the sums of the new pulse(s) */
+      xy = ADD32(xy, EXTEND32(X[best_id]));
+      /* We're multiplying y[j] by two so we don't have to do it here */
+      yy = ADD16(yy, y[best_id]);
+
+      /* Only now that we've made the final choice, update y/iy */
+      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
+      y[best_id] += 2*s;
+      iy[best_id]++;
+   }
+
+   /* Put the original sign back */
+   j=0;
+   do {
+      X[j] = MULT16_16(signx[j],X[j]);
+      if (signx[j] < 0)
+         iy[j] = -iy[j];
+   } while (++j<N);
+   encode_pulses(iy, N, K, enc);
+
+#ifdef RESYNTH
+   normalise_residual(iy, X, N, yy, gain);
+   exp_rotation(X, N, -1, B, K, spread);
+#endif
+
+   collapse_mask = extract_collapse_mask(iy, N, B);
+   RESTORE_STACK;
+   return collapse_mask;
+}
+
+/** Decode pulse vector and combine the result with the pitch vector to produce
+    the final normalised signal in the current band. */
+unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B,
+      ec_dec *dec, opus_val16 gain)
+{
+   int i;
+   opus_val32 Ryy;
+   unsigned collapse_mask;
+   VARDECL(int, iy);
+   SAVE_STACK;
+
+   celt_assert2(K>0, "alg_unquant() needs at least one pulse");
+   celt_assert2(N>1, "alg_unquant() needs at least two dimensions");
+   ALLOC(iy, N, int);
+   decode_pulses(iy, N, K, dec);
+   Ryy = 0;
+   i=0;
+   do {
+      Ryy = MAC16_16(Ryy, iy[i], iy[i]);
+   } while (++i < N);
+   normalise_residual(iy, X, N, Ryy, gain);
+   exp_rotation(X, N, -1, B, K, spread);
+   collapse_mask = extract_collapse_mask(iy, N, B);
+   RESTORE_STACK;
+   return collapse_mask;
+}
+
+void renormalise_vector(celt_norm *X, int N, opus_val16 gain)
+{
+   int i;
+#ifdef FIXED_POINT
+   int k;
+#endif
+   opus_val32 E = EPSILON;
+   opus_val16 g;
+   opus_val32 t;
+   celt_norm *xptr = X;
+   for (i=0;i<N;i++)
+   {
+      E = MAC16_16(E, *xptr, *xptr);
+      xptr++;
+   }
+#ifdef FIXED_POINT
+   k = celt_ilog2(E)>>1;
+#endif
+   t = VSHR32(E, 2*(k-7));
+   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);
+
+   xptr = X;
+   for (i=0;i<N;i++)
+   {
+      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
+      xptr++;
+   }
+   /*return celt_sqrt(E);*/
+}
+
+int stereo_itheta(celt_norm *X, celt_norm *Y, int stereo, int N)
+{
+   int i;
+   int itheta;
+   opus_val16 mid, side;
+   opus_val32 Emid, Eside;
+
+   Emid = Eside = EPSILON;
+   if (stereo)
+   {
+      for (i=0;i<N;i++)
+      {
+         celt_norm m, s;
+         m = ADD16(SHR16(X[i],1),SHR16(Y[i],1));
+         s = SUB16(SHR16(X[i],1),SHR16(Y[i],1));
+         Emid = MAC16_16(Emid, m, m);
+         Eside = MAC16_16(Eside, s, s);
+      }
+   } else {
+      for (i=0;i<N;i++)
+      {
+         celt_norm m, s;
+         m = X[i];
+         s = Y[i];
+         Emid = MAC16_16(Emid, m, m);
+         Eside = MAC16_16(Eside, s, s);
+      }
+   }
+   mid = celt_sqrt(Emid);
+   side = celt_sqrt(Eside);
+#ifdef FIXED_POINT
+   /* 0.63662 = 2/pi */
+   itheta = MULT16_16_Q15(QCONST16(0.63662f,15),celt_atan2p(side, mid));
+#else
+   itheta = (int)floor(.5f+16384*0.63662f*atan2(side,mid));
+#endif
+
+   return itheta;
+}
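
A note on the rounding loop in exp_rotation(), with a standalone check: multiplying the commented condition (stride2+0.5)^2 < len/stride through by stride gives (stride2^2+stride2)*stride + stride/4 < len, which is the integer test used in the patch; truncating stride/4 to (stride>>2) never changes the outcome, because every other term in the comparison is an integer. The test ranges below (stride up to 4, len up to 240) are illustrative choices for the check, not values taken from CELT.

    #include <stdio.h>

    int main(void)
    {
       int len, stride;
       for (stride=1;stride<=4;stride++)
       {
          for (len=8*stride;len<=240;len++)
          {
             int stride2 = 1; /* integer test, exactly as in exp_rotation() */
             int s_ref = 1;   /* the same recurrence in real arithmetic */
             while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
                stride2++;
             while ((s_ref+0.5)*(s_ref+0.5)*stride < (double)len)
                s_ref++;
             if (stride2 != s_ref)
             {
                printf("mismatch: len=%d stride=%d int=%d real=%d\n",
                       len, stride, stride2, s_ref);
                return 1;
             }
          }
       }
       printf("integer and real-valued tests agree on all cases\n");
       return 0;
    }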
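The pulse search in alg_quant() places K pulses one at a time, each time picking the position j that maximises the squared correlation Rxy^2 over the energy Ryy, and it avoids the per-candidate division by cross-multiplying: with both denominators positive, Rxy^2/Ryy > best_num/best_den holds exactly when best_den*Rxy^2 > Ryy*best_num. A minimal float-only sketch of one such placement, assuming plain float arithmetic in place of the opus_val16/opus_val32 macros (the helper name and toy input are invented for illustration):

    #include <stdio.h>

    /* One greedy pulse placement, as in alg_quant()'s inner loop; assumes
       y[] is pre-scaled by 2 and yy pre-incremented by 1, as in the patch. */
    static int best_pulse_position(const float *X, const float *y,
                                   float xy, float yy, int N)
    {
       int j, best_id = 0;
       float best_num = -1e15f; /* squared correlation of best candidate */
       float best_den = 0;      /* energy of best candidate */
       for (j=0;j<N;j++)
       {
          float Rxy = xy + X[j]; /* correlation if the pulse lands at j */
          float Ryy = yy + y[j]; /* energy if the pulse lands at j */
          Rxy *= Rxy;
          /* Division-free comparison: Rxy/Ryy > best_num/best_den. */
          if (best_den*Rxy > Ryy*best_num)
          {
             best_den = Ryy;
             best_num = Rxy;
             best_id = j;
          }
       }
       return best_id;
    }

    int main(void)
    {
       float X[4] = {0.1f, 0.7f, 0.2f, 0.05f};
       float y[4] = {0, 0, 0, 0};
       /* The largest component attracts the first pulse: prints 1. */
       printf("first pulse at index %d\n", best_pulse_position(X, y, 0, 1, 4));
       return 0;
    }

Seeding best_num with a large negative value guarantees that the first candidate wins the first comparison, which is what the -VERY_LARGE16 initialiser achieves in the patch.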
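extract_collapse_mask() packs one bit per sub-block: bit b of the result is set iff sub-block b of iy[] received at least one pulse, which later lets the decoder detect collapsed bands. A standalone rendition with a worked example; the function name and test vector are invented for illustration, and the N/B sub-block layout matches the patch.

    #include <stdio.h>

    static unsigned collapse_mask_of(const int *iy, int N, int B)
    {
       unsigned mask;
       int N0, i, j;
       if (B<=1)
          return 1;
       N0 = N/B; /* samples per sub-block */
       mask = 0;
       for (i=0;i<B;i++)
          for (j=0;j<N0;j++)
             mask |= (iy[i*N0+j]!=0)<<i;
       return mask;
    }

    int main(void)
    {
       /* 8 samples in 4 sub-blocks; pulses only in blocks 0 and 2. */
       int iy[8] = {1, 0, 0, 0, -2, 1, 0, 0};
       printf("mask = 0x%x\n", collapse_mask_of(iy, 8, 4)); /* prints 0x5 */
       return 0;
    }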
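In a float build, the fixed-point macros in renormalise_vector() (VSHR32, EXTRACT16, PSHR32) reduce to identity operations and celt_rsqrt_norm(t) to 1/sqrt(t), so the function simply rescales X to an L2 norm of `gain`. A sketch of that float-path reading, assuming the float build's EPSILON of 1e-15f:

    #include <math.h>
    #include <stdio.h>

    static void renormalise_vector_float(float *X, int N, float gain)
    {
       int i;
       float E = 1e-15f; /* EPSILON: keeps 1/sqrt(E) finite on silence */
       float g;
       for (i=0;i<N;i++)
          E += X[i]*X[i];
       g = gain/sqrtf(E); /* float reading of celt_rsqrt_norm(t)*gain */
       for (i=0;i<N;i++)
          X[i] *= g;
    }

    int main(void)
    {
       float X[3] = {3, 0, 4};
       renormalise_vector_float(X, 3, 1.0f);
       printf("%.2f %.2f %.2f\n", X[0], X[1], X[2]); /* 0.60 0.00 0.80 */
       return 0;
    }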