media/libtremor/lib/misc.h

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/media/libtremor/lib/misc.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,250 @@
     1.4 +/********************************************************************
     1.5 + *                                                                  *
     1.6 + * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
     1.7 + *                                                                  *
     1.8 + * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
     1.9 + * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
    1.10 + * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
    1.11 + *                                                                  *
    1.12 + * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
    1.13 + * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
    1.14 + *                                                                  *
    1.15 + ********************************************************************
    1.16 +
    1.17 + function: miscellaneous math and prototypes
    1.18 +
    1.19 + ********************************************************************/
    1.20 +
    1.21 +#ifndef _V_RANDOM_H_
    1.22 +#define _V_RANDOM_H_
    1.23 +#include "ivorbiscodec.h"
    1.24 +#include "os.h"
    1.25 +
    1.26 +#ifdef _LOW_ACCURACY_
    1.27 +#  define X(n) (((((n)>>22)+1)>>1) - ((((n)>>22)+1)>>9))
    1.28 +#  define LOOKUP_T const unsigned char
    1.29 +#else
    1.30 +#  define X(n) (n)
    1.31 +#  define LOOKUP_T const ogg_int32_t
    1.32 +#endif
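With _LOW_ACCURACY_ defined the lookup tables are stored as unsigned char, and X() rescales each 32-bit table constant into that 8-bit range at compile time; without it, the constants keep full 32-bit precision. A quick sketch of the mapping, consistent with the "preshifted from 31 to 8 bits" note further down:

    X(0x7fffffff)   /* -> 255: full-scale table constant */
    X(0x40000000)   /* -> 128: half scale                */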
    1.33 +
    1.34 +#include "asm_arm.h"
    1.35 +#include <stdlib.h> /* for abs() */
    1.36 +  
    1.37 +#ifndef _V_WIDE_MATH
    1.38 +#define _V_WIDE_MATH
    1.39 +  
    1.40 +#ifndef  _LOW_ACCURACY_
    1.41 +/* 64 bit multiply */
    1.42 +
    1.43 +#if !(defined WIN32 && defined WINCE)
    1.44 +#include <sys/types.h>
    1.45 +#endif
    1.46 +
    1.47 +#if BYTE_ORDER==LITTLE_ENDIAN
    1.48 +union magic {
    1.49 +  struct {
    1.50 +    ogg_int32_t lo;
    1.51 +    ogg_int32_t hi;
    1.52 +  } halves;
    1.53 +  ogg_int64_t whole;
    1.54 +};
    1.55 +#elif BYTE_ORDER==BIG_ENDIAN
    1.56 +union magic {
    1.57 +  struct {
    1.58 +    ogg_int32_t hi;
    1.59 +    ogg_int32_t lo;
    1.60 +  } halves;
    1.61 +  ogg_int64_t whole;
    1.62 +};
    1.63 +#endif
    1.64 +
    1.65 +STIN ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
    1.66 +  union magic magic;
    1.67 +  magic.whole = (ogg_int64_t)x * y;
    1.68 +  return magic.halves.hi;
    1.69 +}
    1.70 +
    1.71 +STIN ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
    1.72 +  return MULT32(x,y)<<1;
    1.73 +}
    1.74 +
    1.75 +STIN ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
    1.76 +  union magic magic;
    1.77 +  magic.whole  = (ogg_int64_t)x * y;
    1.78 +  return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
    1.79 +}
    1.80 +
    1.81 +#else
    1.82 +/* 32 bit multiply, more portable but less accurate */
    1.83 +
    1.84 +/*
     1.85 + * Note: Precision is biased towards the first argument, so argument
     1.86 + * ordering is important.  Shift values were chosen for the best sound
     1.87 + * quality after many listening tests.
    1.88 + */
    1.89 +
    1.90 +/*
    1.91 + * For MULT32 and MULT31: The second argument is always a lookup table
    1.92 + * value already preshifted from 31 to 8 bits.  We therefore take the 
    1.93 + * opportunity to save on text space and use unsigned char for those
    1.94 + * tables in this case.
    1.95 + */
    1.96 +
    1.97 +STIN ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
    1.98 +  return (x >> 9) * y;  /* y preshifted >>23 */
    1.99 +}
   1.100 +
   1.101 +STIN ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
   1.102 +  return (x >> 8) * y;  /* y preshifted >>23 */
   1.103 +}
   1.104 +
   1.105 +STIN ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
   1.106 +  return (x >> 6) * y;  /* y preshifted >>9 */
   1.107 +}
   1.108 +
   1.109 +#endif
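Either branch provides the same fixed-point contract: MULT32(x,y) approximates the high 32 bits of the signed 64-bit product (exact on the 64-bit path above), and MULT31 is the same product at Q31 scaling. A minimal sketch of the convention, assuming the exact 64-bit path:

    ogg_int32_t half = 0x40000000;           /* 0.5 in Q31              */
    ogg_int32_t q31  = MULT31(half, half);   /* 0x20000000: 0.25 in Q31 */
    ogg_int32_t hi32 = MULT32(half, half);   /* 0x10000000: q31 >> 1    */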
   1.110 +
   1.111 +/*
   1.112 + * This should be used as a memory barrier, forcing all cached values in
    1.113 + * registers to be written back to memory.  Might or might not be beneficial
   1.114 + * depending on the architecture and compiler.
   1.115 + */
   1.116 +#define MB()
   1.117 +
   1.118 +/*
   1.119 + * The XPROD functions are meant to optimize the cross products found all
   1.120 + * over the place in mdct.c by forcing memory operation ordering to avoid
   1.121 + * unnecessary register reloads as soon as memory is being written to.
    1.122 + * However, this is only beneficial on CPUs with a sane number of
    1.123 + * general-purpose registers, which excludes the Intel x86.  On x86 it is
    1.124 + * better to let the compiler reload registers directly from the original
    1.125 + * memory by using macros.
   1.126 + */
   1.127 +
   1.128 +#ifdef __i386__
   1.129 +
   1.130 +#define XPROD32(_a, _b, _t, _v, _x, _y)		\
   1.131 +  { *(_x)=MULT32(_a,_t)+MULT32(_b,_v);		\
   1.132 +    *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
   1.133 +#define XPROD31(_a, _b, _t, _v, _x, _y)		\
   1.134 +  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v);		\
   1.135 +    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
   1.136 +#define XNPROD31(_a, _b, _t, _v, _x, _y)	\
   1.137 +  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v);		\
   1.138 +    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
   1.139 +
   1.140 +#else
   1.141 +
   1.142 +STIN void XPROD32(ogg_int32_t  a, ogg_int32_t  b,
   1.143 +			   ogg_int32_t  t, ogg_int32_t  v,
   1.144 +			   ogg_int32_t *x, ogg_int32_t *y)
   1.145 +{
   1.146 +  *x = MULT32(a, t) + MULT32(b, v);
   1.147 +  *y = MULT32(b, t) - MULT32(a, v);
   1.148 +}
   1.149 +
   1.150 +STIN void XPROD31(ogg_int32_t  a, ogg_int32_t  b,
   1.151 +			   ogg_int32_t  t, ogg_int32_t  v,
   1.152 +			   ogg_int32_t *x, ogg_int32_t *y)
   1.153 +{
   1.154 +  *x = MULT31(a, t) + MULT31(b, v);
   1.155 +  *y = MULT31(b, t) - MULT31(a, v);
   1.156 +}
   1.157 +
   1.158 +STIN void XNPROD31(ogg_int32_t  a, ogg_int32_t  b,
   1.159 +			    ogg_int32_t  t, ogg_int32_t  v,
   1.160 +			    ogg_int32_t *x, ogg_int32_t *y)
   1.161 +{
   1.162 +  *x = MULT31(a, t) - MULT31(b, v);
   1.163 +  *y = MULT31(b, t) + MULT31(a, v);
   1.164 +}
   1.165 +
   1.166 +#endif
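Whether expanded as macros or as inline functions, these helpers compute the two sign variants of the same cross product; reading t and v as cos/sin twiddle values (an interpretation, but the component formulas are exactly what the code computes), they are the two rotation directions used in the MDCT:

    /* XPROD31:  x = a*t + b*v,  y = b*t - a*v   (rotate (a,b) by -theta) */
    /* XNPROD31: x = a*t - b*v,  y = b*t + a*v   (rotate (a,b) by +theta) */

XPROD32 is the same cross product at MULT32 scaling.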
   1.167 +
   1.168 +#endif
   1.169 +
   1.170 +#ifndef _V_CLIP_MATH
   1.171 +#define _V_CLIP_MATH
   1.172 +
   1.173 +STIN ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
   1.174 +  int ret=x;
   1.175 +  ret-= ((x<=32767)-1)&(x-32767);
   1.176 +  ret-= ((x>=-32768)-1)&(x+32768);
   1.177 +  return(ret);
   1.178 +}
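CLIP_TO_15 is a branchless clamp to the signed 16-bit output range: each mask term is all-ones only when the corresponding bound is exceeded, so at most one correction is applied. For instance:

    CLIP_TO_15(40000)    /* ->  32767 */
    CLIP_TO_15(-40000)   /* -> -32768 */
    CLIP_TO_15(1234)     /* ->   1234 */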
   1.179 +
   1.180 +#endif
   1.181 +
   1.182 +STIN ogg_int32_t VFLOAT_MULT(ogg_int32_t a,ogg_int32_t ap,
   1.183 +				      ogg_int32_t b,ogg_int32_t bp,
   1.184 +				      ogg_int32_t *p){
   1.185 +  if(a && b){
   1.186 +#ifndef _LOW_ACCURACY_
   1.187 +    *p=ap+bp+32;
   1.188 +    return MULT32(a,b);
   1.189 +#else
   1.190 +    *p=ap+bp+31;
   1.191 +    return (a>>15)*(b>>16); 
   1.192 +#endif
   1.193 +  }else
   1.194 +    return 0;
   1.195 +}
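The VFLOAT_* helpers emulate a crude software float as a 32-bit mantissa m plus a separate exponent p, value ~ m * 2^p. Multiplication multiplies the mantissas (keeping the high half) and adds the exponents; the +32 compensates for the 32-bit shift hidden inside MULT32. A small sketch, assuming the high-accuracy branch:

    ogg_int32_t p;
    ogg_int32_t m = VFLOAT_MULT(0x40000000, 0, 0x40000000, 0, &p);
    /* m == 0x10000000, p == 32:  m * 2^32 == 0x40000000 * 0x40000000 */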
   1.196 +
   1.197 +int _ilog(unsigned int);
   1.198 +
   1.199 +STIN ogg_int32_t VFLOAT_MULTI(ogg_int32_t a,ogg_int32_t ap,
   1.200 +				      ogg_int32_t i,
   1.201 +				      ogg_int32_t *p){
   1.202 +
   1.203 +  int ip=_ilog(abs(i))-31;
   1.204 +  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
   1.205 +}
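VFLOAT_MULTI folds a plain integer into the same mantissa/exponent form first: assuming _ilog() returns the bit length of its argument, ip is non-positive for 31-bit inputs, so i<<-ip left-justifies i into a 31-bit mantissa with exponent ip. For i == 3: _ilog(3) == 2, ip == -29, and the value handed on to VFLOAT_MULT is 0x60000000 * 2^-29 == 3.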
   1.206 +
   1.207 +STIN ogg_int32_t VFLOAT_ADD(ogg_int32_t a,ogg_int32_t ap,
   1.208 +				      ogg_int32_t b,ogg_int32_t bp,
   1.209 +				      ogg_int32_t *p){
   1.210 +
   1.211 +  if(!a){
   1.212 +    *p=bp;
   1.213 +    return b;
   1.214 +  }else if(!b){
   1.215 +    *p=ap;
   1.216 +    return a;
   1.217 +  }
   1.218 +
   1.219 +  /* yes, this can leak a bit. */
   1.220 +  if(ap>bp){
   1.221 +    int shift=ap-bp+1;
   1.222 +    *p=ap+1;
   1.223 +    a>>=1;
   1.224 +    if(shift<32){
   1.225 +      b=(b+(1<<(shift-1)))>>shift;
   1.226 +    }else{
   1.227 +      b=0;
   1.228 +    }
   1.229 +  }else{
   1.230 +    int shift=bp-ap+1;
   1.231 +    *p=bp+1;
   1.232 +    b>>=1;
   1.233 +    if(shift<32){
   1.234 +      a=(a+(1<<(shift-1)))>>shift;
   1.235 +    }else{
   1.236 +      a=0;
   1.237 +    }
   1.238 +  }
   1.239 +
   1.240 +  a+=b;
   1.241 +  if((a&0xc0000000)==0xc0000000 || 
   1.242 +     (a&0xc0000000)==0){
   1.243 +    a<<=1;
   1.244 +    (*p)--;
   1.245 +  }
   1.246 +  return(a);
   1.247 +}
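VFLOAT_ADD aligns both operands on a common exponent one above the larger of the two (the spare bit keeps the sum from overflowing), rounds the smaller mantissa as it is shifted down, and renormalizes by one bit when the top two bits of the sum agree. A quick worked case in the same representation:

    ogg_int32_t p;
    ogg_int32_t m = VFLOAT_ADD(0x40000000, 0, 0x40000000, 0, &p);
    /* m == 0x40000000, p == 1:  m * 2^1 == 0x40000000 + 0x40000000 */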
   1.248 +
   1.249 +#endif
   1.250 +
   1.251 +
   1.252 +
   1.253 +
