Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.  *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS    *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.      *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002   *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                 *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/

#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
#include "ivorbiscodec.h"
#include "os.h"

#ifdef _LOW_ACCURACY_
# define X(n) (((((n)>>22)+1)>>1) - ((((n)>>22)+1)>>9))
# define LOOKUP_T const unsigned char
#else
# define X(n) (n)
# define LOOKUP_T const ogg_int32_t
#endif
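/*
 * X() and LOOKUP_T control how the static lookup tables are stored: in a
 * _LOW_ACCURACY_ build the tables are apparently kept as small unsigned
 * char entries, and X() rescales a full-precision 32-bit constant down to
 * that reduced range; otherwise both the macro and the table type leave
 * the ogg_int32_t values untouched.
 */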

#include "asm_arm.h"
#include <stdlib.h> /* for abs() */

#ifndef _V_WIDE_MATH
#define _V_WIDE_MATH
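/*
 * Note that asm_arm.h, included above, may already have defined
 * _V_WIDE_MATH together with assembly implementations of these helpers;
 * in that case this guard skips the portable C versions below.
 */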

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */

#if !(defined WIN32 && defined WINCE)
#include <sys/types.h>
#endif

#if BYTE_ORDER==LITTLE_ENDIAN
union magic {
  struct {
    ogg_int32_t lo;
    ogg_int32_t hi;
  } halves;
  ogg_int64_t whole;
};
#elif BYTE_ORDER==BIG_ENDIAN
union magic {
  struct {
    ogg_int32_t hi;
    ogg_int32_t lo;
  } halves;
  ogg_int64_t whole;
};
#endif
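/*
 * The two layouts above exist so that, whatever the byte order,
 * magic.halves.hi always names the most significant 32 bits of the 64-bit
 * value stored in magic.whole, and magic.halves.lo the least significant
 * 32 bits.
 */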

STIN ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return magic.halves.hi;
}

STIN ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}

STIN ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
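/*
 * Effectively: MULT32(x,y) returns the top 32 bits of the 64-bit product,
 * i.e. ((ogg_int64_t)x*y)>>32; MULT31(x,y) keeps one more bit, giving a
 * Q31 x Q31 fractional multiply; and MULT31_SHIFT15(x,y) returns the low
 * 32 bits of ((ogg_int64_t)x*y)>>15.
 */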

#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument; ordering is
 * therefore important.  Shift values were chosen for the best sound
 * quality after many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits.  We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */
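/*
 * The shift amounts combine with the preshifted table values so that the
 * overall scaling matches the wide-math versions above: for MULT32,
 * (x>>9) times a value already shifted down by 23 discards 9+23 = 32 bits
 * in total; MULT31 discards 8+23 = 31; and MULT31_SHIFT15 discards
 * 6+9 = 15, all at the cost of the low-order precision noted above.
 */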

STIN ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

STIN ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

STIN ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}

#endif

/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory.  Might or might not be
 * beneficial depending on the architecture and compiler.
 */
#define MB()

/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is being written to.
 * However, this is only beneficial on CPUs with a sane number of general-
 * purpose registers, which excludes the Intel x86.  On Intel it is better
 * to let the compiler reload registers directly from original memory by
 * using macros.
 */
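/*
 * Each variant computes both halves of a cross product at once:
 * XPROD32/XPROD31 give *x = a*t + b*v and *y = b*t - a*v, while XNPROD31
 * gives *x = a*t - b*v and *y = b*t + a*v; in complex terms these are the
 * fixed-point products of (a + ib) with (t - iv) and (t + iv) respectively.
 */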

#ifdef __i386__

#define XPROD32(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT32(_a,_t)+MULT32(_b,_v); \
    *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#define XPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }

#else

STIN void XPROD32(ogg_int32_t a, ogg_int32_t b,
                  ogg_int32_t t, ogg_int32_t v,
                  ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT32(a, t) + MULT32(b, v);
  *y = MULT32(b, t) - MULT32(a, v);
}

STIN void XPROD31(ogg_int32_t a, ogg_int32_t b,
                  ogg_int32_t t, ogg_int32_t v,
                  ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

STIN void XNPROD31(ogg_int32_t a, ogg_int32_t b,
                   ogg_int32_t t, ogg_int32_t v,
                   ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}

#endif

#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

STIN ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}
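/*
 * This is a branchless clamp to the 16-bit range [-32768, 32767]: the
 * comparison (x<=32767) is 1 when x is in range and 0 when it is above,
 * so ((x<=32767)-1) is either 0 or an all-ones mask, and the masked
 * subtraction removes the overshoot only when needed.  For example
 * x = 40000 yields ret = 40000 - (40000-32767) = 32767, and the second
 * line handles the negative bound symmetrically.
 */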

#endif

STIN ogg_int32_t VFLOAT_MULT(ogg_int32_t a,ogg_int32_t ap,
                             ogg_int32_t b,ogg_int32_t bp,
                             ogg_int32_t *p){
  if(a && b){
#ifndef _LOW_ACCURACY_
    *p=ap+bp+32;
    return MULT32(a,b);
#else
    *p=ap+bp+31;
    return (a>>15)*(b>>16);
#endif
  }else
    return 0;
}
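/*
 * The VFLOAT_* helpers implement a small software floating point: a value
 * is carried as a 32-bit mantissa plus a separate base-2 exponent (the
 * ap, bp and *p arguments), so value ~ mantissa * 2^exponent.  Multiplying
 * mantissas with MULT32 drops 32 bits of the product, which is why the
 * result exponent above is ap+bp+32 (ap+bp+31 in the low-accuracy case).
 */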

int _ilog(unsigned int);

STIN ogg_int32_t VFLOAT_MULTI(ogg_int32_t a,ogg_int32_t ap,
                              ogg_int32_t i,
                              ogg_int32_t *p){

  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}
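/*
 * VFLOAT_MULTI multiplies a VFLOAT by a plain integer i by first putting
 * i into mantissa/exponent form: _ilog(abs(i)) is the bit length of |i|,
 * so ip = _ilog(abs(i))-31 is zero or negative and i<<-ip scales the
 * magnitude up to fill 31 bits, with ip recorded as the exponent.
 */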

STIN ogg_int32_t VFLOAT_ADD(ogg_int32_t a,ogg_int32_t ap,
                            ogg_int32_t b,ogg_int32_t bp,
                            ogg_int32_t *p){

  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;
    }else{
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }

  a+=b;
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}
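/*
 * VFLOAT_ADD aligns the two mantissas to a common exponent (shifting the
 * smaller value down with rounding, and giving both an extra bit of
 * headroom via the max(ap,bp)+1 result exponent so the sum cannot
 * overflow), adds them, and then renormalizes: when the top two bits of
 * the sum are equal there is a redundant sign bit, so the mantissa is
 * doubled and the exponent decremented.
 */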

#endif