media/libtremor/lib/asm_arm.h

Thu, 22 Jan 2015 13:21:57 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Thu, 22 Jan 2015 13:21:57 +0100
branch
TOR_BUG_9701
changeset 15
b8a032363ba2
permissions
-rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

michael@0 1 /********************************************************************
michael@0 2 * *
michael@0 3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
michael@0 4 * *
michael@0 5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
michael@0 6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
michael@0 7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
michael@0 8 * *
michael@0 9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
michael@0 10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
michael@0 11 * *
michael@0 12 ********************************************************************
michael@0 13
michael@0 14 function: arm7 and later wide math functions
michael@0 15
michael@0 16 ********************************************************************/
michael@0 17
michael@0 18 #ifdef _ARM_ASSEM_
michael@0 19
michael@0 20 #if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
michael@0 21 #define _V_WIDE_MATH
michael@0 22
/* Top word of the signed 64-bit product: returns (x*y)>>32.
   A single SMULL produces the full 64-bit product; only the high half
   is used ('lo' merely supplies the required low destination register). */
static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)   /* '%' lets the compiler commute x and y */
               : "cc");
  return(hi);
}
michael@0 31
michael@0 32 static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
michael@0 33 return MULT32(x,y)<<1;
michael@0 34 }
michael@0 35
/* Fixed-point multiply returning (x*y)>>15 of the 64-bit product:
   SMULL leaves the product in hi:lo; 'movs' shifts the low word right
   15 with bit 14 dropping into the carry flag; 'adc' splices
   (hi<<17)|(lo>>15) and adds that carry, rounding the result. */
static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs %0, %0, lsr #15\n\t"
               "adc %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)   /* '%' lets the compiler commute x and y */
               : "cc");           /* movs/adc read and write the flags */
  return(hi);
}
michael@0 46
/* Compiler-level memory barrier: the "memory" clobber stops the compiler
   from reordering or caching memory accesses across this point.  No
   hardware barrier instruction is emitted. */
#define MB() asm volatile ("" : : : "memory")
michael@0 48
/* Paired cross-products:
 *   *x = (a*t + b*v) >> 32
 *   *y = (b*t - a*v) >> 32
 * The rsb negates 'a' in place (operand %3 is tied to input 'a') so the
 * second pair can use smlal instead of a subtract.
 * NOTE(review): %3 is written by rsb before inputs %5-%7 are last read
 * but is not marked earlyclobber ("=&r"); this is upstream-as-shipped
 * and has worked with gcc, but confirm before touching the constraints.
 * The MB() keeps the compiler from reordering the two stores --
 * presumably *x/*y may alias data read by a following call; verify
 * against callers before removing. */
static inline void XPROD32(ogg_int32_t a, ogg_int32_t b,
                           ogg_int32_t t, ogg_int32_t v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"   /* x1:l  = a*t     */
       "smlal %0, %1, %5, %7\n\t"   /* x1:l += b*v     */
       "rsb %3, %4, #0\n\t"         /* a = -a          */
       "smull %0, %2, %5, %6\n\t"   /* y1:l  = b*t     */
       "smlal %0, %2, %3, %7"       /* y1:l += (-a)*v  */
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1;
  MB();
  *y = y1;
}
michael@0 66
/* Paired cross-products in 1.31 precision (high words shifted up one):
 *   *x = (a*t + b*v) >> 31
 *   *y = (b*t - a*v) >> 31
 * Identical accumulation to XPROD32; only the final <<1 differs.
 * NOTE(review): %3 is written by rsb before inputs %5-%7 are last read
 * but is not marked earlyclobber ("=&r"); upstream-as-shipped -- confirm
 * before touching the constraints.  MB() orders the two stores at the
 * compiler level; presumably *x/*y may alias subsequent loads. */
static inline void XPROD31(ogg_int32_t a, ogg_int32_t b,
                           ogg_int32_t t, ogg_int32_t v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"   /* x1:l  = a*t     */
       "smlal %0, %1, %5, %7\n\t"   /* x1:l += b*v     */
       "rsb %3, %4, #0\n\t"         /* a = -a          */
       "smull %0, %2, %5, %6\n\t"   /* y1:l  = b*t     */
       "smlal %0, %2, %3, %7"       /* y1:l += (-a)*v  */
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
michael@0 84
/* Paired cross-products, negated variant, in 1.31 precision:
 *   *x = (a*t - b*v) >> 31
 *   *y = (b*t + a*v) >> 31
 * Here the scratch for -b is y1 (%2), which IS earlyclobber, so no
 * tied-operand trick is needed.  MB() orders the two stores at the
 * compiler level; presumably *x/*y may alias subsequent loads. */
static inline void XNPROD31(ogg_int32_t a, ogg_int32_t b,
                            ogg_int32_t t, ogg_int32_t v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "rsb %2, %4, #0\n\t"         /* y1    = -b      */
       "smull %0, %1, %3, %5\n\t"   /* x1:l  = a*t     */
       "smlal %0, %1, %2, %6\n\t"   /* x1:l += (-b)*v  */
       "smull %0, %2, %4, %5\n\t"   /* y1:l  = b*t     */
       "smlal %0, %2, %3, %6"       /* y1:l += a*v     */
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
michael@0 102
michael@0 103 #endif
michael@0 104
michael@0 105 #ifndef _V_CLIP_MATH
michael@0 106 #define _V_CLIP_MATH
michael@0 107
michael@0 108 static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
michael@0 109 int tmp;
michael@0 110 asm volatile("subs %1, %0, #32768\n\t"
michael@0 111 "movpl %0, #0x7f00\n\t"
michael@0 112 "orrpl %0, %0, #0xff\n"
michael@0 113 "adds %1, %0, #32768\n\t"
michael@0 114 "movmi %0, #0x8000"
michael@0 115 : "+r"(x),"=r"(tmp)
michael@0 116 :
michael@0 117 : "cc");
michael@0 118 return(x);
michael@0 119 }
michael@0 120
michael@0 121 #endif
michael@0 122
michael@0 123 #ifndef _V_LSP_MATH_ASM
michael@0 124 #define _V_LSP_MATH_ASM
michael@0 125
/* ARM core of the LSP (line spectral pair) evaluation loop: over m
 * roots it accumulates
 *   qi *= labs(ilsp[2j  ] - wi)   (even-indexed roots)
 *   pi *= labs(ilsp[2j+1] - wi)   (odd-indexed roots)
 * shifting both running products down 16 bits (and adding 16 to qexp)
 * whenever either product spills into its umull high word.  An odd m
 * gets one extra root folded into qi with a constant 0x4000 factor on
 * pi, then qi/pi are normalized down to at most 16 significant bits
 * with qexp adjusted to match.  Results go back through qip/pip/qexpp.
 * Scratch registers r0-r3 are hardcoded, hence the explicit clobbers. */
static inline void lsp_loop_asm(ogg_uint32_t *qip,ogg_uint32_t *pip,
                                ogg_int32_t *qexpp,
                                ogg_int32_t *ilsp,ogg_int32_t wi,
                                ogg_int32_t m){

  ogg_uint32_t qi=*qip,pi=*pip;
  ogg_int32_t qexp=*qexpp;

  asm("mov r0,%3;"                 /* r0 = ilsp                           */
      "movs r1,%5,asr#1;"          /* r1 = m>>1 = number of root pairs    */
      "add r0,r0,r1,lsl#3;"        /* r0 = one past the last full pair    */
      "beq 2f;\n"                  /* no pairs -> odd/tail handling       */
      "1:"

      "ldmdb r0!,{r1,r3};"         /* walk downward: r1=ilsp[j],r3=ilsp[j+1] */
      "subs r1,r1,%4;"             //ilsp[j]-wi
      "rsbmi r1,r1,#0;"            //labs(ilsp[j]-wi)
      "umull %0,r2,r1,%0;"         //qi*=labs(ilsp[j]-wi)

      "subs r1,r3,%4;"             //ilsp[j+1]-wi
      "rsbmi r1,r1,#0;"            //labs(ilsp[j+1]-wi)
      "umull %1,r3,r1,%1;"         //pi*=labs(ilsp[j+1]-wi)

      "cmn r2,r3;"                 // shift down 16? (eq iff both high words 0)
      "beq 0f;"
      "add %2,%2,#16;"             /* qexp += 16                          */
      "mov %0,%0,lsr #16;"         /* qi = (r2:qi) >> 16                  */
      "orr %0,%0,r2,lsl #16;"
      "mov %1,%1,lsr #16;"         /* pi = (r3:pi) >> 16                  */
      "orr %1,%1,r3,lsl #16;"
      "0:"
      "cmp r0,%3;\n"               /* more pairs above ilsp?              */
      "bhi 1b;\n"

      "2:"
      // odd filter asymmetry: one unpaired root when m is odd
      "ands r0,%5,#1;\n"
      "beq 3f;\n"
      "add r0,%3,%5,lsl#2;\n"      /* r0 = &ilsp[m]                       */

      "ldr r1,[r0,#-4];\n"         /* r1 = ilsp[m-1]                      */
      "mov r0,#0x4000;\n"          /* constant factor for pi              */

      "subs r1,r1,%4;\n"           //ilsp[j]-wi
      "rsbmi r1,r1,#0;\n"          //labs(ilsp[j]-wi)
      "umull %0,r2,r1,%0;\n"       //qi*=labs(ilsp[j]-wi)
      "umull %1,r3,r0,%1;\n"       //pi*=0x4000

      "cmn r2,r3;\n"               // shift down 16? (eq iff both high words 0)
      "beq 3f;\n"
      "add %2,%2,#16;\n"
      "mov %0,%0,lsr #16;\n"
      "orr %0,%0,r2,lsl #16;\n"
      "mov %1,%1,lsr #16;\n"
      "orr %1,%1,r3,lsl #16;\n"

      //qi=(pi>>shift)*labs(ilsp[j]-wi);
      //pi=(qi>>shift)*labs(ilsp[j+1]-wi);
      //qexp+=shift;

      //}

      /* normalize to max 16 sig figs: count leading content above bit 15
         of qi|pi in 8/4/2/1 steps, then shift both down by that much */
      "3:"
      "mov r2,#0;"                 /* r2 = shift count                    */
      "orr r1,%0,%1;"              /* examine qi and pi together          */
      "tst r1,#0xff000000;"
      "addne r2,r2,#8;"
      "movne r1,r1,lsr #8;"
      "tst r1,#0x00f00000;"
      "addne r2,r2,#4;"
      "movne r1,r1,lsr #4;"
      "tst r1,#0x000c0000;"
      "addne r2,r2,#2;"
      "movne r1,r1,lsr #2;"
      "tst r1,#0x00020000;"
      "addne r2,r2,#1;"
      "movne r1,r1,lsr #1;"
      "tst r1,#0x00010000;"
      "addne r2,r2,#1;"
      "mov %0,%0,lsr r2;"
      "mov %1,%1,lsr r2;"
      "add %2,%2,r2;"              /* qexp tracks the discarded scale     */

      : "+r"(qi),"+r"(pi),"+r"(qexp)
      : "r"(ilsp),"r"(wi),"r"(m)
      : "r0","r1","r2","r3","cc");

  *qip=qi;
  *pip=pi;
  *qexpp=qexp;
}
michael@0 218
/* Left-normalize qi so bit 15 is set (at least 16 significant bits),
 * decrementing qexp once per position shifted, via a binary search in
 * 8/4/2/1 steps over the low halfword.  Results written back through
 * qip/qexpp.  NOTE(review): only bits 15..0 are examined -- assumes the
 * caller has already reduced qi to <= 16 significant bits (as
 * lsp_loop_asm does); confirm against callers. */
static inline void lsp_norm_asm(ogg_uint32_t *qip,ogg_int32_t *qexpp){

  ogg_uint32_t qi=*qip;
  ogg_int32_t qexp=*qexpp;

  asm("tst %0,#0x0000ff00;"   /* anything in bits 15:8?        */
      "moveq %0,%0,lsl #8;"   /* no -> shift up 8, qexp -= 8   */
      "subeq %1,%1,#8;"
      "tst %0,#0x0000f000;"   /* bits 15:12?                   */
      "moveq %0,%0,lsl #4;"
      "subeq %1,%1,#4;"
      "tst %0,#0x0000c000;"   /* bits 15:14?                   */
      "moveq %0,%0,lsl #2;"
      "subeq %1,%1,#2;"
      "tst %0,#0x00008000;"   /* bit 15?                       */
      "moveq %0,%0,lsl #1;"
      "subeq %1,%1,#1;"
      : "+r"(qi),"+r"(qexp)
      :
      : "cc");
  *qip=qi;
  *qexpp=qexp;
}
michael@0 242
michael@0 243 #endif
michael@0 244 #endif
michael@0 245

mercurial