media/libvpx/vp9/common/arm/neon/vp9_convolve8_neon.asm

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Thu, 15 Jan 2015 15:59:08 +0100
branch       TOR_BUG_9701
changeset    10:ac0c01689b40
permissions  -rw-r--r--

Implement a real Private Browsing Mode condition by changing the API/ABI;
this solves Tor bug #9701, complying with the disk avoidance requirements
documented at https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

;
;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;


    ; These functions are only valid when:
    ; x_step_q4 == 16
    ; w%4 == 0
    ; h%4 == 0
    ; taps == 8
    ; VP9_FILTER_WEIGHT == 128
    ; VP9_FILTER_SHIFT == 7
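
    ; For reference, each output pixel computed below is
    ;     out = sat_u8((src[0]*filter[0] + ... + src[7]*filter[7] + 64) >> 7)
    ; an 8-tap dot product, rounded by 64, shifted right by VP9_FILTER_SHIFT
    ; and saturated to 8 bits (the vqrshrun/vqmovn pairs below).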

    EXPORT  |vp9_convolve8_horiz_neon|
    EXPORT  |vp9_convolve8_vert_neon|
    IMPORT  |vp9_convolve8_horiz_c|
    IMPORT  |vp9_convolve8_vert_c|
    ARM
    REQUIRE8
    PRESERVE8

    AREA ||.text||, CODE, READONLY, ALIGN=2

    ; Multiply and accumulate by q0
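    ; $dst = $src0*d0[0] + $src1*d0[1] + ... + $src7*d1[3]: eight s16 source
    ; lanes are multiplied by the eight filter taps held in q0 (d0/d1) and
    ; accumulated as widened s32 results.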
    MACRO
    MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
    vmull.s16 $dst, $src0, d0[0]
    vmlal.s16 $dst, $src1, d0[1]
    vmlal.s16 $dst, $src2, d0[2]
    vmlal.s16 $dst, $src3, d0[3]
    vmlal.s16 $dst, $src4, d1[0]
    vmlal.s16 $dst, $src5, d1[1]
    vmlal.s16 $dst, $src6, d1[2]
    vmlal.s16 $dst, $src7, d1[3]
    MEND

; r0    const uint8_t *src
; r1    int src_stride
; r2    uint8_t *dst
; r3    int dst_stride
; sp[]const int16_t *filter_x
; sp[]int x_step_q4
; sp[]const int16_t *filter_y ; unused
; sp[]int y_step_q4           ; unused
; sp[]int w
; sp[]int h
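
; Equivalent C prototype implied by the argument list above (return type
; assumed void):
;   void vp9_convolve8_horiz_neon(const uint8_t *src, int src_stride,
;                                 uint8_t *dst, int dst_stride,
;                                 const int16_t *filter_x, int x_step_q4,
;                                 const int16_t *filter_y, int y_step_q4,
;                                 int w, int h);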

|vp9_convolve8_horiz_neon| PROC
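    ; Only the unscaled case (x_step_q4 == 16) is handled here; any other
    ; step falls back to the C implementation with the arguments untouched.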
    ldr             r12, [sp, #4]           ; x_step_q4
    cmp             r12, #16
    bne             vp9_convolve8_horiz_c

    push            {r4-r10, lr}

    sub             r0, r0, #3              ; adjust for taps

    ldr             r5, [sp, #32]           ; filter_x
    ldr             r6, [sp, #48]           ; w
    ldr             r7, [sp, #52]           ; h

    vld1.s16        {q0}, [r5]              ; filter_x

    sub             r8, r1, r1, lsl #2      ; -src_stride * 3
    add             r8, r8, #4              ; -src_stride * 3 + 4

    sub             r4, r3, r3, lsl #2      ; -dst_stride * 3
    add             r4, r4, #4              ; -dst_stride * 3 + 4

    rsb             r9, r6, r1, lsl #2      ; reset src for outer loop
    sub             r9, r9, #7
    rsb             r12, r6, r3, lsl #2     ; reset dst for outer loop

    mov             r10, r6                 ; w loop counter

loop_horiz_v
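    ; Load four rows of eight pixels and transpose the block so that, after
    ; widening, each d register holds one source column across the four
    ; rows; the horizontal filter can then produce four output rows per set
    ; of MULTIPLY_BY_Q0 calls, and the 4x4 result is transposed back before
    ; it is stored (see "; transpose" below).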
    vld1.8          {d24}, [r0], r1
    vld1.8          {d25}, [r0], r1
    vld1.8          {d26}, [r0], r1
    vld1.8          {d27}, [r0], r8

    vtrn.16         q12, q13
    vtrn.8          d24, d25
    vtrn.8          d26, d27

    pld             [r0, r1, lsl #2]

    vmovl.u8        q8, d24
    vmovl.u8        q9, d25
    vmovl.u8        q10, d26
    vmovl.u8        q11, d27

    ; save a few instructions in the inner loop
    vswp            d17, d18
    vmov            d23, d21

    add             r0, r0, #3

loop_horiz
    add             r5, r0, #64

    vld1.32         {d28[]}, [r0], r1
    vld1.32         {d29[]}, [r0], r1
    vld1.32         {d31[]}, [r0], r1
    vld1.32         {d30[]}, [r0], r8

    pld             [r5]

    vtrn.16         d28, d31
    vtrn.16         d29, d30
    vtrn.8          d28, d29
    vtrn.8          d31, d30

    pld             [r5, r1]

    ; extract to s16
    vtrn.32         q14, q15
    vmovl.u8        q12, d28
    vmovl.u8        q13, d29

    pld             [r5, r1, lsl #1]

    ; src[] * filter_x
    MULTIPLY_BY_Q0  q1,  d16, d17, d20, d22, d18, d19, d23, d24
    MULTIPLY_BY_Q0  q2,  d17, d20, d22, d18, d19, d23, d24, d26
    MULTIPLY_BY_Q0  q14, d20, d22, d18, d19, d23, d24, d26, d27
    MULTIPLY_BY_Q0  q15, d22, d18, d19, d23, d24, d26, d27, d25

    pld             [r5, -r8]

    ; += 64 >> 7
    vqrshrun.s32    d2, q1, #7
    vqrshrun.s32    d3, q2, #7
    vqrshrun.s32    d4, q14, #7
    vqrshrun.s32    d5, q15, #7

    ; saturate
    vqmovn.u16      d2, q1
    vqmovn.u16      d3, q2

    ; transpose
    vtrn.16         d2, d3
    vtrn.32         d2, d3
    vtrn.8          d2, d3

    vst1.u32        {d2[0]}, [r2@32], r3
    vst1.u32        {d3[0]}, [r2@32], r3
    vst1.u32        {d2[1]}, [r2@32], r3
    vst1.u32        {d3[1]}, [r2@32], r4
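
    ; Rotate the register file: the last seven source columns of this
    ; iteration become the first seven of the next, so only four new
    ; columns are loaded per pass.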
    vmov            q8,  q9
    vmov            d20, d23
    vmov            q11, q12
    vmov            q9,  q13

    subs            r6, r6, #4              ; w -= 4
    bgt             loop_horiz

    ; outer loop
    mov             r6, r10                 ; restore w counter
    add             r0, r0, r9              ; src += src_stride * 4 - w
    add             r2, r2, r12             ; dst += dst_stride * 4 - w
    subs            r7, r7, #4              ; h -= 4
    bgt             loop_horiz_v

    pop             {r4-r10, pc}

    ENDP

|vp9_convolve8_vert_neon| PROC
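    ; As in the horizontal case, only y_step_q4 == 16 is handled here;
    ; anything else falls back to the C implementation.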
    ldr             r12, [sp, #12]          ; y_step_q4
    cmp             r12, #16
    bne             vp9_convolve8_vert_c

    push            {r4-r8, lr}

    ; adjust for taps
    sub             r0, r0, r1
    sub             r0, r0, r1, lsl #1

    ldr             r4, [sp, #32]           ; filter_y
    ldr             r6, [sp, #40]           ; w
    ldr             lr, [sp, #44]           ; h

    vld1.s16        {q0}, [r4]              ; filter_y

    lsl             r1, r1, #1
    lsl             r3, r3, #1
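
    ; Both strides are doubled: two pointers per surface (r4/r7 for src,
    ; r5/r8 for dst) are offset by one original stride and walk alternate
    ; rows, two rows per step.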

loop_vert_h
    mov             r4, r0
    add             r7, r0, r1, asr #1
    mov             r5, r2
    add             r8, r2, r3, asr #1
    mov             r12, lr                 ; h loop counter
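
    ; Prime the 8-tap filter window with the first seven source rows
    ; (four pixels per row); the inner loop loads four more rows per pass.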
    vld1.u32        {d16[0]}, [r4], r1
    vld1.u32        {d16[1]}, [r7], r1
    vld1.u32        {d18[0]}, [r4], r1
    vld1.u32        {d18[1]}, [r7], r1
    vld1.u32        {d20[0]}, [r4], r1
    vld1.u32        {d20[1]}, [r7], r1
    vld1.u32        {d22[0]}, [r4], r1

    vmovl.u8        q8, d16
    vmovl.u8        q9, d18
    vmovl.u8        q10, d20
    vmovl.u8        q11, d22

loop_vert
    ; always process a 4x4 block at a time
    vld1.u32        {d24[0]}, [r7], r1
    vld1.u32        {d26[0]}, [r4], r1
    vld1.u32        {d26[1]}, [r7], r1
    vld1.u32        {d24[1]}, [r4], r1

    ; extract to s16
    vmovl.u8        q12, d24
    vmovl.u8        q13, d26

    pld             [r5]
    pld             [r8]

    ; src[] * filter_y
    MULTIPLY_BY_Q0  q1,  d16, d17, d18, d19, d20, d21, d22, d24

    pld             [r5, r3]
    pld             [r8, r3]

    MULTIPLY_BY_Q0  q2,  d17, d18, d19, d20, d21, d22, d24, d26

    pld             [r7]
    pld             [r4]

    MULTIPLY_BY_Q0  q14, d18, d19, d20, d21, d22, d24, d26, d27

    pld             [r7, r1]
    pld             [r4, r1]

    MULTIPLY_BY_Q0  q15, d19, d20, d21, d22, d24, d26, d27, d25

    ; += 64 >> 7
    vqrshrun.s32    d2, q1, #7
    vqrshrun.s32    d3, q2, #7
    vqrshrun.s32    d4, q14, #7
    vqrshrun.s32    d5, q15, #7

    ; saturate
    vqmovn.u16      d2, q1
    vqmovn.u16      d3, q2

    vst1.u32        {d2[0]}, [r5@32], r3
    vst1.u32        {d2[1]}, [r8@32], r3
    vst1.u32        {d3[0]}, [r5@32], r3
    vst1.u32        {d3[1]}, [r8@32], r3
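
    ; Slide the filter window down by four rows: the last seven rows stay
    ; in registers, so only four new rows are loaded in the next pass.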
    vmov            q8, q10
    vmov            d18, d22
    vmov            d19, d24
    vmov            q10, q13
    vmov            d22, d25

    subs            r12, r12, #4            ; h -= 4
    bgt             loop_vert

    ; outer loop
    add             r0, r0, #4
    add             r2, r2, #4
    subs            r6, r6, #4              ; w -= 4
    bgt             loop_vert_h

    pop             {r4-r8, pc}

    ENDP
    END
