Thu, 15 Jan 2015 15:59:08 +0100
Implement a real Private Browsing Mode condition by changing the API/ABI;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | /* |
michael@0 | 2 | * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
michael@0 | 3 | * |
michael@0 | 4 | * Use of this source code is governed by a BSD-style license |
michael@0 | 5 | * that can be found in the LICENSE file in the root of the source |
michael@0 | 6 | * tree. An additional intellectual property rights grant can be found |
michael@0 | 7 | * in the file PATENTS. All contributing project authors may |
michael@0 | 8 | * be found in the AUTHORS file in the root of the source tree. |
michael@0 | 9 | */ |
michael@0 | 10 | |
michael@0 | 11 | #include "./vpx_config.h" |
michael@0 | 12 | #include "vp9/encoder/vp9_variance.h" |
michael@0 | 13 | #include "vp9/common/vp9_pragmas.h" |
michael@0 | 14 | #include "vpx_ports/mem.h" |
michael@0 | 15 | |
extern unsigned int vp9_get_mb_ss_mmx(const int16_t *src_ptr);

/*
 * MMX assembly helpers implemented elsewhere in the tree.
 * Based on how the wrappers below use them, *SSE receives the sum of
 * squared differences and *Sum the signed sum of differences between the
 * source and reference blocks (8x8 and 4x4 respectively) — NOTE(review):
 * semantics inferred from the callers; confirm against the .asm sources.
 */
extern unsigned int vp9_get8x8var_mmx(const unsigned char *src_ptr,
                                      int source_stride,
                                      const unsigned char *ref_ptr,
                                      int recon_stride,
                                      unsigned int *SSE,
                                      int *Sum);

extern unsigned int vp9_get4x4var_mmx(const unsigned char *src_ptr,
                                      int source_stride,
                                      const unsigned char *ref_ptr,
                                      int recon_stride,
                                      unsigned int *SSE,
                                      int *Sum);
michael@0 | 35 | |
/*
 * Variance of a 4x4 block: SSE minus the squared mean contribution.
 * Writes the raw SSE to *sse and returns the variance.
 */
unsigned int vp9_variance4x4_mmx(const unsigned char *src_ptr,
                                 int source_stride,
                                 const unsigned char *ref_ptr,
                                 int recon_stride,
                                 unsigned int *sse) {
  unsigned int sum_sq;
  int diff_sum;

  vp9_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride,
                    &sum_sq, &diff_sum);
  *sse = sum_sq;
  /* >> 4 divides the squared sum by the pixel count (16). */
  return sum_sq - (((unsigned int)diff_sum * diff_sum) >> 4);
}
michael@0 | 49 | |
/*
 * Variance of an 8x8 block: SSE minus the squared mean contribution.
 * Writes the raw SSE to *sse and returns the variance.
 */
unsigned int vp9_variance8x8_mmx(const unsigned char *src_ptr,
                                 int source_stride,
                                 const unsigned char *ref_ptr,
                                 int recon_stride,
                                 unsigned int *sse) {
  unsigned int sum_sq;
  int diff_sum;

  vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride,
                    &sum_sq, &diff_sum);
  *sse = sum_sq;
  /* >> 6 divides the squared sum by the pixel count (64). */
  return sum_sq - (((unsigned int)diff_sum * diff_sum) >> 6);
}
michael@0 | 64 | |
/*
 * Sum of squared errors over a 16x16 block, accumulated from its four
 * 8x8 quadrants. Stores the result in *sse and also returns it.
 * The per-quadrant difference sums are computed by the helper but are
 * not needed for MSE, so they are discarded.
 */
unsigned int vp9_mse16x16_mmx(const unsigned char *src_ptr,
                              int source_stride,
                              const unsigned char *ref_ptr,
                              int recon_stride,
                              unsigned int *sse) {
  unsigned int sse_q[4];
  int unused_sum[4];

  /* Top-left, top-right, bottom-left, bottom-right quadrants. */
  vp9_get8x8var_mmx(src_ptr, source_stride,
                    ref_ptr, recon_stride, &sse_q[0], &unused_sum[0]);
  vp9_get8x8var_mmx(src_ptr + 8, source_stride,
                    ref_ptr + 8, recon_stride, &sse_q[1], &unused_sum[1]);
  vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
                    ref_ptr + 8 * recon_stride, recon_stride,
                    &sse_q[2], &unused_sum[2]);
  vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride,
                    ref_ptr + 8 * recon_stride + 8, recon_stride,
                    &sse_q[3], &unused_sum[3]);

  *sse = sse_q[0] + sse_q[1] + sse_q[2] + sse_q[3];
  return *sse;
}
michael@0 | 88 | |
michael@0 | 89 | |
/*
 * Variance of a 16x16 block, accumulated from its four 8x8 quadrants.
 * Writes the total SSE to *sse and returns the variance.
 */
unsigned int vp9_variance16x16_mmx(const unsigned char *src_ptr,
                                   int source_stride,
                                   const unsigned char *ref_ptr,
                                   int recon_stride,
                                   unsigned int *sse) {
  unsigned int sse_q[4];
  int sum_q[4];

  /* Top-left, top-right, bottom-left, bottom-right quadrants. */
  vp9_get8x8var_mmx(src_ptr, source_stride,
                    ref_ptr, recon_stride, &sse_q[0], &sum_q[0]);
  vp9_get8x8var_mmx(src_ptr + 8, source_stride,
                    ref_ptr + 8, recon_stride, &sse_q[1], &sum_q[1]);
  vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
                    ref_ptr + 8 * recon_stride, recon_stride,
                    &sse_q[2], &sum_q[2]);
  vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride,
                    ref_ptr + 8 * recon_stride + 8, recon_stride,
                    &sse_q[3], &sum_q[3]);

  {
    const unsigned int total_sse = sse_q[0] + sse_q[1] + sse_q[2] + sse_q[3];
    const int total_sum = sum_q[0] + sum_q[1] + sum_q[2] + sum_q[3];

    *sse = total_sse;
    /* >> 8 divides the squared sum by the pixel count (256). */
    return total_sse - (((unsigned int)total_sum * total_sum) >> 8);
  }
}
michael@0 | 113 | |
/*
 * Variance of a 16x8 block, accumulated from its left and right 8x8
 * halves. Writes the total SSE to *sse and returns the variance.
 */
unsigned int vp9_variance16x8_mmx(const unsigned char *src_ptr,
                                  int source_stride,
                                  const unsigned char *ref_ptr,
                                  int recon_stride,
                                  unsigned int *sse) {
  unsigned int sse_left, sse_right;
  int sum_left, sum_right;

  vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride,
                    &sse_left, &sum_left);
  vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride,
                    &sse_right, &sum_right);

  {
    const unsigned int total_sse = sse_left + sse_right;
    const int total_sum = sum_left + sum_right;

    *sse = total_sse;
    /* >> 7 divides the squared sum by the pixel count (128). */
    return total_sse - (((unsigned int)total_sum * total_sum) >> 7);
  }
}
michael@0 | 133 | |
michael@0 | 134 | |
/*
 * Variance of an 8x16 block, accumulated from its top and bottom 8x8
 * halves. Writes the total SSE to *sse and returns the variance.
 */
unsigned int vp9_variance8x16_mmx(const unsigned char *src_ptr,
                                  int source_stride,
                                  const unsigned char *ref_ptr,
                                  int recon_stride,
                                  unsigned int *sse) {
  unsigned int sse_top, sse_bottom;
  int sum_top, sum_bottom;

  vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride,
                    &sse_top, &sum_top);
  vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
                    ref_ptr + 8 * recon_stride, recon_stride,
                    &sse_bottom, &sum_bottom);

  {
    const unsigned int total_sse = sse_top + sse_bottom;
    const int total_sum = sum_top + sum_bottom;

    *sse = total_sse;
    /* >> 7 divides the squared sum by the pixel count (128). */
    return total_sse - (((unsigned int)total_sum * total_sum) >> 7);
  }
}