/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"

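/* Dequantize a single 4x4 block: the quantized coefficients in d->qcoeff are
   multiplied by the dequantization factors in DQC and written to d->dqcoeff,
   using the MMX kernel declared below. */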
extern void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q);

void vp8_dequantize_b_mmx(BLOCKD *d, short *DQC)
{
    short *sq = (short *) d->qcoeff;
    short *dq = (short *) d->dqcoeff;

    vp8_dequantize_b_impl_mmx(sq, dq, DQC);
}

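/* Dequantize, inverse-transform, and add the 16 4x4 sub-blocks of a 16x16
   luma macroblock into the destination. For each sub-block the end-of-block
   count selects the cheapest path: eob > 1 runs the full dequant + IDCT,
   eob == 1 means only the DC coefficient is present so a DC-only add
   suffices (the leading coefficients are then cleared), and eob == 0 leaves
   the destination untouched. */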
void vp8_dequant_idct_add_y_block_mmx
            (short *q, short *dq,
             unsigned char *dst, int stride, char *eobs)
{
    int i;

    for (i = 0; i < 4; i++)
    {
        if (eobs[0] > 1)
            vp8_dequant_idct_add_mmx (q, dq, dst, stride);
        else if (eobs[0] == 1)
        {
            vp8_dc_only_idct_add_mmx (q[0]*dq[0], dst, stride, dst, stride);
            vpx_memset(q, 0, 2 * sizeof(q[0]));
        }

        if (eobs[1] > 1)
            vp8_dequant_idct_add_mmx (q+16, dq, dst+4, stride);
        else if (eobs[1] == 1)
        {
            vp8_dc_only_idct_add_mmx (q[16]*dq[0], dst+4, stride,
                                      dst+4, stride);
            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
        }

        if (eobs[2] > 1)
            vp8_dequant_idct_add_mmx (q+32, dq, dst+8, stride);
        else if (eobs[2] == 1)
        {
            vp8_dc_only_idct_add_mmx (q[32]*dq[0], dst+8, stride,
                                      dst+8, stride);
            vpx_memset(q + 32, 0, 2 * sizeof(q[0]));
        }

        if (eobs[3] > 1)
            vp8_dequant_idct_add_mmx (q+48, dq, dst+12, stride);
        else if (eobs[3] == 1)
        {
            vp8_dc_only_idct_add_mmx (q[48]*dq[0], dst+12, stride,
                                      dst+12, stride);
            vpx_memset(q + 48, 0, 2 * sizeof(q[0]));
        }

        q += 64;
        dst += 4*stride;
        eobs += 4;
    }
}

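/* Same per-sub-block dispatch for the two 8x8 chroma planes: the U plane is
   handled first (two rows of two 4x4 sub-blocks), then the V plane, with the
   eobs array consumed continuously across both planes. */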
void vp8_dequant_idct_add_uv_block_mmx
            (short *q, short *dq,
             unsigned char *dstu, unsigned char *dstv, int stride, char *eobs)
{
    int i;

    for (i = 0; i < 2; i++)
    {
        if (eobs[0] > 1)
            vp8_dequant_idct_add_mmx (q, dq, dstu, stride);
        else if (eobs[0] == 1)
        {
            vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstu, stride, dstu, stride);
            vpx_memset(q, 0, 2 * sizeof(q[0]));
        }

        if (eobs[1] > 1)
            vp8_dequant_idct_add_mmx (q+16, dq, dstu+4, stride);
        else if (eobs[1] == 1)
        {
            vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstu+4, stride,
                                      dstu+4, stride);
            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
        }

        q += 32;
        dstu += 4*stride;
        eobs += 2;
    }

    for (i = 0; i < 2; i++)
    {
        if (eobs[0] > 1)
            vp8_dequant_idct_add_mmx (q, dq, dstv, stride);
        else if (eobs[0] == 1)
        {
            vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstv, stride, dstv, stride);
            vpx_memset(q, 0, 2 * sizeof(q[0]));
        }

        if (eobs[1] > 1)
            vp8_dequant_idct_add_mmx (q+16, dq, dstv+4, stride);
        else if (eobs[1] == 1)
        {
            vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstv+4, stride,
                                      dstv+4, stride);
            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
        }

        q += 32;
        dstv += 4*stride;
        eobs += 2;
    }
}