Thu, 15 Jan 2015 15:59:08 +0100
Implement a real Private Browsing Mode condition by changing the API/ABI;
this resolves Tor bug #9701, complying with the disk-avoidance requirement
documented at https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | /* |
michael@0 | 2 | * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
michael@0 | 3 | * |
michael@0 | 4 | * Use of this source code is governed by a BSD-style license |
michael@0 | 5 | * that can be found in the LICENSE file in the root of the source |
michael@0 | 6 | * tree. An additional intellectual property rights grant can be found |
michael@0 | 7 | * in the file PATENTS. All contributing project authors may |
michael@0 | 8 | * be found in the AUTHORS file in the root of the source tree. |
michael@0 | 9 | */ |
michael@0 | 10 | |
michael@0 | 11 | |
michael@0 | 12 | /* \file |
michael@0 | 13 | * \brief Provides portable memory access primitives for operating on aligned |
michael@0 | 14 | * data |
michael@0 | 15 | * |
michael@0 | 16 | * This file is split from mem_ops.h for easier maintenance. See mem_ops.h |
michael@0 | 17 | * for a more detailed description of these primitives. |
michael@0 | 18 | */ |
/* This header is a template: it is only meaningful when expanded from
 * mem_ops.h (which defines MEM_VALUE_T and mem_ops_wrap_symbol before
 * including it), so reject direct inclusion.
 */
#ifndef INCLUDED_BY_MEM_OPS_H
#error Include mem_ops.h, not mem_ops_aligned.h directly.
#endif
michael@0 | 22 | |
/* Architectures that provide instructions for doing this byte swapping
 * could redefine these macros.
 *
 * All arithmetic is forced into unsigned int via the `0u + (raw)` promotion
 * and every macro argument is parenthesized.  This avoids undefined behavior:
 * the mem_put_* path invokes these macros with a signed MEM_VALUE_T operand,
 * and left-shifting a negative value is UB in C.
 * NOTE(review): assumes MEM_VALUE_T is no wider than unsigned int (it is
 * `int` by default in mem_ops.h) — revisit the 0u promotion if that changes.
 */
#define swap_endian_16(val,raw) do {\
        val = (((0u + (raw)) >> 8) & 0x00ff) \
            | (((0u + (raw)) << 8) & 0xff00);\
    } while(0)
#define swap_endian_32(val,raw) do {\
        val = (((0u + (raw)) >> 24) & 0x000000ff) \
            | (((0u + (raw)) >>  8) & 0x0000ff00) \
            | (((0u + (raw)) <<  8) & 0x00ff0000) \
            | (((0u + (raw)) << 24) & 0xff000000);\
    } while(0)
/* Byte-swap and sign-extend a 16-bit quantity into `val`.
 * The previous `(val << 16) >> 16` trick sign-extends only when `val` has a
 * signed type; the caller in this file (mem_get_sse_aligned_generic) passes
 * an unsigned MEM_VALUE_T, for which that expression merely zero-extends and
 * negative 16-bit values came back positive.  The `(x ^ 0x8000) - 0x8000`
 * form sign-extends correctly for signed and unsigned operands alike.
 */
#define swap_endian_16_se(val,raw) do {\
        swap_endian_16(val,raw);\
        val = (((val) & 0xffff) ^ 0x8000) - 0x8000;\
    } while(0)
/* A 32-bit swap already fills the full value; no extension is needed. */
#define swap_endian_32_se(val,raw) swap_endian_32(val,raw)
michael@0 | 41 | |
/* Emit a native-endian aligned load: reads a uint<sz>_t directly from
 * `vmem` (which must be suitably aligned) and widens the result to
 * unsigned MEM_VALUE_T.
 */
#define mem_get_ne_aligned_generic(end,sz) \
    static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
        return *(const uint##sz##_t *)vmem;\
    }
michael@0 | 47 | |
/* Emit a native-endian aligned load for the signed accessors: reads an
 * int<sz>_t from the aligned `vmem` and sign-extends it to
 * signed MEM_VALUE_T via the return conversion.
 */
#define mem_get_sne_aligned_generic(end,sz) \
    static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
        return *(const int##sz##_t *)vmem;\
    }
michael@0 | 53 | |
/* Emit a byte-swapping aligned load: reads a uint<sz>_t from the aligned
 * `vmem`, then reverses its byte order with swap_endian_<sz> before
 * returning it as unsigned MEM_VALUE_T.
 */
#define mem_get_se_aligned_generic(end,sz) \
    static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
        unsigned MEM_VALUE_T loaded = *(const uint##sz##_t *)vmem;\
        unsigned MEM_VALUE_T swapped;\
        swap_endian_##sz(swapped,loaded);\
        return swapped;\
    }
michael@0 | 61 | |
/* Emit a byte-swapping aligned load for the signed accessors: reads an
 * int<sz>_t from the aligned `vmem`, byte-swaps it, and (for sz == 16)
 * relies on swap_endian_16_se to re-establish the sign in the wider value.
 * NOTE(review): `val` is unsigned here, and `(val << 16) >> 16` (the body of
 * swap_endian_16_se) zero-extends rather than sign-extends on an unsigned
 * operand — negative 16-bit inputs may come back positive on the swapped
 * path.  Verify against callers of mem_get_s{be,le}16_aligned.
 */
#define mem_get_sse_aligned_generic(end,sz) \
    static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
        const int##sz##_t *mem = (const int##sz##_t *)vmem;\
        unsigned MEM_VALUE_T val, raw = *mem;\
        swap_endian_##sz##_se(val,raw);\
        return val;\
    }
michael@0 | 69 | |
/* Emit a native-endian aligned store: truncates `val` to uint<sz>_t and
 * writes it directly to the aligned `vmem`.
 */
#define mem_put_ne_aligned_generic(end,sz) \
    static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
        *(uint##sz##_t *)vmem = (uint##sz##_t)val;\
    }
michael@0 | 75 | |
/* Emit a byte-swapping aligned store: reverses the byte order of `val`
 * with swap_endian_<sz> (the assignment inside the macro truncates to
 * uint<sz>_t), then writes the swapped value to the aligned `vmem`.
 */
#define mem_put_se_aligned_generic(end,sz) \
    static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
        uint##sz##_t *dst = (uint##sz##_t *)vmem;\
        uint##sz##_t swapped;\
        swap_endian_##sz(swapped,val);\
        *dst = swapped;\
    }
michael@0 | 82 | |
#include "vpx_config.h"
/* Bind the big-endian (be*) and little-endian (le*) accessor generators to
 * either the native-endian (ne/sne) or byte-swapping (se/sse) implementations
 * above, depending on the host byte order chosen at configure time.
 * Whichever endianness matches the host becomes a plain load/store; the
 * other one goes through the swap_endian_* macros.
 */
#if CONFIG_BIG_ENDIAN
#define mem_get_be_aligned_generic(sz) mem_get_ne_aligned_generic(be,sz)
#define mem_get_sbe_aligned_generic(sz) mem_get_sne_aligned_generic(be,sz)
#define mem_get_le_aligned_generic(sz) mem_get_se_aligned_generic(le,sz)
#define mem_get_sle_aligned_generic(sz) mem_get_sse_aligned_generic(le,sz)
#define mem_put_be_aligned_generic(sz) mem_put_ne_aligned_generic(be,sz)
#define mem_put_le_aligned_generic(sz) mem_put_se_aligned_generic(le,sz)
#else
#define mem_get_be_aligned_generic(sz) mem_get_se_aligned_generic(be,sz)
#define mem_get_sbe_aligned_generic(sz) mem_get_sse_aligned_generic(be,sz)
#define mem_get_le_aligned_generic(sz) mem_get_ne_aligned_generic(le,sz)
#define mem_get_sle_aligned_generic(sz) mem_get_sne_aligned_generic(le,sz)
#define mem_put_be_aligned_generic(sz) mem_put_se_aligned_generic(be,sz)
#define mem_put_le_aligned_generic(sz) mem_put_ne_aligned_generic(le,sz)
#endif
michael@0 | 99 | |
/* Instantiate the aligned accessors for each endianness, signedness, and
 * size.  For every accessor: first #undef any previous definition, then
 * re-#define the public name through mem_ops_wrap_symbol (presumably a
 * name-mangling macro from mem_ops.h that keeps the static function unique
 * per includer — confirm there), and finally expand the matching generator
 * to emit the static function body.
 */
#undef  mem_get_be16_aligned
#define mem_get_be16_aligned mem_ops_wrap_symbol(mem_get_be16_aligned)
mem_get_be_aligned_generic(16)

#undef  mem_get_be32_aligned
#define mem_get_be32_aligned mem_ops_wrap_symbol(mem_get_be32_aligned)
mem_get_be_aligned_generic(32)

#undef  mem_get_le16_aligned
#define mem_get_le16_aligned mem_ops_wrap_symbol(mem_get_le16_aligned)
mem_get_le_aligned_generic(16)

#undef  mem_get_le32_aligned
#define mem_get_le32_aligned mem_ops_wrap_symbol(mem_get_le32_aligned)
mem_get_le_aligned_generic(32)

/* Signed variants. */
#undef  mem_get_sbe16_aligned
#define mem_get_sbe16_aligned mem_ops_wrap_symbol(mem_get_sbe16_aligned)
mem_get_sbe_aligned_generic(16)

#undef  mem_get_sbe32_aligned
#define mem_get_sbe32_aligned mem_ops_wrap_symbol(mem_get_sbe32_aligned)
mem_get_sbe_aligned_generic(32)

#undef  mem_get_sle16_aligned
#define mem_get_sle16_aligned mem_ops_wrap_symbol(mem_get_sle16_aligned)
mem_get_sle_aligned_generic(16)

#undef  mem_get_sle32_aligned
#define mem_get_sle32_aligned mem_ops_wrap_symbol(mem_get_sle32_aligned)
mem_get_sle_aligned_generic(32)

/* Stores. */
#undef  mem_put_be16_aligned
#define mem_put_be16_aligned mem_ops_wrap_symbol(mem_put_be16_aligned)
mem_put_be_aligned_generic(16)

#undef  mem_put_be32_aligned
#define mem_put_be32_aligned mem_ops_wrap_symbol(mem_put_be32_aligned)
mem_put_be_aligned_generic(32)

#undef  mem_put_le16_aligned
#define mem_put_le16_aligned mem_ops_wrap_symbol(mem_put_le16_aligned)
mem_put_le_aligned_generic(16)

#undef  mem_put_le32_aligned
#define mem_put_le32_aligned mem_ops_wrap_symbol(mem_put_le32_aligned)
mem_put_le_aligned_generic(32)
michael@0 | 147 | |
/* Tear down all the helper macros so they do not leak into files that
 * include mem_ops.h; only the wrapped accessor functions remain visible.
 */
#undef mem_get_ne_aligned_generic
#undef mem_get_se_aligned_generic
#undef mem_get_sne_aligned_generic
#undef mem_get_sse_aligned_generic
#undef mem_put_ne_aligned_generic
#undef mem_put_se_aligned_generic
#undef swap_endian_16
#undef swap_endian_32
#undef swap_endian_16_se
#undef swap_endian_32_se