Thu, 15 Jan 2015 15:59:08 +0100
Implement a real Private Browsing Mode condition by changing the API/ABI;
This solves Tor bug #9701, complying with the disk-avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
12 #ifndef VP9_COMMON_VP9_BLOCKD_H_
13 #define VP9_COMMON_VP9_BLOCKD_H_
15 #include "./vpx_config.h"
17 #include "vpx_ports/mem.h"
18 #include "vpx_scale/yv12config.h"
20 #include "vp9/common/vp9_common.h"
21 #include "vp9/common/vp9_common_data.h"
22 #include "vp9/common/vp9_enums.h"
23 #include "vp9/common/vp9_filter.h"
24 #include "vp9/common/vp9_mv.h"
25 #include "vp9/common/vp9_scale.h"
26 #include "vp9/common/vp9_seg_common.h"
27 #include "vp9/common/vp9_treecoder.h"
/* Number of block-size groups used as coding contexts
   (NOTE(review): inferred from name — confirm against the entropy code). */
#define BLOCK_SIZE_GROUPS 4
/* Number of contexts for the skip-coefficients flag (see skip_coeff). */
#define MBSKIP_CONTEXTS 3

/* Segment Feature Masks */
#define MAX_MV_REF_CANDIDATES 2  /* mv candidates kept per reference frame */

/* Context counts for the intra/inter, compound-vs-single and
   reference-frame syntax elements (NOTE(review): inferred from names —
   confirm against the probability tables in vp9_entropymode). */
#define INTRA_INTER_CONTEXTS 4
#define COMP_INTER_CONTEXTS 5
#define REF_CONTEXTS 5
/* Plane categories: luma (including its DC coefficient) vs. chroma. */
typedef enum {
  PLANE_TYPE_Y_WITH_DC,
  PLANE_TYPE_UV,
} PLANE_TYPE;
/* Coefficient coding context; used as a boolean-valued flag
   (see combine_entropy_contexts / set_contexts). */
typedef char ENTROPY_CONTEXT;

/* Context for coding the partition tree of a superblock. */
typedef char PARTITION_CONTEXT;
48 static INLINE int combine_entropy_contexts(ENTROPY_CONTEXT a,
49 ENTROPY_CONTEXT b) {
50 return (a != 0) + (b != 0);
51 }
/* Frame coding type. */
typedef enum {
  KEY_FRAME = 0,    /* intra-only frame */
  INTER_FRAME = 1,  /* may reference previously decoded frames */
  FRAME_TYPES,      /* number of frame types */
} FRAME_TYPE;
// Prediction modes. The intra modes (DC_PRED..TM_PRED) must stay
// contiguous and precede the inter modes (NEARESTMV..NEWMV):
// INTRA_MODES, INTER_MODES, INTER_OFFSET and is_inter_mode below all
// depend on this ordering.
typedef enum {
  DC_PRED,    // Average of above and left pixels
  V_PRED,     // Vertical
  H_PRED,     // Horizontal
  D45_PRED,   // Directional 45  deg = round(arctan(1/1) * 180/pi)
  D135_PRED,  // Directional 135 deg = 180 - 45
  D117_PRED,  // Directional 117 deg = 180 - 63
  D153_PRED,  // Directional 153 deg = 180 - 27
  D207_PRED,  // Directional 207 deg = 180 + 27
  D63_PRED,   // Directional 63  deg = round(arctan(2/1) * 180/pi)
  TM_PRED,    // True-motion
  NEARESTMV,
  NEARMV,
  ZEROMV,
  NEWMV,
  MB_MODE_COUNT
} MB_PREDICTION_MODE;
77 static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) {
78 return mode >= NEARESTMV && mode <= NEWMV;
79 }
/* Number of intra prediction modes (DC_PRED..TM_PRED). */
#define INTRA_MODES (TM_PRED + 1)

/* Number of inter prediction modes (NEARESTMV..NEWMV). */
#define INTER_MODES (1 + NEWMV - NEARESTMV)

/* Zero-based index of an inter mode within the inter-mode range. */
#define INTER_OFFSET(mode) ((mode) - NEARESTMV)
/* For keyframes, intra block modes are predicted by the (already decoded)
   modes for the Y blocks to the left and above us; for interframes, there
   is a single probability table. */

/* Per-4x4 sub-block mode information; consulted when the coded block is
   smaller than 8x8 (see get_tx_type_4x4). */
typedef struct {
  MB_PREDICTION_MODE as_mode;
  int_mv as_mv[2];  // first, second inter predictor motion vectors
} b_mode_info;
/* Reference frame identifiers. The ordering matters: inter blocks are
   detected by ref_frame[x] > INTRA_FRAME (see is_inter_block). */
typedef enum {
  NONE = -1,
  INTRA_FRAME = 0,
  LAST_FRAME = 1,
  GOLDEN_FRAME = 2,
  ALTREF_FRAME = 3,
  MAX_REF_FRAMES = 4
} MV_REFERENCE_FRAME;
106 static INLINE int b_width_log2(BLOCK_SIZE sb_type) {
107 return b_width_log2_lookup[sb_type];
108 }
109 static INLINE int b_height_log2(BLOCK_SIZE sb_type) {
110 return b_height_log2_lookup[sb_type];
111 }
113 static INLINE int mi_width_log2(BLOCK_SIZE sb_type) {
114 return mi_width_log2_lookup[sb_type];
115 }
117 static INLINE int mi_height_log2(BLOCK_SIZE sb_type) {
118 return mi_height_log2_lookup[sb_type];
119 }
// This structure now relates to 8x8 block regions.
typedef struct {
  MB_PREDICTION_MODE mode, uv_mode;  // y and uv prediction modes
  MV_REFERENCE_FRAME ref_frame[2];   // [0] > INTRA_FRAME for inter blocks
  TX_SIZE tx_size;                   // luma transform size
  int_mv mv[2];                      // for each reference frame used
  int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];  // mv candidates
  int_mv best_mv[2];

  uint8_t mode_context[MAX_REF_FRAMES];

  unsigned char skip_coeff;    // 0=need to decode coeffs, 1=no coefficients
  unsigned char segment_id;    // Segment id for this block.

  // Flags used for prediction status of various bit-stream signals
  unsigned char seg_id_predicted;

  INTERPOLATION_TYPE interp_filter;  // sub-pel interpolation filter

  BLOCK_SIZE sb_type;  // coded block size of this region
} MB_MODE_INFO;
// Mode info for one 8x8 region: block-level data plus per-4x4 sub-block
// info; bmi is only meaningful when mbmi.sb_type < BLOCK_8X8.
typedef struct {
  MB_MODE_INFO mbmi;
  b_mode_info bmi[4];
} MODE_INFO;
148 static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
149 return mbmi->ref_frame[0] > INTRA_FRAME;
150 }
152 static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
153 return mbmi->ref_frame[1] > INTRA_FRAME;
154 }
/* Motion vector precision in Qn fixed point.
   NOTE(review): presumably Q3 = 1/8-pel, Q4 = 1/16-pel — confirm against
   the mv scaling code. */
enum mv_precision {
  MV_PRECISION_Q3,
  MV_PRECISION_Q4
};
/* Number of planes handled per block: Y, U, V and, when alpha support is
   compiled in, A. */
#if CONFIG_ALPHA
enum { MAX_MB_PLANE = 4 };
#else
enum { MAX_MB_PLANE = 3 };
#endif
/* A pointer into a pixel buffer together with its row stride. */
struct buf_2d {
  uint8_t *buf;
  int stride;
};
/* Per-plane decoder state. */
struct macroblockd_plane {
  int16_t *qcoeff;        // quantized coefficients
  int16_t *dqcoeff;       // dequantized coefficients
  uint16_t *eobs;         // end-of-block index per transform block
  PLANE_TYPE plane_type;  // PLANE_TYPE_Y_WITH_DC or PLANE_TYPE_UV
  int subsampling_x;      // chroma subsampling shift in x (0 for luma)
  int subsampling_y;      // chroma subsampling shift in y (0 for luma)
  struct buf_2d dst;      // destination/reconstruction buffer
  struct buf_2d pre[2];   // prediction source, one per reference
  int16_t *dequant;       // dequantization table for this plane
  ENTROPY_CONTEXT *above_context;  // per-column coeff coding contexts
  ENTROPY_CONTEXT *left_context;   // per-row coeff coding contexts
};
/* Address of the i-th transform block's coefficients within array x
   (16 coefficients per 4x4 block). */
#define BLOCK_OFFSET(x, i) ((x) + (i) * 16)
/* Decoder-side per-macroblock state shared by all planes. */
typedef struct macroblockd {
  struct macroblockd_plane plane[MAX_MB_PLANE];

  struct scale_factors scale_factor[2];  // one per reference frame

  MODE_INFO *last_mi;
  int mode_info_stride;  // row stride of the mode-info arrays

  // A NULL indicates that the 8x8 is not part of the image
  MODE_INFO **mi_8x8;
  MODE_INFO **prev_mi_8x8;
  MODE_INFO *mi_stream;

  int up_available;    // nonzero if a row above exists
  int left_available;  // nonzero if a column to the left exists

  /* Distance of MB away from frame edges, in units of pixels * 8
     (see the shifts in foreach_transformed_block_in_plane). */
  int mb_to_left_edge;
  int mb_to_right_edge;
  int mb_to_top_edge;
  int mb_to_bottom_edge;

  int lossless;  // nonzero for lossless coding (see get_tx_type_4x4)
  /* Inverse transform function pointers. */
  void (*itxm_add)(const int16_t *input, uint8_t *dest, int stride, int eob);

  struct subpix_fn_table subpix;

  int corrupted;  // set when bitstream errors were detected

  /* Y,U,V,(A) */
  ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
  ENTROPY_CONTEXT left_context[MAX_MB_PLANE][16];

  PARTITION_CONTEXT *above_seg_context;
  PARTITION_CONTEXT left_seg_context[8];
} MACROBLOCKD;
228 static BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, PARTITION_TYPE partition) {
229 const BLOCK_SIZE subsize = subsize_lookup[partition][bsize];
230 assert(subsize < BLOCK_SIZES);
231 return subsize;
232 }
234 extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT];
236 static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
237 const MACROBLOCKD *xd, int ib) {
238 const MODE_INFO *const mi = xd->mi_8x8[0];
239 const MB_MODE_INFO *const mbmi = &mi->mbmi;
241 if (plane_type != PLANE_TYPE_Y_WITH_DC ||
242 xd->lossless ||
243 is_inter_block(mbmi))
244 return DCT_DCT;
246 return mode2txfm_map[mbmi->sb_type < BLOCK_8X8 ?
247 mi->bmi[ib].as_mode : mbmi->mode];
248 }
250 static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type,
251 const MACROBLOCKD *xd) {
252 return plane_type == PLANE_TYPE_Y_WITH_DC ?
253 mode2txfm_map[xd->mi_8x8[0]->mbmi.mode] : DCT_DCT;
254 }
256 static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type,
257 const MACROBLOCKD *xd) {
258 return plane_type == PLANE_TYPE_Y_WITH_DC ?
259 mode2txfm_map[xd->mi_8x8[0]->mbmi.mode] : DCT_DCT;
260 }
262 static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) {
263 int i;
265 for (i = 0; i < MAX_MB_PLANE; i++) {
266 xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y_WITH_DC;
267 xd->plane[i].subsampling_x = i ? ss_x : 0;
268 xd->plane[i].subsampling_y = i ? ss_y : 0;
269 }
270 #if CONFIG_ALPHA
271 // TODO(jkoleszar): Using the Y w/h for now
272 xd->plane[3].subsampling_x = 0;
273 xd->plane[3].subsampling_y = 0;
274 #endif
275 }
278 static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) {
279 return MIN(mbmi->tx_size, max_uv_txsize_lookup[mbmi->sb_type]);
280 }
282 static BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
283 const struct macroblockd_plane *pd) {
284 BLOCK_SIZE bs = ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y];
285 assert(bs < BLOCK_SIZES);
286 return bs;
287 }
289 static INLINE int plane_block_width(BLOCK_SIZE bsize,
290 const struct macroblockd_plane* plane) {
291 return 4 << (b_width_log2(bsize) - plane->subsampling_x);
292 }
294 static INLINE int plane_block_height(BLOCK_SIZE bsize,
295 const struct macroblockd_plane* plane) {
296 return 4 << (b_height_log2(bsize) - plane->subsampling_y);
297 }
/* Callback invoked once per transform block: |block| is the block's index
   within the plane (in 4x4 units, see BLOCK_OFFSET), |plane_bsize| the
   plane-local block size, |arg| caller-supplied state. */
typedef void (*foreach_transformed_block_visitor)(int plane, int block,
                                                  BLOCK_SIZE plane_bsize,
                                                  TX_SIZE tx_size,
                                                  void *arg);
// Invokes |visit| for every transform block of plane |plane| within a
// region of size |bsize|, skipping blocks that lie wholly outside the
// visible frame (inside the unrestricted-motion border, "UMV").
static INLINE void foreach_transformed_block_in_plane(
    const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
    foreach_transformed_block_visitor visit, void *arg) {
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const MB_MODE_INFO* mbmi = &xd->mi_8x8[0]->mbmi;
  // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
  // transform size varies per plane, look it up in a common way.
  const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi)
                                : mbmi->tx_size;
  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
  const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
  const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
  const int step = 1 << (tx_size << 1);  // 4x4 blocks per transform block
  int i;

  // If mb_to_right_edge is < 0 we are in a situation in which
  // the current block size extends into the UMV and we won't
  // visit the sub blocks that are wholly within the UMV.
  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
    int r, c;

    int max_blocks_wide = num_4x4_w;
    int max_blocks_high = num_4x4_h;

    // xd->mb_to_right_edge is in units of pixels * 8. This converts
    // it to 4x4 block sizes.
    if (xd->mb_to_right_edge < 0)
      max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x));

    if (xd->mb_to_bottom_edge < 0)
      max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));

    i = 0;
    // Unlike the normal case - in here we have to keep track of the
    // row and column of the blocks we use so that we know if we are in
    // the unrestricted motion border.
    for (r = 0; r < num_4x4_h; r += (1 << tx_size)) {
      for (c = 0; c < num_4x4_w; c += (1 << tx_size)) {
        if (r < max_blocks_high && c < max_blocks_wide)
          visit(plane, i, plane_bsize, tx_size, arg);
        // Note: i advances even for skipped blocks so indices stay in
        // raster-scan order.
        i += step;
      }
    }
  } else {
    for (i = 0; i < num_4x4_w * num_4x4_h; i += step)
      visit(plane, i, plane_bsize, tx_size, arg);
  }
}
354 static INLINE void foreach_transformed_block(
355 const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
356 foreach_transformed_block_visitor visit, void *arg) {
357 int plane;
359 for (plane = 0; plane < MAX_MB_PLANE; plane++)
360 foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
361 }
363 static INLINE void foreach_transformed_block_uv(
364 const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
365 foreach_transformed_block_visitor visit, void *arg) {
366 int plane;
368 for (plane = 1; plane < MAX_MB_PLANE; plane++)
369 foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
370 }
372 static int raster_block_offset(BLOCK_SIZE plane_bsize,
373 int raster_block, int stride) {
374 const int bw = b_width_log2(plane_bsize);
375 const int y = 4 * (raster_block >> bw);
376 const int x = 4 * (raster_block & ((1 << bw) - 1));
377 return y * stride + x;
378 }
379 static int16_t* raster_block_offset_int16(BLOCK_SIZE plane_bsize,
380 int raster_block, int16_t *base) {
381 const int stride = 4 << b_width_log2(plane_bsize);
382 return base + raster_block_offset(plane_bsize, raster_block, stride);
383 }
384 static uint8_t* raster_block_offset_uint8(BLOCK_SIZE plane_bsize,
385 int raster_block, uint8_t *base,
386 int stride) {
387 return base + raster_block_offset(plane_bsize, raster_block, stride);
388 }
390 static int txfrm_block_to_raster_block(BLOCK_SIZE plane_bsize,
391 TX_SIZE tx_size, int block) {
392 const int bwl = b_width_log2(plane_bsize);
393 const int tx_cols_log2 = bwl - tx_size;
394 const int tx_cols = 1 << tx_cols_log2;
395 const int raster_mb = block >> (tx_size << 1);
396 const int x = (raster_mb & (tx_cols - 1)) << tx_size;
397 const int y = (raster_mb >> tx_cols_log2) << tx_size;
398 return x + (y << bwl);
399 }
401 static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize,
402 TX_SIZE tx_size, int block,
403 int *x, int *y) {
404 const int bwl = b_width_log2(plane_bsize);
405 const int tx_cols_log2 = bwl - tx_size;
406 const int tx_cols = 1 << tx_cols_log2;
407 const int raster_mb = block >> (tx_size << 1);
408 *x = (raster_mb & (tx_cols - 1)) << tx_size;
409 *y = (raster_mb >> tx_cols_log2) << tx_size;
410 }
// Extends reconstructed pixels of |block| into the unrestricted-motion
// border (UMV) when the block reaches past the right or bottom frame
// edge, so that subsequent intra prediction reads valid samples there.
static void extend_for_intra(MACROBLOCKD *xd, BLOCK_SIZE plane_bsize,
                             int plane, int block, TX_SIZE tx_size) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  uint8_t *const buf = pd->dst.buf;
  const int stride = pd->dst.stride;

  int x, y;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  // Convert 4x4-unit coordinates to pixels, offset by -1 to address the
  // above/left neighbor row and column.
  x = x * 4 - 1;
  y = y * 4 - 1;
  // Copy a pixel into the umv if we are in a situation where the block size
  // extends into the UMV.
  // TODO(JBB): Should be able to do the full extend in place so we don't have
  // to do this multiple times.
  if (xd->mb_to_right_edge < 0) {
    const int bw = 4 << b_width_log2(plane_bsize);
    // mb_to_right_edge is in 1/8-pel units; >> (3 + subsampling) gives
    // plane pixels.
    const int umv_border_start = bw + (xd->mb_to_right_edge >>
                                       (3 + pd->subsampling_x));

    if (x + bw > umv_border_start)
      vpx_memset(&buf[y * stride + umv_border_start],
                 buf[y * stride + umv_border_start - 1], bw);
  }

  if (xd->mb_to_bottom_edge < 0) {
    // Only extend downward when the left neighbor column (x == -1) is
    // actually valid.
    if (xd->left_available || x >= 0) {
      const int bh = 4 << b_height_log2(plane_bsize);
      const int umv_border_start =
          bh + (xd->mb_to_bottom_edge >> (3 + pd->subsampling_y));

      if (y + bh > umv_border_start) {
        // Replicate the last visible pixel down the column.
        const uint8_t c = buf[(umv_border_start - 1) * stride + x];
        uint8_t *d = &buf[umv_border_start * stride + x];
        int i;
        for (i = 0; i < bh; ++i, d += stride)
          *d = c;
      }
    }
  }
}
// Records the coefficient coding context after coding a transform block:
// writes |has_eob| into the above/left entropy context arrays starting at
// offsets (aoff, loff), zeroing any context slots that fall outside the
// visible frame so they do not influence later blocks.
static void set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                         int has_eob, int aoff, int loff) {
  ENTROPY_CONTEXT *const a = pd->above_context + aoff;
  ENTROPY_CONTEXT *const l = pd->left_context + loff;
  const int tx_size_in_blocks = 1 << tx_size;  // width/height in 4x4 units

  // above
  if (has_eob && xd->mb_to_right_edge < 0) {
    int i;
    // mb_to_right_edge is in units of pixels * 8; >> (5 + subsampling)
    // converts it to 4x4 blocks of this plane.
    const int blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize] +
                            (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
    int above_contexts = tx_size_in_blocks;
    if (above_contexts + aoff > blocks_wide)
      above_contexts = blocks_wide - aoff;

    for (i = 0; i < above_contexts; ++i)
      a[i] = has_eob;
    // Context slots past the visible frame edge are cleared.
    for (i = above_contexts; i < tx_size_in_blocks; ++i)
      a[i] = 0;
  } else {
    vpx_memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
  }

  // left
  if (has_eob && xd->mb_to_bottom_edge < 0) {
    int i;
    const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] +
                            (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
    int left_contexts = tx_size_in_blocks;
    if (left_contexts + loff > blocks_high)
      left_contexts = blocks_high - loff;

    for (i = 0; i < left_contexts; ++i)
      l[i] = has_eob;
    for (i = left_contexts; i < tx_size_in_blocks; ++i)
      l[i] = 0;
  } else {
    vpx_memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
  }
}
495 static int get_tx_eob(const struct segmentation *seg, int segment_id,
496 TX_SIZE tx_size) {
497 const int eob_max = 16 << (tx_size << 1);
498 return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
499 }
501 #endif // VP9_COMMON_VP9_BLOCKD_H_