/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vp9_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/vpx_scale.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_extend.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/decoder/vp9_dboolhuff.h"
#include "vp9/decoder/vp9_decodframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_dsubexp.h"
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#include "vp9/decoder/vp9_thread.h"
#include "vp9/decoder/vp9_treereader.h"

typedef struct TileWorkerData {
  VP9_COMMON *cm;
  vp9_reader bit_reader;
  DECLARE_ALIGNED(16, MACROBLOCKD, xd);
  DECLARE_ALIGNED(16, unsigned char, token_cache[1024]);
  DECLARE_ALIGNED(16, int16_t, qcoeff[MAX_MB_PLANE][64 * 64]);
  DECLARE_ALIGNED(16, int16_t, dqcoeff[MAX_MB_PLANE][64 * 64]);
  DECLARE_ALIGNED(16, uint16_t, eobs[MAX_MB_PLANE][256]);
} TileWorkerData;
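// Tile sizes are stored big-endian in the bitstream; e.g. the four bytes
// {0x00, 0x00, 0x01, 0x2a} decode to 0x12a = 298.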
static int read_be32(const uint8_t *p) {
  return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

static int is_compound_prediction_allowed(const VP9_COMMON *cm) {
  int i;
  for (i = 1; i < ALLOWED_REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
      return 1;

  return 0;
}
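// Compound prediction pairs the two references that share a sign bias and
// fixes the remaining one: e.g. if LAST and GOLDEN have the same sign bias,
// ALTREF becomes the fixed reference and {LAST, GOLDEN} the variable pair.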
static void setup_compound_prediction(VP9_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
          cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
                 cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}

// len == 0 is not allowed
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return start + len > start && start + len <= end;
}
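// Reads an unsigned literal in just enough bits to represent 'max' and clamps
// the result so corrupt input cannot yield an out-of-range value.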
static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}
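// The transform mode is coded as a 2-bit literal; the largest literal value
// (ALLOW_32X32) pulls one more bit to distinguish it from TX_MODE_SELECT.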
static TX_MODE read_tx_mode(vp9_reader *r) {
  TX_MODE tx_mode = vp9_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32)
    tx_mode += vp9_read_bit(r);
  return tx_mode;
}

static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}

static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}

static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
  COMPPREDMODE_TYPE mode = vp9_read_bit(r);
  if (mode)
    mode += vp9_read_bit(r);
  return mode;
}

static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) {
  int i;

  const int compound_allowed = is_compound_prediction_allowed(cm);
  cm->comp_pred_mode = compound_allowed ? read_comp_pred_mode(r)
                                        : SINGLE_PREDICTION_ONLY;
  if (compound_allowed)
    setup_compound_prediction(cm);

  if (cm->comp_pred_mode == HYBRID_PREDICTION)
    for (i = 0; i < COMP_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]);

  if (cm->comp_pred_mode != COMP_PREDICTION_ONLY)
    for (i = 0; i < REF_CONTEXTS; i++) {
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]);
    }

  if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
    for (i = 0; i < REF_CONTEXTS; i++)
      vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]);
}
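// Each MV probability is updated only when an NMV_UPDATE_PROB-coded bit says
// so; the new value is read as a 7-bit literal and mapped to an odd 8-bit
// probability via (literal << 1) | 1, so it can never be zero.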
static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) {
  int i;
  for (i = 0; i < n; ++i)
    if (vp9_read(r, NMV_UPDATE_PROB))
      p[i] = (vp9_read_literal(r, 7) << 1) | 1;
}

static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) {
  int i, j;

  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], 3, r);
    update_mv_probs(comp_ctx->fp, 3, r);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      nmv_component *const comp_ctx = &ctx->comps[i];
      update_mv_probs(&comp_ctx->class0_hp, 1, r);
      update_mv_probs(&comp_ctx->hp, 1, r);
    }
  }
}

static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) {
  int i;
  xd->plane[0].dequant = cm->y_dequant[q_index];

  for (i = 1; i < MAX_MB_PLANE; i++)
    xd->plane[i].dequant = cm->uv_dequant[q_index];
}

// Allocate storage for each tile column.
// TODO(jzern): when max_threads <= 1 the same storage could be used for each
// tile.
static void alloc_tile_storage(VP9D_COMP *pbi, int tile_rows, int tile_cols) {
  VP9_COMMON *const cm = &pbi->common;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  int i, tile_row, tile_col;

  CHECK_MEM_ERROR(cm, pbi->mi_streams,
                  vpx_realloc(pbi->mi_streams, tile_rows * tile_cols *
                              sizeof(*pbi->mi_streams)));
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileInfo tile;
      vp9_tile_init(&tile, cm, tile_row, tile_col);
      pbi->mi_streams[tile_row * tile_cols + tile_col] =
          &cm->mi[tile.mi_row_start * cm->mode_info_stride
                  + tile.mi_col_start];
    }
  }

  // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
  // block where mi unit size is 8x8.
  CHECK_MEM_ERROR(cm, pbi->above_context[0],
                  vpx_realloc(pbi->above_context[0],
                              sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
                              2 * aligned_mi_cols));
  for (i = 1; i < MAX_MB_PLANE; ++i) {
    pbi->above_context[i] = pbi->above_context[0] +
                            i * sizeof(*pbi->above_context[0]) *
                            2 * aligned_mi_cols;
  }

  // This is sized based on the entire frame. Each tile operates within its
  // column bounds.
  CHECK_MEM_ERROR(cm, pbi->above_seg_context,
                  vpx_realloc(pbi->above_seg_context,
                              sizeof(*pbi->above_seg_context) *
                              aligned_mi_cols));
}

static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
                                    TX_SIZE tx_size, int x, int y) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int eob = pd->eobs[block];
  if (eob > 0) {
    TX_TYPE tx_type;
    const int plane_type = pd->plane_type;
    const int stride = pd->dst.stride;
    int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
    uint8_t *const dst = &pd->dst.buf[4 * y * stride + 4 * x];

    switch (tx_size) {
      case TX_4X4:
        tx_type = get_tx_type_4x4(plane_type, xd, block);
        if (tx_type == DCT_DCT)
          xd->itxm_add(dqcoeff, dst, stride, eob);
        else
          vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type);
        break;
      case TX_8X8:
        tx_type = get_tx_type_8x8(plane_type, xd);
        vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_16X16:
        tx_type = get_tx_type_16x16(plane_type, xd);
        vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_32X32:
        tx_type = DCT_DCT;
        vp9_idct32x32_add(dqcoeff, dst, stride, eob);
        break;
      default:
        assert(!"Invalid transform size");
    }
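    // Clear only the dequantized coefficients the inverse transform could
    // have consumed: the first two when eob == 1, otherwise a bound derived
    // from eob and the transform size instead of the full block.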
    if (eob == 1) {
      vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
    } else {
      if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
        vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
      else if (tx_size == TX_32X32 && eob <= 34)
        vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
      else
        vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
    }
  }
}

struct intra_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
  uint8_t *token_cache;
};
static void predict_and_reconstruct_intra_block(int plane, int block,
                                                BLOCK_SIZE plane_bsize,
                                                TX_SIZE tx_size, void *arg) {
  struct intra_args *const args = arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  MODE_INFO *const mi = xd->mi_8x8[0];
  const MB_PREDICTION_MODE mode = (plane == 0)
          ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[block].as_mode
                                            : mi->mbmi.mode)
          : mi->mbmi.uv_mode;
  int x, y;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];

  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
    extend_for_intra(xd, plane_bsize, plane, block, tx_size);

  vp9_predict_intra_block(xd, block >> (tx_size << 1),
                          b_width_log2(plane_bsize), tx_size, mode,
                          dst, pd->dst.stride, dst, pd->dst.stride);

  if (!mi->mbmi.skip_coeff) {
    vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y, tx_size,
                            args->r, args->token_cache);
    inverse_transform_block(xd, plane, block, tx_size, x, y);
  }
}

struct inter_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
  int *eobtotal;
  uint8_t *token_cache;
};

static void reconstruct_inter_block(int plane, int block,
                                    BLOCK_SIZE plane_bsize,
                                    TX_SIZE tx_size, void *arg) {
  struct inter_args *args = arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  int x, y;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);

  *args->eobtotal += vp9_decode_block_tokens(cm, xd, plane, block,
                                             plane_bsize, x, y, tx_size,
                                             args->r, args->token_cache);
  inverse_transform_block(xd, plane, block, tx_size, x, y);
}

static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                        const TileInfo *const tile,
                        BLOCK_SIZE bsize, int mi_row, int mi_col) {
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int offset = mi_row * cm->mode_info_stride + mi_col;
  const int tile_offset = tile->mi_row_start * cm->mode_info_stride +
                          tile->mi_col_start;

  xd->mi_8x8 = cm->mi_grid_visible + offset;
  xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;

  // We are using the mode info context stream here.
  xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset;
  xd->mi_8x8[0]->mbmi.sb_type = bsize;

  // Special case: if prev_mi is NULL, the previous mode info context
  // cannot be used.
  xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;

  set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col);

  // Distance of MB to the various image edges. These are specified to the
  // 8th pel as they are always compared to values in 1/8th pel units.
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);

  setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
}

static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                    int idx, int mi_row, int mi_col) {
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  const int ref = mbmi->ref_frame[idx] - LAST_FRAME;
  const YV12_BUFFER_CONFIG *cfg = get_frame_ref_buffer(cm, ref);
  const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref];
  if (!vp9_is_valid_scale(sfc))
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid scale factors");

  xd->scale_factor[idx].sfc = sfc;
  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]);
  xd->corrupted |= cfg->corrupted;
}

static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                           const TileInfo *const tile,
                           int mi_row, int mi_col,
                           vp9_reader *r, BLOCK_SIZE bsize,
                           uint8_t *token_cache) {
  const int less8x8 = bsize < BLOCK_8X8;
  MB_MODE_INFO *mbmi;

  set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
  vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);

  if (less8x8)
    bsize = BLOCK_8X8;

  // Has to be called after set_offsets
  mbmi = &xd->mi_8x8[0]->mbmi;

  if (mbmi->skip_coeff) {
    reset_skip_context(xd, bsize);
  } else {
    if (cm->seg.enabled)
      setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id,
                                                  cm->base_qindex));
  }

  if (!is_inter_block(mbmi)) {
    struct intra_args arg = {
      cm, xd, r, token_cache
    };
    foreach_transformed_block(xd, bsize, predict_and_reconstruct_intra_block,
                              &arg);
  } else {
    // Setup
    set_ref(cm, xd, 0, mi_row, mi_col);
    if (has_second_ref(mbmi))
      set_ref(cm, xd, 1, mi_row, mi_col);

    xd->subpix.filter_x = xd->subpix.filter_y =
        vp9_get_filter_kernel(mbmi->interp_filter);

    // Prediction
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);

    // Reconstruction
    if (!mbmi->skip_coeff) {
      int eobtotal = 0;
      struct inter_args arg = {
        cm, xd, r, &eobtotal, token_cache
      };
      foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
      if (!less8x8 && eobtotal == 0)
        mbmi->skip_coeff = 1;  // skip loopfilter
    }
  }

  xd->corrupted |= vp9_reader_has_error(r);
}
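// At the frame edges a full set of partitions may not fit: when only the
// columns (or only the rows) are present, a single bit chooses between
// PARTITION_SPLIT and the one viable rectangular partition, and when neither
// fits the partition is forced to PARTITION_SPLIT without reading any bits.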
static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs,
                                     int mi_row, int mi_col, BLOCK_SIZE bsize,
                                     vp9_reader *r) {
  const int ctx = partition_plane_context(xd->above_seg_context,
                                          xd->left_seg_context,
                                          mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;
  PARTITION_TYPE p;

  if (has_rows && has_cols)
    p = treed_read(r, vp9_partition_tree, probs);
  else if (!has_rows && has_cols)
    p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  else if (has_rows && !has_cols)
    p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
  else
    p = PARTITION_SPLIT;

  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.partition[ctx][p];

  return p;
}
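// Recursively walks the partition quadtree: each PARTITION_SPLIT descends
// into four quadrants of half the size, while the other partition types
// decode one or two leaf blocks at the current level.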
static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            const TileInfo *const tile,
                            int mi_row, int mi_col,
                            vp9_reader* r, BLOCK_SIZE bsize,
                            uint8_t *token_cache) {
  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache);
        break;
      case PARTITION_HORZ:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache);
        if (mi_row + hbs < cm->mi_rows)
          decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize,
                         token_cache);
        break;
      case PARTITION_VERT:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache);
        if (mi_col + hbs < cm->mi_cols)
          decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize,
                         token_cache);
        break;
      case PARTITION_SPLIT:
        decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, subsize,
                        token_cache);
        decode_modes_sb(cm, xd, tile, mi_row, mi_col + hbs, r, subsize,
                        token_cache);
        decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col, r, subsize,
                        token_cache);
        decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize,
                        token_cache);
        break;
      default:
        assert(!"Invalid partition type");
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd->above_seg_context, xd->left_seg_context,
                             mi_row, mi_col, subsize, bsize);
}
static void setup_token_decoder(const uint8_t *data,
                                const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
                                vp9_reader *r) {
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
  if (!read_is_valid(data, read_size, data_end))
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");

  if (vp9_reader_init(r, data, read_size))
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);
}
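// Coefficient probability updates are gated by a single bit per transform
// size; the 'k > 0 || l < 3' test skips the band-0 contexts above 2, since
// only the first three coefficient contexts apply to band 0.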
static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
                                   vp9_reader *r) {
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < BLOCK_TYPES; i++)
      for (j = 0; j < REF_TYPES; j++)
        for (k = 0; k < COEF_BANDS; k++)
          for (l = 0; l < PREV_COEF_CONTEXTS; l++)
            if (k > 0 || l < 3)
              for (m = 0; m < UNCONSTRAINED_NODES; m++)
                vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}

static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
                            vp9_reader *r) {
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    read_coef_probs_common(fc->coef_probs[tx_size], r);
}

static void setup_segmentation(struct segmentation *seg,
                               struct vp9_read_bit_buffer *rb) {
  int i, j;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vp9_rb_read_bit(rb);
  if (!seg->enabled)
    return;

  // Segmentation map update
  seg->update_map = vp9_rb_read_bit(rb);
  if (seg->update_map) {
    for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                               : MAX_PROB;

    seg->temporal_update = vp9_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                                 : MAX_PROB;
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  seg->update_data = vp9_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vp9_rb_read_bit(rb);

    vp9_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vp9_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(seg, i, j);
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vp9_rb_read_bit(rb) ? -data : data;
        }
        vp9_set_segdata(seg, i, j, data);
      }
    }
  }
}

static void setup_loopfilter(struct loopfilter *lf,
                             struct vp9_read_bit_buffer *rb) {
  lf->filter_level = vp9_rb_read_literal(rb, 6);
  lf->sharpness_level = vp9_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      int i;

      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
    }
  }
}
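// Reads an optional signed 4-bit delta-Q value (absent means 0) and reports
// whether it changed, so the caller can re-initialize the dequantizer only
// when needed.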
static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
  const int old = *delta_q;
  *delta_q = vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
  return old != *delta_q;
}

static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vp9_read_bit_buffer *rb) {
  int update = 0;

  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  update |= read_delta_q(rb, &cm->y_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_ac_delta_q);
  if (update)
    vp9_init_dequantizer(cm);

  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

  xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
}

static INTERPOLATION_TYPE read_interp_filter_type(
    struct vp9_read_bit_buffer *rb) {
  const INTERPOLATION_TYPE literal_to_type[] = { EIGHTTAP_SMOOTH,
                                                 EIGHTTAP,
                                                 EIGHTTAP_SHARP,
                                                 BILINEAR };
  return vp9_rb_read_bit(rb) ? SWITCHABLE
                             : literal_to_type[vp9_rb_read_literal(rb, 2)];
}

static void read_frame_size(struct vp9_read_bit_buffer *rb,
                            int *width, int *height) {
  const int w = vp9_rb_read_literal(rb, 16) + 1;
  const int h = vp9_rb_read_literal(rb, 16) + 1;
  *width = w;
  *height = h;
}
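// Frame dimensions are coded minus one in 16 bits each, so legal sizes run
// from 1x1 up to 65536x65536; the display size is reused from the coded size
// unless an explicit override bit is set.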
static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    read_frame_size(rb, &cm->display_width, &cm->display_height);
}

static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
  VP9_COMMON *cm = &pbi->common;

  if (cm->width != width || cm->height != height) {
    // Change in frame size.
    if (cm->width == 0 || cm->height == 0) {
      // Assign new frame buffer on first call.
      cm->new_fb_idx = NUM_YV12_BUFFERS - 1;
      cm->fb_idx_ref_cnt[cm->new_fb_idx] = 1;
    }

    // TODO(agrange) Don't test width/height, check overall size.
    if (width > cm->width || height > cm->height) {
      // Rescale frame buffers only if they're not big enough already.
      if (vp9_resize_frame_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");
    }

    cm->width = width;
    cm->height = height;

    vp9_update_frame_size(cm);
  }

  vp9_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
                           cm->subsampling_x, cm->subsampling_y,
                           VP9BORDERINPIXELS);
}

static void setup_frame_size(VP9D_COMP *pbi,
                             struct vp9_read_bit_buffer *rb) {
  int width, height;
  read_frame_size(rb, &width, &height);
  apply_frame_size(pbi, width, height);
  setup_display_size(&pbi->common, rb);
}

static void setup_frame_size_with_refs(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;

  int width, height;
  int found = 0, i;
  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    if (vp9_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const cfg = get_frame_ref_buffer(cm, i);
      width = cfg->y_crop_width;
      height = cfg->y_crop_height;
      found = 1;
      break;
    }
  }

  if (!found)
    read_frame_size(rb, &width, &height);

  if (!width || !height)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame with invalid size");

  apply_frame_size(pbi, width, height);
  setup_display_size(cm, rb);
}

static void setup_tile_context(VP9D_COMP *const pbi, MACROBLOCKD *const xd,
                               int tile_row, int tile_col) {
  int i;
  const int tile_cols = 1 << pbi->common.log2_tile_cols;
  xd->mi_stream = pbi->mi_streams[tile_row * tile_cols + tile_col];

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    xd->above_context[i] = pbi->above_context[i];
  }
  // see note in alloc_tile_storage().
  xd->above_seg_context = pbi->above_seg_context;
}

static void decode_tile(VP9D_COMP *pbi, const TileInfo *const tile,
                        vp9_reader *r) {
  const int num_threads = pbi->oxcf.max_threads;
  VP9_COMMON *const cm = &pbi->common;
  int mi_row, mi_col;
  MACROBLOCKD *xd = &pbi->mb;

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    lf_data->frame_buffer = get_frame_new_buffer(cm);
    lf_data->cm = cm;
    lf_data->xd = pbi->mb;
    lf_data->stop = 0;
    lf_data->y_only = 0;
    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
  }

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    // For a SB there are 2 left contexts, each pertaining to a MB row within
    // the SB.
    vp9_zero(xd->left_context);
    vp9_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64,
                      pbi->token_cache);
    }

    if (pbi->do_loopfilter_inline) {
      const int lf_start = mi_row - MI_BLOCK_SIZE;
      LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

      // delay the loopfilter by 1 macroblock row.
      if (lf_start < 0) continue;

      // decoding has completed: finish up the loop filter in this thread.
      if (mi_row + MI_BLOCK_SIZE >= tile->mi_row_end) continue;

      vp9_worker_sync(&pbi->lf_worker);
      lf_data->start = lf_start;
      lf_data->stop = mi_row;
      if (num_threads > 1) {
        vp9_worker_launch(&pbi->lf_worker);
      } else {
        vp9_worker_execute(&pbi->lf_worker);
      }
    }
  }

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

    vp9_worker_sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    vp9_worker_execute(&pbi->lf_worker);
  }
}
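// log2_tile_cols is coded in unary between the minimum and maximum legal
// values for the frame width, and log2_tile_rows as 0, 1 or 2 using up to
// two bits.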
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}

// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static size_t get_tile(const uint8_t *const data_end,
                       int is_last,
                       struct vpx_internal_error_info *error_info,
                       const uint8_t **data) {
  size_t size;

  if (!is_last) {
    if (!read_is_valid(*data, 4, data_end))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile length");

    size = read_be32(*data);
    *data += 4;

    if (size > data_end - *data) {
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile size");
    }
  } else {
    size = data_end - *data;
  }
  return size;
}

typedef struct TileBuffer {
  const uint8_t *data;
  size_t size;
} TileBuffer;
static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  TileBuffer tile_buffers[4][1 << 6];
  int tile_row, tile_col;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const uint8_t *end = NULL;
  vp9_reader r;

  assert(tile_rows <= 4);
  assert(tile_cols <= (1 << 6));

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE * 2 * aligned_cols);

  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_cols);

  // Load tile data into tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int last_tile = tile_row == tile_rows - 1 &&
                            tile_col == tile_cols - 1;
      const size_t size = get_tile(data_end, last_tile, &cm->error, &data);
      TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
      buf->data = data;
      buf->size = size;
      data += size;
    }
  }

  // Decode tiles using data from tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int col = pbi->oxcf.inv_tile_order ? tile_cols - tile_col - 1
                                               : tile_col;
      const int last_tile = tile_row == tile_rows - 1 &&
                            col == tile_cols - 1;
      const TileBuffer *const buf = &tile_buffers[tile_row][col];
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r);
      setup_tile_context(pbi, xd, tile_row, col);
      decode_tile(pbi, &tile, &r);

      if (last_tile)
        end = vp9_reader_find_end(&r);
    }
  }

  return end;
}

static void setup_tile_macroblockd(TileWorkerData *const tile_data) {
  MACROBLOCKD *xd = &tile_data->xd;
  struct macroblockd_plane *const pd = xd->plane;
  int i;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    pd[i].qcoeff = tile_data->qcoeff[i];
    pd[i].dqcoeff = tile_data->dqcoeff[i];
    pd[i].eobs = tile_data->eobs[i];
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));
  }
}

static int tile_worker_hook(void *arg1, void *arg2) {
  TileWorkerData *const tile_data = (TileWorkerData*)arg1;
  const TileInfo *const tile = (TileInfo*)arg2;
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(tile_data->xd.left_context);
    vp9_zero(tile_data->xd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_modes_sb(tile_data->cm, &tile_data->xd, tile,
                      mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64,
                      tile_data->token_cache);
    }
  }
  return !tile_data->xd.corrupted;
}
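// Multithreaded tile decoding: tiles are handed out in batches of
// 'num_workers'; within each batch all but the last worker are launched
// asynchronously and the last one runs on the calling thread, after which
// the whole batch is synced before the next batch starts.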
static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) {
  VP9_COMMON *const cm = &pbi->common;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int num_workers = MIN(pbi->oxcf.max_threads & ~1, tile_cols);
  int tile_col = 0;

  assert(tile_rows == 1);
  (void)tile_rows;

  if (num_workers > pbi->num_tile_workers) {
    int i;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    vpx_realloc(pbi->tile_workers,
                                num_workers * sizeof(*pbi->tile_workers)));
    for (i = pbi->num_tile_workers; i < num_workers; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      ++pbi->num_tile_workers;

      vp9_worker_init(worker);
      worker->hook = (VP9WorkerHook)tile_worker_hook;
      CHECK_MEM_ERROR(cm, worker->data1,
                      vpx_memalign(32, sizeof(TileWorkerData)));
      CHECK_MEM_ERROR(cm, worker->data2, vpx_malloc(sizeof(TileInfo)));
      if (i < num_workers - 1 && !vp9_worker_reset(worker)) {
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }
    }
  }

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
             2 * aligned_mi_cols);
  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_mi_cols);

  while (tile_col < tile_cols) {
    int i;
    for (i = 0; i < num_workers && tile_col < tile_cols; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
      TileInfo *const tile = (TileInfo*)worker->data2;
      const size_t size =
          get_tile(data_end, tile_col == tile_cols - 1, &cm->error, &data);

      tile_data->cm = cm;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      vp9_tile_init(tile, tile_data->cm, 0, tile_col);

      setup_token_decoder(data, data_end, size, &cm->error,
                          &tile_data->bit_reader);
      setup_tile_context(pbi, &tile_data->xd, 0, tile_col);
      setup_tile_macroblockd(tile_data);

      worker->had_error = 0;
      if (i == num_workers - 1 || tile_col == tile_cols - 1) {
        vp9_worker_execute(worker);
      } else {
        vp9_worker_launch(worker);
      }

      data += size;
      ++tile_col;
    }

    for (; i > 0; --i) {
      VP9Worker *const worker = &pbi->tile_workers[i - 1];
      pbi->mb.corrupted |= !vp9_worker_sync(worker);
    }
  }

  {
    const int final_worker = (tile_cols + num_workers - 1) % num_workers;
    TileWorkerData *const tile_data =
        (TileWorkerData*)pbi->tile_workers[final_worker].data1;
    return vp9_reader_find_end(&tile_data->bit_reader);
  }
}
static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  if (vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_0 ||
      vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_1 ||
      vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_2) {
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame sync code");
  }
}

static void error_handler(void *data, size_t bit_offset) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}

#define RESERVED \
  if (vp9_rb_read_bit(rb)) \
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \
                       "Reserved bit must be unset")
static size_t read_uncompressed_header(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  size_t sz;
  int i;

  cm->last_frame_type = cm->frame_type;

  if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame marker");

  cm->version = vp9_rb_read_bit(rb);
  RESERVED;

  if (vp9_rb_read_bit(rb)) {
    // show an existing frame directly
    int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
    ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->new_fb_idx, frame_to_show);
    pbi->refresh_frame_flags = 0;
    cm->lf.filter_level = 0;
    return 0;
  }

  cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
  cm->show_frame = vp9_rb_read_bit(rb);
  cm->error_resilient_mode = vp9_rb_read_bit(rb);

  if (cm->frame_type == KEY_FRAME) {
    check_sync_code(cm, rb);

    cm->color_space = vp9_rb_read_literal(rb, 3);  // colorspace
    if (cm->color_space != SRGB) {
      vp9_rb_read_bit(rb);  // [16,235] (including xvycc) vs [0,255] range
      if (cm->version == 1) {
        cm->subsampling_x = vp9_rb_read_bit(rb);
        cm->subsampling_y = vp9_rb_read_bit(rb);
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        cm->subsampling_y = cm->subsampling_x = 1;
      }
    } else {
      if (cm->version == 1) {
        cm->subsampling_y = cm->subsampling_x = 0;
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "RGB not supported in profile 0");
      }
    }

    pbi->refresh_frame_flags = (1 << NUM_REF_FRAMES) - 1;

    for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
      cm->active_ref_idx[i] = cm->new_fb_idx;

    setup_frame_size(pbi, rb);
  } else {
    cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);

    cm->reset_frame_context = cm->error_resilient_mode ?
        0 : vp9_rb_read_literal(rb, 2);

    if (cm->intra_only) {
      check_sync_code(cm, rb);

      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
      setup_frame_size(pbi, rb);
    } else {
      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);

      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
        const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LOG2);
        cm->active_ref_idx[i] = cm->ref_frame_map[ref];
        cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
      }

      setup_frame_size_with_refs(pbi, rb);

      cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
      cm->mcomp_filter_type = read_interp_filter_type(rb);

      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
        vp9_setup_scale_factors(cm, i);
    }
  }

  if (!cm->error_resilient_mode) {
    cm->refresh_frame_context = vp9_rb_read_bit(rb);
    cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
  } else {
    cm->refresh_frame_context = 0;
    cm->frame_parallel_decoding_mode = 1;
  }

  // This flag will be overridden by the call to vp9_setup_past_independence
  // below, forcing the use of context 0 for those frame types.
  cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LOG2);

  if (frame_is_intra_only(cm) || cm->error_resilient_mode)
    vp9_setup_past_independence(cm);

  setup_loopfilter(&cm->lf, rb);
  setup_quantization(cm, &pbi->mb, rb);
  setup_segmentation(&cm->seg, rb);

  setup_tile_info(cm, rb);
  sz = vp9_rb_read_literal(rb, 16);

  if (sz == 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid header size");

  return sz;
}
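// The compressed header is decoded with the boolean arithmetic coder and
// carries this frame's probability updates: transform mode/size, coefficient
// and skip probabilities, plus (for inter frames) mode, filter, reference
// and MV probabilities.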
static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data,
                                  size_t partition_size) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_reader r;
  int k;

  if (vp9_reader_init(&r, data, partition_size))
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder 0");

  cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
  if (cm->tx_mode == TX_MODE_SELECT)
    read_tx_probs(&fc->tx_probs, &r);
  read_coef_probs(fc, cm->tx_mode, &r);

  for (k = 0; k < MBSKIP_CONTEXTS; ++k)
    vp9_diff_update_prob(&r, &fc->mbskip_probs[k]);

  if (!frame_is_intra_only(cm)) {
    nmv_context *const nmvc = &fc->nmvc;
    int i, j;

    read_inter_mode_probs(fc, &r);

    if (cm->mcomp_filter_type == SWITCHABLE)
      read_switchable_interp_probs(fc, &r);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);

    read_comp_pred(cm, &r);

    for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
      for (i = 0; i < INTRA_MODES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);

    for (j = 0; j < PARTITION_CONTEXTS; ++j)
      for (i = 0; i < PARTITION_TYPES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);

    read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
  }

  return vp9_reader_has_error(&r);
}
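// Builds the per-qindex dequantization tables; index 0 of each pair holds the
// DC quantizer (with its delta applied) and index 1 the AC quantizer.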
void vp9_init_dequantizer(VP9_COMMON *cm) {
  int q;

  for (q = 0; q < QINDEX_RANGE; q++) {
    cm->y_dequant[q][0] = vp9_dc_quant(q, cm->y_dc_delta_q);
    cm->y_dequant[q][1] = vp9_ac_quant(q, 0);

    cm->uv_dequant[q][0] = vp9_dc_quant(q, cm->uv_dc_delta_q);
    cm->uv_dequant[q][1] = vp9_ac_quant(q, cm->uv_ac_delta_q);
  }
}

#ifdef NDEBUG
#define debug_check_frame_counts(cm) (void)0
#else  // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
static void debug_check_frame_counts(const VP9_COMMON *const cm) {
  FRAME_COUNTS zero_counts;
  vp9_zero(zero_counts);
  assert(cm->frame_parallel_decoding_mode || cm->error_resilient_mode);
  assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
                 sizeof(cm->counts.y_mode)));
  assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
                 sizeof(cm->counts.uv_mode)));
  assert(!memcmp(cm->counts.partition, zero_counts.partition,
                 sizeof(cm->counts.partition)));
  assert(!memcmp(cm->counts.coef, zero_counts.coef,
                 sizeof(cm->counts.coef)));
  assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
                 sizeof(cm->counts.eob_branch)));
  assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
                 sizeof(cm->counts.switchable_interp)));
  assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
                 sizeof(cm->counts.inter_mode)));
  assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
                 sizeof(cm->counts.intra_inter)));
  assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
                 sizeof(cm->counts.comp_inter)));
  assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
                 sizeof(cm->counts.single_ref)));
  assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
                 sizeof(cm->counts.comp_ref)));
  assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
  assert(!memcmp(cm->counts.mbskip, zero_counts.mbskip,
                 sizeof(cm->counts.mbskip)));
  assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
}
#endif  // NDEBUG
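// Top-level frame decode: parses both headers, decodes all tiles (using the
// multithreaded path when the frame layout and settings allow it), then
// adapts the frame context unless error-resilient or frame-parallel mode
// disables backward adaptation.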
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
  int i;
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  const uint8_t *data = pbi->source;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;

  struct vp9_read_bit_buffer rb = { data, data_end, 0, cm, error_handler };
  const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
  const int keyframe = cm->frame_type == KEY_FRAME;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int tile_cols = 1 << cm->log2_tile_cols;
  YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);

  if (!first_partition_size) {
    // showing a frame directly
    *p_data_end = data + 1;
    return 0;
  }

  if (!pbi->decoded_key_frame && !keyframe)
    return -1;

  data += vp9_rb_bytes_read(&rb);
  if (!read_is_valid(data, first_partition_size, data_end))
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt header length");

  pbi->do_loopfilter_inline =
      (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
  if (pbi->do_loopfilter_inline && pbi->lf_worker.data1 == NULL) {
    CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, vpx_malloc(sizeof(LFWorkerData)));
    pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
    if (pbi->oxcf.max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Loop filter thread creation failed");
    }
  }

  alloc_tile_storage(pbi, tile_rows, tile_cols);

  xd->mode_info_stride = cm->mode_info_stride;
  set_prev_mi(cm);

  setup_plane_dequants(cm, xd, cm->base_qindex);
  setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y);

  cm->fc = cm->frame_contexts[cm->frame_context_idx];
  vp9_zero(cm->counts);
  for (i = 0; i < MAX_MB_PLANE; ++i)
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));

  xd->corrupted = 0;
  new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);

  // TODO(jzern): remove frame_parallel_decoding_mode restriction for
  // single-frame tile decoding.
  if (pbi->oxcf.max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
      cm->frame_parallel_decoding_mode) {
    *p_data_end = decode_tiles_mt(pbi, data + first_partition_size);
  } else {
    *p_data_end = decode_tiles(pbi, data + first_partition_size);
  }

  cm->last_width = cm->width;
  cm->last_height = cm->height;

  new_fb->corrupted |= xd->corrupted;

  if (!pbi->decoded_key_frame) {
    if (keyframe && !new_fb->corrupted)
      pbi->decoded_key_frame = 1;
    else
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "A stream must start with a complete key frame");
  }

  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
    vp9_adapt_coef_probs(cm);

    if (!frame_is_intra_only(cm)) {
      vp9_adapt_mode_probs(cm);
      vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
    }
  } else {
    debug_check_frame_counts(cm);
  }

  if (cm->refresh_frame_context)
    cm->frame_contexts[cm->frame_context_idx] = cm->fc;

  return 0;
}