media/libvpx/vp9/decoder/vp9_decodframe.c

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Thu, 15 Jan 2015 15:59:08 +0100
branch      TOR_BUG_9701
changeset   10:ac0c01689b40
permissions -rw-r--r--

Implement a real Private Browsing Mode condition by changing the API/ABI;
this solves Tor bug #9701, complying with the disk avoidance requirements
documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vp9_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/vpx_scale.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_extend.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/decoder/vp9_dboolhuff.h"
#include "vp9/decoder/vp9_decodframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_dsubexp.h"
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#include "vp9/decoder/vp9_thread.h"
#include "vp9/decoder/vp9_treereader.h"

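// Per-thread state for multi-threaded tile decoding: each tile worker owns
// its own bool decoder, macroblock descriptor, token cache and coefficient
// buffers so tiles can be decoded independently.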
typedef struct TileWorkerData {
  VP9_COMMON *cm;
  vp9_reader bit_reader;
  DECLARE_ALIGNED(16, MACROBLOCKD, xd);
  DECLARE_ALIGNED(16, unsigned char, token_cache[1024]);
  DECLARE_ALIGNED(16, int16_t, qcoeff[MAX_MB_PLANE][64 * 64]);
  DECLARE_ALIGNED(16, int16_t, dqcoeff[MAX_MB_PLANE][64 * 64]);
  DECLARE_ALIGNED(16, uint16_t, eobs[MAX_MB_PLANE][256]);
} TileWorkerData;

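// Reads an unaligned 32-bit big-endian value; tile sizes are stored in the
// bitstream in this format.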
static int read_be32(const uint8_t *p) {
  return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

static int is_compound_prediction_allowed(const VP9_COMMON *cm) {
  int i;
  for (i = 1; i < ALLOWED_REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
      return 1;

  return 0;
}

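// The two references sharing a sign bias form the variable pair; the
// remaining reference, whose sign bias differs, becomes the fixed one.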
static void setup_compound_prediction(VP9_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
          cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
                 cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}

// len == 0 is not allowed
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return start + len > start && start + len <= end;
}

static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}

static TX_MODE read_tx_mode(vp9_reader *r) {
  TX_MODE tx_mode = vp9_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32)
    tx_mode += vp9_read_bit(r);
  return tx_mode;
}

static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}

static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}

static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
  COMPPREDMODE_TYPE mode = vp9_read_bit(r);
  if (mode)
    mode += vp9_read_bit(r);
  return mode;
}

static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) {
  int i;

  const int compound_allowed = is_compound_prediction_allowed(cm);
  cm->comp_pred_mode = compound_allowed ? read_comp_pred_mode(r)
                                        : SINGLE_PREDICTION_ONLY;
  if (compound_allowed)
    setup_compound_prediction(cm);

  if (cm->comp_pred_mode == HYBRID_PREDICTION)
    for (i = 0; i < COMP_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]);

  if (cm->comp_pred_mode != COMP_PREDICTION_ONLY)
    for (i = 0; i < REF_CONTEXTS; i++) {
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]);
    }

  if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
    for (i = 0; i < REF_CONTEXTS; i++)
      vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]);
}

static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) {
  int i;
  for (i = 0; i < n; ++i)
    if (vp9_read(r, NMV_UPDATE_PROB))
      p[i] = (vp9_read_literal(r, 7) << 1) | 1;
}

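// Reads conditional updates to the motion vector probability model. The
// high-precision fraction bits are only updated when allow_hp is set.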
static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) {
  int i, j;

  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], 3, r);
    update_mv_probs(comp_ctx->fp, 3, r);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      nmv_component *const comp_ctx = &ctx->comps[i];
      update_mv_probs(&comp_ctx->class0_hp, 1, r);
      update_mv_probs(&comp_ctx->hp, 1, r);
    }
  }
}

static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) {
  int i;
  xd->plane[0].dequant = cm->y_dequant[q_index];

  for (i = 1; i < MAX_MB_PLANE; i++)
    xd->plane[i].dequant = cm->uv_dequant[q_index];
}

// Allocate storage for each tile column.
// TODO(jzern): when max_threads <= 1 the same storage could be used for each
// tile.
static void alloc_tile_storage(VP9D_COMP *pbi, int tile_rows, int tile_cols) {
  VP9_COMMON *const cm = &pbi->common;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  int i, tile_row, tile_col;

  CHECK_MEM_ERROR(cm, pbi->mi_streams,
                  vpx_realloc(pbi->mi_streams, tile_rows * tile_cols *
                              sizeof(*pbi->mi_streams)));
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileInfo tile;
      vp9_tile_init(&tile, cm, tile_row, tile_col);
      pbi->mi_streams[tile_row * tile_cols + tile_col] =
          &cm->mi[tile.mi_row_start * cm->mode_info_stride
                  + tile.mi_col_start];
    }
  }

  // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
  // block where mi unit size is 8x8.
  CHECK_MEM_ERROR(cm, pbi->above_context[0],
                  vpx_realloc(pbi->above_context[0],
                              sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
                              2 * aligned_mi_cols));
  for (i = 1; i < MAX_MB_PLANE; ++i) {
    pbi->above_context[i] = pbi->above_context[0] +
                            i * sizeof(*pbi->above_context[0]) *
                            2 * aligned_mi_cols;
  }

  // This is sized based on the entire frame. Each tile operates within its
  // column bounds.
  CHECK_MEM_ERROR(cm, pbi->above_seg_context,
                  vpx_realloc(pbi->above_seg_context,
                              sizeof(*pbi->above_seg_context) *
                              aligned_mi_cols));
}

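// Applies the inverse transform chosen by tx_size/tx_type and adds the
// result into the destination, then clears the dequantized coefficients,
// using eob to bound how many entries can be nonzero.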
static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
                                    TX_SIZE tx_size, int x, int y) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int eob = pd->eobs[block];
  if (eob > 0) {
    TX_TYPE tx_type;
    const int plane_type = pd->plane_type;
    const int stride = pd->dst.stride;
    int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
    uint8_t *const dst = &pd->dst.buf[4 * y * stride + 4 * x];

    switch (tx_size) {
      case TX_4X4:
        tx_type = get_tx_type_4x4(plane_type, xd, block);
        if (tx_type == DCT_DCT)
          xd->itxm_add(dqcoeff, dst, stride, eob);
        else
          vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type);
        break;
      case TX_8X8:
        tx_type = get_tx_type_8x8(plane_type, xd);
        vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_16X16:
        tx_type = get_tx_type_16x16(plane_type, xd);
        vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_32X32:
        tx_type = DCT_DCT;
        vp9_idct32x32_add(dqcoeff, dst, stride, eob);
        break;
      default:
        assert(!"Invalid transform size");
    }

    if (eob == 1) {
      vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
    } else {
      if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
        vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
      else if (tx_size == TX_32X32 && eob <= 34)
        vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
      else
        vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
    }
  }
}

struct intra_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
  uint8_t *token_cache;
};

static void predict_and_reconstruct_intra_block(int plane, int block,
                                                BLOCK_SIZE plane_bsize,
                                                TX_SIZE tx_size, void *arg) {
  struct intra_args *const args = arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  MODE_INFO *const mi = xd->mi_8x8[0];
  const MB_PREDICTION_MODE mode = (plane == 0)
          ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[block].as_mode
                                            : mi->mbmi.mode)
          : mi->mbmi.uv_mode;
  int x, y;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];

  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
    extend_for_intra(xd, plane_bsize, plane, block, tx_size);

  vp9_predict_intra_block(xd, block >> (tx_size << 1),
                          b_width_log2(plane_bsize), tx_size, mode,
                          dst, pd->dst.stride, dst, pd->dst.stride);

  if (!mi->mbmi.skip_coeff) {
    vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y, tx_size,
                            args->r, args->token_cache);
    inverse_transform_block(xd, plane, block, tx_size, x, y);
  }
}

struct inter_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
  int *eobtotal;
  uint8_t *token_cache;
};

static void reconstruct_inter_block(int plane, int block,
                                    BLOCK_SIZE plane_bsize,
                                    TX_SIZE tx_size, void *arg) {
  struct inter_args *args = arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  int x, y;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);

  *args->eobtotal += vp9_decode_block_tokens(cm, xd, plane, block,
                                             plane_bsize, x, y, tx_size,
                                             args->r, args->token_cache);
  inverse_transform_block(xd, plane, block, tx_size, x, y);
}

static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                        const TileInfo *const tile,
                        BLOCK_SIZE bsize, int mi_row, int mi_col) {
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int offset = mi_row * cm->mode_info_stride + mi_col;
  const int tile_offset = tile->mi_row_start * cm->mode_info_stride +
                          tile->mi_col_start;

  xd->mi_8x8 = cm->mi_grid_visible + offset;
  xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;

  // we are using the mode info context stream here
  xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset;
  xd->mi_8x8[0]->mbmi.sb_type = bsize;

  // Special case: if prev_mi is NULL, the previous mode info context
  // cannot be used.
  xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;

  set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col);

  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);

  setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
}

static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                    int idx, int mi_row, int mi_col) {
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  const int ref = mbmi->ref_frame[idx] - LAST_FRAME;
  const YV12_BUFFER_CONFIG *cfg = get_frame_ref_buffer(cm, ref);
  const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref];
  if (!vp9_is_valid_scale(sfc))
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid scale factors");

  xd->scale_factor[idx].sfc = sfc;
  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]);
  xd->corrupted |= cfg->corrupted;
}

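// Decodes a single block: reads its mode info, then either intra-predicts
// and reconstructs each transform block, or builds the inter predictor and
// adds the decoded residual.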
static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                           const TileInfo *const tile,
                           int mi_row, int mi_col,
                           vp9_reader *r, BLOCK_SIZE bsize,
                           uint8_t *token_cache) {
  const int less8x8 = bsize < BLOCK_8X8;
  MB_MODE_INFO *mbmi;

  set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
  vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);

  if (less8x8)
    bsize = BLOCK_8X8;

  // Has to be called after set_offsets
  mbmi = &xd->mi_8x8[0]->mbmi;

  if (mbmi->skip_coeff) {
    reset_skip_context(xd, bsize);
  } else {
    if (cm->seg.enabled)
      setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id,
                                                  cm->base_qindex));
  }

  if (!is_inter_block(mbmi)) {
    struct intra_args arg = {
      cm, xd, r, token_cache
    };
    foreach_transformed_block(xd, bsize, predict_and_reconstruct_intra_block,
                              &arg);
  } else {
    // Setup
    set_ref(cm, xd, 0, mi_row, mi_col);
    if (has_second_ref(mbmi))
      set_ref(cm, xd, 1, mi_row, mi_col);

    xd->subpix.filter_x = xd->subpix.filter_y =
        vp9_get_filter_kernel(mbmi->interp_filter);

    // Prediction
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);

    // Reconstruction
    if (!mbmi->skip_coeff) {
      int eobtotal = 0;
      struct inter_args arg = {
        cm, xd, r, &eobtotal, token_cache
      };
      foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
      if (!less8x8 && eobtotal == 0)
        mbmi->skip_coeff = 1;  // skip loopfilter
    }
  }

  xd->corrupted |= vp9_reader_has_error(r);
}

static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs,
                                     int mi_row, int mi_col, BLOCK_SIZE bsize,
                                     vp9_reader *r) {
  const int ctx = partition_plane_context(xd->above_seg_context,
                                          xd->left_seg_context,
                                          mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;
  PARTITION_TYPE p;

  if (has_rows && has_cols)
    p = treed_read(r, vp9_partition_tree, probs);
  else if (!has_rows && has_cols)
    p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  else if (has_rows && !has_cols)
    p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
  else
    p = PARTITION_SPLIT;

  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.partition[ctx][p];

  return p;
}

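// Recursively descends the partition tree: reads this block's partition
// type, decodes the resulting sub-blocks (recursing on PARTITION_SPLIT) and
// finally updates the partition context.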
static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            const TileInfo *const tile,
                            int mi_row, int mi_col,
                            vp9_reader* r, BLOCK_SIZE bsize,
                            uint8_t *token_cache) {
  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache);
        break;
      case PARTITION_HORZ:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache);
        if (mi_row + hbs < cm->mi_rows)
          decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize,
                         token_cache);
        break;
      case PARTITION_VERT:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache);
        if (mi_col + hbs < cm->mi_cols)
          decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize,
                         token_cache);
        break;
      case PARTITION_SPLIT:
        decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, subsize,
                        token_cache);
        decode_modes_sb(cm, xd, tile, mi_row, mi_col + hbs, r, subsize,
                        token_cache);
        decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col, r, subsize,
                        token_cache);
        decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize,
                        token_cache);
        break;
      default:
        assert(!"Invalid partition type");
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd->above_seg_context, xd->left_seg_context,
                             mi_row, mi_col, subsize, bsize);
}

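// Initializes the bool decoder over one partition of the bitstream after
// checking that the advertised size actually fits in the input buffer.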
static void setup_token_decoder(const uint8_t *data,
                                const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
                                vp9_reader *r) {
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
  if (!read_is_valid(data, read_size, data_end))
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");

  if (vp9_reader_init(r, data, read_size))
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);
}

static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
                                   vp9_reader *r) {
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < BLOCK_TYPES; i++)
      for (j = 0; j < REF_TYPES; j++)
        for (k = 0; k < COEF_BANDS; k++)
          for (l = 0; l < PREV_COEF_CONTEXTS; l++)
            if (k > 0 || l < 3)
              for (m = 0; m < UNCONSTRAINED_NODES; m++)
                vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}

static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
                            vp9_reader *r) {
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    read_coef_probs_common(fc->coef_probs[tx_size], r);
}

static void setup_segmentation(struct segmentation *seg,
                               struct vp9_read_bit_buffer *rb) {
  int i, j;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vp9_rb_read_bit(rb);
  if (!seg->enabled)
    return;

  // Segmentation map update
  seg->update_map = vp9_rb_read_bit(rb);
  if (seg->update_map) {
    for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                               : MAX_PROB;

    seg->temporal_update = vp9_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                                 : MAX_PROB;
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  seg->update_data = vp9_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vp9_rb_read_bit(rb);

    vp9_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vp9_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(seg, i, j);
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vp9_rb_read_bit(rb) ? -data : data;
        }
        vp9_set_segdata(seg, i, j, data);
      }
    }
  }
}

static void setup_loopfilter(struct loopfilter *lf,
                             struct vp9_read_bit_buffer *rb) {
  lf->filter_level = vp9_rb_read_literal(rb, 6);
  lf->sharpness_level = vp9_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      int i;

      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
    }
  }
}

static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
  const int old = *delta_q;
  *delta_q = vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
  return old != *delta_q;
}

static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vp9_read_bit_buffer *rb) {
  int update = 0;

  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  update |= read_delta_q(rb, &cm->y_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_ac_delta_q);
  if (update)
    vp9_init_dequantizer(cm);

  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

  xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
}

static INTERPOLATION_TYPE read_interp_filter_type(
    struct vp9_read_bit_buffer *rb) {
  const INTERPOLATION_TYPE literal_to_type[] = { EIGHTTAP_SMOOTH,
                                                 EIGHTTAP,
                                                 EIGHTTAP_SHARP,
                                                 BILINEAR };
  return vp9_rb_read_bit(rb) ? SWITCHABLE
                             : literal_to_type[vp9_rb_read_literal(rb, 2)];
}

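// Frame dimensions are coded as (value - 1) in 16 bits each.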
static void read_frame_size(struct vp9_read_bit_buffer *rb,
                            int *width, int *height) {
  const int w = vp9_rb_read_literal(rb, 16) + 1;
  const int h = vp9_rb_read_literal(rb, 16) + 1;
  *width = w;
  *height = h;
}

static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    read_frame_size(rb, &cm->display_width, &cm->display_height);
}

static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
  VP9_COMMON *cm = &pbi->common;

  if (cm->width != width || cm->height != height) {
    // Change in frame size.
    if (cm->width == 0 || cm->height == 0) {
      // Assign new frame buffer on first call.
      cm->new_fb_idx = NUM_YV12_BUFFERS - 1;
      cm->fb_idx_ref_cnt[cm->new_fb_idx] = 1;
    }

    // TODO(agrange) Don't test width/height, check overall size.
    if (width > cm->width || height > cm->height) {
      // Rescale frame buffers only if they're not big enough already.
      if (vp9_resize_frame_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");
    }

    cm->width = width;
    cm->height = height;

    vp9_update_frame_size(cm);
  }

  vp9_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
                           cm->subsampling_x, cm->subsampling_y,
                           VP9BORDERINPIXELS);
}

static void setup_frame_size(VP9D_COMP *pbi,
                             struct vp9_read_bit_buffer *rb) {
  int width, height;
  read_frame_size(rb, &width, &height);
  apply_frame_size(pbi, width, height);
  setup_display_size(&pbi->common, rb);
}

static void setup_frame_size_with_refs(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;

  int width, height;
  int found = 0, i;
  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    if (vp9_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const cfg = get_frame_ref_buffer(cm, i);
      width = cfg->y_crop_width;
      height = cfg->y_crop_height;
      found = 1;
      break;
    }
  }

  if (!found)
    read_frame_size(rb, &width, &height);

  if (!width || !height)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame with invalid size");

  apply_frame_size(pbi, width, height);
  setup_display_size(cm, rb);
}

static void setup_tile_context(VP9D_COMP *const pbi, MACROBLOCKD *const xd,
                               int tile_row, int tile_col) {
  int i;
  const int tile_cols = 1 << pbi->common.log2_tile_cols;
  xd->mi_stream = pbi->mi_streams[tile_row * tile_cols + tile_col];

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    xd->above_context[i] = pbi->above_context[i];
  }
  // see note in alloc_tile_storage().
  xd->above_seg_context = pbi->above_seg_context;
}

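// Decodes one tile superblock row by superblock row. With inline loop
// filtering enabled, filtering trails decoding by one superblock row; the
// remainder of the frame is filtered once the tile is finished.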
static void decode_tile(VP9D_COMP *pbi, const TileInfo *const tile,
                        vp9_reader *r) {
  const int num_threads = pbi->oxcf.max_threads;
  VP9_COMMON *const cm = &pbi->common;
  int mi_row, mi_col;
  MACROBLOCKD *xd = &pbi->mb;

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    lf_data->frame_buffer = get_frame_new_buffer(cm);
    lf_data->cm = cm;
    lf_data->xd = pbi->mb;
    lf_data->stop = 0;
    lf_data->y_only = 0;
    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
  }

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    // For a SB there are 2 left contexts, each pertaining to a MB row within
    vp9_zero(xd->left_context);
    vp9_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64,
                      pbi->token_cache);
    }

    if (pbi->do_loopfilter_inline) {
      const int lf_start = mi_row - MI_BLOCK_SIZE;
      LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

      // delay the loopfilter by 1 macroblock row.
      if (lf_start < 0) continue;

      // decoding has completed: finish up the loop filter in this thread.
      if (mi_row + MI_BLOCK_SIZE >= tile->mi_row_end) continue;

      vp9_worker_sync(&pbi->lf_worker);
      lf_data->start = lf_start;
      lf_data->stop = mi_row;
      if (num_threads > 1) {
        vp9_worker_launch(&pbi->lf_worker);
      } else {
        vp9_worker_execute(&pbi->lf_worker);
      }
    }
  }

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

    vp9_worker_sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    vp9_worker_execute(&pbi->lf_worker);
  }
}

static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}

// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static size_t get_tile(const uint8_t *const data_end,
                       int is_last,
                       struct vpx_internal_error_info *error_info,
                       const uint8_t **data) {
  size_t size;

  if (!is_last) {
    if (!read_is_valid(*data, 4, data_end))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile length");

    size = read_be32(*data);
    *data += 4;

    if (size > data_end - *data) {
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile size");
    }
  } else {
    size = data_end - *data;
  }
  return size;
}

typedef struct TileBuffer {
  const uint8_t *data;
  size_t size;
} TileBuffer;

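// Single-threaded tile path: splits the input into per-tile buffers using
// the 4-byte tile size headers, then decodes the tiles in bitstream order
// (or reversed, when inv_tile_order is set).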
static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  TileBuffer tile_buffers[4][1 << 6];
  int tile_row, tile_col;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const uint8_t *end = NULL;
  vp9_reader r;

  assert(tile_rows <= 4);
  assert(tile_cols <= (1 << 6));

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE * 2 * aligned_cols);

  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_cols);

  // Load tile data into tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int last_tile = tile_row == tile_rows - 1 &&
                            tile_col == tile_cols - 1;
      const size_t size = get_tile(data_end, last_tile, &cm->error, &data);
      TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
      buf->data = data;
      buf->size = size;
      data += size;
    }
  }

  // Decode tiles using data from tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int col = pbi->oxcf.inv_tile_order ? tile_cols - tile_col - 1
                                               : tile_col;
      const int last_tile = tile_row == tile_rows - 1 &&
                            col == tile_cols - 1;
      const TileBuffer *const buf = &tile_buffers[tile_row][col];
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r);
      setup_tile_context(pbi, xd, tile_row, col);
      decode_tile(pbi, &tile, &r);

      if (last_tile)
        end = vp9_reader_find_end(&r);
    }
  }

  return end;
}

static void setup_tile_macroblockd(TileWorkerData *const tile_data) {
  MACROBLOCKD *xd = &tile_data->xd;
  struct macroblockd_plane *const pd = xd->plane;
  int i;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    pd[i].qcoeff = tile_data->qcoeff[i];
    pd[i].dqcoeff = tile_data->dqcoeff[i];
    pd[i].eobs = tile_data->eobs[i];
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));
  }
}

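// Hook run on each tile worker thread: decodes every superblock in the
// tile and returns nonzero only if no corruption was detected.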
static int tile_worker_hook(void *arg1, void *arg2) {
  TileWorkerData *const tile_data = (TileWorkerData*)arg1;
  const TileInfo *const tile = (TileInfo*)arg2;
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(tile_data->xd.left_context);
    vp9_zero(tile_data->xd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_modes_sb(tile_data->cm, &tile_data->xd, tile,
                      mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64,
                      tile_data->token_cache);
    }
  }
  return !tile_data->xd.corrupted;
}

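// Multi-threaded tile path: grows the worker pool on demand, hands one
// tile column to each worker per pass, executes the last tile of a pass on
// the calling thread, then syncs the remaining workers.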
static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) {
  VP9_COMMON *const cm = &pbi->common;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int num_workers = MIN(pbi->oxcf.max_threads & ~1, tile_cols);
  int tile_col = 0;

  assert(tile_rows == 1);
  (void)tile_rows;

  if (num_workers > pbi->num_tile_workers) {
    int i;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    vpx_realloc(pbi->tile_workers,
                                num_workers * sizeof(*pbi->tile_workers)));
    for (i = pbi->num_tile_workers; i < num_workers; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      ++pbi->num_tile_workers;

      vp9_worker_init(worker);
      worker->hook = (VP9WorkerHook)tile_worker_hook;
      CHECK_MEM_ERROR(cm, worker->data1,
                      vpx_memalign(32, sizeof(TileWorkerData)));
      CHECK_MEM_ERROR(cm, worker->data2, vpx_malloc(sizeof(TileInfo)));
      if (i < num_workers - 1 && !vp9_worker_reset(worker)) {
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }
    }
  }

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
             2 * aligned_mi_cols);
  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_mi_cols);

  while (tile_col < tile_cols) {
    int i;
    for (i = 0; i < num_workers && tile_col < tile_cols; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
      TileInfo *const tile = (TileInfo*)worker->data2;
      const size_t size =
          get_tile(data_end, tile_col == tile_cols - 1, &cm->error, &data);

      tile_data->cm = cm;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      vp9_tile_init(tile, tile_data->cm, 0, tile_col);

      setup_token_decoder(data, data_end, size, &cm->error,
                          &tile_data->bit_reader);
      setup_tile_context(pbi, &tile_data->xd, 0, tile_col);
      setup_tile_macroblockd(tile_data);

      worker->had_error = 0;
      if (i == num_workers - 1 || tile_col == tile_cols - 1) {
        vp9_worker_execute(worker);
      } else {
        vp9_worker_launch(worker);
      }

      data += size;
      ++tile_col;
    }

    for (; i > 0; --i) {
      VP9Worker *const worker = &pbi->tile_workers[i - 1];
      pbi->mb.corrupted |= !vp9_worker_sync(worker);
    }
  }

  {
    const int final_worker = (tile_cols + num_workers - 1) % num_workers;
    TileWorkerData *const tile_data =
        (TileWorkerData*)pbi->tile_workers[final_worker].data1;
    return vp9_reader_find_end(&tile_data->bit_reader);
  }
}

static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  if (vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_0 ||
      vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_1 ||
      vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_2) {
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame sync code");
  }
}

static void error_handler(void *data, size_t bit_offset) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}

#define RESERVED \
  if (vp9_rb_read_bit(rb)) \
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \
                       "Reserved bit must be unset")

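// Parses the uncompressed frame header: frame marker, sync code, frame
// type, reference configuration, frame size, and the loopfilter,
// quantization, segmentation and tile parameters. Returns the size of the
// compressed header that follows, or 0 when an existing frame is re-shown.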
static size_t read_uncompressed_header(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  size_t sz;
  int i;

  cm->last_frame_type = cm->frame_type;

  if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame marker");

  cm->version = vp9_rb_read_bit(rb);
  RESERVED;

  if (vp9_rb_read_bit(rb)) {
    // show an existing frame directly
    int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
    ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->new_fb_idx, frame_to_show);
    pbi->refresh_frame_flags = 0;
    cm->lf.filter_level = 0;
    return 0;
  }

  cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
  cm->show_frame = vp9_rb_read_bit(rb);
  cm->error_resilient_mode = vp9_rb_read_bit(rb);

  if (cm->frame_type == KEY_FRAME) {
    check_sync_code(cm, rb);

    cm->color_space = vp9_rb_read_literal(rb, 3);  // colorspace
    if (cm->color_space != SRGB) {
      vp9_rb_read_bit(rb);  // [16,235] (including xvycc) vs [0,255] range
      if (cm->version == 1) {
        cm->subsampling_x = vp9_rb_read_bit(rb);
        cm->subsampling_y = vp9_rb_read_bit(rb);
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        cm->subsampling_y = cm->subsampling_x = 1;
      }
    } else {
      if (cm->version == 1) {
        cm->subsampling_y = cm->subsampling_x = 0;
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "RGB not supported in profile 0");
      }
    }

    pbi->refresh_frame_flags = (1 << NUM_REF_FRAMES) - 1;

    for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
      cm->active_ref_idx[i] = cm->new_fb_idx;

    setup_frame_size(pbi, rb);
  } else {
    cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);

    cm->reset_frame_context = cm->error_resilient_mode ?
        0 : vp9_rb_read_literal(rb, 2);

    if (cm->intra_only) {
      check_sync_code(cm, rb);

      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
      setup_frame_size(pbi, rb);
    } else {
      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);

      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
        const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LOG2);
        cm->active_ref_idx[i] = cm->ref_frame_map[ref];
        cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
      }

      setup_frame_size_with_refs(pbi, rb);

      cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
      cm->mcomp_filter_type = read_interp_filter_type(rb);

      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
        vp9_setup_scale_factors(cm, i);
    }
  }

  if (!cm->error_resilient_mode) {
    cm->refresh_frame_context = vp9_rb_read_bit(rb);
    cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
  } else {
    cm->refresh_frame_context = 0;
    cm->frame_parallel_decoding_mode = 1;
  }

  // This flag will be overridden by the call to vp9_setup_past_independence
  // below, forcing the use of context 0 for those frame types.
  cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LOG2);

  if (frame_is_intra_only(cm) || cm->error_resilient_mode)
    vp9_setup_past_independence(cm);

  setup_loopfilter(&cm->lf, rb);
  setup_quantization(cm, &pbi->mb, rb);
  setup_segmentation(&cm->seg, rb);

  setup_tile_info(cm, rb);
  sz = vp9_rb_read_literal(rb, 16);

  if (sz == 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid header size");

  return sz;
}

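// Parses the compressed header with the bool decoder: transform mode,
// coefficient probabilities and, for inter frames, the mode, reference and
// motion vector probability updates.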
static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data,
                                  size_t partition_size) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_reader r;
  int k;

  if (vp9_reader_init(&r, data, partition_size))
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder 0");

  cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
  if (cm->tx_mode == TX_MODE_SELECT)
    read_tx_probs(&fc->tx_probs, &r);
  read_coef_probs(fc, cm->tx_mode, &r);

  for (k = 0; k < MBSKIP_CONTEXTS; ++k)
    vp9_diff_update_prob(&r, &fc->mbskip_probs[k]);

  if (!frame_is_intra_only(cm)) {
    nmv_context *const nmvc = &fc->nmvc;
    int i, j;

    read_inter_mode_probs(fc, &r);

    if (cm->mcomp_filter_type == SWITCHABLE)
      read_switchable_interp_probs(fc, &r);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);

    read_comp_pred(cm, &r);

    for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
      for (i = 0; i < INTRA_MODES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);

    for (j = 0; j < PARTITION_CONTEXTS; ++j)
      for (i = 0; i < PARTITION_TYPES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);

    read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
  }

  return vp9_reader_has_error(&r);
}

void vp9_init_dequantizer(VP9_COMMON *cm) {
  int q;

  for (q = 0; q < QINDEX_RANGE; q++) {
    cm->y_dequant[q][0] = vp9_dc_quant(q, cm->y_dc_delta_q);
    cm->y_dequant[q][1] = vp9_ac_quant(q, 0);

    cm->uv_dequant[q][0] = vp9_dc_quant(q, cm->uv_dc_delta_q);
    cm->uv_dequant[q][1] = vp9_ac_quant(q, cm->uv_ac_delta_q);
  }
}

#ifdef NDEBUG
#define debug_check_frame_counts(cm) (void)0
#else  // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
static void debug_check_frame_counts(const VP9_COMMON *const cm) {
  FRAME_COUNTS zero_counts;
  vp9_zero(zero_counts);
  assert(cm->frame_parallel_decoding_mode || cm->error_resilient_mode);
  assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
                 sizeof(cm->counts.y_mode)));
  assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
                 sizeof(cm->counts.uv_mode)));
  assert(!memcmp(cm->counts.partition, zero_counts.partition,
                 sizeof(cm->counts.partition)));
  assert(!memcmp(cm->counts.coef, zero_counts.coef,
                 sizeof(cm->counts.coef)));
  assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
                 sizeof(cm->counts.eob_branch)));
  assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
                 sizeof(cm->counts.switchable_interp)));
  assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
                 sizeof(cm->counts.inter_mode)));
  assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
                 sizeof(cm->counts.intra_inter)));
  assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
                 sizeof(cm->counts.comp_inter)));
  assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
                 sizeof(cm->counts.single_ref)));
  assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
                 sizeof(cm->counts.comp_ref)));
  assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
  assert(!memcmp(cm->counts.mbskip, zero_counts.mbskip,
                 sizeof(cm->counts.mbskip)));
  assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
}
#endif  // NDEBUG

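// Top-level frame decode: reads both headers, allocates per-tile storage,
// decodes all tiles (multi-threaded when the stream allows it) and adapts
// the entropy context unless frame-parallel or error-resilient mode is on.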
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
  int i;
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  const uint8_t *data = pbi->source;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;

  struct vp9_read_bit_buffer rb = { data, data_end, 0, cm, error_handler };
  const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
  const int keyframe = cm->frame_type == KEY_FRAME;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int tile_cols = 1 << cm->log2_tile_cols;
  YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);

  if (!first_partition_size) {
    // showing a frame directly
    *p_data_end = data + 1;
    return 0;
  }

  if (!pbi->decoded_key_frame && !keyframe)
    return -1;

  data += vp9_rb_bytes_read(&rb);
  if (!read_is_valid(data, first_partition_size, data_end))
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt header length");

  pbi->do_loopfilter_inline =
      (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
  if (pbi->do_loopfilter_inline && pbi->lf_worker.data1 == NULL) {
    CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, vpx_malloc(sizeof(LFWorkerData)));
    pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
    if (pbi->oxcf.max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Loop filter thread creation failed");
    }
  }

  alloc_tile_storage(pbi, tile_rows, tile_cols);

  xd->mode_info_stride = cm->mode_info_stride;
  set_prev_mi(cm);

  setup_plane_dequants(cm, xd, cm->base_qindex);
  setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y);

  cm->fc = cm->frame_contexts[cm->frame_context_idx];
  vp9_zero(cm->counts);
  for (i = 0; i < MAX_MB_PLANE; ++i)
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));

  xd->corrupted = 0;
  new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);

  // TODO(jzern): remove frame_parallel_decoding_mode restriction for
  // single-frame tile decoding.
  if (pbi->oxcf.max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
      cm->frame_parallel_decoding_mode) {
    *p_data_end = decode_tiles_mt(pbi, data + first_partition_size);
  } else {
    *p_data_end = decode_tiles(pbi, data + first_partition_size);
  }

  cm->last_width = cm->width;
  cm->last_height = cm->height;

  new_fb->corrupted |= xd->corrupted;

  if (!pbi->decoded_key_frame) {
    if (keyframe && !new_fb->corrupted)
      pbi->decoded_key_frame = 1;
    else
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "A stream must start with a complete key frame");
  }

  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
    vp9_adapt_coef_probs(cm);

    if (!frame_is_intra_only(cm)) {
      vp9_adapt_mode_probs(cm);
      vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
    }
  } else {
    debug_check_frame_counts(cm);
  }

  if (cm->refresh_frame_context)
    cm->frame_contexts[cm->frame_context_idx] = cm->fc;

  return 0;
}
