1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/media/libvpx/vp9/decoder/vp9_decodframe.c Wed Dec 31 06:09:35 2014 +0100 1.3 @@ -0,0 +1,1389 @@ 1.4 +/* 1.5 + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 1.6 + * 1.7 + * Use of this source code is governed by a BSD-style license 1.8 + * that can be found in the LICENSE file in the root of the source 1.9 + * tree. An additional intellectual property rights grant can be found 1.10 + * in the file PATENTS. All contributing project authors may 1.11 + * be found in the AUTHORS file in the root of the source tree. 1.12 + */ 1.13 + 1.14 +#include <assert.h> 1.15 + 1.16 +#include "./vp9_rtcd.h" 1.17 +#include "vpx_mem/vpx_mem.h" 1.18 +#include "vpx_scale/vpx_scale.h" 1.19 + 1.20 +#include "vp9/common/vp9_alloccommon.h" 1.21 +#include "vp9/common/vp9_common.h" 1.22 +#include "vp9/common/vp9_entropy.h" 1.23 +#include "vp9/common/vp9_entropymode.h" 1.24 +#include "vp9/common/vp9_extend.h" 1.25 +#include "vp9/common/vp9_idct.h" 1.26 +#include "vp9/common/vp9_pred_common.h" 1.27 +#include "vp9/common/vp9_quant_common.h" 1.28 +#include "vp9/common/vp9_reconintra.h" 1.29 +#include "vp9/common/vp9_reconinter.h" 1.30 +#include "vp9/common/vp9_seg_common.h" 1.31 +#include "vp9/common/vp9_tile_common.h" 1.32 + 1.33 +#include "vp9/decoder/vp9_dboolhuff.h" 1.34 +#include "vp9/decoder/vp9_decodframe.h" 1.35 +#include "vp9/decoder/vp9_detokenize.h" 1.36 +#include "vp9/decoder/vp9_decodemv.h" 1.37 +#include "vp9/decoder/vp9_dsubexp.h" 1.38 +#include "vp9/decoder/vp9_onyxd_int.h" 1.39 +#include "vp9/decoder/vp9_read_bit_buffer.h" 1.40 +#include "vp9/decoder/vp9_thread.h" 1.41 +#include "vp9/decoder/vp9_treereader.h" 1.42 + 1.43 +typedef struct TileWorkerData { 1.44 + VP9_COMMON *cm; 1.45 + vp9_reader bit_reader; 1.46 + DECLARE_ALIGNED(16, MACROBLOCKD, xd); 1.47 + DECLARE_ALIGNED(16, unsigned char, token_cache[1024]); 1.48 + DECLARE_ALIGNED(16, int16_t, qcoeff[MAX_MB_PLANE][64 * 64]); 1.49 + 
DECLARE_ALIGNED(16, int16_t, dqcoeff[MAX_MB_PLANE][64 * 64]); 1.50 + DECLARE_ALIGNED(16, uint16_t, eobs[MAX_MB_PLANE][256]); 1.51 +} TileWorkerData; 1.52 + 1.53 +static int read_be32(const uint8_t *p) { 1.54 + return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; 1.55 +} 1.56 + 1.57 +static int is_compound_prediction_allowed(const VP9_COMMON *cm) { 1.58 + int i; 1.59 + for (i = 1; i < ALLOWED_REFS_PER_FRAME; ++i) 1.60 + if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) 1.61 + return 1; 1.62 + 1.63 + return 0; 1.64 +} 1.65 + 1.66 +static void setup_compound_prediction(VP9_COMMON *cm) { 1.67 + if (cm->ref_frame_sign_bias[LAST_FRAME] == 1.68 + cm->ref_frame_sign_bias[GOLDEN_FRAME]) { 1.69 + cm->comp_fixed_ref = ALTREF_FRAME; 1.70 + cm->comp_var_ref[0] = LAST_FRAME; 1.71 + cm->comp_var_ref[1] = GOLDEN_FRAME; 1.72 + } else if (cm->ref_frame_sign_bias[LAST_FRAME] == 1.73 + cm->ref_frame_sign_bias[ALTREF_FRAME]) { 1.74 + cm->comp_fixed_ref = GOLDEN_FRAME; 1.75 + cm->comp_var_ref[0] = LAST_FRAME; 1.76 + cm->comp_var_ref[1] = ALTREF_FRAME; 1.77 + } else { 1.78 + cm->comp_fixed_ref = LAST_FRAME; 1.79 + cm->comp_var_ref[0] = GOLDEN_FRAME; 1.80 + cm->comp_var_ref[1] = ALTREF_FRAME; 1.81 + } 1.82 +} 1.83 + 1.84 +// len == 0 is not allowed 1.85 +static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) { 1.86 + return start + len > start && start + len <= end; 1.87 +} 1.88 + 1.89 +static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) { 1.90 + const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max)); 1.91 + return data > max ? 
max : data; 1.92 +} 1.93 + 1.94 +static TX_MODE read_tx_mode(vp9_reader *r) { 1.95 + TX_MODE tx_mode = vp9_read_literal(r, 2); 1.96 + if (tx_mode == ALLOW_32X32) 1.97 + tx_mode += vp9_read_bit(r); 1.98 + return tx_mode; 1.99 +} 1.100 + 1.101 +static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) { 1.102 + int i, j; 1.103 + 1.104 + for (i = 0; i < TX_SIZE_CONTEXTS; ++i) 1.105 + for (j = 0; j < TX_SIZES - 3; ++j) 1.106 + vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]); 1.107 + 1.108 + for (i = 0; i < TX_SIZE_CONTEXTS; ++i) 1.109 + for (j = 0; j < TX_SIZES - 2; ++j) 1.110 + vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]); 1.111 + 1.112 + for (i = 0; i < TX_SIZE_CONTEXTS; ++i) 1.113 + for (j = 0; j < TX_SIZES - 1; ++j) 1.114 + vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]); 1.115 +} 1.116 + 1.117 +static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) { 1.118 + int i, j; 1.119 + for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) 1.120 + for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) 1.121 + vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]); 1.122 +} 1.123 + 1.124 +static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) { 1.125 + int i, j; 1.126 + for (i = 0; i < INTER_MODE_CONTEXTS; ++i) 1.127 + for (j = 0; j < INTER_MODES - 1; ++j) 1.128 + vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]); 1.129 +} 1.130 + 1.131 +static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) { 1.132 + COMPPREDMODE_TYPE mode = vp9_read_bit(r); 1.133 + if (mode) 1.134 + mode += vp9_read_bit(r); 1.135 + return mode; 1.136 +} 1.137 + 1.138 +static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) { 1.139 + int i; 1.140 + 1.141 + const int compound_allowed = is_compound_prediction_allowed(cm); 1.142 + cm->comp_pred_mode = compound_allowed ? 
read_comp_pred_mode(r) 1.143 + : SINGLE_PREDICTION_ONLY; 1.144 + if (compound_allowed) 1.145 + setup_compound_prediction(cm); 1.146 + 1.147 + if (cm->comp_pred_mode == HYBRID_PREDICTION) 1.148 + for (i = 0; i < COMP_INTER_CONTEXTS; i++) 1.149 + vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]); 1.150 + 1.151 + if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) 1.152 + for (i = 0; i < REF_CONTEXTS; i++) { 1.153 + vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]); 1.154 + vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]); 1.155 + } 1.156 + 1.157 + if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) 1.158 + for (i = 0; i < REF_CONTEXTS; i++) 1.159 + vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]); 1.160 +} 1.161 + 1.162 +static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) { 1.163 + int i; 1.164 + for (i = 0; i < n; ++i) 1.165 + if (vp9_read(r, NMV_UPDATE_PROB)) 1.166 + p[i] = (vp9_read_literal(r, 7) << 1) | 1; 1.167 +} 1.168 + 1.169 +static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) { 1.170 + int i, j; 1.171 + 1.172 + update_mv_probs(ctx->joints, MV_JOINTS - 1, r); 1.173 + 1.174 + for (i = 0; i < 2; ++i) { 1.175 + nmv_component *const comp_ctx = &ctx->comps[i]; 1.176 + update_mv_probs(&comp_ctx->sign, 1, r); 1.177 + update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r); 1.178 + update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r); 1.179 + update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r); 1.180 + } 1.181 + 1.182 + for (i = 0; i < 2; ++i) { 1.183 + nmv_component *const comp_ctx = &ctx->comps[i]; 1.184 + for (j = 0; j < CLASS0_SIZE; ++j) 1.185 + update_mv_probs(comp_ctx->class0_fp[j], 3, r); 1.186 + update_mv_probs(comp_ctx->fp, 3, r); 1.187 + } 1.188 + 1.189 + if (allow_hp) { 1.190 + for (i = 0; i < 2; ++i) { 1.191 + nmv_component *const comp_ctx = &ctx->comps[i]; 1.192 + update_mv_probs(&comp_ctx->class0_hp, 1, r); 1.193 + update_mv_probs(&comp_ctx->hp, 1, r); 1.194 + } 1.195 + } 1.196 +} 1.197 + 1.198 +static void 
setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) { 1.199 + int i; 1.200 + xd->plane[0].dequant = cm->y_dequant[q_index]; 1.201 + 1.202 + for (i = 1; i < MAX_MB_PLANE; i++) 1.203 + xd->plane[i].dequant = cm->uv_dequant[q_index]; 1.204 +} 1.205 + 1.206 +// Allocate storage for each tile column. 1.207 +// TODO(jzern): when max_threads <= 1 the same storage could be used for each 1.208 +// tile. 1.209 +static void alloc_tile_storage(VP9D_COMP *pbi, int tile_rows, int tile_cols) { 1.210 + VP9_COMMON *const cm = &pbi->common; 1.211 + const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); 1.212 + int i, tile_row, tile_col; 1.213 + 1.214 + CHECK_MEM_ERROR(cm, pbi->mi_streams, 1.215 + vpx_realloc(pbi->mi_streams, tile_rows * tile_cols * 1.216 + sizeof(*pbi->mi_streams))); 1.217 + for (tile_row = 0; tile_row < tile_rows; ++tile_row) { 1.218 + for (tile_col = 0; tile_col < tile_cols; ++tile_col) { 1.219 + TileInfo tile; 1.220 + vp9_tile_init(&tile, cm, tile_row, tile_col); 1.221 + pbi->mi_streams[tile_row * tile_cols + tile_col] = 1.222 + &cm->mi[tile.mi_row_start * cm->mode_info_stride 1.223 + + tile.mi_col_start]; 1.224 + } 1.225 + } 1.226 + 1.227 + // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm 1.228 + // block where mi unit size is 8x8. 1.229 + CHECK_MEM_ERROR(cm, pbi->above_context[0], 1.230 + vpx_realloc(pbi->above_context[0], 1.231 + sizeof(*pbi->above_context[0]) * MAX_MB_PLANE * 1.232 + 2 * aligned_mi_cols)); 1.233 + for (i = 1; i < MAX_MB_PLANE; ++i) { 1.234 + pbi->above_context[i] = pbi->above_context[0] + 1.235 + i * sizeof(*pbi->above_context[0]) * 1.236 + 2 * aligned_mi_cols; 1.237 + } 1.238 + 1.239 + // This is sized based on the entire frame. Each tile operates within its 1.240 + // column bounds. 
1.241 + CHECK_MEM_ERROR(cm, pbi->above_seg_context, 1.242 + vpx_realloc(pbi->above_seg_context, 1.243 + sizeof(*pbi->above_seg_context) * 1.244 + aligned_mi_cols)); 1.245 +} 1.246 + 1.247 +static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block, 1.248 + TX_SIZE tx_size, int x, int y) { 1.249 + struct macroblockd_plane *const pd = &xd->plane[plane]; 1.250 + const int eob = pd->eobs[block]; 1.251 + if (eob > 0) { 1.252 + TX_TYPE tx_type; 1.253 + const int plane_type = pd->plane_type; 1.254 + const int stride = pd->dst.stride; 1.255 + int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); 1.256 + uint8_t *const dst = &pd->dst.buf[4 * y * stride + 4 * x]; 1.257 + 1.258 + switch (tx_size) { 1.259 + case TX_4X4: 1.260 + tx_type = get_tx_type_4x4(plane_type, xd, block); 1.261 + if (tx_type == DCT_DCT) 1.262 + xd->itxm_add(dqcoeff, dst, stride, eob); 1.263 + else 1.264 + vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type); 1.265 + break; 1.266 + case TX_8X8: 1.267 + tx_type = get_tx_type_8x8(plane_type, xd); 1.268 + vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); 1.269 + break; 1.270 + case TX_16X16: 1.271 + tx_type = get_tx_type_16x16(plane_type, xd); 1.272 + vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob); 1.273 + break; 1.274 + case TX_32X32: 1.275 + tx_type = DCT_DCT; 1.276 + vp9_idct32x32_add(dqcoeff, dst, stride, eob); 1.277 + break; 1.278 + default: 1.279 + assert(!"Invalid transform size"); 1.280 + } 1.281 + 1.282 + if (eob == 1) { 1.283 + vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0])); 1.284 + } else { 1.285 + if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10) 1.286 + vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); 1.287 + else if (tx_size == TX_32X32 && eob <= 34) 1.288 + vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); 1.289 + else 1.290 + vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0])); 1.291 + } 1.292 + } 1.293 +} 1.294 + 1.295 +struct intra_args { 1.296 + VP9_COMMON *cm; 1.297 + 
MACROBLOCKD *xd; 1.298 + vp9_reader *r; 1.299 + uint8_t *token_cache; 1.300 +}; 1.301 + 1.302 +static void predict_and_reconstruct_intra_block(int plane, int block, 1.303 + BLOCK_SIZE plane_bsize, 1.304 + TX_SIZE tx_size, void *arg) { 1.305 + struct intra_args *const args = arg; 1.306 + VP9_COMMON *const cm = args->cm; 1.307 + MACROBLOCKD *const xd = args->xd; 1.308 + struct macroblockd_plane *const pd = &xd->plane[plane]; 1.309 + MODE_INFO *const mi = xd->mi_8x8[0]; 1.310 + const MB_PREDICTION_MODE mode = (plane == 0) 1.311 + ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[block].as_mode 1.312 + : mi->mbmi.mode) 1.313 + : mi->mbmi.uv_mode; 1.314 + int x, y; 1.315 + uint8_t *dst; 1.316 + txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); 1.317 + dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x]; 1.318 + 1.319 + if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) 1.320 + extend_for_intra(xd, plane_bsize, plane, block, tx_size); 1.321 + 1.322 + vp9_predict_intra_block(xd, block >> (tx_size << 1), 1.323 + b_width_log2(plane_bsize), tx_size, mode, 1.324 + dst, pd->dst.stride, dst, pd->dst.stride); 1.325 + 1.326 + if (!mi->mbmi.skip_coeff) { 1.327 + vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y, tx_size, 1.328 + args->r, args->token_cache); 1.329 + inverse_transform_block(xd, plane, block, tx_size, x, y); 1.330 + } 1.331 +} 1.332 + 1.333 +struct inter_args { 1.334 + VP9_COMMON *cm; 1.335 + MACROBLOCKD *xd; 1.336 + vp9_reader *r; 1.337 + int *eobtotal; 1.338 + uint8_t *token_cache; 1.339 +}; 1.340 + 1.341 +static void reconstruct_inter_block(int plane, int block, 1.342 + BLOCK_SIZE plane_bsize, 1.343 + TX_SIZE tx_size, void *arg) { 1.344 + struct inter_args *args = arg; 1.345 + VP9_COMMON *const cm = args->cm; 1.346 + MACROBLOCKD *const xd = args->xd; 1.347 + int x, y; 1.348 + txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); 1.349 + 1.350 + *args->eobtotal += vp9_decode_block_tokens(cm, xd, plane, block, 1.351 + 
plane_bsize, x, y, tx_size, 1.352 + args->r, args->token_cache); 1.353 + inverse_transform_block(xd, plane, block, tx_size, x, y); 1.354 +} 1.355 + 1.356 +static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd, 1.357 + const TileInfo *const tile, 1.358 + BLOCK_SIZE bsize, int mi_row, int mi_col) { 1.359 + const int bh = num_8x8_blocks_high_lookup[bsize]; 1.360 + const int bw = num_8x8_blocks_wide_lookup[bsize]; 1.361 + const int offset = mi_row * cm->mode_info_stride + mi_col; 1.362 + const int tile_offset = tile->mi_row_start * cm->mode_info_stride + 1.363 + tile->mi_col_start; 1.364 + 1.365 + xd->mi_8x8 = cm->mi_grid_visible + offset; 1.366 + xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset; 1.367 + 1.368 + // we are using the mode info context stream here 1.369 + xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset; 1.370 + xd->mi_8x8[0]->mbmi.sb_type = bsize; 1.371 + 1.372 + // Special case: if prev_mi is NULL, the previous mode info context 1.373 + // cannot be used. 1.374 + xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL; 1.375 + 1.376 + set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col); 1.377 + 1.378 + // Distance of Mb to the various image edges. 
These are specified to 8th pel 1.379 + // as they are always compared to values that are in 1/8th pel units 1.380 + set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols); 1.381 + 1.382 + setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col); 1.383 +} 1.384 + 1.385 +static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd, 1.386 + int idx, int mi_row, int mi_col) { 1.387 + MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; 1.388 + const int ref = mbmi->ref_frame[idx] - LAST_FRAME; 1.389 + const YV12_BUFFER_CONFIG *cfg = get_frame_ref_buffer(cm, ref); 1.390 + const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref]; 1.391 + if (!vp9_is_valid_scale(sfc)) 1.392 + vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, 1.393 + "Invalid scale factors"); 1.394 + 1.395 + xd->scale_factor[idx].sfc = sfc; 1.396 + setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]); 1.397 + xd->corrupted |= cfg->corrupted; 1.398 +} 1.399 + 1.400 +static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd, 1.401 + const TileInfo *const tile, 1.402 + int mi_row, int mi_col, 1.403 + vp9_reader *r, BLOCK_SIZE bsize, 1.404 + uint8_t *token_cache) { 1.405 + const int less8x8 = bsize < BLOCK_8X8; 1.406 + MB_MODE_INFO *mbmi; 1.407 + 1.408 + set_offsets(cm, xd, tile, bsize, mi_row, mi_col); 1.409 + vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r); 1.410 + 1.411 + if (less8x8) 1.412 + bsize = BLOCK_8X8; 1.413 + 1.414 + // Has to be called after set_offsets 1.415 + mbmi = &xd->mi_8x8[0]->mbmi; 1.416 + 1.417 + if (mbmi->skip_coeff) { 1.418 + reset_skip_context(xd, bsize); 1.419 + } else { 1.420 + if (cm->seg.enabled) 1.421 + setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id, 1.422 + cm->base_qindex)); 1.423 + } 1.424 + 1.425 + if (!is_inter_block(mbmi)) { 1.426 + struct intra_args arg = { 1.427 + cm, xd, r, token_cache 1.428 + }; 1.429 + foreach_transformed_block(xd, bsize, 
predict_and_reconstruct_intra_block, 1.430 + &arg); 1.431 + } else { 1.432 + // Setup 1.433 + set_ref(cm, xd, 0, mi_row, mi_col); 1.434 + if (has_second_ref(mbmi)) 1.435 + set_ref(cm, xd, 1, mi_row, mi_col); 1.436 + 1.437 + xd->subpix.filter_x = xd->subpix.filter_y = 1.438 + vp9_get_filter_kernel(mbmi->interp_filter); 1.439 + 1.440 + // Prediction 1.441 + vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); 1.442 + 1.443 + // Reconstruction 1.444 + if (!mbmi->skip_coeff) { 1.445 + int eobtotal = 0; 1.446 + struct inter_args arg = { 1.447 + cm, xd, r, &eobtotal, token_cache 1.448 + }; 1.449 + foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg); 1.450 + if (!less8x8 && eobtotal == 0) 1.451 + mbmi->skip_coeff = 1; // skip loopfilter 1.452 + } 1.453 + } 1.454 + 1.455 + xd->corrupted |= vp9_reader_has_error(r); 1.456 +} 1.457 + 1.458 +static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs, 1.459 + int mi_row, int mi_col, BLOCK_SIZE bsize, 1.460 + vp9_reader *r) { 1.461 + const int ctx = partition_plane_context(xd->above_seg_context, 1.462 + xd->left_seg_context, 1.463 + mi_row, mi_col, bsize); 1.464 + const vp9_prob *const probs = get_partition_probs(cm, ctx); 1.465 + const int has_rows = (mi_row + hbs) < cm->mi_rows; 1.466 + const int has_cols = (mi_col + hbs) < cm->mi_cols; 1.467 + PARTITION_TYPE p; 1.468 + 1.469 + if (has_rows && has_cols) 1.470 + p = treed_read(r, vp9_partition_tree, probs); 1.471 + else if (!has_rows && has_cols) 1.472 + p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ; 1.473 + else if (has_rows && !has_cols) 1.474 + p = vp9_read(r, probs[2]) ? 
PARTITION_SPLIT : PARTITION_VERT; 1.475 + else 1.476 + p = PARTITION_SPLIT; 1.477 + 1.478 + if (!cm->frame_parallel_decoding_mode) 1.479 + ++cm->counts.partition[ctx][p]; 1.480 + 1.481 + return p; 1.482 +} 1.483 + 1.484 +static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd, 1.485 + const TileInfo *const tile, 1.486 + int mi_row, int mi_col, 1.487 + vp9_reader* r, BLOCK_SIZE bsize, 1.488 + uint8_t *token_cache) { 1.489 + const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2; 1.490 + PARTITION_TYPE partition; 1.491 + BLOCK_SIZE subsize; 1.492 + 1.493 + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) 1.494 + return; 1.495 + 1.496 + partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r); 1.497 + subsize = get_subsize(bsize, partition); 1.498 + if (subsize < BLOCK_8X8) { 1.499 + decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache); 1.500 + } else { 1.501 + switch (partition) { 1.502 + case PARTITION_NONE: 1.503 + decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache); 1.504 + break; 1.505 + case PARTITION_HORZ: 1.506 + decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache); 1.507 + if (mi_row + hbs < cm->mi_rows) 1.508 + decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize, 1.509 + token_cache); 1.510 + break; 1.511 + case PARTITION_VERT: 1.512 + decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, token_cache); 1.513 + if (mi_col + hbs < cm->mi_cols) 1.514 + decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize, 1.515 + token_cache); 1.516 + break; 1.517 + case PARTITION_SPLIT: 1.518 + decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, subsize, 1.519 + token_cache); 1.520 + decode_modes_sb(cm, xd, tile, mi_row, mi_col + hbs, r, subsize, 1.521 + token_cache); 1.522 + decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col, r, subsize, 1.523 + token_cache); 1.524 + decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize, 1.525 + token_cache); 1.526 + break; 1.527 + 
default: 1.528 + assert(!"Invalid partition type"); 1.529 + } 1.530 + } 1.531 + 1.532 + // update partition context 1.533 + if (bsize >= BLOCK_8X8 && 1.534 + (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) 1.535 + update_partition_context(xd->above_seg_context, xd->left_seg_context, 1.536 + mi_row, mi_col, subsize, bsize); 1.537 +} 1.538 + 1.539 +static void setup_token_decoder(const uint8_t *data, 1.540 + const uint8_t *data_end, 1.541 + size_t read_size, 1.542 + struct vpx_internal_error_info *error_info, 1.543 + vp9_reader *r) { 1.544 + // Validate the calculated partition length. If the buffer 1.545 + // described by the partition can't be fully read, then restrict 1.546 + // it to the portion that can be (for EC mode) or throw an error. 1.547 + if (!read_is_valid(data, read_size, data_end)) 1.548 + vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, 1.549 + "Truncated packet or corrupt tile length"); 1.550 + 1.551 + if (vp9_reader_init(r, data, read_size)) 1.552 + vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR, 1.553 + "Failed to allocate bool decoder %d", 1); 1.554 +} 1.555 + 1.556 +static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs, 1.557 + vp9_reader *r) { 1.558 + int i, j, k, l, m; 1.559 + 1.560 + if (vp9_read_bit(r)) 1.561 + for (i = 0; i < BLOCK_TYPES; i++) 1.562 + for (j = 0; j < REF_TYPES; j++) 1.563 + for (k = 0; k < COEF_BANDS; k++) 1.564 + for (l = 0; l < PREV_COEF_CONTEXTS; l++) 1.565 + if (k > 0 || l < 3) 1.566 + for (m = 0; m < UNCONSTRAINED_NODES; m++) 1.567 + vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]); 1.568 +} 1.569 + 1.570 +static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, 1.571 + vp9_reader *r) { 1.572 + const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; 1.573 + TX_SIZE tx_size; 1.574 + for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) 1.575 + read_coef_probs_common(fc->coef_probs[tx_size], r); 1.576 +} 1.577 + 1.578 +static void setup_segmentation(struct 
segmentation *seg, 1.579 + struct vp9_read_bit_buffer *rb) { 1.580 + int i, j; 1.581 + 1.582 + seg->update_map = 0; 1.583 + seg->update_data = 0; 1.584 + 1.585 + seg->enabled = vp9_rb_read_bit(rb); 1.586 + if (!seg->enabled) 1.587 + return; 1.588 + 1.589 + // Segmentation map update 1.590 + seg->update_map = vp9_rb_read_bit(rb); 1.591 + if (seg->update_map) { 1.592 + for (i = 0; i < SEG_TREE_PROBS; i++) 1.593 + seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8) 1.594 + : MAX_PROB; 1.595 + 1.596 + seg->temporal_update = vp9_rb_read_bit(rb); 1.597 + if (seg->temporal_update) { 1.598 + for (i = 0; i < PREDICTION_PROBS; i++) 1.599 + seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8) 1.600 + : MAX_PROB; 1.601 + } else { 1.602 + for (i = 0; i < PREDICTION_PROBS; i++) 1.603 + seg->pred_probs[i] = MAX_PROB; 1.604 + } 1.605 + } 1.606 + 1.607 + // Segmentation data update 1.608 + seg->update_data = vp9_rb_read_bit(rb); 1.609 + if (seg->update_data) { 1.610 + seg->abs_delta = vp9_rb_read_bit(rb); 1.611 + 1.612 + vp9_clearall_segfeatures(seg); 1.613 + 1.614 + for (i = 0; i < MAX_SEGMENTS; i++) { 1.615 + for (j = 0; j < SEG_LVL_MAX; j++) { 1.616 + int data = 0; 1.617 + const int feature_enabled = vp9_rb_read_bit(rb); 1.618 + if (feature_enabled) { 1.619 + vp9_enable_segfeature(seg, i, j); 1.620 + data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j)); 1.621 + if (vp9_is_segfeature_signed(j)) 1.622 + data = vp9_rb_read_bit(rb) ? -data : data; 1.623 + } 1.624 + vp9_set_segdata(seg, i, j, data); 1.625 + } 1.626 + } 1.627 + } 1.628 +} 1.629 + 1.630 +static void setup_loopfilter(struct loopfilter *lf, 1.631 + struct vp9_read_bit_buffer *rb) { 1.632 + lf->filter_level = vp9_rb_read_literal(rb, 6); 1.633 + lf->sharpness_level = vp9_rb_read_literal(rb, 3); 1.634 + 1.635 + // Read in loop filter deltas applied at the MB level based on mode or ref 1.636 + // frame. 
1.637 + lf->mode_ref_delta_update = 0; 1.638 + 1.639 + lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb); 1.640 + if (lf->mode_ref_delta_enabled) { 1.641 + lf->mode_ref_delta_update = vp9_rb_read_bit(rb); 1.642 + if (lf->mode_ref_delta_update) { 1.643 + int i; 1.644 + 1.645 + for (i = 0; i < MAX_REF_LF_DELTAS; i++) 1.646 + if (vp9_rb_read_bit(rb)) 1.647 + lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6); 1.648 + 1.649 + for (i = 0; i < MAX_MODE_LF_DELTAS; i++) 1.650 + if (vp9_rb_read_bit(rb)) 1.651 + lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6); 1.652 + } 1.653 + } 1.654 +} 1.655 + 1.656 +static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) { 1.657 + const int old = *delta_q; 1.658 + *delta_q = vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0; 1.659 + return old != *delta_q; 1.660 +} 1.661 + 1.662 +static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd, 1.663 + struct vp9_read_bit_buffer *rb) { 1.664 + int update = 0; 1.665 + 1.666 + cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS); 1.667 + update |= read_delta_q(rb, &cm->y_dc_delta_q); 1.668 + update |= read_delta_q(rb, &cm->uv_dc_delta_q); 1.669 + update |= read_delta_q(rb, &cm->uv_ac_delta_q); 1.670 + if (update) 1.671 + vp9_init_dequantizer(cm); 1.672 + 1.673 + xd->lossless = cm->base_qindex == 0 && 1.674 + cm->y_dc_delta_q == 0 && 1.675 + cm->uv_dc_delta_q == 0 && 1.676 + cm->uv_ac_delta_q == 0; 1.677 + 1.678 + xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add; 1.679 +} 1.680 + 1.681 +static INTERPOLATION_TYPE read_interp_filter_type( 1.682 + struct vp9_read_bit_buffer *rb) { 1.683 + const INTERPOLATION_TYPE literal_to_type[] = { EIGHTTAP_SMOOTH, 1.684 + EIGHTTAP, 1.685 + EIGHTTAP_SHARP, 1.686 + BILINEAR }; 1.687 + return vp9_rb_read_bit(rb) ? 
SWITCHABLE 1.688 + : literal_to_type[vp9_rb_read_literal(rb, 2)]; 1.689 +} 1.690 + 1.691 +static void read_frame_size(struct vp9_read_bit_buffer *rb, 1.692 + int *width, int *height) { 1.693 + const int w = vp9_rb_read_literal(rb, 16) + 1; 1.694 + const int h = vp9_rb_read_literal(rb, 16) + 1; 1.695 + *width = w; 1.696 + *height = h; 1.697 +} 1.698 + 1.699 +static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { 1.700 + cm->display_width = cm->width; 1.701 + cm->display_height = cm->height; 1.702 + if (vp9_rb_read_bit(rb)) 1.703 + read_frame_size(rb, &cm->display_width, &cm->display_height); 1.704 +} 1.705 + 1.706 +static void apply_frame_size(VP9D_COMP *pbi, int width, int height) { 1.707 + VP9_COMMON *cm = &pbi->common; 1.708 + 1.709 + if (cm->width != width || cm->height != height) { 1.710 + // Change in frame size. 1.711 + if (cm->width == 0 || cm->height == 0) { 1.712 + // Assign new frame buffer on first call. 1.713 + cm->new_fb_idx = NUM_YV12_BUFFERS - 1; 1.714 + cm->fb_idx_ref_cnt[cm->new_fb_idx] = 1; 1.715 + } 1.716 + 1.717 + // TODO(agrange) Don't test width/height, check overall size. 1.718 + if (width > cm->width || height > cm->height) { 1.719 + // Rescale frame buffers only if they're not big enough already. 
1.720 + if (vp9_resize_frame_buffers(cm, width, height)) 1.721 + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, 1.722 + "Failed to allocate frame buffers"); 1.723 + } 1.724 + 1.725 + cm->width = width; 1.726 + cm->height = height; 1.727 + 1.728 + vp9_update_frame_size(cm); 1.729 + } 1.730 + 1.731 + vp9_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height, 1.732 + cm->subsampling_x, cm->subsampling_y, 1.733 + VP9BORDERINPIXELS); 1.734 +} 1.735 + 1.736 +static void setup_frame_size(VP9D_COMP *pbi, 1.737 + struct vp9_read_bit_buffer *rb) { 1.738 + int width, height; 1.739 + read_frame_size(rb, &width, &height); 1.740 + apply_frame_size(pbi, width, height); 1.741 + setup_display_size(&pbi->common, rb); 1.742 +} 1.743 + 1.744 +static void setup_frame_size_with_refs(VP9D_COMP *pbi, 1.745 + struct vp9_read_bit_buffer *rb) { 1.746 + VP9_COMMON *const cm = &pbi->common; 1.747 + 1.748 + int width, height; 1.749 + int found = 0, i; 1.750 + for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { 1.751 + if (vp9_rb_read_bit(rb)) { 1.752 + YV12_BUFFER_CONFIG *const cfg = get_frame_ref_buffer(cm, i); 1.753 + width = cfg->y_crop_width; 1.754 + height = cfg->y_crop_height; 1.755 + found = 1; 1.756 + break; 1.757 + } 1.758 + } 1.759 + 1.760 + if (!found) 1.761 + read_frame_size(rb, &width, &height); 1.762 + 1.763 + if (!width || !height) 1.764 + vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, 1.765 + "Referenced frame with invalid size"); 1.766 + 1.767 + apply_frame_size(pbi, width, height); 1.768 + setup_display_size(cm, rb); 1.769 +} 1.770 + 1.771 +static void setup_tile_context(VP9D_COMP *const pbi, MACROBLOCKD *const xd, 1.772 + int tile_row, int tile_col) { 1.773 + int i; 1.774 + const int tile_cols = 1 << pbi->common.log2_tile_cols; 1.775 + xd->mi_stream = pbi->mi_streams[tile_row * tile_cols + tile_col]; 1.776 + 1.777 + for (i = 0; i < MAX_MB_PLANE; ++i) { 1.778 + xd->above_context[i] = pbi->above_context[i]; 1.779 + } 1.780 + // see note in 
alloc_tile_storage(). 1.781 + xd->above_seg_context = pbi->above_seg_context; 1.782 +} 1.783 + 1.784 +static void decode_tile(VP9D_COMP *pbi, const TileInfo *const tile, 1.785 + vp9_reader *r) { 1.786 + const int num_threads = pbi->oxcf.max_threads; 1.787 + VP9_COMMON *const cm = &pbi->common; 1.788 + int mi_row, mi_col; 1.789 + MACROBLOCKD *xd = &pbi->mb; 1.790 + 1.791 + if (pbi->do_loopfilter_inline) { 1.792 + LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; 1.793 + lf_data->frame_buffer = get_frame_new_buffer(cm); 1.794 + lf_data->cm = cm; 1.795 + lf_data->xd = pbi->mb; 1.796 + lf_data->stop = 0; 1.797 + lf_data->y_only = 0; 1.798 + vp9_loop_filter_frame_init(cm, cm->lf.filter_level); 1.799 + } 1.800 + 1.801 + for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; 1.802 + mi_row += MI_BLOCK_SIZE) { 1.803 + // For a SB there are 2 left contexts, each pertaining to a MB row within 1.804 + vp9_zero(xd->left_context); 1.805 + vp9_zero(xd->left_seg_context); 1.806 + for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; 1.807 + mi_col += MI_BLOCK_SIZE) { 1.808 + decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64, 1.809 + pbi->token_cache); 1.810 + } 1.811 + 1.812 + if (pbi->do_loopfilter_inline) { 1.813 + const int lf_start = mi_row - MI_BLOCK_SIZE; 1.814 + LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; 1.815 + 1.816 + // delay the loopfilter by 1 macroblock row. 1.817 + if (lf_start < 0) continue; 1.818 + 1.819 + // decoding has completed: finish up the loop filter in this thread. 
1.820 + if (mi_row + MI_BLOCK_SIZE >= tile->mi_row_end) continue; 1.821 + 1.822 + vp9_worker_sync(&pbi->lf_worker); 1.823 + lf_data->start = lf_start; 1.824 + lf_data->stop = mi_row; 1.825 + if (num_threads > 1) { 1.826 + vp9_worker_launch(&pbi->lf_worker); 1.827 + } else { 1.828 + vp9_worker_execute(&pbi->lf_worker); 1.829 + } 1.830 + } 1.831 + } 1.832 + 1.833 + if (pbi->do_loopfilter_inline) { 1.834 + LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; 1.835 + 1.836 + vp9_worker_sync(&pbi->lf_worker); 1.837 + lf_data->start = lf_data->stop; 1.838 + lf_data->stop = cm->mi_rows; 1.839 + vp9_worker_execute(&pbi->lf_worker); 1.840 + } 1.841 +} 1.842 + 1.843 +static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { 1.844 + int min_log2_tile_cols, max_log2_tile_cols, max_ones; 1.845 + vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); 1.846 + 1.847 + // columns 1.848 + max_ones = max_log2_tile_cols - min_log2_tile_cols; 1.849 + cm->log2_tile_cols = min_log2_tile_cols; 1.850 + while (max_ones-- && vp9_rb_read_bit(rb)) 1.851 + cm->log2_tile_cols++; 1.852 + 1.853 + // rows 1.854 + cm->log2_tile_rows = vp9_rb_read_bit(rb); 1.855 + if (cm->log2_tile_rows) 1.856 + cm->log2_tile_rows += vp9_rb_read_bit(rb); 1.857 +} 1.858 + 1.859 +// Reads the next tile returning its size and adjusting '*data' accordingly 1.860 +// based on 'is_last'. 
1.861 +static size_t get_tile(const uint8_t *const data_end, 1.862 + int is_last, 1.863 + struct vpx_internal_error_info *error_info, 1.864 + const uint8_t **data) { 1.865 + size_t size; 1.866 + 1.867 + if (!is_last) { 1.868 + if (!read_is_valid(*data, 4, data_end)) 1.869 + vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, 1.870 + "Truncated packet or corrupt tile length"); 1.871 + 1.872 + size = read_be32(*data); 1.873 + *data += 4; 1.874 + 1.875 + if (size > data_end - *data) { 1.876 + vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, 1.877 + "Truncated packet or corrupt tile size"); 1.878 + } 1.879 + } else { 1.880 + size = data_end - *data; 1.881 + } 1.882 + return size; 1.883 +} 1.884 + 1.885 +typedef struct TileBuffer { 1.886 + const uint8_t *data; 1.887 + size_t size; 1.888 +} TileBuffer; 1.889 + 1.890 +static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) { 1.891 + VP9_COMMON *const cm = &pbi->common; 1.892 + MACROBLOCKD *const xd = &pbi->mb; 1.893 + const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols); 1.894 + const int tile_cols = 1 << cm->log2_tile_cols; 1.895 + const int tile_rows = 1 << cm->log2_tile_rows; 1.896 + TileBuffer tile_buffers[4][1 << 6]; 1.897 + int tile_row, tile_col; 1.898 + const uint8_t *const data_end = pbi->source + pbi->source_sz; 1.899 + const uint8_t *end = NULL; 1.900 + vp9_reader r; 1.901 + 1.902 + assert(tile_rows <= 4); 1.903 + assert(tile_cols <= (1 << 6)); 1.904 + 1.905 + // Note: this memset assumes above_context[0], [1] and [2] 1.906 + // are allocated as part of the same buffer. 
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE * 2 * aligned_cols);

  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_cols);

  // Load tile data into tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int last_tile = tile_row == tile_rows - 1 &&
                            tile_col == tile_cols - 1;
      const size_t size = get_tile(data_end, last_tile, &cm->error, &data);
      TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
      buf->data = data;
      buf->size = size;
      data += size;
    }
  }

  // Decode tiles using data from tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      // inv_tile_order decodes the columns of a row right-to-left; the
      // buffers were indexed in bitstream order above, so remap here.
      const int col = pbi->oxcf.inv_tile_order ? tile_cols - tile_col - 1
                                               : tile_col;
      const int last_tile = tile_row == tile_rows - 1 &&
                            col == tile_cols - 1;
      const TileBuffer *const buf = &tile_buffers[tile_row][col];
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r);
      setup_tile_context(pbi, xd, tile_row, col);
      decode_tile(pbi, &tile, &r);

      // The reader for the bitstream-final tile marks the frame's end.
      if (last_tile)
        end = vp9_reader_find_end(&r);
    }
  }

  return end;
}

// Points each plane of the worker-local MACROBLOCKD at the coefficient/eob
// storage owned by 'tile_data' and clears the dequantized-coefficient
// buffers.
static void setup_tile_macroblockd(TileWorkerData *const tile_data) {
  MACROBLOCKD *xd = &tile_data->xd;
  struct macroblockd_plane *const pd = xd->plane;
  int i;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    pd[i].qcoeff = tile_data->qcoeff[i];
    pd[i].dqcoeff = tile_data->dqcoeff[i];
    pd[i].eobs = tile_data->eobs[i];
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 *
sizeof(int16_t)); 1.959 + } 1.960 +} 1.961 + 1.962 +static int tile_worker_hook(void *arg1, void *arg2) { 1.963 + TileWorkerData *const tile_data = (TileWorkerData*)arg1; 1.964 + const TileInfo *const tile = (TileInfo*)arg2; 1.965 + int mi_row, mi_col; 1.966 + 1.967 + for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; 1.968 + mi_row += MI_BLOCK_SIZE) { 1.969 + vp9_zero(tile_data->xd.left_context); 1.970 + vp9_zero(tile_data->xd.left_seg_context); 1.971 + for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; 1.972 + mi_col += MI_BLOCK_SIZE) { 1.973 + decode_modes_sb(tile_data->cm, &tile_data->xd, tile, 1.974 + mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64, 1.975 + tile_data->token_cache); 1.976 + } 1.977 + } 1.978 + return !tile_data->xd.corrupted; 1.979 +} 1.980 + 1.981 +static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) { 1.982 + VP9_COMMON *const cm = &pbi->common; 1.983 + const uint8_t *const data_end = pbi->source + pbi->source_sz; 1.984 + const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); 1.985 + const int tile_cols = 1 << cm->log2_tile_cols; 1.986 + const int tile_rows = 1 << cm->log2_tile_rows; 1.987 + const int num_workers = MIN(pbi->oxcf.max_threads & ~1, tile_cols); 1.988 + int tile_col = 0; 1.989 + 1.990 + assert(tile_rows == 1); 1.991 + (void)tile_rows; 1.992 + 1.993 + if (num_workers > pbi->num_tile_workers) { 1.994 + int i; 1.995 + CHECK_MEM_ERROR(cm, pbi->tile_workers, 1.996 + vpx_realloc(pbi->tile_workers, 1.997 + num_workers * sizeof(*pbi->tile_workers))); 1.998 + for (i = pbi->num_tile_workers; i < num_workers; ++i) { 1.999 + VP9Worker *const worker = &pbi->tile_workers[i]; 1.1000 + ++pbi->num_tile_workers; 1.1001 + 1.1002 + vp9_worker_init(worker); 1.1003 + worker->hook = (VP9WorkerHook)tile_worker_hook; 1.1004 + CHECK_MEM_ERROR(cm, worker->data1, 1.1005 + vpx_memalign(32, sizeof(TileWorkerData))); 1.1006 + CHECK_MEM_ERROR(cm, worker->data2, vpx_malloc(sizeof(TileInfo))); 1.1007 + if (i 
< num_workers - 1 && !vp9_worker_reset(worker)) {
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }
    }
  }

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
             2 * aligned_mi_cols);
  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_mi_cols);

  // Hand tiles out in batches of up to num_workers.
  while (tile_col < tile_cols) {
    int i;
    for (i = 0; i < num_workers && tile_col < tile_cols; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
      TileInfo *const tile = (TileInfo*)worker->data2;
      const size_t size =
          get_tile(data_end, tile_col == tile_cols - 1, &cm->error, &data);

      // Each worker decodes with a private copy of the frame-level
      // MACROBLOCKD; its corruption flag is merged back after sync below.
      tile_data->cm = cm;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      vp9_tile_init(tile, tile_data->cm, 0, tile_col);

      setup_token_decoder(data, data_end, size, &cm->error,
                          &tile_data->bit_reader);
      setup_tile_context(pbi, &tile_data->xd, 0, tile_col);
      setup_tile_macroblockd(tile_data);

      worker->had_error = 0;
      // The batch's last job runs synchronously on the calling thread.
      if (i == num_workers - 1 || tile_col == tile_cols - 1) {
        vp9_worker_execute(worker);
      } else {
        vp9_worker_launch(worker);
      }

      data += size;
      ++tile_col;
    }

    // Wait for the whole batch, accumulating any corruption.
    for (; i > 0; --i) {
      VP9Worker *const worker = &pbi->tile_workers[i - 1];
      pbi->mb.corrupted |= !vp9_worker_sync(worker);
    }
  }

  {
    // (tile_cols - 1) % num_workers: the worker that decoded the final tile
    // holds the reader positioned at the end of the frame's tile data.
    const int final_worker = (tile_cols + num_workers - 1) % num_workers;
    TileWorkerData *const tile_data =
(TileWorkerData*)pbi->tile_workers[final_worker].data1; 1.1062 + return vp9_reader_find_end(&tile_data->bit_reader); 1.1063 + } 1.1064 +} 1.1065 + 1.1066 +static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { 1.1067 + if (vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_0 || 1.1068 + vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_1 || 1.1069 + vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_2) { 1.1070 + vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, 1.1071 + "Invalid frame sync code"); 1.1072 + } 1.1073 +} 1.1074 + 1.1075 +static void error_handler(void *data, size_t bit_offset) { 1.1076 + VP9_COMMON *const cm = (VP9_COMMON *)data; 1.1077 + vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet"); 1.1078 +} 1.1079 + 1.1080 +#define RESERVED \ 1.1081 + if (vp9_rb_read_bit(rb)) \ 1.1082 + vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \ 1.1083 + "Reserved bit must be unset") 1.1084 + 1.1085 +static size_t read_uncompressed_header(VP9D_COMP *pbi, 1.1086 + struct vp9_read_bit_buffer *rb) { 1.1087 + VP9_COMMON *const cm = &pbi->common; 1.1088 + size_t sz; 1.1089 + int i; 1.1090 + 1.1091 + cm->last_frame_type = cm->frame_type; 1.1092 + 1.1093 + if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER) 1.1094 + vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, 1.1095 + "Invalid frame marker"); 1.1096 + 1.1097 + cm->version = vp9_rb_read_bit(rb); 1.1098 + RESERVED; 1.1099 + 1.1100 + if (vp9_rb_read_bit(rb)) { 1.1101 + // show an existing frame directly 1.1102 + int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)]; 1.1103 + ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->new_fb_idx, frame_to_show); 1.1104 + pbi->refresh_frame_flags = 0; 1.1105 + cm->lf.filter_level = 0; 1.1106 + return 0; 1.1107 + } 1.1108 + 1.1109 + cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb); 1.1110 + cm->show_frame = vp9_rb_read_bit(rb); 1.1111 + cm->error_resilient_mode = vp9_rb_read_bit(rb); 1.1112 + 1.1113 + if (cm->frame_type == 
KEY_FRAME) {
    check_sync_code(cm, rb);

    // Key frames carry the full color/subsampling description.
    cm->color_space = vp9_rb_read_literal(rb, 3);  // colorspace
    if (cm->color_space != SRGB) {
      vp9_rb_read_bit(rb);  // [16,235] (including xvycc) vs [0,255] range
      if (cm->version == 1) {
        cm->subsampling_x = vp9_rb_read_bit(rb);
        cm->subsampling_y = vp9_rb_read_bit(rb);
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        // version 0: 4:2:0 subsampling is implied
        cm->subsampling_y = cm->subsampling_x = 1;
      }
    } else {
      if (cm->version == 1) {
        cm->subsampling_y = cm->subsampling_x = 0;
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "RGB not supported in profile 0");
      }
    }

    // A key frame refreshes every reference slot.
    pbi->refresh_frame_flags = (1 << NUM_REF_FRAMES) - 1;

    for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
      cm->active_ref_idx[i] = cm->new_fb_idx;

    setup_frame_size(pbi, rb);
  } else {
    cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);

    cm->reset_frame_context = cm->error_resilient_mode ?
        0 : vp9_rb_read_literal(rb, 2);

    if (cm->intra_only) {
      check_sync_code(cm, rb);

      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
      setup_frame_size(pbi, rb);
    } else {
      // Inter frame: read which reference buffers this frame uses and the
      // per-reference motion-vector sign bias.
      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);

      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
        const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LOG2);
        cm->active_ref_idx[i] = cm->ref_frame_map[ref];
        cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
      }

      setup_frame_size_with_refs(pbi, rb);

      cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
      cm->mcomp_filter_type = read_interp_filter_type(rb);

      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
        vp9_setup_scale_factors(cm, i);
    }
  }

  if (!cm->error_resilient_mode) {
    cm->refresh_frame_context = vp9_rb_read_bit(rb);
    cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
  } else {
    // Error-resilient streams never carry these bits.
    cm->refresh_frame_context = 0;
    cm->frame_parallel_decoding_mode = 1;
  }

  // This flag will be overridden by the call to vp9_setup_past_independence
  // below, forcing the use of context 0 for those frame types.
  cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LOG2);

  if (frame_is_intra_only(cm) || cm->error_resilient_mode)
    vp9_setup_past_independence(cm);

  setup_loopfilter(&cm->lf, rb);
  setup_quantization(cm, &pbi->mb, rb);
  setup_segmentation(&cm->seg, rb);

  setup_tile_info(cm, rb);
  sz = vp9_rb_read_literal(rb, 16);

  if (sz == 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid header size");

  return sz;
}

// Parses the compressed (bool-coded) header: transform mode, coefficient
// probability updates and, for inter frames, mode/interp/MV probability
// updates. Returns nonzero if the bool decoder ran past its buffer.
static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data,
                                  size_t partition_size) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_reader r;
  int k;

  if (vp9_reader_init(&r, data, partition_size))
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder 0");

  // Lossless frames always use the 4x4 transform; otherwise it is coded.
  cm->tx_mode = xd->lossless ?
ONLY_4X4 : read_tx_mode(&r); 1.1214 + if (cm->tx_mode == TX_MODE_SELECT) 1.1215 + read_tx_probs(&fc->tx_probs, &r); 1.1216 + read_coef_probs(fc, cm->tx_mode, &r); 1.1217 + 1.1218 + for (k = 0; k < MBSKIP_CONTEXTS; ++k) 1.1219 + vp9_diff_update_prob(&r, &fc->mbskip_probs[k]); 1.1220 + 1.1221 + if (!frame_is_intra_only(cm)) { 1.1222 + nmv_context *const nmvc = &fc->nmvc; 1.1223 + int i, j; 1.1224 + 1.1225 + read_inter_mode_probs(fc, &r); 1.1226 + 1.1227 + if (cm->mcomp_filter_type == SWITCHABLE) 1.1228 + read_switchable_interp_probs(fc, &r); 1.1229 + 1.1230 + for (i = 0; i < INTRA_INTER_CONTEXTS; i++) 1.1231 + vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]); 1.1232 + 1.1233 + read_comp_pred(cm, &r); 1.1234 + 1.1235 + for (j = 0; j < BLOCK_SIZE_GROUPS; j++) 1.1236 + for (i = 0; i < INTRA_MODES - 1; ++i) 1.1237 + vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]); 1.1238 + 1.1239 + for (j = 0; j < PARTITION_CONTEXTS; ++j) 1.1240 + for (i = 0; i < PARTITION_TYPES - 1; ++i) 1.1241 + vp9_diff_update_prob(&r, &fc->partition_prob[j][i]); 1.1242 + 1.1243 + read_mv_probs(nmvc, cm->allow_high_precision_mv, &r); 1.1244 + } 1.1245 + 1.1246 + return vp9_reader_has_error(&r); 1.1247 +} 1.1248 + 1.1249 +void vp9_init_dequantizer(VP9_COMMON *cm) { 1.1250 + int q; 1.1251 + 1.1252 + for (q = 0; q < QINDEX_RANGE; q++) { 1.1253 + cm->y_dequant[q][0] = vp9_dc_quant(q, cm->y_dc_delta_q); 1.1254 + cm->y_dequant[q][1] = vp9_ac_quant(q, 0); 1.1255 + 1.1256 + cm->uv_dequant[q][0] = vp9_dc_quant(q, cm->uv_dc_delta_q); 1.1257 + cm->uv_dequant[q][1] = vp9_ac_quant(q, cm->uv_ac_delta_q); 1.1258 + } 1.1259 +} 1.1260 + 1.1261 +#ifdef NDEBUG 1.1262 +#define debug_check_frame_counts(cm) (void)0 1.1263 +#else // !NDEBUG 1.1264 +// Counts should only be incremented when frame_parallel_decoding_mode and 1.1265 +// error_resilient_mode are disabled. 
// Asserts that every symbol count in cm->counts is still zero. Counts must
// not be accumulated on frames where probability adaptation is disabled
// (frame-parallel decoding or error resilience enabled).
static void debug_check_frame_counts(const VP9_COMMON *const cm) {
  FRAME_COUNTS zero_counts;
  vp9_zero(zero_counts);
  assert(cm->frame_parallel_decoding_mode || cm->error_resilient_mode);
  assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
                 sizeof(cm->counts.y_mode)));
  assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
                 sizeof(cm->counts.uv_mode)));
  assert(!memcmp(cm->counts.partition, zero_counts.partition,
                 sizeof(cm->counts.partition)));
  assert(!memcmp(cm->counts.coef, zero_counts.coef,
                 sizeof(cm->counts.coef)));
  assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
                 sizeof(cm->counts.eob_branch)));
  assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
                 sizeof(cm->counts.switchable_interp)));
  assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
                 sizeof(cm->counts.inter_mode)));
  assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
                 sizeof(cm->counts.intra_inter)));
  assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
                 sizeof(cm->counts.comp_inter)));
  assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
                 sizeof(cm->counts.single_ref)));
  assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
                 sizeof(cm->counts.comp_ref)));
  assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
  assert(!memcmp(cm->counts.mbskip, zero_counts.mbskip,
                 sizeof(cm->counts.mbskip)));
  assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
}
#endif // NDEBUG

// Decodes one frame from pbi->source. On return, *p_data_end points just
// past the last byte consumed. Returns 0 on success; -1 when the stream has
// not yet produced a key frame and this frame is not one.
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
  int i;
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  const uint8_t *data = pbi->source;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;

  // NOTE: the initializer order below matters — read_uncompressed_header()
  // consumes bits from 'rb' and updates cm before the following fields are
  // derived from cm.
  struct vp9_read_bit_buffer rb = { data, data_end, 0, cm, error_handler };
  const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
  const int keyframe = cm->frame_type == KEY_FRAME;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int tile_cols = 1 << cm->log2_tile_cols;
  YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);

  if (!first_partition_size) {
    // showing a frame directly
    *p_data_end = data + 1;
    return 0;
  }

  if (!pbi->decoded_key_frame && !keyframe)
    return -1;

  data += vp9_rb_bytes_read(&rb);
  if (!read_is_valid(data, first_partition_size, data_end))
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt header length");

  // Inline loop filtering is only possible with a single tile.
  pbi->do_loopfilter_inline =
      (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
  if (pbi->do_loopfilter_inline && pbi->lf_worker.data1 == NULL) {
    CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, vpx_malloc(sizeof(LFWorkerData)));
    pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
    if (pbi->oxcf.max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Loop filter thread creation failed");
    }
  }

  alloc_tile_storage(pbi, tile_rows, tile_cols);

  xd->mode_info_stride = cm->mode_info_stride;
  set_prev_mi(cm);

  setup_plane_dequants(cm, xd, cm->base_qindex);
  setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y);

  // Start from the saved probability context selected by the header, with
  // all symbol counts cleared.
  cm->fc = cm->frame_contexts[cm->frame_context_idx];
  vp9_zero(cm->counts);
  for (i = 0; i < MAX_MB_PLANE; ++i)
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));

  xd->corrupted = 0;
  new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);

  // TODO(jzern): remove frame_parallel_decoding_mode restriction for
  // single-frame tile decoding.
  if (pbi->oxcf.max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
      cm->frame_parallel_decoding_mode) {
    *p_data_end = decode_tiles_mt(pbi, data + first_partition_size);
  } else {
    *p_data_end = decode_tiles(pbi, data + first_partition_size);
  }

  cm->last_width = cm->width;
  cm->last_height = cm->height;

  new_fb->corrupted |= xd->corrupted;

  if (!pbi->decoded_key_frame) {
    if (keyframe && !new_fb->corrupted)
      pbi->decoded_key_frame = 1;
    else
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "A stream must start with a complete key frame");
  }

  // Backward adaptation of probabilities, unless this frame's mode forbids
  // it (in which case the counts must still be zero — checked in debug).
  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
    vp9_adapt_coef_probs(cm);

    if (!frame_is_intra_only(cm)) {
      vp9_adapt_mode_probs(cm);
      vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
    }
  } else {
    debug_check_frame_counts(cm);
  }

  // Persist the (possibly adapted) context for future frames if requested.
  if (cm->refresh_frame_context)
    cm->frame_contexts[cm->frame_context_idx] = cm->fc;

  return 0;
}