/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"

#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_decodframe.h"
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_treereader.h"

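// Reads an intra prediction mode from the bitstream by walking the intra
// mode tree with the given per-context probabilities.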
static MB_PREDICTION_MODE read_intra_mode(vp9_reader *r, const vp9_prob *p) {
  return (MB_PREDICTION_MODE)treed_read(r, vp9_intra_mode_tree, p);
}

static MB_PREDICTION_MODE read_intra_mode_y(VP9_COMMON *cm, vp9_reader *r,
                                            int size_group) {
  const MB_PREDICTION_MODE y_mode = read_intra_mode(r,
                                        cm->fc.y_mode_prob[size_group]);
  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.y_mode[size_group][y_mode];
  return y_mode;
}

static MB_PREDICTION_MODE read_intra_mode_uv(VP9_COMMON *cm, vp9_reader *r,
                                             MB_PREDICTION_MODE y_mode) {
  const MB_PREDICTION_MODE uv_mode = read_intra_mode(r,
                                         cm->fc.uv_mode_prob[y_mode]);
  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.uv_mode[y_mode][uv_mode];
  return uv_mode;
}

static MB_PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, vp9_reader *r,
                                          int ctx) {
  const int mode = treed_read(r, vp9_inter_mode_tree,
                              cm->fc.inter_mode_probs[ctx]);
  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.inter_mode[ctx][mode];

  return NEARESTMV + mode;
}

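// Reads an explicitly coded segment ID using the segmentation tree
// probabilities sent in the frame header.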
static int read_segment_id(vp9_reader *r, const struct segmentation *seg) {
  return treed_read(r, vp9_segment_tree, seg->tree_probs);
}

static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
                                     TX_SIZE max_tx_size, vp9_reader *r) {
  const int ctx = vp9_get_pred_context_tx_size(xd);
  const vp9_prob *tx_probs = get_tx_probs(max_tx_size, ctx, &cm->fc.tx_probs);
  TX_SIZE tx_size = vp9_read(r, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    tx_size += vp9_read(r, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      tx_size += vp9_read(r, tx_probs[2]);
  }

  if (!cm->frame_parallel_decoding_mode)
    ++get_tx_counts(max_tx_size, ctx, &cm->counts.tx)[tx_size];
  return tx_size;
}

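// Returns the transform size for the current block: either coded in the
// bitstream (TX_MODE_SELECT on 8x8-or-larger blocks) or implied by the
// frame-level transform mode.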
static TX_SIZE read_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd, TX_MODE tx_mode,
                            BLOCK_SIZE bsize, int allow_select, vp9_reader *r) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8)
    return read_selected_tx_size(cm, xd, max_tx_size, r);
  else
    return MIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]);
}

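// Writes the segment ID into every 8x8 position covered by the block,
// clamped so that blocks straddling the frame edge do not write past the
// end of the segmentation map.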
static void set_segment_id(VP9_COMMON *cm, BLOCK_SIZE bsize,
                           int mi_row, int mi_col, int segment_id) {
  const int mi_offset = mi_row * cm->mi_cols + mi_col;
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int xmis = MIN(cm->mi_cols - mi_col, bw);
  const int ymis = MIN(cm->mi_rows - mi_row, bh);
  int x, y;

  assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);

  for (y = 0; y < ymis; y++)
    for (x = 0; x < xmis; x++)
      cm->last_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
}

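// Decodes the segment ID for a block in an intra-only frame; it is always
// coded explicitly, since there is no previous segmentation map to predict
// from.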
static int read_intra_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                 int mi_row, int mi_col,
                                 vp9_reader *r) {
  struct segmentation *const seg = &cm->seg;
  const BLOCK_SIZE bsize = xd->mi_8x8[0]->mbmi.sb_type;
  int segment_id;

  if (!seg->enabled)
    return 0;  // Default for disabled segmentation

  if (!seg->update_map)
    return 0;

  segment_id = read_segment_id(r, seg);
  set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
  return segment_id;
}

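// Decodes the segment ID for a block in an inter frame. With temporal
// updates enabled, a coded flag selects between the ID predicted from the
// previous frame's map and an explicitly coded one.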
static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                 int mi_row, int mi_col, vp9_reader *r) {
  struct segmentation *const seg = &cm->seg;
  const BLOCK_SIZE bsize = xd->mi_8x8[0]->mbmi.sb_type;
  int pred_segment_id, segment_id;

  if (!seg->enabled)
    return 0;  // Default for disabled segmentation

  pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
                                       bsize, mi_row, mi_col);
  if (!seg->update_map)
    return pred_segment_id;

  if (seg->temporal_update) {
    const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
    const int pred_flag = vp9_read(r, pred_prob);
    vp9_set_pred_flag_seg_id(xd, pred_flag);
    segment_id = pred_flag ? pred_segment_id
                           : read_segment_id(r, seg);
  } else {
    segment_id = read_segment_id(r, seg);
  }
  set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
  return segment_id;
}

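// Reads the skip flag. Blocks in a segment with the SEG_LVL_SKIP feature
// active are skipped unconditionally; otherwise the flag is coded with a
// context-dependent probability.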
static int read_skip_coeff(VP9_COMMON *cm, const MACROBLOCKD *xd,
                           int segment_id, vp9_reader *r) {
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int ctx = vp9_get_pred_context_mbskip(xd);
    const int skip = vp9_read(r, cm->fc.mbskip_probs[ctx]);
    if (!cm->frame_parallel_decoding_mode)
      ++cm->counts.mbskip[ctx][skip];
    return skip;
  }
}

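// Decodes mode info for a block in an intra-only (e.g. key) frame. The y
// modes here are coded with probabilities conditioned on the above and
// left neighbors' modes; sub-8x8 blocks code one mode per 4x4 sub-block.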
static void read_intra_frame_mode_info(VP9_COMMON *const cm,
                                       MACROBLOCKD *const xd,
                                       MODE_INFO *const m,
                                       int mi_row, int mi_col, vp9_reader *r) {
  MB_MODE_INFO *const mbmi = &m->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const MODE_INFO *above_mi = xd->mi_8x8[-cm->mode_info_stride];
  const MODE_INFO *left_mi = xd->left_available ? xd->mi_8x8[-1] : NULL;

  mbmi->segment_id = read_intra_segment_id(cm, xd, mi_row, mi_col, r);
  mbmi->skip_coeff = read_skip_coeff(cm, xd, mbmi->segment_id, r);
  mbmi->tx_size = read_tx_size(cm, xd, cm->tx_mode, bsize, 1, r);
  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->ref_frame[1] = NONE;

  if (bsize >= BLOCK_8X8) {
    const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
    const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0);
    mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]);
  } else {
    // Only 4x4, 4x8, 8x4 blocks
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];  // 1 or 2
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];  // 1 or 2
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int ib = idy * 2 + idx;
        const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, ib);
        const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, ib);
        const MB_PREDICTION_MODE b_mode = read_intra_mode(r,
                                              vp9_kf_y_mode_prob[A][L]);
        m->bmi[ib].as_mode = b_mode;
        if (num_4x4_h == 2)
          m->bmi[ib + 2].as_mode = b_mode;
        if (num_4x4_w == 2)
          m->bmi[ib + 1].as_mode = b_mode;
      }
    }

    mbmi->mode = m->bmi[3].as_mode;
  }

  mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]);
}

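// Decodes one motion vector component: sign, magnitude class, integer
// offset bits, fractional (1/4-pel) part and, when enabled, the
// high-precision (1/8-pel) bit.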
static int read_mv_component(vp9_reader *r,
                             const nmv_component *mvcomp, int usehp) {
  int mag, d, fr, hp;
  const int sign = vp9_read(r, mvcomp->sign);
  const int mv_class = treed_read(r, vp9_mv_class_tree, mvcomp->classes);
  const int class0 = mv_class == MV_CLASS_0;

  // Integer part
  if (class0) {
    d = treed_read(r, vp9_mv_class0_tree, mvcomp->class0);
  } else {
    int i;
    const int n = mv_class + CLASS0_BITS - 1;  // number of bits

    d = 0;
    for (i = 0; i < n; ++i)
      d |= vp9_read(r, mvcomp->bits[i]) << i;
  }

  // Fractional part
  fr = treed_read(r, vp9_mv_fp_tree,
                  class0 ? mvcomp->class0_fp[d] : mvcomp->fp);

  // High precision part (if hp is not used, it defaults to 1)
  hp = usehp ? vp9_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
             : 1;

  // Result
  mag = vp9_get_mv_mag(mv_class, (d << 3) | (fr << 1) | hp) + 1;
  return sign ? -mag : mag;
}

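// Reads a full motion vector as a joint code plus per-component offsets
// from the reference MV, and accumulates the difference into the MV
// entropy counts.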
static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref,
                           const nmv_context *ctx,
                           nmv_context_counts *counts, int allow_hp) {
  const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints);
  const int use_hp = allow_hp && vp9_use_mv_hp(ref);
  MV diff = {0, 0};

  if (mv_joint_vertical(j))
    diff.row = read_mv_component(r, &ctx->comps[0], use_hp);

  if (mv_joint_horizontal(j))
    diff.col = read_mv_component(r, &ctx->comps[1], use_hp);

  vp9_inc_mv(&diff, counts);

  mv->row = ref->row + diff.row;
  mv->col = ref->col + diff.col;
}

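// In HYBRID_PREDICTION frames, reads whether this block uses single or
// compound prediction.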
static COMPPREDMODE_TYPE read_reference_mode(VP9_COMMON *cm,
                                             const MACROBLOCKD *xd,
                                             vp9_reader *r) {
  const int ctx = vp9_get_pred_context_comp_inter_inter(cm, xd);
  const int mode = vp9_read(r, cm->fc.comp_inter_prob[ctx]);
  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.comp_inter[ctx][mode];
  return mode;  // SINGLE_PREDICTION_ONLY or COMP_PREDICTION_ONLY
}

// Read the reference frame(s)
static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            vp9_reader *r,
                            int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
  FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;

  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    ref_frame[0] = vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
    ref_frame[1] = NONE;
  } else {
    const COMPPREDMODE_TYPE mode = (cm->comp_pred_mode == HYBRID_PREDICTION)
                                       ? read_reference_mode(cm, xd, r)
                                       : cm->comp_pred_mode;

    // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
    if (mode == COMP_PREDICTION_ONLY) {
      const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
      const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
      const int bit = vp9_read(r, fc->comp_ref_prob[ctx]);
      if (!cm->frame_parallel_decoding_mode)
        ++counts->comp_ref[ctx][bit];
      ref_frame[idx] = cm->comp_fixed_ref;
      ref_frame[!idx] = cm->comp_var_ref[bit];
    } else if (mode == SINGLE_PREDICTION_ONLY) {
      const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
      const int bit0 = vp9_read(r, fc->single_ref_prob[ctx0][0]);
      if (!cm->frame_parallel_decoding_mode)
        ++counts->single_ref[ctx0][0][bit0];
      if (bit0) {
        const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
        const int bit1 = vp9_read(r, fc->single_ref_prob[ctx1][1]);
        if (!cm->frame_parallel_decoding_mode)
          ++counts->single_ref[ctx1][1][bit1];
        ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
      } else {
        ref_frame[0] = LAST_FRAME;
      }

      ref_frame[1] = NONE;
    } else {
      assert(!"Invalid prediction mode.");
    }
  }
}

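// Reads the interpolation filter for a block when the frame-level filter
// is SWITCHABLE.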
static INLINE INTERPOLATION_TYPE read_switchable_filter_type(
    VP9_COMMON *const cm, MACROBLOCKD *const xd, vp9_reader *r) {
  const int ctx = vp9_get_pred_context_switchable_interp(xd);
  const int type = treed_read(r, vp9_switchable_interp_tree,
                              cm->fc.switchable_interp_prob[ctx]);
  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.switchable_interp[ctx][type];
  return type;
}

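// Decodes intra mode info for a block in an inter frame; unlike key
// frames, the y modes here use the adaptively updated frame context
// (cm->fc) rather than neighbor-conditioned key-frame probabilities.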
static void read_intra_block_mode_info(VP9_COMMON *const cm, MODE_INFO *mi,
                                       vp9_reader *r) {
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->ref_frame[1] = NONE;

  if (bsize >= BLOCK_8X8) {
    mbmi->mode = read_intra_mode_y(cm, r, size_group_lookup[bsize]);
  } else {
    // Only 4x4, 4x8, 8x4 blocks
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];  // 1 or 2
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];  // 1 or 2
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int ib = idy * 2 + idx;
        const int b_mode = read_intra_mode_y(cm, r, 0);
        mi->bmi[ib].as_mode = b_mode;
        if (num_4x4_h == 2)
          mi->bmi[ib + 2].as_mode = b_mode;
        if (num_4x4_w == 2)
          mi->bmi[ib + 1].as_mode = b_mode;
      }
    }
    mbmi->mode = mi->bmi[3].as_mode;
  }

  mbmi->uv_mode = read_intra_mode_uv(cm, r, mbmi->mode);
}

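// Fills in the motion vector(s) for the given inter mode. For NEWMV the
// residual is decoded from the bitstream; returns 0 if the mode is invalid
// or a decoded MV falls outside the legal (MV_LOW, MV_UPP) range.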
static INLINE int assign_mv(VP9_COMMON *cm, MB_PREDICTION_MODE mode,
                            int_mv mv[2], int_mv best_mv[2],
                            int_mv nearest_mv[2], int_mv near_mv[2],
                            int is_compound, int allow_hp, vp9_reader *r) {
  int i;
  int ret = 1;

  switch (mode) {
    case NEWMV: {
      nmv_context_counts *const mv_counts = cm->frame_parallel_decoding_mode ?
                                            NULL : &cm->counts.mv;
      read_mv(r, &mv[0].as_mv, &best_mv[0].as_mv,
              &cm->fc.nmvc, mv_counts, allow_hp);
      if (is_compound)
        read_mv(r, &mv[1].as_mv, &best_mv[1].as_mv,
                &cm->fc.nmvc, mv_counts, allow_hp);
      for (i = 0; i < 1 + is_compound; ++i) {
        ret = ret && mv[i].as_mv.row < MV_UPP && mv[i].as_mv.row > MV_LOW;
        ret = ret && mv[i].as_mv.col < MV_UPP && mv[i].as_mv.col > MV_LOW;
      }
      break;
    }
    case NEARESTMV: {
      mv[0].as_int = nearest_mv[0].as_int;
      if (is_compound) mv[1].as_int = nearest_mv[1].as_int;
      break;
    }
    case NEARMV: {
      mv[0].as_int = near_mv[0].as_int;
      if (is_compound) mv[1].as_int = near_mv[1].as_int;
      break;
    }
    case ZEROMV: {
      mv[0].as_int = 0;
      if (is_compound) mv[1].as_int = 0;
      break;
    }
    default: {
      return 0;
    }
  }
  return ret;
}

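// Determines whether the block is inter coded: forced by the segment's
// reference-frame feature when active, otherwise read from the bitstream.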
static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               int segment_id, vp9_reader *r) {
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    return vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) !=
           INTRA_FRAME;
  } else {
    const int ctx = vp9_get_pred_context_intra_inter(xd);
    const int is_inter = vp9_read(r, vp9_get_pred_prob_intra_inter(cm, xd));
    if (!cm->frame_parallel_decoding_mode)
      ++cm->counts.intra_inter[ctx][is_inter];
    return is_inter;
  }
}

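// Decodes the inter mode info for a block: reference frame(s), candidate
// MV list, prediction mode, interpolation filter, and the final motion
// vector(s) (coded per 4x4 sub-block for sizes below 8x8).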
static void read_inter_block_mode_info(VP9_COMMON *const cm,
                                       MACROBLOCKD *const xd,
                                       const TileInfo *const tile,
                                       MODE_INFO *const mi,
                                       int mi_row, int mi_col, vp9_reader *r) {
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;

  int_mv nearest[2], nearmv[2], best[2];
  uint8_t inter_mode_ctx;
  MV_REFERENCE_FRAME ref0;
  int is_compound;

  mbmi->uv_mode = DC_PRED;
  read_ref_frames(cm, xd, r, mbmi->segment_id, mbmi->ref_frame);
  ref0 = mbmi->ref_frame[0];
  is_compound = has_second_ref(mbmi);

  vp9_find_mv_refs(cm, xd, tile, mi, xd->last_mi, ref0, mbmi->ref_mvs[ref0],
                   mi_row, mi_col);

  inter_mode_ctx = mbmi->mode_context[ref0];

  if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    mbmi->mode = ZEROMV;
    if (bsize < BLOCK_8X8) {
      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                         "Invalid usage of segment feature on small blocks");
      return;
    }
  } else {
    if (bsize >= BLOCK_8X8)
      mbmi->mode = read_inter_mode(cm, r, inter_mode_ctx);
  }

  // nearest, nearby
  if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
    vp9_find_best_ref_mvs(xd, allow_hp,
                          mbmi->ref_mvs[ref0], &nearest[0], &nearmv[0]);
    best[0].as_int = nearest[0].as_int;
  }

  if (is_compound) {
    const MV_REFERENCE_FRAME ref1 = mbmi->ref_frame[1];
    vp9_find_mv_refs(cm, xd, tile, mi, xd->last_mi,
                     ref1, mbmi->ref_mvs[ref1], mi_row, mi_col);

    if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
      vp9_find_best_ref_mvs(xd, allow_hp,
                            mbmi->ref_mvs[ref1], &nearest[1], &nearmv[1]);
      best[1].as_int = nearest[1].as_int;
    }
  }

  mbmi->interp_filter = (cm->mcomp_filter_type == SWITCHABLE)
                            ? read_switchable_filter_type(cm, xd, r)
                            : cm->mcomp_filter_type;

  if (bsize < BLOCK_8X8) {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];  // 1 or 2
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];  // 1 or 2
    int idx, idy;
    int b_mode;
    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        int_mv block[2];
        const int j = idy * 2 + idx;
        b_mode = read_inter_mode(cm, r, inter_mode_ctx);

        if (b_mode == NEARESTMV || b_mode == NEARMV) {
          vp9_append_sub8x8_mvs_for_idx(cm, xd, tile, &nearest[0],
                                        &nearmv[0], j, 0,
                                        mi_row, mi_col);

          if (is_compound)
            vp9_append_sub8x8_mvs_for_idx(cm, xd, tile, &nearest[1],
                                          &nearmv[1], j, 1,
                                          mi_row, mi_col);
        }

        if (!assign_mv(cm, b_mode, block, best, nearest, nearmv,
                       is_compound, allow_hp, r)) {
          xd->corrupted |= 1;
          break;
        }

        mi->bmi[j].as_mv[0].as_int = block[0].as_int;
        if (is_compound)
          mi->bmi[j].as_mv[1].as_int = block[1].as_int;

        if (num_4x4_h == 2)
          mi->bmi[j + 2] = mi->bmi[j];
        if (num_4x4_w == 2)
          mi->bmi[j + 1] = mi->bmi[j];
      }
    }

    mi->mbmi.mode = b_mode;

    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  } else {
    xd->corrupted |= !assign_mv(cm, mbmi->mode, mbmi->mv,
                                best, nearest, nearmv,
                                is_compound, allow_hp, r);
  }
}

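// Decodes mode info for a block in an inter frame and dispatches to the
// intra or inter block reader.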
static void read_inter_frame_mode_info(VP9_COMMON *const cm,
                                       MACROBLOCKD *const xd,
                                       const TileInfo *const tile,
                                       MODE_INFO *const mi,
                                       int mi_row, int mi_col, vp9_reader *r) {
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  int inter_block;

  mbmi->mv[0].as_int = 0;
  mbmi->mv[1].as_int = 0;
  mbmi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r);
  mbmi->skip_coeff = read_skip_coeff(cm, xd, mbmi->segment_id, r);
  inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r);
  mbmi->tx_size = read_tx_size(cm, xd, cm->tx_mode, mbmi->sb_type,
                               !mbmi->skip_coeff || !inter_block, r);

  if (inter_block)
    read_inter_block_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);
  else
    read_intra_block_mode_info(cm, mi, r);
}

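// Entry point: decodes the mode info for the block at (mi_row, mi_col)
// and replicates the MODE_INFO pointer across every 8x8 grid position
// the block covers.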
void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
                        const TileInfo *const tile,
                        int mi_row, int mi_col, vp9_reader *r) {
  MODE_INFO *const mi = xd->mi_8x8[0];
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  int x, y, z;

  if (frame_is_intra_only(cm))
    read_intra_frame_mode_info(cm, xd, mi, mi_row, mi_col, r);
  else
    read_inter_frame_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);

  for (y = 0, z = 0; y < y_mis; y++, z += cm->mode_info_stride) {
    for (x = !y; x < x_mis; x++) {
      xd->mi_8x8[z + x] = mi;
    }
  }
}