--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/media/libvpx/vp8/decoder/decodemv.c Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "treereader.h"
+#include "vp8/common/entropymv.h"
+#include "vp8/common/entropymode.h"
+#include "onyxd_int.h"
+#include "vp8/common/findnearmv.h"
+
+#if CONFIG_DEBUG
+#include <assert.h>
+#endif
+static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p)
+{
+    const int i = vp8_treed_read(bc, vp8_bmode_tree, p);
+
+    return (B_PREDICTION_MODE)i;
+}
+
+static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p)
+{
+    const int i = vp8_treed_read(bc, vp8_ymode_tree, p);
+
+    return (MB_PREDICTION_MODE)i;
+}
+
+static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p)
+{
+    const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p);
+
+    return (MB_PREDICTION_MODE)i;
+}
+
+static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p)
+{
+    const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p);
+
+    return (MB_PREDICTION_MODE)i;
+}
+
+static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi)
+{
+    vp8_reader *const bc = & pbi->mbc[8];
+    const int mis = pbi->common.mode_info_stride;
+
+    mi->mbmi.ref_frame = INTRA_FRAME;
+    mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob);
+
+    if (mi->mbmi.mode == B_PRED)
+    {
+        int i = 0;
+        mi->mbmi.is_4x4 = 1;
+
+        do
+        {
+            const B_PREDICTION_MODE A = above_block_mode(mi, i, mis);
+            const B_PREDICTION_MODE L = left_block_mode(mi, i);
+
+            mi->bmi[i].as_mode =
+                read_bmode(bc, vp8_kf_bmode_prob [A] [L]);
+        }
+        while (++i < 16);
+    }
+
+    mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob);
+}
+
+static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc)
+{
+    const vp8_prob *const p = (const vp8_prob *) mvc;
+    int x = 0;
+
+    if (vp8_read(r, p [mvpis_short]))  /* Large */
+    {
+        int i = 0;
+
+        do
+        {
+            x += vp8_read(r, p [MVPbits + i]) << i;
+        }
+        while (++i < 3);
+
+        i = mvlong_width - 1;  /* Skip bit 3, which is sometimes implicit */
+
+        do
+        {
+            x += vp8_read(r, p [MVPbits + i]) << i;
+        }
+        while (--i > 3);
+
+        if (!(x & 0xFFF0) || vp8_read(r, p [MVPbits + 3]))
+            x += 8;
+    }
+    else   /* small */
+        x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
+
+    if (x && vp8_read(r, p [MVPsign]))
+        x = -x;
+
+    return x;
+}
+
+static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc)
+{
+    mv->row = (short)(read_mvcomponent(r, mvc) * 2);
+    mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
+}
+
+
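+/* Read this frame's updates to the two MV component probability tables
+ * (row component first, then column component). Each entry is replaced
+ * only when its update flag is set, in which case a 7-bit literal is read
+ * and doubled (with zero mapping to 1) to form the new probability. */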
+static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc)
+{
+    int i = 0;
+
+    do
+    {
+        const vp8_prob *up = vp8_mv_update_probs[i].prob;
+        vp8_prob *p = (vp8_prob *)(mvc + i);
+        vp8_prob *const pstop = p + MVPcount;
+
+        do
+        {
+            if (vp8_read(bc, *up++))
+            {
+                const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);
+
+                *p = x ? x << 1 : 1;
+            }
+        }
+        while (++p < pstop);
+    }
+    while (++i < 2);
+}
+
+static const unsigned char mbsplit_fill_count[4] = {8, 8, 4, 1};
+static const unsigned char mbsplit_fill_offset[4][16] = {
+    { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15},
+    { 0,  1,  4,  5,  8,  9, 12, 13,  2,  3,  6,  7, 10, 11, 14, 15},
+    { 0,  1,  4,  5,  2,  3,  6,  7,  8,  9, 12, 13, 10, 11, 14, 15},
+    { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15}
+};
+
+
+static void mb_mode_mv_init(VP8D_COMP *pbi)
+{
+    vp8_reader *const bc = & pbi->mbc[8];
+    MV_CONTEXT *const mvc = pbi->common.fc.mvc;
+
+#if CONFIG_ERROR_CONCEALMENT
+    /* Default is that no macroblock is corrupt, therefore we initialize
+     * mvs_corrupt_from_mb to something very big, which we can be sure is
+     * outside the frame. */
+    pbi->mvs_corrupt_from_mb = UINT_MAX;
+#endif
+    /* Read the mb_no_coeff_skip flag */
+    pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc);
+
+    pbi->prob_skip_false = 0;
+    if (pbi->common.mb_no_coeff_skip)
+        pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
+
+    if(pbi->common.frame_type != KEY_FRAME)
+    {
+        pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
+        pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8);
+        pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8);
+
+        if (vp8_read_bit(bc))
+        {
+            int i = 0;
+
+            do
+            {
+                pbi->common.fc.ymode_prob[i] =
+                    (vp8_prob) vp8_read_literal(bc, 8);
+            }
+            while (++i < 4);
+        }
+
+        if (vp8_read_bit(bc))
+        {
+            int i = 0;
+
+            do
+            {
+                pbi->common.fc.uv_mode_prob[i] =
+                    (vp8_prob) vp8_read_literal(bc, 8);
+            }
+            while (++i < 3);
+        }
+
+        read_mvcontexts(bc, mvc);
+    }
+}
+
+const vp8_prob vp8_sub_mv_ref_prob3 [8][VP8_SUBMVREFS-1] =
+{
+    { 147, 136, 18 },  /* SUBMVREF_NORMAL          */
+    { 223, 1  , 34 },  /* SUBMVREF_LEFT_ABOVE_SAME */
+    { 106, 145, 1  },  /* SUBMVREF_LEFT_ZED        */
+    { 208, 1  , 1  },  /* SUBMVREF_LEFT_ABOVE_ZED  */
+    { 179, 121, 1  },  /* SUBMVREF_ABOVE_ZED       */
+    { 223, 1  , 34 },  /* SUBMVREF_LEFT_ABOVE_SAME */
+    { 179, 121, 1  },  /* SUBMVREF_ABOVE_ZED       */
+    { 208, 1  , 1  }   /* SUBMVREF_LEFT_ABOVE_ZED  */
+};
+
+static
+const vp8_prob * get_sub_mv_ref_prob(const int left, const int above)
+{
+    int lez = (left == 0);
+    int aez = (above == 0);
+    int lea = (left == above);
+    const vp8_prob * prob;
+
+    prob = vp8_sub_mv_ref_prob3[(aez << 2) |
+                                (lez << 1) |
+                                (lea)];
+
+    return prob;
+}
+
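+/* Decode the motion vectors of a SPLITMV macroblock: read the partition
+ * layout (16x8, 8x16, 8x8 or 4x4), then for each partition pick the left
+ * neighbour's MV, the above neighbour's MV, the zero MV, or a newly coded
+ * MV offset from best_mv, using probabilities conditioned on the left and
+ * above sub-MVs (see get_sub_mv_ref_prob above). */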
+static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
+                            const MODE_INFO *left_mb, const MODE_INFO *above_mb,
+                            MB_MODE_INFO *mbmi, int_mv best_mv,
+                            MV_CONTEXT *const mvc, int mb_to_left_edge,
+                            int mb_to_right_edge, int mb_to_top_edge,
+                            int mb_to_bottom_edge)
+{
+    int s;      /* split configuration (16x8, 8x16, 8x8, 4x4) */
+    int num_p;  /* number of partitions in the split configuration
+                   (see vp8_mbsplit_count) */
+    int j = 0;
+
+    s = 3;
+    num_p = 16;
+    if( vp8_read(bc, 110) )
+    {
+        s = 2;
+        num_p = 4;
+        if( vp8_read(bc, 111) )
+        {
+            s = vp8_read(bc, 150);
+            num_p = 2;
+        }
+    }
+
+    do  /* for each subset j */
+    {
+        int_mv leftmv, abovemv;
+        int_mv blockmv;
+        int k;  /* first block in subset j */
+
+        const vp8_prob *prob;
+        k = vp8_mbsplit_offset[s][j];
+
+        if (!(k & 3))
+        {
+            /* On L edge, get from MB to left of us */
+            if(left_mb->mbmi.mode != SPLITMV)
+                leftmv.as_int = left_mb->mbmi.mv.as_int;
+            else
+                leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int;
+        }
+        else
+            leftmv.as_int = (mi->bmi + k - 1)->mv.as_int;
+
+        if (!(k >> 2))
+        {
+            /* On top edge, get from MB above us */
+            if(above_mb->mbmi.mode != SPLITMV)
+                abovemv.as_int = above_mb->mbmi.mv.as_int;
+            else
+                abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int;
+        }
+        else
+            abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;
+
+        prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int);
+
+        if( vp8_read(bc, prob[0]) )
+        {
+            if( vp8_read(bc, prob[1]) )
+            {
+                blockmv.as_int = 0;
+                if( vp8_read(bc, prob[2]) )
+                {
+                    blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
+                    blockmv.as_mv.row += best_mv.as_mv.row;
+                    blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
+                    blockmv.as_mv.col += best_mv.as_mv.col;
+                }
+            }
+            else
+            {
+                blockmv.as_int = abovemv.as_int;
+            }
+        }
+        else
+        {
+            blockmv.as_int = leftmv.as_int;
+        }
+
+        mbmi->need_to_clamp_mvs |= vp8_check_mv_bounds(&blockmv,
+                                                       mb_to_left_edge,
+                                                       mb_to_right_edge,
+                                                       mb_to_top_edge,
+                                                       mb_to_bottom_edge);
+
+        {
+            /* Fill (uniform) modes, mvs of jth subset.
+               Must do it here because ensuing subsets can
+               refer back to us via "left" or "above". */
+            const unsigned char *fill_offset;
+            unsigned int fill_count = mbsplit_fill_count[s];
+
+            fill_offset = &mbsplit_fill_offset[s]
+                             [(unsigned char)j * mbsplit_fill_count[s]];
+
+            do {
+                mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int;
+                fill_offset++;
+            }while (--fill_count);
+        }
+
+    }
+    while (++j < num_p);
+
+    mbmi->partitioning = s;
+}
+
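+/* Read the prediction mode and, for inter macroblocks, the motion vectors
+ * of one macroblock on an inter frame. The above, left and above-left
+ * neighbours are surveyed to build the near/nearest MV candidates and the
+ * counts that condition the inter-mode probabilities. */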
+static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi)
+{
+    vp8_reader *const bc = & pbi->mbc[8];
+    mbmi->ref_frame = (MV_REFERENCE_FRAME) vp8_read(bc, pbi->prob_intra);
+    if (mbmi->ref_frame)    /* inter MB */
+    {
+        enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV};
+        int cnt[4];
+        int *cntx = cnt;
+        int_mv near_mvs[4];
+        int_mv *nmv = near_mvs;
+        const int mis = pbi->mb.mode_info_stride;
+        const MODE_INFO *above = mi - mis;
+        const MODE_INFO *left = mi - 1;
+        const MODE_INFO *aboveleft = above - 1;
+        int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias;
+
+        mbmi->need_to_clamp_mvs = 0;
+
+        if (vp8_read(bc, pbi->prob_last))
+        {
+            mbmi->ref_frame =
+                (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf)));
+        }
+
+        /* Zero accumulators */
+        nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0;
+        cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;
+
+        /* Process above */
+        if (above->mbmi.ref_frame != INTRA_FRAME)
+        {
+            if (above->mbmi.mv.as_int)
+            {
+                (++nmv)->as_int = above->mbmi.mv.as_int;
+                mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame],
+                        mbmi->ref_frame, nmv, ref_frame_sign_bias);
+                ++cntx;
+            }
+
+            *cntx += 2;
+        }
+
+        /* Process left */
+        if (left->mbmi.ref_frame != INTRA_FRAME)
+        {
+            if (left->mbmi.mv.as_int)
+            {
+                int_mv this_mv;
+
+                this_mv.as_int = left->mbmi.mv.as_int;
+                mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame],
+                        mbmi->ref_frame, &this_mv, ref_frame_sign_bias);
+
+                if (this_mv.as_int != nmv->as_int)
+                {
+                    (++nmv)->as_int = this_mv.as_int;
+                    ++cntx;
+                }
+
+                *cntx += 2;
+            }
+            else
+                cnt[CNT_INTRA] += 2;
+        }
+
+        /* Process above left */
+        if (aboveleft->mbmi.ref_frame != INTRA_FRAME)
+        {
+            if (aboveleft->mbmi.mv.as_int)
+            {
+                int_mv this_mv;
+
+                this_mv.as_int = aboveleft->mbmi.mv.as_int;
+                mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame],
+                        mbmi->ref_frame, &this_mv, ref_frame_sign_bias);
+
+                if (this_mv.as_int != nmv->as_int)
+                {
+                    (++nmv)->as_int = this_mv.as_int;
+                    ++cntx;
+                }
+
+                *cntx += 1;
+            }
+            else
+                cnt[CNT_INTRA] += 1;
+        }
+
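+        /* The counts gathered above index vp8_mode_contexts: the nested
+         * reads that follow walk the inter-mode tree, settling on ZEROMV,
+         * NEARESTMV or NEARMV when a read returns zero, and otherwise
+         * ending in NEWMV or SPLITMV. */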
+        if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_INTRA]] [0]) )
+        {
+
+            /* If we have three distinct MV's ... */
+            /* See if above-left MV can be merged with NEAREST */
+            cnt[CNT_NEAREST] += ( (cnt[CNT_SPLITMV] > 0) &
+                (nmv->as_int == near_mvs[CNT_NEAREST].as_int));
+
+            /* Swap near and nearest if necessary */
+            if (cnt[CNT_NEAR] > cnt[CNT_NEAREST])
+            {
+                int tmp;
+                tmp = cnt[CNT_NEAREST];
+                cnt[CNT_NEAREST] = cnt[CNT_NEAR];
+                cnt[CNT_NEAR] = tmp;
+                tmp = near_mvs[CNT_NEAREST].as_int;
+                near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
+                near_mvs[CNT_NEAR].as_int = tmp;
+            }
+
+            if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_NEAREST]] [1]) )
+            {
+
+                if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_NEAR]] [2]) )
+                {
+                    int mb_to_top_edge;
+                    int mb_to_bottom_edge;
+                    int mb_to_left_edge;
+                    int mb_to_right_edge;
+                    MV_CONTEXT *const mvc = pbi->common.fc.mvc;
+                    int near_index;
+
+                    mb_to_top_edge = pbi->mb.mb_to_top_edge;
+                    mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge;
+                    mb_to_top_edge -= LEFT_TOP_MARGIN;
+                    mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
+                    mb_to_right_edge = pbi->mb.mb_to_right_edge;
+                    mb_to_right_edge += RIGHT_BOTTOM_MARGIN;
+                    mb_to_left_edge = pbi->mb.mb_to_left_edge;
+                    mb_to_left_edge -= LEFT_TOP_MARGIN;
+
+                    /* Use near_mvs[0] to store the "best" MV */
+                    near_index = CNT_INTRA +
+                        (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]);
+
+                    vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb);
+
+                    cnt[CNT_SPLITMV] = ((above->mbmi.mode == SPLITMV)
+                                        + (left->mbmi.mode == SPLITMV)) * 2
+                                       + (aboveleft->mbmi.mode == SPLITMV);
+
+                    if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_SPLITMV]] [3]) )
+                    {
+                        decode_split_mv(bc, mi, left, above,
+                                        mbmi,
+                                        near_mvs[near_index],
+                                        mvc, mb_to_left_edge,
+                                        mb_to_right_edge,
+                                        mb_to_top_edge,
+                                        mb_to_bottom_edge);
+                        mbmi->mv.as_int = mi->bmi[15].mv.as_int;
+                        mbmi->mode = SPLITMV;
+                        mbmi->is_4x4 = 1;
+                    }
+                    else
+                    {
+                        int_mv *const mbmi_mv = & mbmi->mv;
+                        read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *) mvc);
+                        mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row;
+                        mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col;
+
+                        /* Don't need to check this on NEARMV and NEARESTMV
+                         * modes since those modes clamp the MV. The NEWMV mode
+                         * does not, so signal to the prediction stage whether
+                         * special handling may be required.
+                         */
+                        mbmi->need_to_clamp_mvs =
+                            vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge,
+                                                mb_to_right_edge,
+                                                mb_to_top_edge,
+                                                mb_to_bottom_edge);
+                        mbmi->mode = NEWMV;
+                    }
+                }
+                else
+                {
+                    mbmi->mode = NEARMV;
+                    mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
+                    vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
+                }
+            }
+            else
+            {
+                mbmi->mode = NEARESTMV;
+                mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int;
+                vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
+            }
+        }
+        else
+        {
+            mbmi->mode = ZEROMV;
+            mbmi->mv.as_int = 0;
+        }
+
+#if CONFIG_ERROR_CONCEALMENT
+        if(pbi->ec_enabled && (mbmi->mode != SPLITMV))
+        {
+            mi->bmi[ 0].mv.as_int =
+            mi->bmi[ 1].mv.as_int =
+            mi->bmi[ 2].mv.as_int =
+            mi->bmi[ 3].mv.as_int =
+            mi->bmi[ 4].mv.as_int =
+            mi->bmi[ 5].mv.as_int =
+            mi->bmi[ 6].mv.as_int =
+            mi->bmi[ 7].mv.as_int =
+            mi->bmi[ 8].mv.as_int =
+            mi->bmi[ 9].mv.as_int =
+            mi->bmi[10].mv.as_int =
+            mi->bmi[11].mv.as_int =
+            mi->bmi[12].mv.as_int =
+            mi->bmi[13].mv.as_int =
+            mi->bmi[14].mv.as_int =
+            mi->bmi[15].mv.as_int = mbmi->mv.as_int;
+        }
+#endif
+    }
+    else
+    {
+        /* required for left and above block mv */
+        mbmi->mv.as_int = 0;
+
+        /* MB is intra coded */
+        if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED)
+        {
+            int j = 0;
+            mbmi->is_4x4 = 1;
+            do
+            {
+                mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob);
+            }
+            while (++j < 16);
+        }
+
+        mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob);
+    }
+
+}
+
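+/* If the segmentation map is being updated this frame, read the
+ * macroblock's segment id as a two-level tree: the first bit selects the
+ * upper or lower pair of segments, the second bit selects within the pair. */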
+static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x)
+{
+    /* Is segmentation enabled */
+    if (x->segmentation_enabled && x->update_mb_segmentation_map)
+    {
+        /* If so then read the segment id. */
+        if (vp8_read(r, x->mb_segment_tree_probs[0]))
+            mi->segment_id =
+                (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
+        else
+            mi->segment_id =
+                (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
+    }
+}
+
+static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi,
+                               MB_MODE_INFO *mbmi)
+{
+    /* Read the Macroblock segmentation map if it is being updated explicitly
+     * this frame (reset to 0 above by default)
+     * By default on a key frame reset all MBs to segment 0
+     */
+    if (pbi->mb.update_mb_segmentation_map)
+        read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb);
+    else if(pbi->common.frame_type == KEY_FRAME)
+        mi->mbmi.segment_id = 0;
+
+    /* Read the macroblock coeff skip flag if this feature is in use,
+     * else default to 0 */
+    if (pbi->common.mb_no_coeff_skip)
+        mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false);
+    else
+        mi->mbmi.mb_skip_coeff = 0;
+
+    mi->mbmi.is_4x4 = 0;
+    if(pbi->common.frame_type == KEY_FRAME)
+        read_kf_modes(pbi, mi);
+    else
+        read_mb_modes_mv(pbi, mi, &mi->mbmi);
+
+}
+
+void vp8_decode_mode_mvs(VP8D_COMP *pbi)
+{
+    MODE_INFO *mi = pbi->common.mi;
+    int mb_row = -1;
+    int mb_to_right_edge_start;
+
+    mb_mode_mv_init(pbi);
+
+    pbi->mb.mb_to_top_edge = 0;
+    pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3;
+    mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3;
+
+    while (++mb_row < pbi->common.mb_rows)
+    {
+        int mb_col = -1;
+
+        pbi->mb.mb_to_left_edge = 0;
+        pbi->mb.mb_to_right_edge = mb_to_right_edge_start;
+
+        while (++mb_col < pbi->common.mb_cols)
+        {
+#if CONFIG_ERROR_CONCEALMENT
+            int mb_num = mb_row * pbi->common.mb_cols + mb_col;
+#endif
+
+            decode_mb_mode_mvs(pbi, mi, &mi->mbmi);
+
+#if CONFIG_ERROR_CONCEALMENT
+            /* look for corruption. set mvs_corrupt_from_mb to the current
+             * mb_num if the frame is corrupt from this macroblock. */
+            if (vp8dx_bool_error(&pbi->mbc[8]) && mb_num <
+                (int)pbi->mvs_corrupt_from_mb)
+            {
+                pbi->mvs_corrupt_from_mb = mb_num;
+                /* no need to continue since the partition is corrupt from
+                 * here on.
+                 */
+                return;
+            }
+#endif
+
+            pbi->mb.mb_to_left_edge -= (16 << 3);
+            pbi->mb.mb_to_right_edge -= (16 << 3);
+            mi++;       /* next macroblock */
+        }
+        pbi->mb.mb_to_top_edge -= (16 << 3);
+        pbi->mb.mb_to_bottom_edge -= (16 << 3);
+
+        mi++;       /* skip left predictor each row */
+    }
+}