--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/media/libvpx/vp9/encoder/vp9_bitstream.c	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,1523 @@
/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"

#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_treecoder.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_pragmas.h"

#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"


#if defined(SECTIONBITS_OUTPUT)
unsigned __int64 Sectionbits[500];
#endif

#ifdef ENTROPY_STATS
int intra_mode_stats[INTRA_MODES]
                    [INTRA_MODES]
                    [INTRA_MODES];
vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES];

extern unsigned int active_section;
#endif


#ifdef MODE_STATS
int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES];
int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1];
int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2];
int64_t switchable_interp_stats[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];

void init_tx_count_stats() {
  vp9_zero(tx_count_32x32p_stats);
  vp9_zero(tx_count_16x16p_stats);
  vp9_zero(tx_count_8x8p_stats);
}

void init_switchable_interp_stats() {
  vp9_zero(switchable_interp_stats);
}

static void update_tx_count_stats(VP9_COMMON *cm) {
  int i, j;
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES; j++) {
      tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j];
    }
  }
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES - 1; j++) {
      tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j];
    }
  }
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES - 2; j++) {
      tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j];
    }
  }
}

static void update_switchable_interp_stats(VP9_COMMON *cm) {
  int i, j;
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
    for (j = 0; j < SWITCHABLE_FILTERS; ++j)
      switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j];
}

void write_tx_count_stats() {
  int i, j;
  FILE *fp = fopen("tx_count.bin", "wb");
  fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp);
  fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp);
  fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp);
  fclose(fp);

  printf("vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZES] = {\n");
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    printf("  { ");
    for (j = 0; j < TX_SIZES; j++) {
      printf("%"PRId64", ", tx_count_32x32p_stats[i][j]);
    }
    printf("},\n");
  }
  printf("};\n");
  printf("vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZES-1] = {\n");
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    printf("  { ");
    for (j = 0; j < TX_SIZES - 1; j++) {
      printf("%"PRId64", ", tx_count_16x16p_stats[i][j]);
    }
    printf("},\n");
  }
  printf("};\n");
  printf("vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZES-2] = {\n");
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    printf("  { ");
    for (j = 0; j < TX_SIZES - 2; j++) {
      printf("%"PRId64", ", tx_count_8x8p_stats[i][j]);
    }
    printf("},\n");
  }
  printf("};\n");
}

void write_switchable_interp_stats() {
  int i, j;
  FILE *fp = fopen("switchable_interp.bin", "wb");
  fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp);
  fclose(fp);

  printf(
      "vp9_default_switchable_filter_count[SWITCHABLE_FILTER_CONTEXTS]"
      "[SWITCHABLE_FILTERS] = {\n");
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
    printf("  { ");
    for (j = 0; j < SWITCHABLE_FILTERS; j++) {
      printf("%"PRId64", ", switchable_interp_stats[i][j]);
    }
    printf("},\n");
  }
  printf("};\n");
}
#endif
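
// Stores a 32-bit value in big-endian byte order. Used by encode_tiles()
// below to emit the 4-byte size prefix written in front of each tile.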
static INLINE void write_be32(uint8_t *p, int value) {
  p[0] = value >> 24;
  p[1] = value >> 16;
  p[2] = value >> 8;
  p[3] = value;
}

void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                             int data, int max) {
  vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}
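
// Turns the observed symbol counts for an n-ary tree code into per-node
// branch counts, then conditionally signals a differential probability
// update for each of the n - 1 internal nodes.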
static void update_mode(vp9_writer *w, int n, vp9_tree tree,
                        vp9_prob Pcur[/* n-1 */],
                        unsigned int bct[/* n-1 */][2],
                        const unsigned int num_events[/* n */]) {
  int i = 0;

  vp9_tree_probs_from_distribution(tree, bct, num_events);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &Pcur[i], bct[i]);
}

static void update_mbintra_mode_probs(VP9_COMP* const cpi,
                                      vp9_writer* const bc) {
  VP9_COMMON *const cm = &cpi->common;
  int j;
  unsigned int bct[INTRA_MODES - 1][2];

  for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
    update_mode(bc, INTRA_MODES, vp9_intra_mode_tree,
                cm->fc.y_mode_prob[j], bct,
                (unsigned int *)cpi->y_mode_count[j]);
}
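
// The transform size is coded as a truncated unary sequence of up to three
// binary decisions (TX_4X4 / TX_8X8 / TX_16X16 / TX_32X32), capped at the
// largest transform size the block allows.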
static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m,
                                   TX_SIZE tx_size, BLOCK_SIZE bsize,
                                   vp9_writer *w) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cpi->common.fc.tx_probs);
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}

static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m,
                            vp9_writer *w) {
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip_coeff = m->mbmi.skip_coeff;
    vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd));
    return skip_coeff;
  }
}

void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *cm = &cpi->common;
  int k;

  for (k = 0; k < MBSKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], cm->counts.mbskip[k]);
}

static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
  write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m);
}

static void update_switchable_interp_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  unsigned int branch_ct[SWITCHABLE_FILTERS - 1][2];
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
    vp9_tree_probs_from_distribution(vp9_switchable_interp_tree, branch_ct,
                                     cm->counts.switchable_interp[j]);

    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_cond_prob_diff_update(w, &cm->fc.switchable_interp_prob[j][i],
                                branch_ct[i]);
  }

#ifdef MODE_STATS
  if (!cpi->dummy_packing)
    update_switchable_interp_stats(cm);
#endif
}

static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer *w) {
  int i, j;

  for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
    unsigned int branch_ct[INTER_MODES - 1][2];
    vp9_tree_probs_from_distribution(vp9_inter_mode_tree, branch_ct,
                                     cm->counts.inter_mode[i]);

    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_cond_prob_diff_update(w, &cm->fc.inter_mode_probs[i][j],
                                branch_ct[j]);
  }
}
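
// Walks the token list for one block and bool-codes each token: the token
// itself is written by traversing vp9_coef_tree (skipping the EOB node(s)
// already implied by context), then any extra magnitude bits are coded with
// the token's own probability tree, followed by a raw sign bit.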
static void pack_mb_tokens(vp9_writer* const w,
                           TOKENEXTRA **tp,
                           const TOKENEXTRA *const stop) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    int i = 0;
    const vp9_prob *pp;
    int v = a->value;
    int n = a->len;
    vp9_prob probs[ENTROPY_NODES];

    if (t >= TWO_TOKEN) {
      vp9_model_to_full_probs(p->context_tree, probs);
      pp = probs;
    } else {
      pp = p->context_tree;
    }
    assert(pp != 0);

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    do {
      const int bb = (v >> --n) & 1;
      vp9_write(w, bb, pp[i >> 1]);
      i = vp9_coef_tree[i + bb];
    } while (n);

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p + (p->token == EOSB_TOKEN);
}

static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode,
                            const vp9_prob *p) {
  assert(is_inter_mode(mode));
  write_token(w, vp9_inter_mode_tree, p,
              &vp9_inter_mode_encodings[INTER_OFFSET(mode)]);
}


static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3);
}

// This function encodes the reference frame
static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mi = &xd->mi_8x8[0]->mbmi;
  const int segment_id = mi->segment_id;
  int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
                                             SEG_LVL_REF_FRAME);
  // If segment-level coding of this signal is disabled, or the segment
  // allows multiple reference frame options...
  if (!seg_ref_active) {
    // Does the feature use compound prediction or not
    // (if not specified at the frame/segment level)?
    if (cm->comp_pred_mode == HYBRID_PREDICTION) {
      vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
                vp9_get_pred_prob_comp_inter_inter(cm, xd));
    } else {
      assert((mi->ref_frame[1] <= INTRA_FRAME) ==
             (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY));
    }

    if (mi->ref_frame[1] > INTRA_FRAME) {
      vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
                vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (mi->ref_frame[0] != LAST_FRAME)
        vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
                  vp9_get_pred_prob_single_ref_p2(cm, xd));
    }
  } else {
    assert(mi->ref_frame[1] <= INTRA_FRAME);
    assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) ==
           mi->ref_frame[0]);
  }

  // If using the prediction model we have nothing further to do because
  // the reference frame is fully coded by the segment.
}
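
// Writes the full mode info for one block of an inter frame: segment id,
// skip flag, intra/inter flag, transform size, then either the intra modes
// or the reference frames, prediction mode(s), interpolation filter and
// motion vector(s).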
static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc.nmvc;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct segmentation *seg = &cm->seg;
  MB_MODE_INFO *const mi = &m->mbmi;
  const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
  const MB_PREDICTION_MODE mode = mi->mode;
  const int segment_id = mi->segment_id;
  int skip_coeff;
  const BLOCK_SIZE bsize = mi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;

#ifdef ENTROPY_STATS
  active_section = 9;
#endif

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(bc, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(bc, seg, segment_id);
    } else {
      write_segment_id(bc, seg, segment_id);
    }
  }

  skip_coeff = write_skip_coeff(cpi, segment_id, m, bc);

  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(bc, rf != INTRA_FRAME,
              vp9_get_pred_prob_intra_inter(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(rf != INTRA_FRAME &&
        (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
    write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc);
  }

  if (rf == INTRA_FRAME) {
#ifdef ENTROPY_STATS
    active_section = 6;
#endif

    if (bsize >= BLOCK_8X8) {
      write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
        for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
          const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]);
  } else {
    vp9_prob *mv_ref_p;
    encode_ref_frame(cpi, bc);
    mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]];

#ifdef ENTROPY_STATS
    active_section = 3;
#endif

    // If segment skip is not enabled, code the mode.
    if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_sb_mv_ref(bc, mode, mv_ref_p);
        ++cm->counts.inter_mode[mi->mode_context[rf]]
                               [INTER_OFFSET(mode)];
      }
    }

    if (cm->mcomp_filter_type == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      write_token(bc, vp9_switchable_interp_tree,
                  cm->fc.switchable_interp_prob[ctx],
                  &vp9_switchable_interp_encodings[mi->interp_filter]);
    } else {
      assert(mi->interp_filter == cm->mcomp_filter_type);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
        for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
          const int j = idy * 2 + idx;
          const MB_PREDICTION_MODE blockmode = m->bmi[j].as_mode;
          write_sb_mv_ref(bc, blockmode, mv_ref_p);
          ++cm->counts.inter_mode[mi->mode_context[rf]]
                                 [INTER_OFFSET(blockmode)];

          if (blockmode == NEWMV) {
#ifdef ENTROPY_STATS
            active_section = 11;
#endif
            vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv,
                          &mi->best_mv[0].as_mv, nmvc, allow_hp);

            if (has_second_ref(mi))
              vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv,
                            &mi->best_mv[1].as_mv, nmvc, allow_hp);
          }
        }
      }
    } else if (mode == NEWMV) {
#ifdef ENTROPY_STATS
      active_section = 5;
#endif
      vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv,
                    &mi->best_mv[0].as_mv, nmvc, allow_hp);

      if (has_second_ref(mi))
        vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv,
                      &mi->best_mv[1].as_mv, nmvc, allow_hp);
    }
  }
}
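
// Key-frame counterpart of pack_inter_mode_mvs(): intra modes are coded
// with the fixed vp9_kf_y_mode_prob table, conditioned on the modes of the
// above and left neighbours instead of on frame-adaptive probabilities.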
static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
                              vp9_writer *bc) {
  const VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const struct segmentation *const seg = &cm->seg;
  MODE_INFO *m = mi_8x8[0];
  const int ym = m->mbmi.mode;
  const int segment_id = m->mbmi.segment_id;
  MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride];
  MODE_INFO *left_mi = xd->left_available ? mi_8x8[-1] : NULL;

  if (seg->update_map)
    write_segment_id(bc, seg, m->mbmi.segment_id);

  write_skip_coeff(cpi, segment_id, m, bc);

  if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc);

  if (m->mbmi.sb_type >= BLOCK_8X8) {
    const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
    const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0);
    write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
  } else {
    int idx, idy;
    const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type];
    const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type];
    for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
        int i = idy * 2 + idx;
        const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i);
        const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, i);
        const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
        ++intra_mode_stats[A][L][bm];
#endif
        write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]);
      }
    }
  }

  write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
}

static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                          int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO *m;

  xd->mi_8x8 = cm->mi_grid_visible + (mi_row * cm->mode_info_stride + mi_col);
  m = xd->mi_8x8[0];

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cpi, xd->mi_8x8, w);
#ifdef ENTROPY_STATS
    active_section = 8;
#endif
  } else {
    pack_inter_mode_mvs(cpi, m, w);
#ifdef ENTROPY_STATS
    active_section = 1;
#endif
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end);
}
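
// Signals the partition type for a block. For blocks on the right or
// bottom frame edge only the partitions that keep the prediction inside
// the frame are legal, so a reduced code (or no bits at all, when only
// PARTITION_SPLIT remains) is used there.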
static void write_partition(VP9_COMP *cpi, int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const int ctx = partition_plane_context(cpi->above_seg_context,
                                          cpi->left_seg_context,
                                          mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    write_token(w, vp9_partition_tree, probs, &vp9_partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}
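
// Recursively descends the superblock partition tree: writes the partition
// type at the current level, then either codes the block(s) directly or
// recurses into the four quadrants for PARTITION_SPLIT.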
static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile,
                           vp9_writer *w, TOKENEXTRA **tok,
                           TOKENEXTRA *tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int bsl = b_width_log2(bsize);
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mode_info_stride + mi_col];

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cpi, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(cpi->above_seg_context, cpi->left_seg_context,
                             mi_row, mi_col, subsize, bsize);
}

static void write_modes(VP9_COMP *cpi, const TileInfo *const tile,
                        vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(cpi->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
  }
}
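
// Converts the coefficient token counts gathered during encoding into
// branch counts and per-frame model probabilities for one transform size.
// The count for node 0 (EOB vs. more coefficients) is corrected from the
// separately tracked eob_branch counts, because tokenization skips the EOB
// decision for some tokens (see skip_eob_node in pack_mb_tokens()).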
static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) {
  vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size];
  vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < BLOCK_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
          if (l >= 3 && k == 0)
            continue;
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
              coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                coef_branch_ct[i][j][k][l][m][0],
                coef_branch_ct[i][j][k][l][m][1]);
#ifdef ENTROPY_STATS
          if (!cpi->dummy_packing) {
            int t;
            for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
              context_counters[tx_size][i][j][k][l][t] +=
                  coef_counts[i][j][k][l][t];
            context_counters[tx_size][i][j][k][l][MAX_ENTROPY_TOKENS] +=
                eob_branch_ct[i][j][k][l];
          }
#endif
        }
      }
    }
  }
}

static void build_coeff_contexts(VP9_COMP *cpi) {
  TX_SIZE t;
  for (t = TX_4X4; t <= TX_32X32; t++)
    build_tree_distribution(cpi, t);
}
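
// Signals the coefficient probability updates for one transform size.
// Strategy 0 first does a dry run to decide whether updating is worth the
// signalling cost at all; strategies 1 and 2 stream the decisions out
// directly, with strategy 2 also restricting which bands and contexts are
// considered, trading compression for encoder speed.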
static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size) {
  vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size];
  vp9_coeff_probs_model *old_frame_coef_probs =
      cpi->common.fc.coef_probs[tx_size];
  vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  switch (cpi->sf.use_fast_coef_updates) {
    case 0: {
      /* Dry run to see if any update at all is needed. */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;

                if (l >= 3 && k == 0)
                  continue;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Were any coef probs updated at all? */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (l >= 3 && k == 0)
                  continue;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
#ifdef ENTROPY_STATS
                if (!cpi->dummy_packing)
                  ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case 1:
    case 2: {
      const int prev_coef_contexts_to_update =
          (cpi->sf.use_fast_coef_updates == 2 ?
           PREV_COEF_CONTEXTS >> 1 : PREV_COEF_CONTEXTS);
      const int coef_band_to_update =
          (cpi->sf.use_fast_coef_updates == 2 ?
           COEF_BANDS >> 1 : COEF_BANDS);
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;
                if (l >= 3 && k == 0)
                  continue;
                if (l >= prev_coef_contexts_to_update ||
                    k >= coef_band_to_update) {
                  u = 0;
                } else {
                  if (t == PIVOT_NODE)
                    s = vp9_prob_diff_update_savings_search_model(
                        frame_branch_ct[i][j][k][l][0],
                        old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                  else
                    s = vp9_prob_diff_update_savings_search(
                        frame_branch_ct[i][j][k][l][t],
                        *oldp, &newp, upd);
                  if (s > 0 && newp != *oldp)
                    u = 1;
                }
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
#ifdef ENTROPY_STATS
                  if (!cpi->dummy_packing)
                    ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
#ifdef ENTROPY_STATS
                if (!cpi->dummy_packing)
                  ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }

    default:
      assert(0);
  }
}

static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
  const TX_MODE tx_mode = cpi->common.tx_mode;

  vp9_clear_system_state();

  // Build the coefficient contexts based on counts collected in the encode
  // loop.
  build_coeff_contexts(cpi);

  update_coef_probs_common(bc, cpi, TX_4X4);

  // Only update the probabilities for transform sizes the tx_mode allows.
  if (tx_mode > ONLY_4X4)
    update_coef_probs_common(bc, cpi, TX_8X8);

  if (tx_mode > ALLOW_8X8)
    update_coef_probs_common(bc, cpi, TX_16X16);

  if (tx_mode > ALLOW_16X16)
    update_coef_probs_common(bc, cpi, TX_32X32);
}
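
// Writes the loop filter syntax of the uncompressed header: filter level
// and sharpness, then the optional per-reference-frame and per-mode filter
// level deltas (6-bit magnitude plus sign, sent only for deltas that have
// changed).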
static void encode_loopfilter(struct loopfilter *lf,
                              struct vp9_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and sharpness.
  vp9_wb_write_literal(wb, lf->filter_level, 6);
  vp9_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    // Do the deltas need to be updated?
    vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      // Send update
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];

        // Frame level data
        if (delta != lf->last_ref_deltas[i]) {
          lf->last_ref_deltas[i] = delta;
          vp9_wb_write_bit(wb, 1);

          assert(delta != 0);
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        } else {
          vp9_wb_write_bit(wb, 0);
        }
      }

      // Send update
      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        if (delta != lf->last_mode_deltas[i]) {
          lf->last_mode_deltas[i] = delta;
          vp9_wb_write_bit(wb, 1);

          assert(delta != 0);
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        } else {
          vp9_wb_write_bit(wb, 0);
        }
      }
    }
  }
}

static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vp9_wb_write_bit(wb, 1);
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  } else {
    vp9_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(VP9_COMMON *cm,
                                struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}


static void encode_segmentation(VP9_COMP *cpi,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  struct segmentation *seg = &cpi->common.seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cpi);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = vp9_segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = vp9_get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            vp9_encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            vp9_encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}


static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;

  // Mode
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
#ifdef MODE_STATS
    if (!cpi->dummy_packing)
      update_tx_count_stats(cm);
#endif
  }
}

static void write_interp_filter_type(INTERPOLATION_TYPE type,
                                     struct vp9_write_bit_buffer *wb) {
  const int type_to_literal[] = { 1, 0, 2, 3 };

  vp9_wb_write_bit(wb, type == SWITCHABLE);
  if (type != SWITCHABLE)
    vp9_wb_write_literal(wb, type_to_literal[type], 2);
}

static void fix_mcomp_filter_type(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;

  if (cm->mcomp_filter_type == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += cm->counts.switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used, so signal it at the frame level.
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->mcomp_filter_type = i;
          break;
        }
      }
    }
  }
}
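
// The number of tile columns is coded as the offset above the minimum in
// unary: one 1 bit per extra doubling, terminated by a 0 bit unless the
// maximum was reached. E.g. with min_log2_tile_cols == 0 and a maximum of
// at least 3, log2_tile_cols == 2 is coded as the bits 1, 1, 0. Tile rows
// (0, 1 or 2 in log2 units) take up to two bits.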
static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vp9_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}

static int get_refresh_mask(VP9_COMP *cpi) {
  // Should the GF or ARF be updated using the transmitted frame or buffer
#if CONFIG_MULTIPLE_ARF
  if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
      !cpi->refresh_alt_ref_frame) {
#else
  if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame &&
      !cpi->use_svc) {
#endif
    // Preserve the previously existing golden frame and update the frame in
    // the alt ref slot instead. This is highly specific to the use of
    // alt-ref as a forward reference, and this needs to be generalized as
    // other uses are implemented (like RTC/temporal scaling)
    //
    // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but
    // that happens in vp9_onyx_if.c:update_reference_frames() so that it can
    // be done outside of the recode loop.
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
#if CONFIG_MULTIPLE_ARF
    // Determine which ARF buffer to use to encode this ARF frame.
    if (cpi->multi_arf_enabled) {
      int sn = cpi->sequence_number;
      arf_idx = (cpi->frame_coding_order[sn] < 0) ?
                cpi->arf_buffer_idx[sn + 1] :
                cpi->arf_buffer_idx[sn];
    }
#endif
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}
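
// Bool-codes every tile in raster order. Each tile except the last is
// preceded by its 4-byte big-endian size (written with write_be32()) so a
// decoder can locate the following tiles without first decoding this one.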
static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;

  int tile_row, tile_col;
  TOKENEXTRA *tok[4][1 << 6], *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  vpx_memset(cpi->above_seg_context, 0, sizeof(*cpi->above_seg_context) *
             mi_cols_aligned_to_sb(cm->mi_cols));

  tok[0][0] = cpi->tok;
  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    if (tile_row)
      tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
                         cpi->tok_count[tile_row - 1][tile_cols - 1];

    for (tile_col = 1; tile_col < tile_cols; tile_col++)
      tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
                                cpi->tok_count[tile_row][tile_col - 1];
  }

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, tile_col);
      tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end);
      assert(tok[tile_row][tile_col] == tok_end);
      vp9_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        write_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}

static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
  vp9_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
  }
}

static void write_frame_size(VP9_COMP *cpi,
                             struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_wb_write_literal(wb, cm->width - 1, 16);
  vp9_wb_write_literal(wb, cm->height - 1, 16);

  write_display_size(cpi, wb);
}

static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
                                      cpi->alt_fb_idx};
  int i, found = 0;

  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
    found = cm->width == cfg->y_crop_width &&
            cm->height == cfg->y_crop_height;

    // TODO(ivan): This prevents a bug when more than 3 buffers are used. Do it
    // in a better way.
    if (cpi->use_svc) {
      found = 0;
    }
    vp9_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vp9_wb_write_literal(wb, cm->width - 1, 16);
    vp9_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_display_size(cpi, wb);
}

static void write_sync_code(struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}
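
// Writes everything up to and including the tile layout with the raw bit
// buffer (no arithmetic coding): frame marker and version, frame type and
// show/error-resilient flags, sync code plus color and size information
// for key frames, the reference configuration for inter frames, and then
// the loop filter, quantizer, segmentation and tile parameters.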
static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  // bitstream version.
  // 00 - profile 0. 4:2:0 only
  // 10 - profile 1. adds 4:4:4, 4:2:2, alpha
  vp9_wb_write_bit(wb, cm->version);
  vp9_wb_write_bit(wb, 0);

  vp9_wb_write_bit(wb, 0);
  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    const COLOR_SPACE cs = UNKNOWN;
    write_sync_code(wb);
    vp9_wb_write_literal(wb, cs, 3);
    if (cs != SRGB) {
      vp9_wb_write_bit(wb, 0);  // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
      if (cm->version == 1) {
        vp9_wb_write_bit(wb, cm->subsampling_x);
        vp9_wb_write_bit(wb, cm->subsampling_y);
        vp9_wb_write_bit(wb, 0);  // has extra plane
      }
    } else {
      assert(cm->version == 1);
      vp9_wb_write_bit(wb, 0);  // has extra plane
    }

    write_frame_size(cpi, wb);
  } else {
    const int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
                                              cpi->alt_fb_idx};
    if (!cm->show_frame)
      vp9_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vp9_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);

      vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
      write_frame_size(cpi, wb);
    } else {
      int i;
      vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
        vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2);
        vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]);
      }

      write_frame_size_with_refs(cpi, wb);

      vp9_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_mcomp_filter_type(cpi);
      write_interp_filter_type(cm->mcomp_filter_type, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cpi, wb);

  write_tile_info(cm, wb);
}
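
// Writes the arithmetically coded part of the frame header: transform mode
// and probabilities, coefficient probability updates, skip probabilities
// and, for inter frames, the mode, filter, reference frame and motion
// vector probability updates. Returns its size in bytes.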
static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_writer header_bc;

  vp9_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cpi, &header_bc);

  update_coef_probs(cpi, &header_bc);

#ifdef ENTROPY_STATS
  active_section = 2;
#endif

  vp9_update_skip_probs(cpi, &header_bc);

  if (!frame_is_intra_only(cm)) {
    int i;
#ifdef ENTROPY_STATS
    active_section = 1;
#endif

    update_inter_mode_probs(cm, &header_bc);
    vp9_zero(cm->counts.inter_mode);

    if (cm->mcomp_filter_type == SWITCHABLE)
      update_switchable_interp_probs(cpi, &header_bc);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                cpi->intra_inter_count[i]);

    if (cm->allow_comp_inter_inter) {
      const int comp_pred_mode = cpi->common.comp_pred_mode;
      const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY;
      const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION;

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      cpi->comp_inter_count[i]);
      }
    }

    if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  cpi->single_ref_count[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  cpi->single_ref_count[i][1]);
      }
    }

    if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  cpi->comp_ref_count[i]);

    update_mbintra_mode_probs(cpi, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i) {
      unsigned int bct[PARTITION_TYPES - 1][2];
      update_mode(&header_bc, PARTITION_TYPES, vp9_partition_tree,
                  fc->partition_prob[i], bct,
                  (unsigned int *)cpi->partition_count[i]);
    }

    vp9_write_nmv_probs(cpi, cm->allow_high_precision_mv, &header_bc);
  }

  vp9_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}
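
// Assembles the final frame packet: uncompressed header, a 16-bit
// placeholder for the compressed header size (patched below once that size
// is known), the compressed header itself, and finally the tile data.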
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
  uint8_t *data = dest;
  size_t first_part_size;
  struct vp9_write_bit_buffer wb = {data, 0};
  struct vp9_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  vp9_wb_write_literal(&wb, 0, 16);  // first-part size not yet known; patched below

  data += vp9_rb_bytes_written(&wb);

  vp9_compute_update_table();

#ifdef ENTROPY_STATS
  if (cpi->common.frame_type == INTER_FRAME)
    active_section = 0;
  else
    active_section = 7;
#endif

  vp9_clear_system_state();  // __asm emms;

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  vp9_wb_write_literal(&saved_wb, first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}

#ifdef ENTROPY_STATS
static void print_tree_update_for_type(FILE *f,
                                       vp9_coeff_stats *tree_update_hist,
                                       int block_types, const char *header) {
  int i, j, k, l, m;

  fprintf(f, "const vp9_coeff_prob %s = {\n", header);
  for (i = 0; i < block_types; i++) {
    fprintf(f, " { \n");
    for (j = 0; j < REF_TYPES; j++) {
      fprintf(f, " { \n");
      for (k = 0; k < COEF_BANDS; k++) {
        fprintf(f, " {\n");
        for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
          fprintf(f, " {");
          for (m = 0; m < ENTROPY_NODES; m++) {
            fprintf(f, "%3d, ",
                    get_binary_prob(tree_update_hist[i][j][k][l][m][0],
                                    tree_update_hist[i][j][k][l][m][1]));
          }
          fprintf(f, "},\n");
        }
        fprintf(f, "},\n");
      }
      fprintf(f, " },\n");
    }
    fprintf(f, " },\n");
  }
  fprintf(f, "};\n");
}

void print_tree_update_probs() {
  FILE *f = fopen("coefupdprob.h", "w");
  fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");

  print_tree_update_for_type(f, tree_update_hist[TX_4X4], BLOCK_TYPES,
                             "vp9_coef_update_probs_4x4[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist[TX_8X8], BLOCK_TYPES,
                             "vp9_coef_update_probs_8x8[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist[TX_16X16], BLOCK_TYPES,
                             "vp9_coef_update_probs_16x16[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES,
                             "vp9_coef_update_probs_32x32[BLOCK_TYPES]");

  fclose(f);
  f = fopen("treeupdate.bin", "wb");
  fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
  fclose(f);
}
#endif