/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"

#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_treecoder.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_pragmas.h"

#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"


#if defined(SECTIONBITS_OUTPUT)
unsigned __int64 Sectionbits[500];
#endif

#ifdef ENTROPY_STATS
int intra_mode_stats[INTRA_MODES]
                    [INTRA_MODES]
                    [INTRA_MODES];
vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES];

extern unsigned int active_section;
#endif


#ifdef MODE_STATS
int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES];
int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1];
int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2];
int64_t switchable_interp_stats[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];

void init_tx_count_stats() {
  vp9_zero(tx_count_32x32p_stats);
  vp9_zero(tx_count_16x16p_stats);
  vp9_zero(tx_count_8x8p_stats);
}

void init_switchable_interp_stats() {
  vp9_zero(switchable_interp_stats);
}

static void update_tx_count_stats(VP9_COMMON *cm) {
  int i, j;
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES; j++) {
      tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j];
    }
  }
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES - 1; j++) {
      tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j];
    }
  }
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES - 2; j++) {
      tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j];
    }
  }
}

static void update_switchable_interp_stats(VP9_COMMON *cm) {
  int i, j;
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
    for (j = 0; j < SWITCHABLE_FILTERS; ++j)
      switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j];
}

void write_tx_count_stats() {
  int i, j;
  FILE *fp = fopen("tx_count.bin", "wb");
  fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp);
  fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp);
  fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp);
  fclose(fp);

  printf(
      "vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZES] = {\n");
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    printf(" { ");
    for (j = 0; j < TX_SIZES; j++) {
      printf("%"PRId64", ", tx_count_32x32p_stats[i][j]);
    }
    printf("},\n");
  }
  printf("};\n");
  printf(
      "vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZES-1] = {\n");
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    printf(" { ");
    for (j = 0; j < TX_SIZES - 1; j++) {
      printf("%"PRId64", ", tx_count_16x16p_stats[i][j]);
    }
    printf("},\n");
  }
  printf("};\n");
  printf(
      "vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZES-2] = {\n");
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    printf(" { ");
    for (j = 0; j < TX_SIZES - 2; j++) {
      printf("%"PRId64", ", tx_count_8x8p_stats[i][j]);
    }
    printf("},\n");
  }
  printf("};\n");
}

void write_switchable_interp_stats() {
  int i, j;
  FILE *fp = fopen("switchable_interp.bin", "wb");
  fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp);
  fclose(fp);

  printf(
      "vp9_default_switchable_filter_count[SWITCHABLE_FILTER_CONTEXTS]"
      "[SWITCHABLE_FILTERS] = {\n");
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
    printf(" { ");
    for (j = 0; j < SWITCHABLE_FILTERS; j++) {
      printf("%"PRId64", ", switchable_interp_stats[i][j]);
    }
    printf("},\n");
  }
  printf("};\n");
}
#endif

// Write a 32-bit value into the buffer in big-endian byte order.
static INLINE void write_be32(uint8_t *p, int value) {
  p[0] = value >> 24;
  p[1] = value >> 16;
  p[2] = value >> 8;
  p[3] = value;
}

void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                             int data, int max) {
  vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}

static void update_mode(vp9_writer *w, int n, vp9_tree tree,
                        vp9_prob Pcur[/* n-1 */],
                        unsigned int bct[/* n-1 */][2],
                        const unsigned int num_events[/* n */]) {
  int i = 0;

  vp9_tree_probs_from_distribution(tree, bct, num_events);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &Pcur[i], bct[i]);
}

static void update_mbintra_mode_probs(VP9_COMP* const cpi,
                                      vp9_writer* const bc) {
  VP9_COMMON *const cm = &cpi->common;
  int j;
  unsigned int bct[INTRA_MODES - 1][2];

  for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
    update_mode(bc, INTRA_MODES, vp9_intra_mode_tree,
                cm->fc.y_mode_prob[j], bct,
                (unsigned int *)cpi->y_mode_count[j]);
}

static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m,
                                   TX_SIZE tx_size, BLOCK_SIZE bsize,
                                   vp9_writer *w) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cpi->common.fc.tx_probs);
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}

static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m,
                            vp9_writer *w) {
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip_coeff = m->mbmi.skip_coeff;
    vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd));
    return skip_coeff;
  }
}

void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *cm = &cpi->common;
  int k;

  for (k = 0; k < MBSKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], cm->counts.mbskip[k]);
}

static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
  write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m);
}

static void update_switchable_interp_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  unsigned int branch_ct[SWITCHABLE_FILTERS - 1][2];
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
    vp9_tree_probs_from_distribution(vp9_switchable_interp_tree, branch_ct,
                                     cm->counts.switchable_interp[j]);

    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_cond_prob_diff_update(w, &cm->fc.switchable_interp_prob[j][i],
                                branch_ct[i]);
  }

#ifdef MODE_STATS
  if (!cpi->dummy_packing)
    update_switchable_interp_stats(cm);
#endif
}

static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer *w) {
  int i, j;

  for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
    unsigned int branch_ct[INTER_MODES - 1][2];
    vp9_tree_probs_from_distribution(vp9_inter_mode_tree, branch_ct,
                                     cm->counts.inter_mode[i]);

    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_cond_prob_diff_update(w, &cm->fc.inter_mode_probs[i][j],
                                branch_ct[j]);
  }
}

// Pack the coefficient tokens for a block into the bitstream, stopping at
// EOSB_TOKEN.
static void pack_mb_tokens(vp9_writer* const w,
                           TOKENEXTRA **tp,
                           const TOKENEXTRA *const stop) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    int i = 0;
    const vp9_prob *pp;
    int v = a->value;
    int n = a->len;
    vp9_prob probs[ENTROPY_NODES];

    if (t >= TWO_TOKEN) {
      vp9_model_to_full_probs(p->context_tree, probs);
      pp = probs;
    } else {
      pp = p->context_tree;
    }
    assert(pp != 0);

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    do {
      const int bb = (v >> --n) & 1;
      vp9_write(w, bb, pp[i >> 1]);
      i = vp9_coef_tree[i + bb];
    } while (n);

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p + (p->token == EOSB_TOKEN);
}

static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode,
                            const vp9_prob *p) {
  assert(is_inter_mode(mode));
  write_token(w, vp9_inter_mode_tree, p,
              &vp9_inter_mode_encodings[INTER_OFFSET(mode)]);
}


static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3);
}

// This function encodes the reference frame
static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mi = &xd->mi_8x8[0]->mbmi;
  const int segment_id = mi->segment_id;
  int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
                                             SEG_LVL_REF_FRAME);
  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (!seg_ref_active) {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->comp_pred_mode == HYBRID_PREDICTION) {
      vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
                vp9_get_pred_prob_comp_inter_inter(cm, xd));
    } else {
      assert((mi->ref_frame[1] <= INTRA_FRAME) ==
             (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY));
    }

    if (mi->ref_frame[1] > INTRA_FRAME) {
      vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
                vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (mi->ref_frame[0] != LAST_FRAME)
        vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
                  vp9_get_pred_prob_single_ref_p2(cm, xd));
    }
  } else {
    assert(mi->ref_frame[1] <= INTRA_FRAME);
    assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) ==
           mi->ref_frame[0]);
  }

  // If using the prediction model we have nothing further to do because
  // the reference frame is fully coded by the segment.
}

static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc.nmvc;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct segmentation *seg = &cm->seg;
  MB_MODE_INFO *const mi = &m->mbmi;
  const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
  const MB_PREDICTION_MODE mode = mi->mode;
  const int segment_id = mi->segment_id;
  int skip_coeff;
  const BLOCK_SIZE bsize = mi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;

#ifdef ENTROPY_STATS
  active_section = 9;
#endif

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(bc, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(bc, seg, segment_id);
    } else {
      write_segment_id(bc, seg, segment_id);
    }
  }

  skip_coeff = write_skip_coeff(cpi, segment_id, m, bc);

  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(bc, rf != INTRA_FRAME,
              vp9_get_pred_prob_intra_inter(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(rf != INTRA_FRAME &&
        (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
    write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc);
  }

  if (rf == INTRA_FRAME) {
#ifdef ENTROPY_STATS
    active_section = 6;
#endif

    if (bsize >= BLOCK_8X8) {
      write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
        for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
          const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]);
  } else {
    vp9_prob *mv_ref_p;
    encode_ref_frame(cpi, bc);
    mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]];

#ifdef ENTROPY_STATS
    active_section = 3;
#endif

    // If segment skip is not enabled code the mode.
    if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_sb_mv_ref(bc, mode, mv_ref_p);
        ++cm->counts.inter_mode[mi->mode_context[rf]]
                               [INTER_OFFSET(mode)];
      }
    }

    if (cm->mcomp_filter_type == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      write_token(bc, vp9_switchable_interp_tree,
                  cm->fc.switchable_interp_prob[ctx],
                  &vp9_switchable_interp_encodings[mi->interp_filter]);
    } else {
      assert(mi->interp_filter == cm->mcomp_filter_type);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
        for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
          const int j = idy * 2 + idx;
          const MB_PREDICTION_MODE blockmode = m->bmi[j].as_mode;
          write_sb_mv_ref(bc, blockmode, mv_ref_p);
          ++cm->counts.inter_mode[mi->mode_context[rf]]
                                 [INTER_OFFSET(blockmode)];

          if (blockmode == NEWMV) {
#ifdef ENTROPY_STATS
            active_section = 11;
#endif
            vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv,
                          &mi->best_mv[0].as_mv, nmvc, allow_hp);

            if (has_second_ref(mi))
              vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv,
                            &mi->best_mv[1].as_mv, nmvc, allow_hp);
          }
        }
      }
    } else if (mode == NEWMV) {
#ifdef ENTROPY_STATS
      active_section = 5;
#endif
      vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv,
                    &mi->best_mv[0].as_mv, nmvc, allow_hp);

      if (has_second_ref(mi))
        vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv,
                      &mi->best_mv[1].as_mv, nmvc, allow_hp);
    }
  }
}

static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
                              vp9_writer *bc) {
  const VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const struct segmentation *const seg = &cm->seg;
  MODE_INFO *m = mi_8x8[0];
  const int ym = m->mbmi.mode;
  const int segment_id = m->mbmi.segment_id;
  MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride];
  MODE_INFO *left_mi = xd->left_available ? mi_8x8[-1] : NULL;

  if (seg->update_map)
    write_segment_id(bc, seg, m->mbmi.segment_id);

  write_skip_coeff(cpi, segment_id, m, bc);

  if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc);

  if (m->mbmi.sb_type >= BLOCK_8X8) {
    const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
    const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0);
    write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
  } else {
    int idx, idy;
    const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type];
    const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type];
    for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
        int i = idy * 2 + idx;
        const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i);
        const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, i);
        const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
        ++intra_mode_stats[A][L][bm];
#endif
        write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]);
      }
    }
  }

  write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
}

static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                          int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO *m;

  xd->mi_8x8 = cm->mi_grid_visible + (mi_row * cm->mode_info_stride + mi_col);
  m = xd->mi_8x8[0];

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cpi, xd->mi_8x8, w);
#ifdef ENTROPY_STATS
    active_section = 8;
#endif
  } else {
    pack_inter_mode_mvs(cpi, m, w);
#ifdef ENTROPY_STATS
    active_section = 1;
#endif
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end);
}

static void write_partition(VP9_COMP *cpi, int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const int ctx = partition_plane_context(cpi->above_seg_context,
                                          cpi->left_seg_context,
                                          mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    write_token(w, vp9_partition_tree, probs, &vp9_partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}

static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile,
                           vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int bsl = b_width_log2(bsize);
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mode_info_stride + mi_col];

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cpi, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(cpi->above_seg_context, cpi->left_seg_context,
                             mi_row, mi_col, subsize, bsize);
}

static void write_modes(VP9_COMP *cpi, const TileInfo *const tile,
                        vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(cpi->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
  }
}

static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) {
  vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size];
  vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < BLOCK_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
          if (l >= 3 && k == 0)
            continue;
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                coef_branch_ct[i][j][k][l][m][0],
                coef_branch_ct[i][j][k][l][m][1]);
#ifdef ENTROPY_STATS
          if (!cpi->dummy_packing) {
            int t;
            for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
              context_counters[tx_size][i][j][k][l][t] +=
                  coef_counts[i][j][k][l][t];
            context_counters[tx_size][i][j][k][l][MAX_ENTROPY_TOKENS] +=
                eob_branch_ct[i][j][k][l];
          }
#endif
        }
      }
    }
  }
}

static void build_coeff_contexts(VP9_COMP *cpi) {
  TX_SIZE t;
  for (t = TX_4X4; t <= TX_32X32; t++)
    build_tree_distribution(cpi, t);
}

static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size) {
  vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size];
  vp9_coeff_probs_model *old_frame_coef_probs =
      cpi->common.fc.coef_probs[tx_size];
  vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  switch (cpi->sf.use_fast_coef_updates) {
    case 0: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;

                if (l >= 3 && k == 0)
                  continue;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (l >= 3 && k == 0)
                  continue;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
#ifdef ENTROPY_STATS
                if (!cpi->dummy_packing)
                  ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case 1:
    case 2: {
      const int prev_coef_contexts_to_update =
          (cpi->sf.use_fast_coef_updates == 2 ?
           PREV_COEF_CONTEXTS >> 1 : PREV_COEF_CONTEXTS);
      const int coef_band_to_update =
          (cpi->sf.use_fast_coef_updates == 2 ?
           COEF_BANDS >> 1 : COEF_BANDS);
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;
                if (l >= 3 && k == 0)
                  continue;
                if (l >= prev_coef_contexts_to_update ||
                    k >= coef_band_to_update) {
                  u = 0;
                } else {
                  if (t == PIVOT_NODE)
                    s = vp9_prob_diff_update_savings_search_model(
                        frame_branch_ct[i][j][k][l][0],
                        old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                  else
                    s = vp9_prob_diff_update_savings_search(
                        frame_branch_ct[i][j][k][l][t],
                        *oldp, &newp, upd);
                  if (s > 0 && newp != *oldp)
                    u = 1;
                }
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
#ifdef ENTROPY_STATS
                  if (!cpi->dummy_packing)
                    ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
#ifdef ENTROPY_STATS
                if (!cpi->dummy_packing)
                  ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }

    default:
      assert(0);
  }
}

static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
  const TX_MODE tx_mode = cpi->common.tx_mode;

  vp9_clear_system_state();

  // Build the coefficient contexts based on counts collected in encode loop
  build_coeff_contexts(cpi);

  update_coef_probs_common(bc, cpi, TX_4X4);

  // do not do this if not even allowed
  if (tx_mode > ONLY_4X4)
    update_coef_probs_common(bc, cpi, TX_8X8);

  if (tx_mode > ALLOW_8X8)
    update_coef_probs_common(bc, cpi, TX_16X16);

  if (tx_mode > ALLOW_16X16)
    update_coef_probs_common(bc, cpi, TX_32X32);
}

static void encode_loopfilter(struct loopfilter *lf,
                              struct vp9_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vp9_wb_write_literal(wb, lf->filter_level, 6);
  vp9_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    // Do the deltas need to be updated
    vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      // Send update
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];

        // Frame level data
        if (delta != lf->last_ref_deltas[i]) {
          lf->last_ref_deltas[i] = delta;
          vp9_wb_write_bit(wb, 1);

          assert(delta != 0);
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        } else {
          vp9_wb_write_bit(wb, 0);
        }
      }

      // Send update
      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        if (delta != lf->last_mode_deltas[i]) {
          lf->last_mode_deltas[i] = delta;
          vp9_wb_write_bit(wb, 1);

          assert(delta != 0);
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        } else {
          vp9_wb_write_bit(wb, 0);
        }
      }
    }
  }
}

static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vp9_wb_write_bit(wb, 1);
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  } else {
    vp9_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(VP9_COMMON *cm,
                                struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}


static void encode_segmentation(VP9_COMP *cpi,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  struct segmentation *seg = &cpi->common.seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cpi);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = vp9_segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = vp9_get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            vp9_encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            vp9_encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}


static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;

  // Mode
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];


    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
#ifdef MODE_STATS
    if (!cpi->dummy_packing)
      update_tx_count_stats(cm);
#endif
  }
}

static void write_interp_filter_type(INTERPOLATION_TYPE type,
                                     struct vp9_write_bit_buffer *wb) {
  const int type_to_literal[] = { 1, 0, 2, 3 };

  vp9_wb_write_bit(wb, type == SWITCHABLE);
  if (type != SWITCHABLE)
    vp9_wb_write_literal(wb, type_to_literal[type], 2);
}

static void fix_mcomp_filter_type(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;

  if (cm->mcomp_filter_type == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += cm->counts.switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->mcomp_filter_type = i;
          break;
        }
      }
    }
  }
}

static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vp9_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}

static int get_refresh_mask(VP9_COMP *cpi) {
  // Should the GF or ARF be updated using the transmitted frame or buffer
#if CONFIG_MULTIPLE_ARF
  if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
      !cpi->refresh_alt_ref_frame) {
#else
  if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame &&
      !cpi->use_svc) {
#endif
    // Preserve the previously existing golden frame and update the frame in
    // the alt ref slot instead. This is highly specific to the use of
    // alt-ref as a forward reference, and this needs to be generalized as
    // other uses are implemented (like RTC/temporal scaling)
    //
    // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but
    // that happens in vp9_onyx_if.c:update_reference_frames() so that it can
    // be done outside of the recode loop.
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
#if CONFIG_MULTIPLE_ARF
    // Determine which ARF buffer to use to encode this ARF frame.
    if (cpi->multi_arf_enabled) {
      int sn = cpi->sequence_number;
      arf_idx = (cpi->frame_coding_order[sn] < 0) ?
          cpi->arf_buffer_idx[sn + 1] :
          cpi->arf_buffer_idx[sn];
    }
#endif
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}

static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;

  int tile_row, tile_col;
  TOKENEXTRA *tok[4][1 << 6], *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  vpx_memset(cpi->above_seg_context, 0, sizeof(*cpi->above_seg_context) *
             mi_cols_aligned_to_sb(cm->mi_cols));

  tok[0][0] = cpi->tok;
  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    if (tile_row)
      tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
                         cpi->tok_count[tile_row - 1][tile_cols - 1];

    for (tile_col = 1; tile_col < tile_cols; tile_col++)
      tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
                                cpi->tok_count[tile_row][tile_col - 1];
  }

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, tile_col);
      tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end);
      assert(tok[tile_row][tile_col] == tok_end);
      vp9_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        write_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}

static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
  vp9_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
  }
}

static void write_frame_size(VP9_COMP *cpi,
                             struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_wb_write_literal(wb, cm->width - 1, 16);
  vp9_wb_write_literal(wb, cm->height - 1, 16);

  write_display_size(cpi, wb);
}

static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
                                      cpi->alt_fb_idx};
  int i, found = 0;

  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
    found = cm->width == cfg->y_crop_width &&
            cm->height == cfg->y_crop_height;

    // TODO(ivan): This prevents a bug while more than 3 buffers are used. Do it
    // in a better way.
    if (cpi->use_svc) {
      found = 0;
    }
    vp9_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vp9_wb_write_literal(wb, cm->width - 1, 16);
    vp9_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_display_size(cpi, wb);
}

static void write_sync_code(struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}

static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  // bitstream version.
  // 00 - profile 0. 4:2:0 only
  // 10 - profile 1. adds 4:4:4, 4:2:2, alpha
  vp9_wb_write_bit(wb, cm->version);
  vp9_wb_write_bit(wb, 0);

  vp9_wb_write_bit(wb, 0);
  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    const COLOR_SPACE cs = UNKNOWN;
    write_sync_code(wb);
    vp9_wb_write_literal(wb, cs, 3);
    if (cs != SRGB) {
      vp9_wb_write_bit(wb, 0);  // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
      if (cm->version == 1) {
        vp9_wb_write_bit(wb, cm->subsampling_x);
        vp9_wb_write_bit(wb, cm->subsampling_y);
        vp9_wb_write_bit(wb, 0);  // has extra plane
      }
    } else {
      assert(cm->version == 1);
      vp9_wb_write_bit(wb, 0);  // has extra plane
    }

    write_frame_size(cpi, wb);
  } else {
    const int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
                                              cpi->alt_fb_idx};
    if (!cm->show_frame)
      vp9_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vp9_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);

      vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
      write_frame_size(cpi, wb);
    } else {
      int i;
      vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
        vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2);
        vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]);
      }

      write_frame_size_with_refs(cpi, wb);

      vp9_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_mcomp_filter_type(cpi);
      write_interp_filter_type(cm->mcomp_filter_type, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cpi, wb);

  write_tile_info(cm, wb);
}

static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_writer header_bc;

  vp9_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cpi, &header_bc);

  update_coef_probs(cpi, &header_bc);

#ifdef ENTROPY_STATS
  active_section = 2;
#endif

  vp9_update_skip_probs(cpi, &header_bc);

  if (!frame_is_intra_only(cm)) {
    int i;
#ifdef ENTROPY_STATS
    active_section = 1;
#endif

    update_inter_mode_probs(cm, &header_bc);
    vp9_zero(cm->counts.inter_mode);

    if (cm->mcomp_filter_type == SWITCHABLE)
      update_switchable_interp_probs(cpi, &header_bc);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                cpi->intra_inter_count[i]);

    if (cm->allow_comp_inter_inter) {
      const int comp_pred_mode = cpi->common.comp_pred_mode;
      const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY;
      const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION;

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      cpi->comp_inter_count[i]);
      }
    }

    if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  cpi->single_ref_count[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  cpi->single_ref_count[i][1]);
      }
    }

    if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  cpi->comp_ref_count[i]);

    update_mbintra_mode_probs(cpi, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i) {
      unsigned int bct[PARTITION_TYPES - 1][2];
      update_mode(&header_bc, PARTITION_TYPES, vp9_partition_tree,
                  fc->partition_prob[i], bct,
                  (unsigned int *)cpi->partition_count[i]);
    }

    vp9_write_nmv_probs(cpi, cm->allow_high_precision_mv, &header_bc);
  }

  vp9_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}

void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
  uint8_t *data = dest;
  size_t first_part_size;
  struct vp9_write_bit_buffer wb = {data, 0};
  struct vp9_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  vp9_wb_write_literal(&wb, 0, 16);  // don't know in advance first part. size

  data += vp9_rb_bytes_written(&wb);

  vp9_compute_update_table();

#ifdef ENTROPY_STATS
  if (cm->frame_type == INTER_FRAME)
    active_section = 0;
  else
    active_section = 7;
#endif

  vp9_clear_system_state();  // __asm emms;

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  vp9_wb_write_literal(&saved_wb, first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}

#ifdef ENTROPY_STATS
static void print_tree_update_for_type(FILE *f,
                                       vp9_coeff_stats *tree_update_hist,
                                       int block_types, const char *header) {
  int i, j, k, l, m;

  fprintf(f, "const vp9_coeff_prob %s = {\n", header);
  for (i = 0; i < block_types; i++) {
    fprintf(f, " { \n");
    for (j = 0; j < REF_TYPES; j++) {
      fprintf(f, " { \n");
      for (k = 0; k < COEF_BANDS; k++) {
        fprintf(f, " {\n");
        for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
          fprintf(f, " {");
          for (m = 0; m < ENTROPY_NODES; m++) {
            fprintf(f, "%3d, ",
                    get_binary_prob(tree_update_hist[i][j][k][l][m][0],
                                    tree_update_hist[i][j][k][l][m][1]));
          }
          fprintf(f, "},\n");
        }
        fprintf(f, "},\n");
      }
      fprintf(f, " },\n");
    }
    fprintf(f, " },\n");
  }
  fprintf(f, "};\n");
}

void print_tree_update_probs() {
  FILE *f = fopen("coefupdprob.h", "w");
  fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");

  print_tree_update_for_type(f, tree_update_hist[TX_4X4], BLOCK_TYPES,
                             "vp9_coef_update_probs_4x4[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist[TX_8X8], BLOCK_TYPES,
                             "vp9_coef_update_probs_8x8[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist[TX_16X16], BLOCK_TYPES,
                             "vp9_coef_update_probs_16x16[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES,
                             "vp9_coef_update_probs_32x32[BLOCK_TYPES]");

  fclose(f);
  f = fopen("treeupdate.bin", "wb");
  fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
  fclose(f);
}
#endif