Thu, 22 Jan 2015 13:21:57 +0100
Incorporate the changes requested in the Mozilla review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "quantize.h"
#include "vp8/common/reconintra4x4.h"
#include "encodemb.h"
#include "vp8/common/invtrans.h"
#include "encodeintra.h"

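/* Encode one macroblock using DC intra prediction only: either a single
 * 16x16 DC-predicted luma block, or sixteen DC-predicted 4x4 blocks.
 * Returns the sum of squared residuals, which callers use as a rough
 * measure of intra prediction error for this macroblock.
 */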
int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
{

    int i;
    int intra_pred_var = 0;
    (void) cpi;

    if (use_dc_pred)
    {
        x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;
        x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
        x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;

        vp8_encode_intra16x16mby(x);

        vp8_inverse_transform_mby(&x->e_mbd);
    }
    else
    {
        for (i = 0; i < 16; i++)
        {
            x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
            vp8_encode_intra4x4block(x, i);
        }
    }

    intra_pred_var = vp8_get_mb_ss(x->src_diff);

    return intra_pred_var;
}

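/* Predict, transform, quantize and reconstruct a single 4x4 luma block.
 * The reconstruction is written directly into the destination frame
 * buffer; a DC-only inverse transform is used when at most the DC
 * coefficient is nonzero (eob <= 1).
 */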
void vp8_encode_intra4x4block(MACROBLOCK *x, int ib)
{
    BLOCKD *b = &x->e_mbd.block[ib];
    BLOCK *be = &x->block[ib];
    int dst_stride = x->e_mbd.dst.y_stride;
    unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
    unsigned char *Above = dst - dst_stride;
    unsigned char *yleft = dst - 1;
    unsigned char top_left = Above[-1];

    vp8_intra4x4_predict(Above, yleft, dst_stride, b->bmi.as_mode,
                         b->predictor, 16, top_left);

    vp8_subtract_b(be, b, 16);

    x->short_fdct4x4(be->src_diff, be->coeff, 32);

    x->quantize_b(be, b);

    if (*b->eob > 1)
    {
        vp8_short_idct4x4llm(b->dqcoeff, b->predictor, 16, dst, dst_stride);
    }
    else
    {
        vp8_dc_only_idct_add(b->dqcoeff[0], b->predictor, 16, dst, dst_stride);
    }
}

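/* Encode all sixteen 4x4 luma blocks of a macroblock. The above-right
 * edge pixels are copied down first so that blocks along the right edge
 * of the macroblock have valid top-right predictors.
 */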
void vp8_encode_intra4x4mby(MACROBLOCK *mb)
{
    int i;

    MACROBLOCKD *xd = &mb->e_mbd;
    intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);

    for (i = 0; i < 16; i++)
        vp8_encode_intra4x4block(mb, i);
}

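/* 16x16 luma intra encode: build the prediction in place in the
 * destination buffer, subtract it from the source, then forward
 * transform and quantize the residual, with optional trellis
 * coefficient optimization.
 */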
void vp8_encode_intra16x16mby(MACROBLOCK *x)
{
    BLOCK *b = &x->block[0];
    MACROBLOCKD *xd = &x->e_mbd;

    vp8_build_intra_predictors_mby_s(xd,
                                     xd->dst.y_buffer - xd->dst.y_stride,
                                     xd->dst.y_buffer - 1,
                                     xd->dst.y_stride,
                                     xd->dst.y_buffer,
                                     xd->dst.y_stride);

    vp8_subtract_mby(x->src_diff, *(b->base_src),
                     b->src_stride, xd->dst.y_buffer, xd->dst.y_stride);

    vp8_transform_intra_mby(x);

    vp8_quantize_mby(x);

    if (x->optimize)
        vp8_optimize_mby(x);
}

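/* Chroma (U and V) intra encode, following the same predict / subtract /
 * transform / quantize sequence as the 16x16 luma path.
 */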
void vp8_encode_intra16x16mbuv(MACROBLOCK *x)
{
    MACROBLOCKD *xd = &x->e_mbd;

    vp8_build_intra_predictors_mbuv_s(xd, xd->dst.u_buffer - xd->dst.uv_stride,
                                      xd->dst.v_buffer - xd->dst.uv_stride,
                                      xd->dst.u_buffer - 1,
                                      xd->dst.v_buffer - 1,
                                      xd->dst.uv_stride,
                                      xd->dst.u_buffer, xd->dst.v_buffer,
                                      xd->dst.uv_stride);

    vp8_subtract_mbuv(x->src_diff, x->src.u_buffer,
                      x->src.v_buffer, x->src.uv_stride, xd->dst.u_buffer,
                      xd->dst.v_buffer, xd->dst.uv_stride);

    vp8_transform_mbuv(x);

    vp8_quantize_mbuv(x);

    if (x->optimize)
        vp8_optimize_mbuv(x);
}