/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include <limits.h>
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif

void vp8_copy_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
        dst[10] = src[10];
        dst[11] = src[11];
        dst[12] = src[12];
        dst[13] = src[13];
        dst[14] = src[14];
        dst[15] = src[15];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
        ((uint32_t *)dst)[2] = ((uint32_t *)src)[2];
        ((uint32_t *)dst)[3] = ((uint32_t *)src)[3];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x4_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 4; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}
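
/* Illustrative sketch, not part of the original file: a minimal check of
 * vp8_copy_mem16x16_c on two flat buffers with matching strides. The helper
 * name is an assumption for demonstration only. Note that the
 * CONFIG_FAST_UNALIGNED path copies through uint32_t pointers, so it assumes
 * the platform (or the caller's buffers) can service 4-byte-wide accesses at
 * these addresses.
 */
#if 0 /* example only, not compiled */
static int example_copy_mem16x16(void)
{
    unsigned char src[16 * 16], dst[16 * 16];
    int i;

    for (i = 0; i < 16 * 16; i++)
        src[i] = (unsigned char)i;

    vp8_copy_mem16x16_c(src, 16, dst, 16);

    for (i = 0; i < 16 * 16; i++)
        if (dst[i] != src[i])
            return 0;   /* mismatch */

    return 1;           /* all 256 pixels copied */
}
#endif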

void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *pred_ptr = d->predictor;
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        for (r = 0; r < 4; r++)
        {
            pred_ptr[0] = ptr[0];
            pred_ptr[1] = ptr[1];
            pred_ptr[2] = ptr[2];
            pred_ptr[3] = ptr[3];
            pred_ptr += pitch;
            ptr += pre_stride;
        }
    }
}

static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
{
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride);
    }
}

static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
{
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride);
    }
}

static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        for (r = 0; r < 4; r++)
        {
            dst[0] = ptr[0];
            dst[1] = ptr[1];
            dst[2] = ptr[2];
            dst[3] = ptr[3];
            dst += dst_stride;
            ptr += pre_stride;
        }
    }
}
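
/* Illustrative sketch, not part of the original file: VP8 motion vectors
 * are in 1/8-pel units, so the predictors above split each component into
 * a full-pel offset (mv >> 3, an arithmetic shift that floors negative
 * values) and a subpel phase (mv & 7) handed to the six-tap filter.
 * Assuming two's-complement int:
 *
 *     mv = 21  ->  full pel +2, phase 5   (21 = 2*8 + 5)
 *     mv = -3  ->  full pel -1, phase 5   (-3 = -1*8 + 5)
 */
#if 0 /* example only, not compiled */
static void example_split_eighth_pel(int mv, int *full_pel, int *phase)
{
    *full_pel = mv >> 3; /* floors for negative mv */
    *phase    = mv & 7;  /* always in [0, 7] */
}
#endif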

/*encoder only*/
void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x)
{
    unsigned char *uptr, *vptr;
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];

    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int offset;
    int pre_stride = x->pre.uv_stride;

    /* calc uv motion vectors */
    mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1));
    mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1));
    mv_row /= 2;
    mv_col /= 2;
    mv_row &= x->fullpixel_mask;
    mv_col &= x->fullpixel_mask;

    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
    }
    else
    {
        vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8);
        vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8);
    }
}

/*encoder only*/
void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
{
    int i, j;
    int pre_stride = x->pre.uv_stride;
    unsigned char *base_pre;

    /* build uv mvs */
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->block[yoffset  ].bmi.mv.as_mv.row
                   + x->block[yoffset+1].bmi.mv.as_mv.row
                   + x->block[yoffset+4].bmi.mv.as_mv.row
                   + x->block[yoffset+5].bmi.mv.as_mv.row;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->block[yoffset  ].bmi.mv.as_mv.col
                   + x->block[yoffset+1].bmi.mv.as_mv.col
                   + x->block[yoffset+4].bmi.mv.as_mv.col
                   + x->block[yoffset+5].bmi.mv.as_mv.col;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
        }
    }

    base_pre = x->pre.u_buffer;
    for (i = 16; i < 20; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
        }
    }

    base_pre = x->pre.v_buffer;
    for (i = 20; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
        }
    }
}
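
/* Illustrative note, not part of the original file: the branchless rounding
 * used when averaging the four luma MVs above. The expression
 * temp >> (sizeof(temp) * CHAR_BIT - 1) is -1 for negative temp and 0
 * otherwise, so
 *
 *     temp += 4 + ((temp >> 31) * 8);    (shown for 32-bit int)
 *     temp /= 8;
 *
 * adds +4 before the truncating division for non-negative sums and -4 for
 * negative ones, i.e. it rounds to the nearest 1/8 pel with ties away from
 * zero:
 *
 *     temp =  12  ->  ( 12 + 4) / 8 =  2
 *     temp = -12  ->  (-12 - 4) / 8 = -2
 *     temp =  11  ->  ( 11 + 4) / 8 =  1
 *
 * The 1 | (mv >> 31) adjustment in vp8_build_inter16x16_predictors_mbuv is
 * the same idea for a division by 2.
 */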

/*encoder only*/
void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x,
                                         unsigned char *dst_y,
                                         int dst_ystride)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int pre_stride = x->pre.y_stride;

    ptr_base = x->pre.y_buffer;
    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7,
                                 dst_y, dst_ystride);
    }
    else
    {
        vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
    }
}

static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* If the MV points so far into the UMV border that no visible pixels
     * are used for reconstruction, the subpel part of the MV can be
     * discarded and the MV limited to 16 pixels with equivalent results.
     *
     * This limit kicks in at 19 pixels for the top and left edges, for
     * the 16 pixels plus 3 taps right of the central pixel when subpel
     * filtering. The bottom and right edges use 16 pixels plus 2 pixels
     * left of the central pixel when filtering.
     */
    if (mv->col < (xd->mb_to_left_edge - (19 << 3)))
        mv->col = xd->mb_to_left_edge - (16 << 3);
    else if (mv->col > xd->mb_to_right_edge + (18 << 3))
        mv->col = xd->mb_to_right_edge + (16 << 3);

    if (mv->row < (xd->mb_to_top_edge - (19 << 3)))
        mv->row = xd->mb_to_top_edge - (16 << 3);
    else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
        mv->row = xd->mb_to_bottom_edge + (16 << 3);
}

/* A version of the above function for chroma block MVs.*/
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    mv->col = (2*mv->col < (xd->mb_to_left_edge - (19 << 3))) ?
        (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col;
    mv->col = (2*mv->col > xd->mb_to_right_edge + (18 << 3)) ?
        (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col;

    mv->row = (2*mv->row < (xd->mb_to_top_edge - (19 << 3))) ?
        (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row;
    mv->row = (2*mv->row > xd->mb_to_bottom_edge + (18 << 3)) ?
        (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row;
}
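
/* Illustrative note, not part of the original file: chroma MVs are at half
 * the luma resolution, so clamp_uvmv_to_umv_border() doubles each component
 * (2*mv->col) to compare it against the luma-scaled edge distances, then
 * halves the clamped bound (... >> 1) when storing. Worked numbers, all in
 * 1/8-pel units, assuming mb_to_left_edge = -1280:
 *
 *     mv->col = -200  ->  -400 < -1280 - 152 is false, left unchanged
 *     mv->col = -800  -> -1600 < -1432 is true, snapped to
 *                        (-1280 - 128) >> 1 = -704
 */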

void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
                                        unsigned char *dst_y,
                                        unsigned char *dst_u,
                                        unsigned char *dst_v,
                                        int dst_ystride,
                                        int dst_uvstride)
{
    int offset;
    unsigned char *ptr;
    unsigned char *uptr, *vptr;

    int_mv _16x16mv;

    unsigned char *ptr_base = x->pre.y_buffer;
    int pre_stride = x->pre.y_stride;

    _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;

    if (x->mode_info_context->mbmi.need_to_clamp_mvs)
    {
        clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
    }

    ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);

    if (_16x16mv.as_int & 0x00070007)
    {
        x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
    }
    else
    {
        vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
    }

    /* calc uv motion vectors */
    _16x16mv.as_mv.row += 1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1));
    _16x16mv.as_mv.col += 1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1));
    _16x16mv.as_mv.row /= 2;
    _16x16mv.as_mv.col /= 2;
    _16x16mv.as_mv.row &= x->fullpixel_mask;
    _16x16mv.as_mv.col &= x->fullpixel_mask;

    pre_stride >>= 1;
    offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

    if (_16x16mv.as_int & 0x00070007)
    {
        x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
        x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
    }
    else
    {
        vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
        vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
    }
}
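
/* Illustrative note, not part of the original file: int_mv overlays the two
 * 16-bit MV components on a single 32-bit word, so
 *
 *     _16x16mv.as_int & 0x00070007
 *
 * tests the low three (subpel) bits of row and col at once; it is nonzero
 * exactly when either component needs the subpel filter, and zero when a
 * straight full-pel copy suffices.
 */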

static void build_inter4x4_predictors_mb(MACROBLOCKD *x)
{
    int i;
    unsigned char *base_dst = x->dst.y_buffer;
    unsigned char *base_pre = x->pre.y_buffer;

    if (x->mode_info_context->mbmi.partitioning < 3)
    {
        BLOCKD *b;
        int dst_stride = x->dst.y_stride;

        x->block[ 0].bmi = x->mode_info_context->bmi[ 0];
        x->block[ 2].bmi = x->mode_info_context->bmi[ 2];
        x->block[ 8].bmi = x->mode_info_context->bmi[ 8];
        x->block[10].bmi = x->mode_info_context->bmi[10];
        if (x->mode_info_context->mbmi.need_to_clamp_mvs)
        {
            clamp_mv_to_umv_border(&x->block[ 0].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 2].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 8].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
        }

        b = &x->block[ 0];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[ 2];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[ 8];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[10];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
    }
    else
    {
        for (i = 0; i < 16; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];
            int dst_stride = x->dst.y_stride;

            x->block[i+0].bmi = x->mode_info_context->bmi[i+0];
            x->block[i+1].bmi = x->mode_info_context->bmi[i+1];
            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
            {
                clamp_mv_to_umv_border(&x->block[i+0].bmi.mv.as_mv, x);
                clamp_mv_to_umv_border(&x->block[i+1].bmi.mv.as_mv, x);
            }

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
            else
            {
                build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
                build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            }
        }
    }

    base_dst = x->dst.u_buffer;
    base_pre = x->pre.u_buffer;
    for (i = 16; i < 20; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];
        int dst_stride = x->dst.uv_stride;

        /* Note: uv mvs already clamped in build_4x4uvmvs() */

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
        else
        {
            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
        }
    }

    base_dst = x->dst.v_buffer;
    base_pre = x->pre.v_buffer;
    for (i = 20; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];
        int dst_stride = x->dst.uv_stride;

        /* Note: uv mvs already clamped in build_4x4uvmvs() */

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
        else
        {
            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
        }
    }
}
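
/* Illustrative note, not part of the original file: partitioning values
 * 0..2 (the 16x8, 8x16 and 8x8 splits) give each 8x8 quadrant a single MV,
 * so only blocks 0, 2, 8 and 10 need consulting and the 8x8 predictor can
 * be used per quadrant. Partitioning 3 (4x4) carries an MV per block; the
 * 4x4 path and the chroma loops above still merge horizontally adjacent
 * block pairs into one 8x4 prediction whenever their MVs happen to match.
 */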

static
void build_4x4uvmvs(MACROBLOCKD *x)
{
    int i, j;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
                clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);

            x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
        }
    }
}

void vp8_build_inter_predictors_mb(MACROBLOCKD *xd)
{
    if (xd->mode_info_context->mbmi.mode != SPLITMV)
    {
        vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                           xd->dst.u_buffer, xd->dst.v_buffer,
                                           xd->dst.y_stride, xd->dst.uv_stride);
    }
    else
    {
        build_4x4uvmvs(xd);
        build_inter4x4_predictors_mb(xd);
    }
}
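
/* Illustrative usage sketch, not part of the original file: a decoder calls
 * vp8_build_inter_predictors_mb() once per inter-coded macroblock, after the
 * mode and MVs have been read, with xd->pre pointing into the chosen
 * reference frame and xd->dst into the frame being reconstructed. The buffer
 * setup shown here is an assumption for illustration only.
 */
#if 0 /* example only, not compiled */
static void example_predict_mb(MACROBLOCKD *xd,
                               YV12_BUFFER_CONFIG *ref,
                               YV12_BUFFER_CONFIG *recon)
{
    xd->pre = *ref;     /* reference planes and strides */
    xd->dst = *recon;   /* reconstruction target */
    /* ... per-MB buffer offsets and edge distances would be applied here ... */
    vp8_build_inter_predictors_mb(xd);
}
#endif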