media/libvpx/vp8/common/reconinter.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Thu, 15 Jan 2015 15:59:08 +0100
branch       TOR_BUG_9701
changeset    10:ac0c01689b40
permissions  -rw-r--r--

Implement a real Private Browsing Mode condition by changing the API/ABI;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include <limits.h>
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif

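/* Copy a 16x16 pixel block from src to dst one row at a time. When the build
 * defines CONFIG_FAST_UNALIGNED, each row is moved as four 32-bit loads and
 * stores instead of sixteen byte copies.
 */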
void vp8_copy_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
        dst[10] = src[10];
        dst[11] = src[11];
        dst[12] = src[12];
        dst[13] = src[13];
        dst[14] = src[14];
        dst[15] = src[15];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
        ((uint32_t *)dst)[2] = ((uint32_t *)src)[2];
        ((uint32_t *)dst)[3] = ((uint32_t *)src)[3];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

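/* As above, for an 8x8 block: eight rows of eight pixels. */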
void vp8_copy_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

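/* As above, for an 8x4 block: four rows of eight pixels. */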
void vp8_copy_mem8x4_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 4; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

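/* Build the 4x4 inter prediction for block d into d->predictor, using rows of
 * 'pitch' bytes. The low three bits of each motion vector component select the
 * sub-pixel filter phase; a vector with no sub-pixel part is a plain copy.
 */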
void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *pred_ptr = d->predictor;
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        for (r = 0; r < 4; r++)
        {
            pred_ptr[0] = ptr[0];
            pred_ptr[1] = ptr[1];
            pred_ptr[2] = ptr[2];
            pred_ptr[3] = ptr[3];
            pred_ptr += pitch;
            ptr += pre_stride;
        }
    }
}

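/* Build an 8x8 prediction for a 2x2 group of 4x4 blocks that share the motion
 * vector of d, writing directly into the destination buffer.
 */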
static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
{
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride);
    }
}

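/* Build an 8x4 prediction for a horizontal pair of 4x4 blocks that share the
 * motion vector of d, writing directly into the destination buffer.
 */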
static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride)
{
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride);
    }
}

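/* Same as vp8_build_inter_predictors_b(), but writes into an arbitrary
 * destination buffer instead of d->predictor.
 */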
static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr;
    ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride);
    }
    else
    {
        for (r = 0; r < 4; r++)
        {
            dst[0] = ptr[0];
            dst[1] = ptr[1];
            dst[2] = ptr[2];
            dst[3] = ptr[3];
            dst += dst_stride;
            ptr += pre_stride;
        }
    }
}

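/* Build the two 8x8 chroma (U and V) predictions for a 16x16 macroblock into
 * x->predictor. The chroma motion vector is the luma vector halved, rounding
 * away from zero, and masked to whole pixels when the codec runs in
 * full-pixel-only mode.
 */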
/*encoder only*/
void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x)
{
    unsigned char *uptr, *vptr;
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];

    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int offset;
    int pre_stride = x->pre.uv_stride;

    /* calc uv motion vectors */
    mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1));
    mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1));
    mv_row /= 2;
    mv_col /= 2;
    mv_row &= x->fullpixel_mask;
    mv_col &= x->fullpixel_mask;

    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
    }
    else
    {
        vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8);
        vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8);
    }
}

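/* Build the chroma predictions for a split (per-block motion vector)
 * macroblock. Each chroma block vector is the average of the four covering
 * luma block vectors, rounded away from zero; neighbouring chroma blocks that
 * end up with the same vector are predicted together as an 8x4 block.
 */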
/*encoder only*/
void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
{
    int i, j;
    int pre_stride = x->pre.uv_stride;
    unsigned char *base_pre;

    /* build uv mvs */
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->block[yoffset  ].bmi.mv.as_mv.row
                   + x->block[yoffset+1].bmi.mv.as_mv.row
                   + x->block[yoffset+4].bmi.mv.as_mv.row
                   + x->block[yoffset+5].bmi.mv.as_mv.row;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->block[yoffset  ].bmi.mv.as_mv.col
                   + x->block[yoffset+1].bmi.mv.as_mv.col
                   + x->block[yoffset+4].bmi.mv.as_mv.col
                   + x->block[yoffset+5].bmi.mv.as_mv.col;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
        }
    }

    base_pre = x->pre.u_buffer;
    for (i = 16; i < 20; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
        }
    }

    base_pre = x->pre.v_buffer;
    for (i = 20; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict);
        }
    }
}

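/* Build the 16x16 luma prediction for a macroblock into dst_y. */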
/*encoder only*/
void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x,
                                         unsigned char *dst_y,
                                         int dst_ystride)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int pre_stride = x->pre.y_stride;

    ptr_base = x->pre.y_buffer;
    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7,
                                 dst_y, dst_ystride);
    }
    else
    {
        vp8_copy_mem16x16(ptr, pre_stride, dst_y,
                          dst_ystride);
    }
}

static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* If the MV points so far into the UMV border that no visible pixels
     * are used for reconstruction, the subpel part of the MV can be
     * discarded and the MV limited to 16 pixels with equivalent results.
     *
     * This limit kicks in at 19 pixels for the top and left edges, for
     * the 16 pixels plus 3 taps right of the central pixel when subpel
     * filtering. The bottom and right edges use 16 pixels plus 2 pixels
     * left of the central pixel when filtering.
     */
    if (mv->col < (xd->mb_to_left_edge - (19 << 3)))
        mv->col = xd->mb_to_left_edge - (16 << 3);
    else if (mv->col > xd->mb_to_right_edge + (18 << 3))
        mv->col = xd->mb_to_right_edge + (16 << 3);

    if (mv->row < (xd->mb_to_top_edge - (19 << 3)))
        mv->row = xd->mb_to_top_edge - (16 << 3);
    else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
        mv->row = xd->mb_to_bottom_edge + (16 << 3);
}

/* A version of the above function for chroma block MVs.*/
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    mv->col = (2*mv->col < (xd->mb_to_left_edge - (19 << 3))) ?
        (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col;
    mv->col = (2*mv->col > xd->mb_to_right_edge + (18 << 3)) ?
        (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col;

    mv->row = (2*mv->row < (xd->mb_to_top_edge - (19 << 3))) ?
        (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row;
    mv->row = (2*mv->row > xd->mb_to_bottom_edge + (18 << 3)) ?
        (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row;
}

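/* Build the 16x16 luma and both 8x8 chroma predictions for a macroblock that
 * uses a single motion vector, writing directly into the given destination
 * buffers. The vector is clamped to the extended border when required, and
 * the chroma vector is derived by halving with rounding away from zero.
 */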
void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
                                        unsigned char *dst_y,
                                        unsigned char *dst_u,
                                        unsigned char *dst_v,
                                        int dst_ystride,
                                        int dst_uvstride)
{
    int offset;
    unsigned char *ptr;
    unsigned char *uptr, *vptr;

    int_mv _16x16mv;

    unsigned char *ptr_base = x->pre.y_buffer;
    int pre_stride = x->pre.y_stride;

    _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;

    if (x->mode_info_context->mbmi.need_to_clamp_mvs)
    {
        clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
    }

    ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);

    if (_16x16mv.as_int & 0x00070007) /* sub-pixel offset in either MV component */
    {
        x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
    }
    else
    {
        vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
    }

    /* calc uv motion vectors */
    _16x16mv.as_mv.row += 1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1));
    _16x16mv.as_mv.col += 1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1));
    _16x16mv.as_mv.row /= 2;
    _16x16mv.as_mv.col /= 2;
    _16x16mv.as_mv.row &= x->fullpixel_mask;
    _16x16mv.as_mv.col &= x->fullpixel_mask;

    pre_stride >>= 1;
    offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

    if (_16x16mv.as_int & 0x00070007)
    {
        x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
        x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
    }
    else
    {
        vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
        vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
    }
}

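/* Build the prediction for a split (SPLITMV) macroblock from its per-block
 * motion vectors. With fewer than sixteen partitions the luma plane is
 * predicted as four 8x8 blocks; otherwise it is predicted in 4x4 pieces, with
 * pairs that share a vector handled as a single 8x4 block. Chroma is always
 * predicted from the block vectors computed in build_4x4uvmvs().
 */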
static void build_inter4x4_predictors_mb(MACROBLOCKD *x)
{
    int i;
    unsigned char *base_dst = x->dst.y_buffer;
    unsigned char *base_pre = x->pre.y_buffer;

    if (x->mode_info_context->mbmi.partitioning < 3)
    {
        BLOCKD *b;
        int dst_stride = x->dst.y_stride;

        x->block[ 0].bmi = x->mode_info_context->bmi[ 0];
        x->block[ 2].bmi = x->mode_info_context->bmi[ 2];
        x->block[ 8].bmi = x->mode_info_context->bmi[ 8];
        x->block[10].bmi = x->mode_info_context->bmi[10];
        if (x->mode_info_context->mbmi.need_to_clamp_mvs)
        {
            clamp_mv_to_umv_border(&x->block[ 0].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 2].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 8].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
        }

        b = &x->block[ 0];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[ 2];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[ 8];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
        b = &x->block[10];
        build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride);
    }
    else
    {
        for (i = 0; i < 16; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];
            int dst_stride = x->dst.y_stride;

            x->block[i+0].bmi = x->mode_info_context->bmi[i+0];
            x->block[i+1].bmi = x->mode_info_context->bmi[i+1];
            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
            {
                clamp_mv_to_umv_border(&x->block[i+0].bmi.mv.as_mv, x);
                clamp_mv_to_umv_border(&x->block[i+1].bmi.mv.as_mv, x);
            }

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
            else
            {
                build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
                build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            }
        }
    }

    base_dst = x->dst.u_buffer;
    base_pre = x->pre.u_buffer;
    for (i = 16; i < 20; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];
        int dst_stride = x->dst.uv_stride;

        /* Note: uv mvs already clamped in build_4x4uvmvs() */

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
        else
        {
            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
        }
    }

    base_dst = x->dst.v_buffer;
    base_pre = x->pre.v_buffer;
    for (i = 20; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];
        int dst_stride = x->dst.uv_stride;

        /* Note: uv mvs already clamped in build_4x4uvmvs() */

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride);
        else
        {
            build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
            build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict);
        }
    }
}

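/* Compute the chroma block motion vectors for a split macroblock by averaging
 * each 2x2 group of luma block vectors (rounding away from zero), clamping the
 * result to the extended border when required.
 */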
static void build_4x4uvmvs(MACROBLOCKD *x)
{
    int i, j;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;

            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
                clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);

            x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
        }
    }
}

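/* Top-level predictor build: use the single-vector 16x16 path for ordinary
 * inter macroblocks, or derive chroma vectors and build per-block predictions
 * for SPLITMV macroblocks.
 */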
void vp8_build_inter_predictors_mb(MACROBLOCKD *xd)
{
    if (xd->mode_info_context->mbmi.mode != SPLITMV)
    {
        vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                           xd->dst.u_buffer, xd->dst.v_buffer,
                                           xd->dst.y_stride, xd->dst.uv_stride);
    }
    else
    {
        build_4x4uvmvs(xd);
        build_inter4x4_predictors_mb(xd);
    }
}
