media/libvpx/vp8/encoder/mcomp.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Thu, 15 Jan 2015 15:59:08 +0100
branch       TOR_BUG_9701
changeset    10 ac0c01689b40
permissions  -rw-r--r--

Implement a real Private Browsing Mode condition by changing the API/ABI;
this solves Tor bug #9701, complying with the disk avoidance requirements
documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

michael@0 1 /*
michael@0 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
michael@0 3 *
michael@0 4 * Use of this source code is governed by a BSD-style license
michael@0 5 * that can be found in the LICENSE file in the root of the source
michael@0 6 * tree. An additional intellectual property rights grant can be found
michael@0 7 * in the file PATENTS. All contributing project authors may
michael@0 8 * be found in the AUTHORS file in the root of the source tree.
michael@0 9 */
michael@0 10
michael@0 11
michael@0 12 #include "onyx_int.h"
michael@0 13 #include "mcomp.h"
michael@0 14 #include "vpx_mem/vpx_mem.h"
michael@0 15 #include "vpx_config.h"
michael@0 16 #include <stdio.h>
michael@0 17 #include <limits.h>
michael@0 18 #include <math.h>
michael@0 19 #include "vp8/common/findnearmv.h"
michael@0 20
michael@0 21 #ifdef VP8_ENTROPY_STATS
michael@0 22 static int mv_ref_ct [31] [4] [2];
michael@0 23 static int mv_mode_cts [4] [2];
michael@0 24 #endif
michael@0 25
michael@0 26 int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
michael@0 27 {
michael@0 28 /* MV costing is based on the distribution of vectors in the previous
michael@0 29 * frame and as such will tend to overstate the cost of vectors. In
michael@0 30 * addition, coding a new vector can have a knock-on effect on the cost
michael@0 31 * of subsequent vectors and the quality of prediction from NEAR and
michael@0 32 * NEAREST for subsequent blocks. The "Weight" parameter allows, to a
michael@0 33 * limited extent, for some account to be taken of these factors.
michael@0 34 */
michael@0 35 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
michael@0 36 }
michael@0 37
michael@0 38 static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
michael@0 39 {
michael@0 40 /* Ignore mv costing if mvcost is NULL */
michael@0 41 if (mvcost)
michael@0 42 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
michael@0 43 mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
michael@0 44 * error_per_bit + 128) >> 8;
michael@0 45 return 0;
michael@0 46 }
michael@0 47
michael@0 48 static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
michael@0 49 {
michael@0 50 /* Calculate sad error cost on full pixel basis. */
michael@0 51 /* Ignore mv costing if mvsadcost is NULL */
michael@0 52 if (mvsadcost)
michael@0 53 return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
michael@0 54 mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
michael@0 55 * error_per_bit + 128) >> 8;
michael@0 56 return 0;
michael@0 57 }
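/* Note (inferred from the code in this file, not from upstream docs):
 * motion-vector components are handled in 1/8-pel units here (full-pel
 * values are multiplied by 8 before costing), so the ">> 1" above indexes
 * the mvcost tables at 1/4-pel resolution, while mvsad_err_cost() takes
 * its row/col deltas directly in full-pel units. The ">> 7" and ">> 8"
 * normalize the Weight / error_per_bit scaling (divide by 128 and 256
 * respectively, the latter with rounding). */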
michael@0 58
michael@0 59 void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
michael@0 60 {
michael@0 61 int Len;
michael@0 62 int search_site_count = 0;
michael@0 63
michael@0 64
michael@0 65 /* Generate offsets for 4 search sites per step. */
michael@0 66 Len = MAX_FIRST_STEP;
michael@0 67 x->ss[search_site_count].mv.col = 0;
michael@0 68 x->ss[search_site_count].mv.row = 0;
michael@0 69 x->ss[search_site_count].offset = 0;
michael@0 70 search_site_count++;
michael@0 71
michael@0 72 while (Len > 0)
michael@0 73 {
michael@0 74
michael@0 75 /* Compute offsets for search sites. */
michael@0 76 x->ss[search_site_count].mv.col = 0;
michael@0 77 x->ss[search_site_count].mv.row = -Len;
michael@0 78 x->ss[search_site_count].offset = -Len * stride;
michael@0 79 search_site_count++;
michael@0 80
michael@0 81 /* Compute offsets for search sites. */
michael@0 82 x->ss[search_site_count].mv.col = 0;
michael@0 83 x->ss[search_site_count].mv.row = Len;
michael@0 84 x->ss[search_site_count].offset = Len * stride;
michael@0 85 search_site_count++;
michael@0 86
michael@0 87 /* Compute offsets for search sites. */
michael@0 88 x->ss[search_site_count].mv.col = -Len;
michael@0 89 x->ss[search_site_count].mv.row = 0;
michael@0 90 x->ss[search_site_count].offset = -Len;
michael@0 91 search_site_count++;
michael@0 92
michael@0 93 /* Compute offsets for search sites. */
michael@0 94 x->ss[search_site_count].mv.col = Len;
michael@0 95 x->ss[search_site_count].mv.row = 0;
michael@0 96 x->ss[search_site_count].offset = Len;
michael@0 97 search_site_count++;
michael@0 98
michael@0 99 /* Contract. */
michael@0 100 Len /= 2;
michael@0 101 }
michael@0 102
michael@0 103 x->ss_count = search_site_count;
michael@0 104 x->searches_per_step = 4;
michael@0 105 }
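/* Illustrative sizing only (assuming, for the sake of example, that
 * MAX_FIRST_STEP == 16): the loop above would run with Len = 16, 8, 4, 2, 1,
 * adding 4 sites per pass, giving ss_count = 1 (centre) + 5 * 4 = 21 and
 * searches_per_step = 4. MAX_FIRST_STEP itself is defined in the included
 * mcomp.h header. */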
michael@0 106
michael@0 107 void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
michael@0 108 {
michael@0 109 int Len;
michael@0 110 int search_site_count = 0;
michael@0 111
michael@0 112 /* Generate offsets for 8 search sites per step. */
michael@0 113 Len = MAX_FIRST_STEP;
michael@0 114 x->ss[search_site_count].mv.col = 0;
michael@0 115 x->ss[search_site_count].mv.row = 0;
michael@0 116 x->ss[search_site_count].offset = 0;
michael@0 117 search_site_count++;
michael@0 118
michael@0 119 while (Len > 0)
michael@0 120 {
michael@0 121
michael@0 122 /* Compute offsets for search sites. */
michael@0 123 x->ss[search_site_count].mv.col = 0;
michael@0 124 x->ss[search_site_count].mv.row = -Len;
michael@0 125 x->ss[search_site_count].offset = -Len * stride;
michael@0 126 search_site_count++;
michael@0 127
michael@0 128 /* Compute offsets for search sites. */
michael@0 129 x->ss[search_site_count].mv.col = 0;
michael@0 130 x->ss[search_site_count].mv.row = Len;
michael@0 131 x->ss[search_site_count].offset = Len * stride;
michael@0 132 search_site_count++;
michael@0 133
michael@0 134 /* Compute offsets for search sites. */
michael@0 135 x->ss[search_site_count].mv.col = -Len;
michael@0 136 x->ss[search_site_count].mv.row = 0;
michael@0 137 x->ss[search_site_count].offset = -Len;
michael@0 138 search_site_count++;
michael@0 139
michael@0 140 /* Compute offsets for search sites. */
michael@0 141 x->ss[search_site_count].mv.col = Len;
michael@0 142 x->ss[search_site_count].mv.row = 0;
michael@0 143 x->ss[search_site_count].offset = Len;
michael@0 144 search_site_count++;
michael@0 145
michael@0 146 /* Compute offsets for search sites. */
michael@0 147 x->ss[search_site_count].mv.col = -Len;
michael@0 148 x->ss[search_site_count].mv.row = -Len;
michael@0 149 x->ss[search_site_count].offset = -Len * stride - Len;
michael@0 150 search_site_count++;
michael@0 151
michael@0 152 /* Compute offsets for search sites. */
michael@0 153 x->ss[search_site_count].mv.col = Len;
michael@0 154 x->ss[search_site_count].mv.row = -Len;
michael@0 155 x->ss[search_site_count].offset = -Len * stride + Len;
michael@0 156 search_site_count++;
michael@0 157
michael@0 158 /* Compute offsets for search sites. */
michael@0 159 x->ss[search_site_count].mv.col = -Len;
michael@0 160 x->ss[search_site_count].mv.row = Len;
michael@0 161 x->ss[search_site_count].offset = Len * stride - Len;
michael@0 162 search_site_count++;
michael@0 163
michael@0 164 /* Compute offsets for search sites. */
michael@0 165 x->ss[search_site_count].mv.col = Len;
michael@0 166 x->ss[search_site_count].mv.row = Len;
michael@0 167 x->ss[search_site_count].offset = Len * stride + Len;
michael@0 168 search_site_count++;
michael@0 169
michael@0 170
michael@0 171 /* Contract. */
michael@0 172 Len /= 2;
michael@0 173 }
michael@0 174
michael@0 175 x->ss_count = search_site_count;
michael@0 176 x->searches_per_step = 8;
michael@0 177 }
michael@0 178
michael@0 179 /*
michael@0 180 * To avoid the penalty of cache-line-crossing reads, preload the reference
michael@0 181 * area into a small buffer that is aligned so reads from it never cross a
michael@0 182 * cache line. This reduces the CPU cycles spent reading reference data in
michael@0 183 * the sub-pixel filter functions.
michael@0 184 * TODO: Currently, since the sub-pixel search range here is -3 ~ 3, copy a
michael@0 185 * 22-row x 32-col area, which is enough for a 16x16 macroblock. Later, for
michael@0 186 * SPLITMV, we could reduce the area.
michael@0 187 */
michael@0 188
michael@0 189 /* estimated cost of a motion vector (r,c) */
michael@0 190 #define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 : 0)
michael@0 191 /* pointer to predictor base of a motionvector */
michael@0 192 #define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset)))
michael@0 193 /* convert motion vector component to offset for svf calc */
michael@0 194 #define SP(x) (((x)&3)<<1)
michael@0 195 /* returns subpixel variance error function. */
michael@0 196 #define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse)
michael@0 197 #define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
michael@0 198 /* returns distortion + motion vector cost */
michael@0 199 #define ERR(r,c) (MVC(r,c)+DIST(r,c))
michael@0 200 /* checks if (r,c) has better score than previous best */
michael@0 201 #define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=UINT_MAX;)
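/* Note on units in the macros above (inferred from their use in the function
 * below): r, c, br, bc and the min/max limits are in 1/4-pel units. PRE()
 * converts a 1/4-pel position back to a full-pel pointer with ">> 2", SP()
 * extracts the sub-pel phase ((x & 3) << 1) expected by the svf() sub-pixel
 * variance function, and CHECK_BETTER() only evaluates a candidate when
 * IFMVCV() confirms it lies inside the clamped search range. */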
michael@0 202
michael@0 203 int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
michael@0 204 int_mv *bestmv, int_mv *ref_mv,
michael@0 205 int error_per_bit,
michael@0 206 const vp8_variance_fn_ptr_t *vfp,
michael@0 207 int *mvcost[2], int *distortion,
michael@0 208 unsigned int *sse1)
michael@0 209 {
michael@0 210 unsigned char *z = (*(b->base_src) + b->src);
michael@0 211
michael@0 212 int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
michael@0 213 int br = bestmv->as_mv.row * 4, bc = bestmv->as_mv.col * 4;
michael@0 214 int tr = br, tc = bc;
michael@0 215 unsigned int besterr;
michael@0 216 unsigned int left, right, up, down, diag;
michael@0 217 unsigned int sse;
michael@0 218 unsigned int whichdir;
michael@0 219 unsigned int halfiters = 4;
michael@0 220 unsigned int quarteriters = 4;
michael@0 221 int thismse;
michael@0 222
michael@0 223 int minc = MAX(x->mv_col_min * 4,
michael@0 224 (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
michael@0 225 int maxc = MIN(x->mv_col_max * 4,
michael@0 226 (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
michael@0 227 int minr = MAX(x->mv_row_min * 4,
michael@0 228 (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
michael@0 229 int maxr = MIN(x->mv_row_max * 4,
michael@0 230 (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
michael@0 231
michael@0 232 int y_stride;
michael@0 233 int offset;
michael@0 234 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 235 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 236
michael@0 237
michael@0 238 #if ARCH_X86 || ARCH_X86_64
michael@0 239 MACROBLOCKD *xd = &x->e_mbd;
michael@0 240 unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
michael@0 241 unsigned char *y;
michael@0 242 int buf_r1, buf_r2, buf_c1;
michael@0 243
michael@0 244 /* Clamping to avoid out-of-range data access */
michael@0 245 buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3;
michael@0 246 buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3;
michael@0 247 buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3;
michael@0 248 y_stride = 32;
michael@0 249
michael@0 250 /* Copy to intermediate buffer before searching. */
michael@0 251 vfp->copymem(y_0 - buf_c1 - pre_stride*buf_r1, pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
michael@0 252 y = xd->y_buf + y_stride*buf_r1 +buf_c1;
michael@0 253 #else
michael@0 254 unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
michael@0 255 y_stride = pre_stride;
michael@0 256 #endif
michael@0 257
michael@0 258 offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
michael@0 259
michael@0 260 /* central mv */
michael@0 261 bestmv->as_mv.row *= 8;
michael@0 262 bestmv->as_mv.col *= 8;
michael@0 263
michael@0 264 /* calculate central point error */
michael@0 265 besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
michael@0 266 *distortion = besterr;
michael@0 267 besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
michael@0 268
michael@0 269 /* TODO: Each subsequent iteration checks at least one point in common
michael@0 270 * with the last iteration (it could be two, if the diagonal was selected).
michael@0 271 */
michael@0 272 while (--halfiters)
michael@0 273 {
michael@0 274 /* 1/2 pel */
michael@0 275 CHECK_BETTER(left, tr, tc - 2);
michael@0 276 CHECK_BETTER(right, tr, tc + 2);
michael@0 277 CHECK_BETTER(up, tr - 2, tc);
michael@0 278 CHECK_BETTER(down, tr + 2, tc);
michael@0 279
michael@0 280 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
michael@0 281
michael@0 282 switch (whichdir)
michael@0 283 {
michael@0 284 case 0:
michael@0 285 CHECK_BETTER(diag, tr - 2, tc - 2);
michael@0 286 break;
michael@0 287 case 1:
michael@0 288 CHECK_BETTER(diag, tr - 2, tc + 2);
michael@0 289 break;
michael@0 290 case 2:
michael@0 291 CHECK_BETTER(diag, tr + 2, tc - 2);
michael@0 292 break;
michael@0 293 case 3:
michael@0 294 CHECK_BETTER(diag, tr + 2, tc + 2);
michael@0 295 break;
michael@0 296 }
michael@0 297
michael@0 298 /* no reason to check the same one again. */
michael@0 299 if (tr == br && tc == bc)
michael@0 300 break;
michael@0 301
michael@0 302 tr = br;
michael@0 303 tc = bc;
michael@0 304 }
michael@0 305
michael@0 306 /* TODO: Each subsequent iteration checks at least one point in common
michael@0 307 * with the last iteration (it could be two, if the diagonal was selected).
michael@0 308 */
michael@0 309
michael@0 310 /* 1/4 pel */
michael@0 311 while (--quarteriters)
michael@0 312 {
michael@0 313 CHECK_BETTER(left, tr, tc - 1);
michael@0 314 CHECK_BETTER(right, tr, tc + 1);
michael@0 315 CHECK_BETTER(up, tr - 1, tc);
michael@0 316 CHECK_BETTER(down, tr + 1, tc);
michael@0 317
michael@0 318 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
michael@0 319
michael@0 320 switch (whichdir)
michael@0 321 {
michael@0 322 case 0:
michael@0 323 CHECK_BETTER(diag, tr - 1, tc - 1);
michael@0 324 break;
michael@0 325 case 1:
michael@0 326 CHECK_BETTER(diag, tr - 1, tc + 1);
michael@0 327 break;
michael@0 328 case 2:
michael@0 329 CHECK_BETTER(diag, tr + 1, tc - 1);
michael@0 330 break;
michael@0 331 case 3:
michael@0 332 CHECK_BETTER(diag, tr + 1, tc + 1);
michael@0 333 break;
michael@0 334 }
michael@0 335
michael@0 336 /* no reason to check the same one again. */
michael@0 337 if (tr == br && tc == bc)
michael@0 338 break;
michael@0 339
michael@0 340 tr = br;
michael@0 341 tc = bc;
michael@0 342 }
michael@0 343
michael@0 344 bestmv->as_mv.row = br * 2;
michael@0 345 bestmv->as_mv.col = bc * 2;
michael@0 346
michael@0 347 if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL<<3)) ||
michael@0 348 (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL<<3)))
michael@0 349 return INT_MAX;
michael@0 350
michael@0 351 return besterr;
michael@0 352 }
michael@0 353 #undef MVC
michael@0 354 #undef PRE
michael@0 355 #undef SP
michael@0 356 #undef DIST
michael@0 357 #undef IFMVCV
michael@0 358 #undef ERR
michael@0 359 #undef CHECK_BETTER
michael@0 360
michael@0 361 int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
michael@0 362 int_mv *bestmv, int_mv *ref_mv,
michael@0 363 int error_per_bit,
michael@0 364 const vp8_variance_fn_ptr_t *vfp,
michael@0 365 int *mvcost[2], int *distortion,
michael@0 366 unsigned int *sse1)
michael@0 367 {
michael@0 368 int bestmse = INT_MAX;
michael@0 369 int_mv startmv;
michael@0 370 int_mv this_mv;
michael@0 371 unsigned char *z = (*(b->base_src) + b->src);
michael@0 372 int left, right, up, down, diag;
michael@0 373 unsigned int sse;
michael@0 374 int whichdir ;
michael@0 375 int thismse;
michael@0 376 int y_stride;
michael@0 377 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 378 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 379
michael@0 380 #if ARCH_X86 || ARCH_X86_64
michael@0 381 MACROBLOCKD *xd = &x->e_mbd;
michael@0 382 unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
michael@0 383 unsigned char *y;
michael@0 384
michael@0 385 y_stride = 32;
michael@0 386 /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
michael@0 387 vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
michael@0 388 y = xd->y_buf + y_stride + 1;
michael@0 389 #else
michael@0 390 unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
michael@0 391 y_stride = pre_stride;
michael@0 392 #endif
michael@0 393
michael@0 394 /* central mv */
michael@0 395 bestmv->as_mv.row <<= 3;
michael@0 396 bestmv->as_mv.col <<= 3;
michael@0 397 startmv = *bestmv;
michael@0 398
michael@0 399 /* calculate central point error */
michael@0 400 bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
michael@0 401 *distortion = bestmse;
michael@0 402 bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
michael@0 403
michael@0 404 /* go left then right and check error */
michael@0 405 this_mv.as_mv.row = startmv.as_mv.row;
michael@0 406 this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
michael@0 407 thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
michael@0 408 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 409
michael@0 410 if (left < bestmse)
michael@0 411 {
michael@0 412 *bestmv = this_mv;
michael@0 413 bestmse = left;
michael@0 414 *distortion = thismse;
michael@0 415 *sse1 = sse;
michael@0 416 }
michael@0 417
michael@0 418 this_mv.as_mv.col += 8;
michael@0 419 thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
michael@0 420 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 421
michael@0 422 if (right < bestmse)
michael@0 423 {
michael@0 424 *bestmv = this_mv;
michael@0 425 bestmse = right;
michael@0 426 *distortion = thismse;
michael@0 427 *sse1 = sse;
michael@0 428 }
michael@0 429
michael@0 430 /* go up then down and check error */
michael@0 431 this_mv.as_mv.col = startmv.as_mv.col;
michael@0 432 this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
michael@0 433 thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
michael@0 434 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 435
michael@0 436 if (up < bestmse)
michael@0 437 {
michael@0 438 *bestmv = this_mv;
michael@0 439 bestmse = up;
michael@0 440 *distortion = thismse;
michael@0 441 *sse1 = sse;
michael@0 442 }
michael@0 443
michael@0 444 this_mv.as_mv.row += 8;
michael@0 445 thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
michael@0 446 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 447
michael@0 448 if (down < bestmse)
michael@0 449 {
michael@0 450 *bestmv = this_mv;
michael@0 451 bestmse = down;
michael@0 452 *distortion = thismse;
michael@0 453 *sse1 = sse;
michael@0 454 }
michael@0 455
michael@0 456
michael@0 457 /* now check 1 more diagonal */
michael@0 458 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
michael@0 459 this_mv = startmv;
michael@0 460
michael@0 461 switch (whichdir)
michael@0 462 {
michael@0 463 case 0:
michael@0 464 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
michael@0 465 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
michael@0 466 thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
michael@0 467 break;
michael@0 468 case 1:
michael@0 469 this_mv.as_mv.col += 4;
michael@0 470 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
michael@0 471 thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
michael@0 472 break;
michael@0 473 case 2:
michael@0 474 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
michael@0 475 this_mv.as_mv.row += 4;
michael@0 476 thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
michael@0 477 break;
michael@0 478 case 3:
michael@0 479 default:
michael@0 480 this_mv.as_mv.col += 4;
michael@0 481 this_mv.as_mv.row += 4;
michael@0 482 thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
michael@0 483 break;
michael@0 484 }
michael@0 485
michael@0 486 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 487
michael@0 488 if (diag < bestmse)
michael@0 489 {
michael@0 490 *bestmv = this_mv;
michael@0 491 bestmse = diag;
michael@0 492 *distortion = thismse;
michael@0 493 *sse1 = sse;
michael@0 494 }
michael@0 495
michael@0 496
michael@0 497 /* time to check quarter pels. */
michael@0 498 if (bestmv->as_mv.row < startmv.as_mv.row)
michael@0 499 y -= y_stride;
michael@0 500
michael@0 501 if (bestmv->as_mv.col < startmv.as_mv.col)
michael@0 502 y--;
michael@0 503
michael@0 504 startmv = *bestmv;
michael@0 505
michael@0 506
michael@0 507
michael@0 508 /* go left then right and check error */
michael@0 509 this_mv.as_mv.row = startmv.as_mv.row;
michael@0 510
michael@0 511 if (startmv.as_mv.col & 7)
michael@0 512 {
michael@0 513 this_mv.as_mv.col = startmv.as_mv.col - 2;
michael@0 514 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 515 }
michael@0 516 else
michael@0 517 {
michael@0 518 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
michael@0 519 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 520 }
michael@0 521
michael@0 522 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 523
michael@0 524 if (left < bestmse)
michael@0 525 {
michael@0 526 *bestmv = this_mv;
michael@0 527 bestmse = left;
michael@0 528 *distortion = thismse;
michael@0 529 *sse1 = sse;
michael@0 530 }
michael@0 531
michael@0 532 this_mv.as_mv.col += 4;
michael@0 533 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 534 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 535
michael@0 536 if (right < bestmse)
michael@0 537 {
michael@0 538 *bestmv = this_mv;
michael@0 539 bestmse = right;
michael@0 540 *distortion = thismse;
michael@0 541 *sse1 = sse;
michael@0 542 }
michael@0 543
michael@0 544 /* go up then down and check error */
michael@0 545 this_mv.as_mv.col = startmv.as_mv.col;
michael@0 546
michael@0 547 if (startmv.as_mv.row & 7)
michael@0 548 {
michael@0 549 this_mv.as_mv.row = startmv.as_mv.row - 2;
michael@0 550 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 551 }
michael@0 552 else
michael@0 553 {
michael@0 554 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
michael@0 555 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
michael@0 556 }
michael@0 557
michael@0 558 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 559
michael@0 560 if (up < bestmse)
michael@0 561 {
michael@0 562 *bestmv = this_mv;
michael@0 563 bestmse = up;
michael@0 564 *distortion = thismse;
michael@0 565 *sse1 = sse;
michael@0 566 }
michael@0 567
michael@0 568 this_mv.as_mv.row += 4;
michael@0 569 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 570 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 571
michael@0 572 if (down < bestmse)
michael@0 573 {
michael@0 574 *bestmv = this_mv;
michael@0 575 bestmse = down;
michael@0 576 *distortion = thismse;
michael@0 577 *sse1 = sse;
michael@0 578 }
michael@0 579
michael@0 580
michael@0 581 /* now check 1 more diagonal */
michael@0 582 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
michael@0 583
michael@0 584 this_mv = startmv;
michael@0 585
michael@0 586 switch (whichdir)
michael@0 587 {
michael@0 588 case 0:
michael@0 589
michael@0 590 if (startmv.as_mv.row & 7)
michael@0 591 {
michael@0 592 this_mv.as_mv.row -= 2;
michael@0 593
michael@0 594 if (startmv.as_mv.col & 7)
michael@0 595 {
michael@0 596 this_mv.as_mv.col -= 2;
michael@0 597 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 598 }
michael@0 599 else
michael@0 600 {
michael@0 601 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
michael@0 602 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 603 }
michael@0 604 }
michael@0 605 else
michael@0 606 {
michael@0 607 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
michael@0 608
michael@0 609 if (startmv.as_mv.col & 7)
michael@0 610 {
michael@0 611 this_mv.as_mv.col -= 2;
michael@0 612 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
michael@0 613 }
michael@0 614 else
michael@0 615 {
michael@0 616 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
michael@0 617 thismse = vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride, &sse);
michael@0 618 }
michael@0 619 }
michael@0 620
michael@0 621 break;
michael@0 622 case 1:
michael@0 623 this_mv.as_mv.col += 2;
michael@0 624
michael@0 625 if (startmv.as_mv.row & 7)
michael@0 626 {
michael@0 627 this_mv.as_mv.row -= 2;
michael@0 628 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 629 }
michael@0 630 else
michael@0 631 {
michael@0 632 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
michael@0 633 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
michael@0 634 }
michael@0 635
michael@0 636 break;
michael@0 637 case 2:
michael@0 638 this_mv.as_mv.row += 2;
michael@0 639
michael@0 640 if (startmv.as_mv.col & 7)
michael@0 641 {
michael@0 642 this_mv.as_mv.col -= 2;
michael@0 643 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 644 }
michael@0 645 else
michael@0 646 {
michael@0 647 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
michael@0 648 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 649 }
michael@0 650
michael@0 651 break;
michael@0 652 case 3:
michael@0 653 this_mv.as_mv.col += 2;
michael@0 654 this_mv.as_mv.row += 2;
michael@0 655 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
michael@0 656 break;
michael@0 657 }
michael@0 658
michael@0 659 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 660
michael@0 661 if (diag < bestmse)
michael@0 662 {
michael@0 663 *bestmv = this_mv;
michael@0 664 bestmse = diag;
michael@0 665 *distortion = thismse;
michael@0 666 *sse1 = sse;
michael@0 667 }
michael@0 668
michael@0 669 return bestmse;
michael@0 670 }
michael@0 671
michael@0 672 int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
michael@0 673 int_mv *bestmv, int_mv *ref_mv,
michael@0 674 int error_per_bit,
michael@0 675 const vp8_variance_fn_ptr_t *vfp,
michael@0 676 int *mvcost[2], int *distortion,
michael@0 677 unsigned int *sse1)
michael@0 678 {
michael@0 679 int bestmse = INT_MAX;
michael@0 680 int_mv startmv;
michael@0 681 int_mv this_mv;
michael@0 682 unsigned char *z = (*(b->base_src) + b->src);
michael@0 683 int left, right, up, down, diag;
michael@0 684 unsigned int sse;
michael@0 685 int whichdir ;
michael@0 686 int thismse;
michael@0 687 int y_stride;
michael@0 688 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 689 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 690
michael@0 691 #if ARCH_X86 || ARCH_X86_64
michael@0 692 MACROBLOCKD *xd = &x->e_mbd;
michael@0 693 unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
michael@0 694 unsigned char *y;
michael@0 695
michael@0 696 y_stride = 32;
michael@0 697 /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
michael@0 698 vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
michael@0 699 y = xd->y_buf + y_stride + 1;
michael@0 700 #else
michael@0 701 unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
michael@0 702 y_stride = pre_stride;
michael@0 703 #endif
michael@0 704
michael@0 705 /* central mv */
michael@0 706 bestmv->as_mv.row *= 8;
michael@0 707 bestmv->as_mv.col *= 8;
michael@0 708 startmv = *bestmv;
michael@0 709
michael@0 710 /* calculate central point error */
michael@0 711 bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
michael@0 712 *distortion = bestmse;
michael@0 713 bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
michael@0 714
michael@0 715 /* go left then right and check error */
michael@0 716 this_mv.as_mv.row = startmv.as_mv.row;
michael@0 717 this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
michael@0 718 thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
michael@0 719 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 720
michael@0 721 if (left < bestmse)
michael@0 722 {
michael@0 723 *bestmv = this_mv;
michael@0 724 bestmse = left;
michael@0 725 *distortion = thismse;
michael@0 726 *sse1 = sse;
michael@0 727 }
michael@0 728
michael@0 729 this_mv.as_mv.col += 8;
michael@0 730 thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
michael@0 731 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 732
michael@0 733 if (right < bestmse)
michael@0 734 {
michael@0 735 *bestmv = this_mv;
michael@0 736 bestmse = right;
michael@0 737 *distortion = thismse;
michael@0 738 *sse1 = sse;
michael@0 739 }
michael@0 740
michael@0 741 /* go up then down and check error */
michael@0 742 this_mv.as_mv.col = startmv.as_mv.col;
michael@0 743 this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
michael@0 744 thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
michael@0 745 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 746
michael@0 747 if (up < bestmse)
michael@0 748 {
michael@0 749 *bestmv = this_mv;
michael@0 750 bestmse = up;
michael@0 751 *distortion = thismse;
michael@0 752 *sse1 = sse;
michael@0 753 }
michael@0 754
michael@0 755 this_mv.as_mv.row += 8;
michael@0 756 thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
michael@0 757 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 758
michael@0 759 if (down < bestmse)
michael@0 760 {
michael@0 761 *bestmv = this_mv;
michael@0 762 bestmse = down;
michael@0 763 *distortion = thismse;
michael@0 764 *sse1 = sse;
michael@0 765 }
michael@0 766
michael@0 767 /* now check 1 more diagonal - */
michael@0 768 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
michael@0 769 this_mv = startmv;
michael@0 770
michael@0 771 switch (whichdir)
michael@0 772 {
michael@0 773 case 0:
michael@0 774 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
michael@0 775 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
michael@0 776 thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
michael@0 777 break;
michael@0 778 case 1:
michael@0 779 this_mv.as_mv.col += 4;
michael@0 780 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
michael@0 781 thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
michael@0 782 break;
michael@0 783 case 2:
michael@0 784 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
michael@0 785 this_mv.as_mv.row += 4;
michael@0 786 thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
michael@0 787 break;
michael@0 788 case 3:
michael@0 789 default:
michael@0 790 this_mv.as_mv.col += 4;
michael@0 791 this_mv.as_mv.row += 4;
michael@0 792 thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
michael@0 793 break;
michael@0 794 }
michael@0 795
michael@0 796 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
michael@0 797
michael@0 798 if (diag < bestmse)
michael@0 799 {
michael@0 800 *bestmv = this_mv;
michael@0 801 bestmse = diag;
michael@0 802 *distortion = thismse;
michael@0 803 *sse1 = sse;
michael@0 804 }
michael@0 805
michael@0 806 return bestmse;
michael@0 807 }
michael@0 808
michael@0 809 #define CHECK_BOUNDS(range) \
michael@0 810 {\
michael@0 811 all_in = 1;\
michael@0 812 all_in &= ((br-range) >= x->mv_row_min);\
michael@0 813 all_in &= ((br+range) <= x->mv_row_max);\
michael@0 814 all_in &= ((bc-range) >= x->mv_col_min);\
michael@0 815 all_in &= ((bc+range) <= x->mv_col_max);\
michael@0 816 }
michael@0 817
michael@0 818 #define CHECK_POINT \
michael@0 819 {\
michael@0 820 if (this_mv.as_mv.col < x->mv_col_min) continue;\
michael@0 821 if (this_mv.as_mv.col > x->mv_col_max) continue;\
michael@0 822 if (this_mv.as_mv.row < x->mv_row_min) continue;\
michael@0 823 if (this_mv.as_mv.row > x->mv_row_max) continue;\
michael@0 824 }
michael@0 825
michael@0 826 #define CHECK_BETTER \
michael@0 827 {\
michael@0 828 if (thissad < bestsad)\
michael@0 829 {\
michael@0 830 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\
michael@0 831 if (thissad < bestsad)\
michael@0 832 {\
michael@0 833 bestsad = thissad;\
michael@0 834 best_site = i;\
michael@0 835 }\
michael@0 836 }\
michael@0 837 }
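/* Note (inferred from their use in vp8_hex_search below): CHECK_BOUNDS(range)
 * sets all_in when every candidate within +/-range of the current best
 * (br, bc) lies inside the MV limits, allowing the per-point CHECK_POINT
 * clamping to be skipped; CHECK_BETTER only accepts a candidate when its SAD
 * plus motion-vector rate cost improves on the current best. */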
michael@0 838
michael@0 839 static const MV next_chkpts[6][3] =
michael@0 840 {
michael@0 841 {{ -2, 0}, { -1, -2}, {1, -2}},
michael@0 842 {{ -1, -2}, {1, -2}, {2, 0}},
michael@0 843 {{1, -2}, {2, 0}, {1, 2}},
michael@0 844 {{2, 0}, {1, 2}, { -1, 2}},
michael@0 845 {{1, 2}, { -1, 2}, { -2, 0}},
michael@0 846 {{ -1, 2}, { -2, 0}, { -1, -2}}
michael@0 847 };
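/* Note (an inference from the "k += 5 + best_site" update in the search
 * loop): next_chkpts[k] lists, for each of the six hex directions k, the
 * three new neighbors that must be examined after the search centre moves
 * one hex step in direction k; the remaining points of the new hexagon were
 * already covered around the previous centre. */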
michael@0 848
michael@0 849 int vp8_hex_search
michael@0 850 (
michael@0 851 MACROBLOCK *x,
michael@0 852 BLOCK *b,
michael@0 853 BLOCKD *d,
michael@0 854 int_mv *ref_mv,
michael@0 855 int_mv *best_mv,
michael@0 856 int search_param,
michael@0 857 int sad_per_bit,
michael@0 858 const vp8_variance_fn_ptr_t *vfp,
michael@0 859 int *mvsadcost[2],
michael@0 860 int *mvcost[2],
michael@0 861 int_mv *center_mv
michael@0 862 )
michael@0 863 {
michael@0 864 MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
michael@0 865 MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}} ;
michael@0 866 int i, j;
michael@0 867
michael@0 868 unsigned char *what = (*(b->base_src) + b->src);
michael@0 869 int what_stride = b->src_stride;
michael@0 870 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 871 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 872
michael@0 873 int in_what_stride = pre_stride;
michael@0 874 int br, bc;
michael@0 875 int_mv this_mv;
michael@0 876 unsigned int bestsad;
michael@0 877 unsigned int thissad;
michael@0 878 unsigned char *base_offset;
michael@0 879 unsigned char *this_offset;
michael@0 880 int k = -1;
michael@0 881 int all_in;
michael@0 882 int best_site = -1;
michael@0 883 int hex_range = 127;
michael@0 884 int dia_range = 8;
michael@0 885
michael@0 886 int_mv fcenter_mv;
michael@0 887 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
michael@0 888 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
michael@0 889
michael@0 890 /* adjust ref_mv to make sure it is within MV range */
michael@0 891 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
michael@0 892 br = ref_mv->as_mv.row;
michael@0 893 bc = ref_mv->as_mv.col;
michael@0 894
michael@0 895 /* Work out the start point for the search */
michael@0 896 base_offset = (unsigned char *)(base_pre + d->offset);
michael@0 897 this_offset = base_offset + (br * (pre_stride)) + bc;
michael@0 898 this_mv.as_mv.row = br;
michael@0 899 this_mv.as_mv.col = bc;
michael@0 900 bestsad = vfp->sdf(what, what_stride, this_offset, in_what_stride, UINT_MAX)
michael@0 901 + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
michael@0 902
michael@0 903 #if CONFIG_MULTI_RES_ENCODING
michael@0 904 /* Lower search range based on prediction info */
michael@0 905 if (search_param >= 6) goto cal_neighbors;
michael@0 906 else if (search_param >= 5) hex_range = 4;
michael@0 907 else if (search_param >= 4) hex_range = 6;
michael@0 908 else if (search_param >= 3) hex_range = 15;
michael@0 909 else if (search_param >= 2) hex_range = 31;
michael@0 910 else if (search_param >= 1) hex_range = 63;
michael@0 911
michael@0 912 dia_range = 8;
michael@0 913 #endif
michael@0 914
michael@0 915 /* hex search */
michael@0 916 CHECK_BOUNDS(2)
michael@0 917
michael@0 918 if(all_in)
michael@0 919 {
michael@0 920 for (i = 0; i < 6; i++)
michael@0 921 {
michael@0 922 this_mv.as_mv.row = br + hex[i].row;
michael@0 923 this_mv.as_mv.col = bc + hex[i].col;
michael@0 924 this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
michael@0 925 thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
michael@0 926 CHECK_BETTER
michael@0 927 }
michael@0 928 }else
michael@0 929 {
michael@0 930 for (i = 0; i < 6; i++)
michael@0 931 {
michael@0 932 this_mv.as_mv.row = br + hex[i].row;
michael@0 933 this_mv.as_mv.col = bc + hex[i].col;
michael@0 934 CHECK_POINT
michael@0 935 this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
michael@0 936 thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
michael@0 937 CHECK_BETTER
michael@0 938 }
michael@0 939 }
michael@0 940
michael@0 941 if (best_site == -1)
michael@0 942 goto cal_neighbors;
michael@0 943 else
michael@0 944 {
michael@0 945 br += hex[best_site].row;
michael@0 946 bc += hex[best_site].col;
michael@0 947 k = best_site;
michael@0 948 }
michael@0 949
michael@0 950 for (j = 1; j < hex_range; j++)
michael@0 951 {
michael@0 952 best_site = -1;
michael@0 953 CHECK_BOUNDS(2)
michael@0 954
michael@0 955 if(all_in)
michael@0 956 {
michael@0 957 for (i = 0; i < 3; i++)
michael@0 958 {
michael@0 959 this_mv.as_mv.row = br + next_chkpts[k][i].row;
michael@0 960 this_mv.as_mv.col = bc + next_chkpts[k][i].col;
michael@0 961 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
michael@0 962 thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
michael@0 963 CHECK_BETTER
michael@0 964 }
michael@0 965 }else
michael@0 966 {
michael@0 967 for (i = 0; i < 3; i++)
michael@0 968 {
michael@0 969 this_mv.as_mv.row = br + next_chkpts[k][i].row;
michael@0 970 this_mv.as_mv.col = bc + next_chkpts[k][i].col;
michael@0 971 CHECK_POINT
michael@0 972 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
michael@0 973 thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
michael@0 974 CHECK_BETTER
michael@0 975 }
michael@0 976 }
michael@0 977
michael@0 978 if (best_site == -1)
michael@0 979 break;
michael@0 980 else
michael@0 981 {
michael@0 982 br += next_chkpts[k][best_site].row;
michael@0 983 bc += next_chkpts[k][best_site].col;
michael@0 984 k += 5 + best_site;
michael@0 985 if (k >= 12) k -= 12;
michael@0 986 else if (k >= 6) k -= 6;
michael@0 987 }
michael@0 988 }
michael@0 989
michael@0 990 /* check 4 1-away neighbors */
michael@0 991 cal_neighbors:
michael@0 992 for (j = 0; j < dia_range; j++)
michael@0 993 {
michael@0 994 best_site = -1;
michael@0 995 CHECK_BOUNDS(1)
michael@0 996
michael@0 997 if(all_in)
michael@0 998 {
michael@0 999 for (i = 0; i < 4; i++)
michael@0 1000 {
michael@0 1001 this_mv.as_mv.row = br + neighbors[i].row;
michael@0 1002 this_mv.as_mv.col = bc + neighbors[i].col;
michael@0 1003 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
michael@0 1004 thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
michael@0 1005 CHECK_BETTER
michael@0 1006 }
michael@0 1007 }else
michael@0 1008 {
michael@0 1009 for (i = 0; i < 4; i++)
michael@0 1010 {
michael@0 1011 this_mv.as_mv.row = br + neighbors[i].row;
michael@0 1012 this_mv.as_mv.col = bc + neighbors[i].col;
michael@0 1013 CHECK_POINT
michael@0 1014 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
michael@0 1015 thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
michael@0 1016 CHECK_BETTER
michael@0 1017 }
michael@0 1018 }
michael@0 1019
michael@0 1020 if (best_site == -1)
michael@0 1021 break;
michael@0 1022 else
michael@0 1023 {
michael@0 1024 br += neighbors[best_site].row;
michael@0 1025 bc += neighbors[best_site].col;
michael@0 1026 }
michael@0 1027 }
michael@0 1028
michael@0 1029 best_mv->as_mv.row = br;
michael@0 1030 best_mv->as_mv.col = bc;
michael@0 1031
michael@0 1032 return bestsad;
michael@0 1033 }
michael@0 1034 #undef CHECK_BOUNDS
michael@0 1035 #undef CHECK_POINT
michael@0 1036 #undef CHECK_BETTER
michael@0 1037
michael@0 1038 int vp8_diamond_search_sad_c
michael@0 1039 (
michael@0 1040 MACROBLOCK *x,
michael@0 1041 BLOCK *b,
michael@0 1042 BLOCKD *d,
michael@0 1043 int_mv *ref_mv,
michael@0 1044 int_mv *best_mv,
michael@0 1045 int search_param,
michael@0 1046 int sad_per_bit,
michael@0 1047 int *num00,
michael@0 1048 vp8_variance_fn_ptr_t *fn_ptr,
michael@0 1049 int *mvcost[2],
michael@0 1050 int_mv *center_mv
michael@0 1051 )
michael@0 1052 {
michael@0 1053 int i, j, step;
michael@0 1054
michael@0 1055 unsigned char *what = (*(b->base_src) + b->src);
michael@0 1056 int what_stride = b->src_stride;
michael@0 1057 unsigned char *in_what;
michael@0 1058 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 1059 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 1060 int in_what_stride = pre_stride;
michael@0 1061 unsigned char *best_address;
michael@0 1062
michael@0 1063 int tot_steps;
michael@0 1064 int_mv this_mv;
michael@0 1065
michael@0 1066 unsigned int bestsad;
michael@0 1067 unsigned int thissad;
michael@0 1068 int best_site = 0;
michael@0 1069 int last_site = 0;
michael@0 1070
michael@0 1071 int ref_row;
michael@0 1072 int ref_col;
michael@0 1073 int this_row_offset;
michael@0 1074 int this_col_offset;
michael@0 1075 search_site *ss;
michael@0 1076
michael@0 1077 unsigned char *check_here;
michael@0 1078
michael@0 1079 int *mvsadcost[2];
michael@0 1080 int_mv fcenter_mv;
michael@0 1081
michael@0 1082 mvsadcost[0] = x->mvsadcost[0];
michael@0 1083 mvsadcost[1] = x->mvsadcost[1];
michael@0 1084 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
michael@0 1085 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
michael@0 1086
michael@0 1087 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
michael@0 1088 ref_row = ref_mv->as_mv.row;
michael@0 1089 ref_col = ref_mv->as_mv.col;
michael@0 1090 *num00 = 0;
michael@0 1091 best_mv->as_mv.row = ref_row;
michael@0 1092 best_mv->as_mv.col = ref_col;
michael@0 1093
michael@0 1094 /* Work out the start point for the search */
michael@0 1095 in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col);
michael@0 1096 best_address = in_what;
michael@0 1097
michael@0 1098 /* Check the starting position */
michael@0 1099 bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, UINT_MAX)
michael@0 1100 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
michael@0 1101
michael@0 1102 /* search_param determines the length of the initial step and hence
michael@0 1103 * the number of iterations: 0 = initial step (MAX_FIRST_STEP) pel,
michael@0 1104 * 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel, etc.
michael@0 1105 */
michael@0 1106 ss = &x->ss[search_param * x->searches_per_step];
michael@0 1107 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
michael@0 1108
michael@0 1109 i = 1;
michael@0 1110
michael@0 1111 for (step = 0; step < tot_steps ; step++)
michael@0 1112 {
michael@0 1113 for (j = 0 ; j < x->searches_per_step ; j++)
michael@0 1114 {
michael@0 1115 /* Trap illegal vectors */
michael@0 1116 this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
michael@0 1117 this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
michael@0 1118
michael@0 1119 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
michael@0 1120 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
michael@0 1121
michael@0 1122 {
michael@0 1123 check_here = ss[i].offset + best_address;
michael@0 1124 thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
michael@0 1125
michael@0 1126 if (thissad < bestsad)
michael@0 1127 {
michael@0 1128 this_mv.as_mv.row = this_row_offset;
michael@0 1129 this_mv.as_mv.col = this_col_offset;
michael@0 1130 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1131 mvsadcost, sad_per_bit);
michael@0 1132
michael@0 1133 if (thissad < bestsad)
michael@0 1134 {
michael@0 1135 bestsad = thissad;
michael@0 1136 best_site = i;
michael@0 1137 }
michael@0 1138 }
michael@0 1139 }
michael@0 1140
michael@0 1141 i++;
michael@0 1142 }
michael@0 1143
michael@0 1144 if (best_site != last_site)
michael@0 1145 {
michael@0 1146 best_mv->as_mv.row += ss[best_site].mv.row;
michael@0 1147 best_mv->as_mv.col += ss[best_site].mv.col;
michael@0 1148 best_address += ss[best_site].offset;
michael@0 1149 last_site = best_site;
michael@0 1150 }
michael@0 1151 else if (best_address == in_what)
michael@0 1152 (*num00)++;
michael@0 1153 }
michael@0 1154
michael@0 1155 this_mv.as_mv.row = best_mv->as_mv.row << 3;
michael@0 1156 this_mv.as_mv.col = best_mv->as_mv.col << 3;
michael@0 1157
michael@0 1158 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad)
michael@0 1159 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
michael@0 1160 }
michael@0 1161
michael@0 1162 int vp8_diamond_search_sadx4
michael@0 1163 (
michael@0 1164 MACROBLOCK *x,
michael@0 1165 BLOCK *b,
michael@0 1166 BLOCKD *d,
michael@0 1167 int_mv *ref_mv,
michael@0 1168 int_mv *best_mv,
michael@0 1169 int search_param,
michael@0 1170 int sad_per_bit,
michael@0 1171 int *num00,
michael@0 1172 vp8_variance_fn_ptr_t *fn_ptr,
michael@0 1173 int *mvcost[2],
michael@0 1174 int_mv *center_mv
michael@0 1175 )
michael@0 1176 {
michael@0 1177 int i, j, step;
michael@0 1178
michael@0 1179 unsigned char *what = (*(b->base_src) + b->src);
michael@0 1180 int what_stride = b->src_stride;
michael@0 1181 unsigned char *in_what;
michael@0 1182 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 1183 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 1184 int in_what_stride = pre_stride;
michael@0 1185 unsigned char *best_address;
michael@0 1186
michael@0 1187 int tot_steps;
michael@0 1188 int_mv this_mv;
michael@0 1189
michael@0 1190 unsigned int bestsad;
michael@0 1191 unsigned int thissad;
michael@0 1192 int best_site = 0;
michael@0 1193 int last_site = 0;
michael@0 1194
michael@0 1195 int ref_row;
michael@0 1196 int ref_col;
michael@0 1197 int this_row_offset;
michael@0 1198 int this_col_offset;
michael@0 1199 search_site *ss;
michael@0 1200
michael@0 1201 unsigned char *check_here;
michael@0 1202
michael@0 1203 int *mvsadcost[2];
michael@0 1204 int_mv fcenter_mv;
michael@0 1205
michael@0 1206 mvsadcost[0] = x->mvsadcost[0];
michael@0 1207 mvsadcost[1] = x->mvsadcost[1];
michael@0 1208 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
michael@0 1209 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
michael@0 1210
michael@0 1211 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
michael@0 1212 ref_row = ref_mv->as_mv.row;
michael@0 1213 ref_col = ref_mv->as_mv.col;
michael@0 1214 *num00 = 0;
michael@0 1215 best_mv->as_mv.row = ref_row;
michael@0 1216 best_mv->as_mv.col = ref_col;
michael@0 1217
michael@0 1218 /* Work out the start point for the search */
michael@0 1219 in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col);
michael@0 1220 best_address = in_what;
michael@0 1221
michael@0 1222 /* Check the starting position */
michael@0 1223 bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, UINT_MAX)
michael@0 1224 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
michael@0 1225
michael@0 1226 /* search_param determines the length of the initial step and hence the
michael@0 1227 * number of iterations: 0 = initial step (MAX_FIRST_STEP) pel, 1 =
michael@0 1228 * (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel, etc.
michael@0 1229 */
michael@0 1230 ss = &x->ss[search_param * x->searches_per_step];
michael@0 1231 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
michael@0 1232
michael@0 1233 i = 1;
michael@0 1234
michael@0 1235 for (step = 0; step < tot_steps ; step++)
michael@0 1236 {
michael@0 1237 int all_in = 1, t;
michael@0 1238
michael@0 1239 /* To know whether all neighbor points are within the bounds, 4 bounds
michael@0 1240 * checks are enough instead of checking 4 bounds for each
michael@0 1241 * point.
michael@0 1242 */
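/* Note (an inference from the site layout built by
 * vp8_init_dsmotion_compensation / vp8_init3smotion_compensation):
 * the first four sites of each step are the pure row/column offsets
 * (-Len/+Len rows, then -Len/+Len columns), so checking those four
 * extremes also bounds any diagonal sites, whose row and column offsets
 * never exceed +/-Len. */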
michael@0 1243 all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
michael@0 1244 all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
michael@0 1245 all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
michael@0 1246 all_in &= ((best_mv->as_mv.col + ss[i+3].mv.col) < x->mv_col_max);
michael@0 1247
michael@0 1248 if (all_in)
michael@0 1249 {
michael@0 1250 unsigned int sad_array[4];
michael@0 1251
michael@0 1252 for (j = 0 ; j < x->searches_per_step ; j += 4)
michael@0 1253 {
michael@0 1254 const unsigned char *block_offset[4];
michael@0 1255
michael@0 1256 for (t = 0; t < 4; t++)
michael@0 1257 block_offset[t] = ss[i+t].offset + best_address;
michael@0 1258
michael@0 1259 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
michael@0 1260
michael@0 1261 for (t = 0; t < 4; t++, i++)
michael@0 1262 {
michael@0 1263 if (sad_array[t] < bestsad)
michael@0 1264 {
michael@0 1265 this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
michael@0 1266 this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
michael@0 1267 sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1268 mvsadcost, sad_per_bit);
michael@0 1269
michael@0 1270 if (sad_array[t] < bestsad)
michael@0 1271 {
michael@0 1272 bestsad = sad_array[t];
michael@0 1273 best_site = i;
michael@0 1274 }
michael@0 1275 }
michael@0 1276 }
michael@0 1277 }
michael@0 1278 }
michael@0 1279 else
michael@0 1280 {
michael@0 1281 for (j = 0 ; j < x->searches_per_step ; j++)
michael@0 1282 {
michael@0 1283 /* Trap illegal vectors */
michael@0 1284 this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
michael@0 1285 this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
michael@0 1286
michael@0 1287 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
michael@0 1288 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
michael@0 1289 {
michael@0 1290 check_here = ss[i].offset + best_address;
michael@0 1291 thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
michael@0 1292
michael@0 1293 if (thissad < bestsad)
michael@0 1294 {
michael@0 1295 this_mv.as_mv.row = this_row_offset;
michael@0 1296 this_mv.as_mv.col = this_col_offset;
michael@0 1297 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1298 mvsadcost, sad_per_bit);
michael@0 1299
michael@0 1300 if (thissad < bestsad)
michael@0 1301 {
michael@0 1302 bestsad = thissad;
michael@0 1303 best_site = i;
michael@0 1304 }
michael@0 1305 }
michael@0 1306 }
michael@0 1307 i++;
michael@0 1308 }
michael@0 1309 }
michael@0 1310
michael@0 1311 if (best_site != last_site)
michael@0 1312 {
michael@0 1313 best_mv->as_mv.row += ss[best_site].mv.row;
michael@0 1314 best_mv->as_mv.col += ss[best_site].mv.col;
michael@0 1315 best_address += ss[best_site].offset;
michael@0 1316 last_site = best_site;
michael@0 1317 }
michael@0 1318 else if (best_address == in_what)
michael@0 1319 (*num00)++;
michael@0 1320 }
michael@0 1321
michael@0 1322 this_mv.as_mv.row = best_mv->as_mv.row * 8;
michael@0 1323 this_mv.as_mv.col = best_mv->as_mv.col * 8;
michael@0 1324
michael@0 1325 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad)
michael@0 1326 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
michael@0 1327 }
michael@0 1328
michael@0 1329 int vp8_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
michael@0 1330 int sad_per_bit, int distance,
michael@0 1331 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
michael@0 1332 int_mv *center_mv)
michael@0 1333 {
michael@0 1334 unsigned char *what = (*(b->base_src) + b->src);
michael@0 1335 int what_stride = b->src_stride;
michael@0 1336 unsigned char *in_what;
michael@0 1337 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 1338 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 1339 int in_what_stride = pre_stride;
michael@0 1340 int mv_stride = pre_stride;
michael@0 1341 unsigned char *bestaddress;
michael@0 1342 int_mv *best_mv = &d->bmi.mv;
michael@0 1343 int_mv this_mv;
michael@0 1344 unsigned int bestsad;
michael@0 1345 unsigned int thissad;
michael@0 1346 int r, c;
michael@0 1347
michael@0 1348 unsigned char *check_here;
michael@0 1349
michael@0 1350 int ref_row = ref_mv->as_mv.row;
michael@0 1351 int ref_col = ref_mv->as_mv.col;
michael@0 1352
michael@0 1353 int row_min = ref_row - distance;
michael@0 1354 int row_max = ref_row + distance;
michael@0 1355 int col_min = ref_col - distance;
michael@0 1356 int col_max = ref_col + distance;
michael@0 1357
michael@0 1358 int *mvsadcost[2];
michael@0 1359 int_mv fcenter_mv;
michael@0 1360
michael@0 1361 mvsadcost[0] = x->mvsadcost[0];
michael@0 1362 mvsadcost[1] = x->mvsadcost[1];
michael@0 1363 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
michael@0 1364 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
michael@0 1365
michael@0 1366 /* Work out the mid point for the search */
michael@0 1367 in_what = base_pre + d->offset;
michael@0 1368 bestaddress = in_what + (ref_row * pre_stride) + ref_col;
michael@0 1369
michael@0 1370 best_mv->as_mv.row = ref_row;
michael@0 1371 best_mv->as_mv.col = ref_col;
michael@0 1372
michael@0 1373 /* Baseline value at the centre */
michael@0 1374 bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
michael@0 1375 in_what_stride, UINT_MAX)
michael@0 1376 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
michael@0 1377
michael@0 1378 /* Apply further limits to prevent us searching using vectors that
michael@0 1379 * stretch beyond the UMV border
michael@0 1380 */
michael@0 1381 if (col_min < x->mv_col_min)
michael@0 1382 col_min = x->mv_col_min;
michael@0 1383
michael@0 1384 if (col_max > x->mv_col_max)
michael@0 1385 col_max = x->mv_col_max;
michael@0 1386
michael@0 1387 if (row_min < x->mv_row_min)
michael@0 1388 row_min = x->mv_row_min;
michael@0 1389
michael@0 1390 if (row_max > x->mv_row_max)
michael@0 1391 row_max = x->mv_row_max;
michael@0 1392
michael@0 1393 for (r = row_min; r < row_max ; r++)
michael@0 1394 {
michael@0 1395 this_mv.as_mv.row = r;
michael@0 1396 check_here = r * mv_stride + in_what + col_min;
michael@0 1397
michael@0 1398 for (c = col_min; c < col_max; c++)
michael@0 1399 {
michael@0 1400 thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
michael@0 1401
michael@0 1402 this_mv.as_mv.col = c;
michael@0 1403 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1404 mvsadcost, sad_per_bit);
michael@0 1405
michael@0 1406 if (thissad < bestsad)
michael@0 1407 {
michael@0 1408 bestsad = thissad;
michael@0 1409 best_mv->as_mv.row = r;
michael@0 1410 best_mv->as_mv.col = c;
michael@0 1411 bestaddress = check_here;
michael@0 1412 }
michael@0 1413
michael@0 1414 check_here++;
michael@0 1415 }
michael@0 1416 }
michael@0 1417
michael@0 1418 this_mv.as_mv.row = best_mv->as_mv.row << 3;
michael@0 1419 this_mv.as_mv.col = best_mv->as_mv.col << 3;
michael@0 1420
michael@0 1421 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad)
michael@0 1422 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
michael@0 1423 }
michael@0 1424
michael@0 1425 int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
michael@0 1426 int sad_per_bit, int distance,
michael@0 1427 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
michael@0 1428 int_mv *center_mv)
michael@0 1429 {
michael@0 1430 unsigned char *what = (*(b->base_src) + b->src);
michael@0 1431 int what_stride = b->src_stride;
michael@0 1432 unsigned char *in_what;
michael@0 1433 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 1434 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 1435 int in_what_stride = pre_stride;
michael@0 1436 int mv_stride = pre_stride;
michael@0 1437 unsigned char *bestaddress;
michael@0 1438 int_mv *best_mv = &d->bmi.mv;
michael@0 1439 int_mv this_mv;
michael@0 1440 unsigned int bestsad;
michael@0 1441 unsigned int thissad;
michael@0 1442 int r, c;
michael@0 1443
michael@0 1444 unsigned char *check_here;
michael@0 1445
michael@0 1446 int ref_row = ref_mv->as_mv.row;
michael@0 1447 int ref_col = ref_mv->as_mv.col;
michael@0 1448
michael@0 1449 int row_min = ref_row - distance;
michael@0 1450 int row_max = ref_row + distance;
michael@0 1451 int col_min = ref_col - distance;
michael@0 1452 int col_max = ref_col + distance;
michael@0 1453
michael@0 1454 unsigned int sad_array[3];
michael@0 1455
michael@0 1456 int *mvsadcost[2];
michael@0 1457 int_mv fcenter_mv;
michael@0 1458
michael@0 1459 mvsadcost[0] = x->mvsadcost[0];
michael@0 1460 mvsadcost[1] = x->mvsadcost[1];
michael@0 1461 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
michael@0 1462 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
michael@0 1463
michael@0 1464 /* Work out the mid point for the search */
michael@0 1465 in_what = base_pre + d->offset;
michael@0 1466 bestaddress = in_what + (ref_row * pre_stride) + ref_col;
michael@0 1467
michael@0 1468 best_mv->as_mv.row = ref_row;
michael@0 1469 best_mv->as_mv.col = ref_col;
michael@0 1470
michael@0 1471 /* Baseline value at the centre */
michael@0 1472 bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
michael@0 1473 in_what_stride, UINT_MAX)
michael@0 1474 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
michael@0 1475
michael@0 1476     /* Apply further limits to prevent us from using vectors that stretch
michael@0 1477      * beyond the UMV border
michael@0 1478      */
michael@0 1479 if (col_min < x->mv_col_min)
michael@0 1480 col_min = x->mv_col_min;
michael@0 1481
michael@0 1482 if (col_max > x->mv_col_max)
michael@0 1483 col_max = x->mv_col_max;
michael@0 1484
michael@0 1485 if (row_min < x->mv_row_min)
michael@0 1486 row_min = x->mv_row_min;
michael@0 1487
michael@0 1488 if (row_max > x->mv_row_max)
michael@0 1489 row_max = x->mv_row_max;
michael@0 1490
michael@0 1491     for (r = row_min; r < row_max; r++)
michael@0 1492 {
michael@0 1493 this_mv.as_mv.row = r;
michael@0 1494 check_here = r * mv_stride + in_what + col_min;
michael@0 1495 c = col_min;
michael@0 1496
michael@0 1497 while ((c + 2) < col_max)
michael@0 1498 {
michael@0 1499 int i;
michael@0 1500
michael@0 1501 fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
michael@0 1502
michael@0 1503 for (i = 0; i < 3; i++)
michael@0 1504 {
michael@0 1505 thissad = sad_array[i];
michael@0 1506
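                /* Only add the mv rate cost when the raw SAD already beats
                 * the current best; the combined value is then re-checked.
                 */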
michael@0 1507 if (thissad < bestsad)
michael@0 1508 {
michael@0 1509 this_mv.as_mv.col = c;
michael@0 1510 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1511 mvsadcost, sad_per_bit);
michael@0 1512
michael@0 1513 if (thissad < bestsad)
michael@0 1514 {
michael@0 1515 bestsad = thissad;
michael@0 1516 best_mv->as_mv.row = r;
michael@0 1517 best_mv->as_mv.col = c;
michael@0 1518 bestaddress = check_here;
michael@0 1519 }
michael@0 1520 }
michael@0 1521
michael@0 1522 check_here++;
michael@0 1523 c++;
michael@0 1524 }
michael@0 1525 }
michael@0 1526
michael@0 1527 while (c < col_max)
michael@0 1528 {
michael@0 1529 thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
michael@0 1530
michael@0 1531 if (thissad < bestsad)
michael@0 1532 {
michael@0 1533 this_mv.as_mv.col = c;
michael@0 1534 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1535 mvsadcost, sad_per_bit);
michael@0 1536
michael@0 1537 if (thissad < bestsad)
michael@0 1538 {
michael@0 1539 bestsad = thissad;
michael@0 1540 best_mv->as_mv.row = r;
michael@0 1541 best_mv->as_mv.col = c;
michael@0 1542 bestaddress = check_here;
michael@0 1543 }
michael@0 1544 }
michael@0 1545
michael@0 1546             check_here++;
michael@0 1547             c++;
michael@0 1548 }
michael@0 1549
michael@0 1550 }
michael@0 1551
michael@0 1552 this_mv.as_mv.row = best_mv->as_mv.row << 3;
michael@0 1553 this_mv.as_mv.col = best_mv->as_mv.col << 3;
michael@0 1554
michael@0 1555 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad)
michael@0 1556 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
michael@0 1557 }
michael@0 1558
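/* As vp8_full_search_sadx3, but additionally uses fn_ptr->sdx8f to evaluate
 * eight candidate columns per call where enough columns remain, then falls
 * back to the three-wide and finally the scalar SAD for the rest of the row.
 */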
michael@0 1559 int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
michael@0 1560 int sad_per_bit, int distance,
michael@0 1561 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
michael@0 1562 int_mv *center_mv)
michael@0 1563 {
michael@0 1564 unsigned char *what = (*(b->base_src) + b->src);
michael@0 1565 int what_stride = b->src_stride;
michael@0 1566 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 1567 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 1568 unsigned char *in_what;
michael@0 1569 int in_what_stride = pre_stride;
michael@0 1570 int mv_stride = pre_stride;
michael@0 1571 unsigned char *bestaddress;
michael@0 1572 int_mv *best_mv = &d->bmi.mv;
michael@0 1573 int_mv this_mv;
michael@0 1574 unsigned int bestsad;
michael@0 1575 unsigned int thissad;
michael@0 1576 int r, c;
michael@0 1577
michael@0 1578 unsigned char *check_here;
michael@0 1579
michael@0 1580 int ref_row = ref_mv->as_mv.row;
michael@0 1581 int ref_col = ref_mv->as_mv.col;
michael@0 1582
michael@0 1583 int row_min = ref_row - distance;
michael@0 1584 int row_max = ref_row + distance;
michael@0 1585 int col_min = ref_col - distance;
michael@0 1586 int col_max = ref_col + distance;
michael@0 1587
michael@0 1588 DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
michael@0 1589 unsigned int sad_array[3];
michael@0 1590
michael@0 1591 int *mvsadcost[2];
michael@0 1592 int_mv fcenter_mv;
michael@0 1593
michael@0 1594 mvsadcost[0] = x->mvsadcost[0];
michael@0 1595 mvsadcost[1] = x->mvsadcost[1];
michael@0 1596 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
michael@0 1597 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
michael@0 1598
michael@0 1599 /* Work out the mid point for the search */
michael@0 1600 in_what = base_pre + d->offset;
michael@0 1601 bestaddress = in_what + (ref_row * pre_stride) + ref_col;
michael@0 1602
michael@0 1603 best_mv->as_mv.row = ref_row;
michael@0 1604 best_mv->as_mv.col = ref_col;
michael@0 1605
michael@0 1606 /* Baseline value at the centre */
michael@0 1607 bestsad = fn_ptr->sdf(what, what_stride,
michael@0 1608 bestaddress, in_what_stride, UINT_MAX)
michael@0 1609 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
michael@0 1610
michael@0 1611     /* Apply further limits to prevent us from using vectors that stretch
michael@0 1612      * beyond the UMV border
michael@0 1613      */
michael@0 1614 if (col_min < x->mv_col_min)
michael@0 1615 col_min = x->mv_col_min;
michael@0 1616
michael@0 1617 if (col_max > x->mv_col_max)
michael@0 1618 col_max = x->mv_col_max;
michael@0 1619
michael@0 1620 if (row_min < x->mv_row_min)
michael@0 1621 row_min = x->mv_row_min;
michael@0 1622
michael@0 1623 if (row_max > x->mv_row_max)
michael@0 1624 row_max = x->mv_row_max;
michael@0 1625
michael@0 1626     for (r = row_min; r < row_max; r++)
michael@0 1627 {
michael@0 1628 this_mv.as_mv.row = r;
michael@0 1629 check_here = r * mv_stride + in_what + col_min;
michael@0 1630 c = col_min;
michael@0 1631
michael@0 1632 while ((c + 7) < col_max)
michael@0 1633 {
michael@0 1634 int i;
michael@0 1635
michael@0 1636 fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8);
michael@0 1637
michael@0 1638 for (i = 0; i < 8; i++)
michael@0 1639 {
michael@0 1640 thissad = sad_array8[i];
michael@0 1641
michael@0 1642 if (thissad < bestsad)
michael@0 1643 {
michael@0 1644 this_mv.as_mv.col = c;
michael@0 1645 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1646 mvsadcost, sad_per_bit);
michael@0 1647
michael@0 1648 if (thissad < bestsad)
michael@0 1649 {
michael@0 1650 bestsad = thissad;
michael@0 1651 best_mv->as_mv.row = r;
michael@0 1652 best_mv->as_mv.col = c;
michael@0 1653 bestaddress = check_here;
michael@0 1654 }
michael@0 1655 }
michael@0 1656
michael@0 1657 check_here++;
michael@0 1658 c++;
michael@0 1659 }
michael@0 1660 }
michael@0 1661
michael@0 1662 while ((c + 2) < col_max)
michael@0 1663 {
michael@0 1664 int i;
michael@0 1665
michael@0 1666             fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
michael@0 1667
michael@0 1668 for (i = 0; i < 3; i++)
michael@0 1669 {
michael@0 1670 thissad = sad_array[i];
michael@0 1671
michael@0 1672 if (thissad < bestsad)
michael@0 1673 {
michael@0 1674 this_mv.as_mv.col = c;
michael@0 1675 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1676 mvsadcost, sad_per_bit);
michael@0 1677
michael@0 1678 if (thissad < bestsad)
michael@0 1679 {
michael@0 1680 bestsad = thissad;
michael@0 1681 best_mv->as_mv.row = r;
michael@0 1682 best_mv->as_mv.col = c;
michael@0 1683 bestaddress = check_here;
michael@0 1684 }
michael@0 1685 }
michael@0 1686
michael@0 1687 check_here++;
michael@0 1688 c++;
michael@0 1689 }
michael@0 1690 }
michael@0 1691
michael@0 1692 while (c < col_max)
michael@0 1693 {
michael@0 1694             thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
michael@0 1695
michael@0 1696 if (thissad < bestsad)
michael@0 1697 {
michael@0 1698 this_mv.as_mv.col = c;
michael@0 1699 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
michael@0 1700 mvsadcost, sad_per_bit);
michael@0 1701
michael@0 1702 if (thissad < bestsad)
michael@0 1703 {
michael@0 1704 bestsad = thissad;
michael@0 1705 best_mv->as_mv.row = r;
michael@0 1706 best_mv->as_mv.col = c;
michael@0 1707 bestaddress = check_here;
michael@0 1708 }
michael@0 1709 }
michael@0 1710
michael@0 1711             check_here++;
michael@0 1712             c++;
michael@0 1713 }
michael@0 1714 }
michael@0 1715
michael@0 1716 this_mv.as_mv.row = best_mv->as_mv.row * 8;
michael@0 1717 this_mv.as_mv.col = best_mv->as_mv.col * 8;
michael@0 1718
michael@0 1719 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad)
michael@0 1720 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
michael@0 1721 }
michael@0 1722
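/* Iterative full-pixel refinement: starting from ref_mv, repeatedly test the
 * four one-pel neighbours (up, left, right, down) and move to the best one,
 * for at most search_range iterations or until no neighbour improves the
 * combined SAD and mv cost.  ref_mv is updated in place with the refined
 * vector.
 */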
michael@0 1723 int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
michael@0 1724 int error_per_bit, int search_range,
michael@0 1725 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
michael@0 1726 int_mv *center_mv)
michael@0 1727 {
michael@0 1728 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
michael@0 1729 int i, j;
michael@0 1730 short this_row_offset, this_col_offset;
michael@0 1731
michael@0 1732 int what_stride = b->src_stride;
michael@0 1733 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 1734 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 1735 int in_what_stride = pre_stride;
michael@0 1736 unsigned char *what = (*(b->base_src) + b->src);
michael@0 1737 unsigned char *best_address = (unsigned char *)(base_pre + d->offset +
michael@0 1738 (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col);
michael@0 1739 unsigned char *check_here;
michael@0 1740 int_mv this_mv;
michael@0 1741 unsigned int bestsad;
michael@0 1742 unsigned int thissad;
michael@0 1743
michael@0 1744 int *mvsadcost[2];
michael@0 1745 int_mv fcenter_mv;
michael@0 1746
michael@0 1747 mvsadcost[0] = x->mvsadcost[0];
michael@0 1748 mvsadcost[1] = x->mvsadcost[1];
michael@0 1749 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
michael@0 1750 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
michael@0 1751
michael@0 1752 bestsad = fn_ptr->sdf(what, what_stride, best_address,
michael@0 1753 in_what_stride, UINT_MAX)
michael@0 1754 + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
michael@0 1755
michael@0 1756     for (i = 0; i < search_range; i++)
michael@0 1757 {
michael@0 1758 int best_site = -1;
michael@0 1759
michael@0 1760         for (j = 0; j < 4; j++)
michael@0 1761 {
michael@0 1762 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
michael@0 1763 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
michael@0 1764
michael@0 1765 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
michael@0 1766 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
michael@0 1767 {
michael@0 1768 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
michael@0 1769                 thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
michael@0 1770
michael@0 1771 if (thissad < bestsad)
michael@0 1772 {
michael@0 1773 this_mv.as_mv.row = this_row_offset;
michael@0 1774 this_mv.as_mv.col = this_col_offset;
michael@0 1775 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
michael@0 1776
michael@0 1777 if (thissad < bestsad)
michael@0 1778 {
michael@0 1779 bestsad = thissad;
michael@0 1780 best_site = j;
michael@0 1781 }
michael@0 1782 }
michael@0 1783 }
michael@0 1784 }
michael@0 1785
michael@0 1786 if (best_site == -1)
michael@0 1787 break;
michael@0 1788 else
michael@0 1789 {
michael@0 1790 ref_mv->as_mv.row += neighbors[best_site].row;
michael@0 1791 ref_mv->as_mv.col += neighbors[best_site].col;
michael@0 1792 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
michael@0 1793 }
michael@0 1794 }
michael@0 1795
michael@0 1796 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
michael@0 1797 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
michael@0 1798
michael@0 1799 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad)
michael@0 1800 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
michael@0 1801 }
michael@0 1802
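/* As vp8_refining_search_sad_c, but when all four one-pel neighbours lie
 * strictly inside the motion vector limits their SADs are computed in a
 * single call to fn_ptr->sdx4df; otherwise it falls back to per-candidate
 * scalar SADs.
 */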
michael@0 1803 int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
michael@0 1804 int_mv *ref_mv, int error_per_bit,
michael@0 1805 int search_range, vp8_variance_fn_ptr_t *fn_ptr,
michael@0 1806 int *mvcost[2], int_mv *center_mv)
michael@0 1807 {
michael@0 1808 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
michael@0 1809 int i, j;
michael@0 1810 short this_row_offset, this_col_offset;
michael@0 1811
michael@0 1812 int what_stride = b->src_stride;
michael@0 1813 int pre_stride = x->e_mbd.pre.y_stride;
michael@0 1814 unsigned char *base_pre = x->e_mbd.pre.y_buffer;
michael@0 1815 int in_what_stride = pre_stride;
michael@0 1816 unsigned char *what = (*(b->base_src) + b->src);
michael@0 1817 unsigned char *best_address = (unsigned char *)(base_pre + d->offset +
michael@0 1818 (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col);
michael@0 1819 unsigned char *check_here;
michael@0 1820 int_mv this_mv;
michael@0 1821 unsigned int bestsad;
michael@0 1822 unsigned int thissad;
michael@0 1823
michael@0 1824 int *mvsadcost[2];
michael@0 1825 int_mv fcenter_mv;
michael@0 1826
michael@0 1827 mvsadcost[0] = x->mvsadcost[0];
michael@0 1828 mvsadcost[1] = x->mvsadcost[1];
michael@0 1829 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
michael@0 1830 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
michael@0 1831
michael@0 1832 bestsad = fn_ptr->sdf(what, what_stride, best_address,
michael@0 1833 in_what_stride, UINT_MAX)
michael@0 1834 + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
michael@0 1835
michael@0 1836     for (i = 0; i < search_range; i++)
michael@0 1837 {
michael@0 1838 int best_site = -1;
michael@0 1839 int all_in = 1;
michael@0 1840
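        /* Check whether all four one-pel candidates are strictly inside the
         * mv limits, so the batched four-way SAD can be used without
         * per-candidate bounds checks.
         */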
michael@0 1841 all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
michael@0 1842 all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
michael@0 1843 all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
michael@0 1844 all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
michael@0 1845
michael@0 1846         if (all_in)
michael@0 1847 {
michael@0 1848 unsigned int sad_array[4];
michael@0 1849 const unsigned char *block_offset[4];
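            /* Candidate addresses in the same order as neighbors[]:
             * up, left, right, down.
             */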
michael@0 1850 block_offset[0] = best_address - in_what_stride;
michael@0 1851 block_offset[1] = best_address - 1;
michael@0 1852 block_offset[2] = best_address + 1;
michael@0 1853 block_offset[3] = best_address + in_what_stride;
michael@0 1854
michael@0 1855 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
michael@0 1856
michael@0 1857 for (j = 0; j < 4; j++)
michael@0 1858 {
michael@0 1859 if (sad_array[j] < bestsad)
michael@0 1860 {
michael@0 1861 this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
michael@0 1862 this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
michael@0 1863 sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
michael@0 1864
michael@0 1865 if (sad_array[j] < bestsad)
michael@0 1866 {
michael@0 1867 bestsad = sad_array[j];
michael@0 1868 best_site = j;
michael@0 1869 }
michael@0 1870 }
michael@0 1871 }
michael@0 1872 }
michael@0 1873 else
michael@0 1874 {
michael@0 1875             for (j = 0; j < 4; j++)
michael@0 1876 {
michael@0 1877 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
michael@0 1878 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
michael@0 1879
michael@0 1880 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
michael@0 1881 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
michael@0 1882 {
michael@0 1883 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
michael@0 1884                     thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
michael@0 1885
michael@0 1886 if (thissad < bestsad)
michael@0 1887 {
michael@0 1888 this_mv.as_mv.row = this_row_offset;
michael@0 1889 this_mv.as_mv.col = this_col_offset;
michael@0 1890 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
michael@0 1891
michael@0 1892 if (thissad < bestsad)
michael@0 1893 {
michael@0 1894 bestsad = thissad;
michael@0 1895 best_site = j;
michael@0 1896 }
michael@0 1897 }
michael@0 1898 }
michael@0 1899 }
michael@0 1900 }
michael@0 1901
michael@0 1902 if (best_site == -1)
michael@0 1903 break;
michael@0 1904 else
michael@0 1905 {
michael@0 1906 ref_mv->as_mv.row += neighbors[best_site].row;
michael@0 1907 ref_mv->as_mv.col += neighbors[best_site].col;
michael@0 1908 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
michael@0 1909 }
michael@0 1910 }
michael@0 1911
michael@0 1912 this_mv.as_mv.row = ref_mv->as_mv.row * 8;
michael@0 1913 this_mv.as_mv.col = ref_mv->as_mv.col * 8;
michael@0 1914
michael@0 1915 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad)
michael@0 1916 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
michael@0 1917 }
michael@0 1918
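/* Illustrative sketch only (kept disabled): a minimal, hypothetical call
 * sequence showing how a caller might combine the exhaustive and refining
 * searches above.  It assumes the caller has already set up the macroblock,
 * block, function pointers and sad_per_bit exactly as the encoder's mode
 * selection code does; none of this is part of the encoder itself.
 */
#if 0
static int example_full_pel_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                   vp8_variance_fn_ptr_t *fn_ptr,
                                   int sad_per_bit)
{
    int_mv zero_mv;
    int score;

    /* Search centred on the zero vector: the full-pel ref_mv and the
     * 1/8-pel center_mv are then both zero, so no unit conversion is
     * needed for this example.
     */
    zero_mv.as_int = 0;

    /* Exhaustive +/-16 full-pel search; the winning vector is left in
     * d->bmi.mv in full-pel units.
     */
    score = vp8_full_search_sadx8(x, b, d, &zero_mv, sad_per_bit, 16,
                                  fn_ptr, x->mvcost, &zero_mv);

    /* Up to 8 one-pel refinement steps around that result; d->bmi.mv is
     * updated in place.  A real caller would typically follow this with
     * one of the sub-pixel refinement routines earlier in this file.
     */
    score = vp8_refining_search_sadx4(x, b, d, &d->bmi.mv, sad_per_bit, 8,
                                      fn_ptr, x->mvcost, &zero_mv);

    return score;
}
#endif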
michael@0 1919 #ifdef VP8_ENTROPY_STATS
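/* Dump the gathered motion vector mode statistics as a C table: for each of
 * the six reference-mv contexts and four mode decisions, derive an 8-bit
 * probability (count * 256 / total, clamped to at least 1) from the counts
 * accumulated by accum_mv_refs() and write the result to modecont.c as
 * vp8_mode_contexts[6][4].
 */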
michael@0 1920 void print_mode_context(void)
michael@0 1921 {
michael@0 1922 FILE *f = fopen("modecont.c", "w");
michael@0 1923 int i, j;
michael@0 1924
michael@0 1925 fprintf(f, "#include \"entropy.h\"\n");
michael@0 1926 fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
michael@0 1927 fprintf(f, "{\n");
michael@0 1928
michael@0 1929 for (j = 0; j < 6; j++)
michael@0 1930 {
michael@0 1931 fprintf(f, " { /* %d */\n", j);
michael@0 1932 fprintf(f, " ");
michael@0 1933
michael@0 1934 for (i = 0; i < 4; i++)
michael@0 1935 {
michael@0 1936             int overall_prob;
michael@0 1937             int this_prob;
michael@0 1938             int count;
michael@0 1939
michael@0 1940             /* Overall probs */
michael@0 1941             count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
michael@0 1942
michael@0 1943             if (count)
michael@0 1944                 overall_prob = 256 * mv_mode_cts[i][0] / count;
michael@0 1945             else
michael@0 1946                 overall_prob = 128;
michael@0 1947
michael@0 1948             if (overall_prob == 0)
michael@0 1949                 overall_prob = 1;
michael@0 1950
michael@0 1951 /* context probs */
michael@0 1952 count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
michael@0 1953
michael@0 1954 if (count)
michael@0 1955 this_prob = 256 * mv_ref_ct[j][i][0] / count;
michael@0 1956 else
michael@0 1957 this_prob = 128;
michael@0 1958
michael@0 1959 if (this_prob == 0)
michael@0 1960 this_prob = 1;
michael@0 1961
michael@0 1962 fprintf(f, "%5d, ", this_prob);
michael@0 1963 }
michael@0 1964
michael@0 1965 fprintf(f, " },\n");
michael@0 1966 }
michael@0 1967
michael@0 1968 fprintf(f, "};\n");
michael@0 1969 fclose(f);
michael@0 1970 }
michael@0 1971
michael@0 1972 /* MV reference count statistics code (VP8_ENTROPY_STATS) */
michael@0 1973 #ifdef VP8_ENTROPY_STATS
michael@0 1974 void init_mv_ref_counts()
michael@0 1975 {
michael@0 1976 vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
michael@0 1977 vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
michael@0 1978 }
michael@0 1979
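/* Update the statistics for one macroblock: ct[] holds the reference mv
 * counts (the coding context) and m is the chosen prediction mode.  At each
 * stage of the mode decision tree (ZEROMV, NEARESTMV, NEARMV, NEWMV) both
 * the context-conditioned counter mv_ref_ct and the overall counter
 * mv_mode_cts record whether that branch was taken.
 */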
michael@0 1980 void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
michael@0 1981 {
michael@0 1982 if (m == ZEROMV)
michael@0 1983 {
michael@0 1984 ++mv_ref_ct [ct[0]] [0] [0];
michael@0 1985 ++mv_mode_cts[0][0];
michael@0 1986 }
michael@0 1987 else
michael@0 1988 {
michael@0 1989 ++mv_ref_ct [ct[0]] [0] [1];
michael@0 1990 ++mv_mode_cts[0][1];
michael@0 1991
michael@0 1992 if (m == NEARESTMV)
michael@0 1993 {
michael@0 1994 ++mv_ref_ct [ct[1]] [1] [0];
michael@0 1995 ++mv_mode_cts[1][0];
michael@0 1996 }
michael@0 1997 else
michael@0 1998 {
michael@0 1999 ++mv_ref_ct [ct[1]] [1] [1];
michael@0 2000 ++mv_mode_cts[1][1];
michael@0 2001
michael@0 2002 if (m == NEARMV)
michael@0 2003 {
michael@0 2004 ++mv_ref_ct [ct[2]] [2] [0];
michael@0 2005 ++mv_mode_cts[2][0];
michael@0 2006 }
michael@0 2007 else
michael@0 2008 {
michael@0 2009 ++mv_ref_ct [ct[2]] [2] [1];
michael@0 2010 ++mv_mode_cts[2][1];
michael@0 2011
michael@0 2012 if (m == NEWMV)
michael@0 2013 {
michael@0 2014 ++mv_ref_ct [ct[3]] [3] [0];
michael@0 2015 ++mv_mode_cts[3][0];
michael@0 2016 }
michael@0 2017 else
michael@0 2018 {
michael@0 2019 ++mv_ref_ct [ct[3]] [3] [1];
michael@0 2020 ++mv_mode_cts[3][1];
michael@0 2021 }
michael@0 2022 }
michael@0 2023 }
michael@0 2024 }
michael@0 2025 }
michael@0 2026
michael@0 2027 #endif /* End of MV reference count statistics code (VP8_ENTROPY_STATS) */
michael@0 2028
michael@0 2029 #endif
