media/libvpx/vp8/common/mfqe.c

Thu, 15 Jan 2015 15:59:08 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Thu, 15 Jan 2015 15:59:08 +0100
branch
TOR_BUG_9701
changeset 10
ac0c01689b40
permissions
-rw-r--r--

Implement a real Private Browsing Mode condition by changing the API/ABI;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

michael@0 1 /*
michael@0 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
michael@0 3 *
michael@0 4 * Use of this source code is governed by a BSD-style license
michael@0 5 * that can be found in the LICENSE file in the root of the source
michael@0 6 * tree. An additional intellectual property rights grant can be found
michael@0 7 * in the file PATENTS. All contributing project authors may
michael@0 8 * be found in the AUTHORS file in the root of the source tree.
michael@0 9 */
michael@0 10
michael@0 11
michael@0 12 /* MFQE: Multiframe Quality Enhancement
michael@0 13 * In rate limited situations keyframes may cause significant visual artifacts
michael@0 14 * commonly referred to as "popping." This file implements a postprocessing
michael@0 15 * algorithm which blends data from the preceding frame when there is no
michael@0 16 * motion and the q from the previous frame is lower which indicates that it is
michael@0 17 * higher quality.
michael@0 18 */
michael@0 19
michael@0 20 #include "postproc.h"
michael@0 21 #include "variance.h"
michael@0 22 #include "vpx_mem/vpx_mem.h"
michael@0 23 #include "vp8_rtcd.h"
michael@0 24 #include "vpx_scale/yv12config.h"
michael@0 25
michael@0 26 #include <limits.h>
michael@0 27 #include <stdlib.h>
michael@0 28
michael@0 29 static void filter_by_weight(unsigned char *src, int src_stride,
michael@0 30 unsigned char *dst, int dst_stride,
michael@0 31 int block_size, int src_weight)
michael@0 32 {
michael@0 33 int dst_weight = (1 << MFQE_PRECISION) - src_weight;
michael@0 34 int rounding_bit = 1 << (MFQE_PRECISION - 1);
michael@0 35 int r, c;
michael@0 36
michael@0 37 for (r = 0; r < block_size; r++)
michael@0 38 {
michael@0 39 for (c = 0; c < block_size; c++)
michael@0 40 {
michael@0 41 dst[c] = (src[c] * src_weight +
michael@0 42 dst[c] * dst_weight +
michael@0 43 rounding_bit) >> MFQE_PRECISION;
michael@0 44 }
michael@0 45 src += src_stride;
michael@0 46 dst += dst_stride;
michael@0 47 }
michael@0 48 }
michael@0 49
/* RTCD C entry point: blend a 16x16 block (see filter_by_weight). */
void vp8_filter_by_weight16x16_c(unsigned char *src, int src_stride,
                                 unsigned char *dst, int dst_stride,
                                 int src_weight)
{
    filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight);
}
michael@0 56
/* RTCD C entry point: blend an 8x8 block (see filter_by_weight). */
void vp8_filter_by_weight8x8_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight)
{
    filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight);
}
michael@0 63
/* RTCD C entry point: blend a 4x4 block (see filter_by_weight). */
void vp8_filter_by_weight4x4_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight)
{
    filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight);
}
michael@0 70
/* Blend one luma block and its two chroma blocks from the source frame into
 * the destination frame with the given source weight.  block_size selects
 * the luma block size (16 or 8); chroma blocks are half that size in each
 * dimension.  Only 16 and 8 are supported; anything else is treated as 8.
 */
static void apply_ifactor(unsigned char *y_src,
                          int y_src_stride,
                          unsigned char *y_dst,
                          int y_dst_stride,
                          unsigned char *u_src,
                          unsigned char *v_src,
                          int uv_src_stride,
                          unsigned char *u_dst,
                          unsigned char *v_dst,
                          int uv_dst_stride,
                          int block_size,
                          int src_weight)
{
    if (block_size != 16)
    {
        /* block_size == 8: 8x8 luma, 4x4 chroma */
        vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride, src_weight);
        vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride, src_weight);
        vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, uv_dst_stride, src_weight);
        return;
    }

    /* block_size == 16: 16x16 luma, 8x8 chroma */
    vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride, src_weight);
    vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride, src_weight);
    vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride, src_weight);
}
michael@0 97
/* Integer square root of x, rounded to the nearest integer.
 *
 * Determines the result bit by bit from the high bit down (binary search on
 * the root).  Squares are evaluated in 64-bit arithmetic: the original code
 * computed guess*guess in 32 bits, which wraps once the trial guess reaches
 * 1<<16 (i.e. for any x >= 2^31) and then returned garbage.  Inputs below
 * 2^31 are unaffected by this fix.
 */
static unsigned int int_sqrt(unsigned int x)
{
    unsigned int y = x;
    unsigned int guess;
    int p = 1;

    /* p = number of significant bits in x; halve it for the root's top bit. */
    while (y >>= 1) p++;
    p >>= 1;

    guess = 0;
    while (p >= 0)
    {
        /* Tentatively set bit p; clear it again if the square overshoots. */
        guess |= (1U << p);
        if ((unsigned long long)x < (unsigned long long)guess * guess)
            guess -= (1U << p);
        p--;
    }
    /* Round to nearest: choose guess+1 when x >= (guess + 0.5)^2,
     * i.e. when guess^2 + guess + 1 <= x. */
    return guess + ((unsigned long long)guess * guess + guess + 1 <= x);
}
michael@0 117
#define USE_SSD
/* For one block (16x16 or 8x8 luma plus matching chroma), measure how closely
 * the current frame (y/u/v) matches the destination buffer (yd/ud/vd, which
 * holds the previous postprocessed frame).  If the mismatch is small relative
 * to a quality-derived threshold, blend the two with a weight proportional to
 * the mismatch; otherwise copy the current frame's pixels into the
 * destination unchanged.
 *
 * qcurr/qprev are the base Q indices of the current and previous frames.
 * Only blksize values 16 and 8 are supported.
 */
static void multiframe_quality_enhance_block
(
    int blksize, /* Currently only values supported are 16, 8 */
    int qcurr,
    int qprev,
    unsigned char *y,
    unsigned char *u,
    unsigned char *v,
    int y_stride,
    int uv_stride,
    unsigned char *yd,
    unsigned char *ud,
    unsigned char *vd,
    int yd_stride,
    int uvd_stride
)
{
    /* All-zero reference row: turns vp8_variance into an activity (energy)
     * measure of a single block. */
    static const unsigned char VP8_ZEROS[16]=
    {
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
    };
    int uvblksize = blksize >> 1;
    int qdiff = qcurr - qprev;

    int i;
    unsigned char *up;
    unsigned char *udp;
    unsigned char *vp;
    unsigned char *vdp;

    unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk;

    if (blksize == 16)
    {
        /* Normalized (per-pixel, rounded) activity of the destination and
         * the current block. */
        actd = (vp8_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
        act = (vp8_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse)+128)>>8;
#ifdef USE_SSD
        /* With USE_SSD the "sad" variables actually hold normalized SSE
         * (sum of squared error) taken from the variance call's sse output;
         * the variance return value itself is discarded. */
        sad = (vp8_variance16x16(y, y_stride, yd, yd_stride, &sse));
        sad = (sse + 128)>>8;
        usad = (vp8_variance8x8(u, uv_stride, ud, uvd_stride, &sse));
        usad = (sse + 32)>>6;
        vsad = (vp8_variance8x8(v, uv_stride, vd, uvd_stride, &sse));
        vsad = (sse + 32)>>6;
#else
        sad = (vp8_sad16x16(y, y_stride, yd, yd_stride, UINT_MAX) + 128) >> 8;
        usad = (vp8_sad8x8(u, uv_stride, ud, uvd_stride, UINT_MAX) + 32) >> 6;
        vsad = (vp8_sad8x8(v, uv_stride, vd, uvd_stride, UINT_MAX)+ 32) >> 6;
#endif
    }
    else /* if (blksize == 8) */
    {
        actd = (vp8_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
        act = (vp8_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse)+32)>>6;
#ifdef USE_SSD
        sad = (vp8_variance8x8(y, y_stride, yd, yd_stride, &sse));
        sad = (sse + 32)>>6;
        usad = (vp8_variance4x4(u, uv_stride, ud, uvd_stride, &sse));
        usad = (sse + 8)>>4;
        vsad = (vp8_variance4x4(v, uv_stride, vd, uvd_stride, &sse));
        vsad = (sse + 8)>>4;
#else
        sad = (vp8_sad8x8(y, y_stride, yd, yd_stride, UINT_MAX) + 32) >> 6;
        usad = (vp8_sad4x4(u, uv_stride, ud, uvd_stride, UINT_MAX) + 8) >> 4;
        vsad = (vp8_sad4x4(v, uv_stride, vd, uvd_stride, UINT_MAX) + 8) >> 4;
#endif
    }

    /* Blending is risky if the destination block is much "busier" than the
     * current one: it would inject high-frequency content. */
    actrisk = (actd > act * 5);

    /* thr = qdiff/16 + log2(act) + log4(qprev) */
    /* NOTE(review): the formula above says log2(act), but the code shifts
     * actd (destructively).  Likewise qprev is consumed here.  Preserved
     * as-is -- confirm against upstream before changing. */
    thr = (qdiff >> 4);
    while (actd >>= 1) thr++;
    while (qprev >>= 2) thr++;

#ifdef USE_SSD
    /* SSE values are compared against the squared threshold. */
    thrsq = thr * thr;
    if (sad < thrsq &&
        /* additional checks for color mismatch and excessive addition of
         * high-frequencies */
        4 * usad < thrsq && 4 * vsad < thrsq && !actrisk)
#else
    if (sad < thr &&
        /* additional checks for color mismatch and excessive addition of
         * high-frequencies */
        2 * usad < thr && 2 * vsad < thr && !actrisk)
#endif
    {
        int ifactor;
#ifdef USE_SSD
        /* TODO: optimize this later to not need sqr root */
        sad = int_sqrt(sad);
#endif
        /* Source weight grows with the mismatch (sad/thr in MFQE_PRECISION
         * fixed point) and shrinks as the quality gap qdiff widens. */
        ifactor = (sad << MFQE_PRECISION) / thr;
        ifactor >>= (qdiff >> 5);

        if (ifactor)
        {
            apply_ifactor(y, y_stride, yd, yd_stride,
                          u, v, uv_stride,
                          ud, vd, uvd_stride,
                          blksize, ifactor);
        }
    }
    else /* mismatch too large: skip blending, copy the current frame through */
    {
        if (blksize == 16)
        {
            vp8_copy_mem16x16(y, y_stride, yd, yd_stride);
            vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride);
            vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride);
        }
        else /* if (blksize == 8) */
        {
            /* No 4x4 copy helper exists, so copy chroma row by row. */
            vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
            for (up = u, udp = ud, i = 0; i < uvblksize; ++i, up += uv_stride, udp += uvd_stride)
                vpx_memcpy(udp, up, uvblksize);
            for (vp = v, vdp = vd, i = 0; i < uvblksize; ++i, vp += uv_stride, vdp += uvd_stride)
                vpx_memcpy(vdp, vp, uvblksize);
        }
    }
}
michael@0 240
michael@0 241 static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map)
michael@0 242 {
michael@0 243 if (mode_info_context->mbmi.mb_skip_coeff)
michael@0 244 map[0] = map[1] = map[2] = map[3] = 1;
michael@0 245 else if (mode_info_context->mbmi.mode==SPLITMV)
michael@0 246 {
michael@0 247 static int ndx[4][4] =
michael@0 248 {
michael@0 249 {0, 1, 4, 5},
michael@0 250 {2, 3, 6, 7},
michael@0 251 {8, 9, 12, 13},
michael@0 252 {10, 11, 14, 15}
michael@0 253 };
michael@0 254 int i, j;
michael@0 255 for (i=0; i<4; ++i)
michael@0 256 {
michael@0 257 map[i] = 1;
michael@0 258 for (j=0; j<4 && map[j]; ++j)
michael@0 259 map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 &&
michael@0 260 mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2);
michael@0 261 }
michael@0 262 }
michael@0 263 else
michael@0 264 {
michael@0 265 map[0] = map[1] = map[2] = map[3] =
michael@0 266 (mode_info_context->mbmi.mode > B_PRED &&
michael@0 267 abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 &&
michael@0 268 abs(mode_info_context->mbmi.mv.as_mv.col) <= 2);
michael@0 269 }
michael@0 270 return (map[0]+map[1]+map[2]+map[3]);
michael@0 271 }
michael@0 272
/* Run the MFQE postprocessing pass over the whole frame: walks every
 * macroblock of cm->frame_to_show and writes the (possibly blended) result
 * into cm->post_proc_buffer.  Keyframes are processed whole (totmap = 4);
 * inter frames are processed per-8x8-quadrant as decided by
 * qualify_inter_mb; non-qualifying macroblocks/quadrants are copied through
 * unchanged.
 */
void vp8_multiframe_quality_enhance
(
    VP8_COMMON *cm
)
{
    YV12_BUFFER_CONFIG *show = cm->frame_to_show;
    YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer;

    FRAME_TYPE frame_type = cm->frame_type;
    /* Base of the per-MB MODE_INFO list (motion vectors, modes, etc.). */
    const MODE_INFO *mode_info_context = cm->show_frame_mi;
    int mb_row;
    int mb_col;
    /* map[] holds the per-quadrant qualification flags; totmap their sum. */
    int totmap, map[4];
    int qcurr = cm->base_qindex;
    int qprev = cm->postproc_state.last_base_qindex;

    unsigned char *y_ptr, *u_ptr, *v_ptr;
    unsigned char *yd_ptr, *ud_ptr, *vd_ptr;

    /* Set up the buffer pointers */
    y_ptr = show->y_buffer;
    u_ptr = show->u_buffer;
    v_ptr = show->v_buffer;
    yd_ptr = dest->y_buffer;
    ud_ptr = dest->u_buffer;
    vd_ptr = dest->v_buffer;

    /* postprocess each macro block */
    for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
    {
        for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
        {
            /* if motion is high there will likely be no benefit */
            if (frame_type == INTER_FRAME) totmap = qualify_inter_mb(mode_info_context, map);
            else totmap = (frame_type == KEY_FRAME ? 4 : 0);
            if (totmap)
            {
                if (totmap < 4)
                {
                    /* Mixed macroblock: enhance each qualifying 8x8
                     * quadrant, copy the non-qualifying ones through. */
                    int i, j;
                    for (i=0; i<2; ++i)
                        for (j=0; j<2; ++j)
                        {
                            if (map[i*2+j])
                            {
                                multiframe_quality_enhance_block(8, qcurr, qprev,
                                        y_ptr + 8*(i*show->y_stride+j),
                                        u_ptr + 4*(i*show->uv_stride+j),
                                        v_ptr + 4*(i*show->uv_stride+j),
                                        show->y_stride,
                                        show->uv_stride,
                                        yd_ptr + 8*(i*dest->y_stride+j),
                                        ud_ptr + 4*(i*dest->uv_stride+j),
                                        vd_ptr + 4*(i*dest->uv_stride+j),
                                        dest->y_stride,
                                        dest->uv_stride);
                            }
                            else
                            {
                                /* copy an 8x8 block (4x4 chroma copied row
                                 * by row since no 4x4 copy helper exists) */
                                int k;
                                unsigned char *up = u_ptr + 4*(i*show->uv_stride+j);
                                unsigned char *udp = ud_ptr + 4*(i*dest->uv_stride+j);
                                unsigned char *vp = v_ptr + 4*(i*show->uv_stride+j);
                                unsigned char *vdp = vd_ptr + 4*(i*dest->uv_stride+j);
                                vp8_copy_mem8x8(y_ptr + 8*(i*show->y_stride+j), show->y_stride,
                                                yd_ptr + 8*(i*dest->y_stride+j), dest->y_stride);
                                for (k = 0; k < 4; ++k, up += show->uv_stride, udp += dest->uv_stride,
                                                        vp += show->uv_stride, vdp += dest->uv_stride)
                                {
                                    vpx_memcpy(udp, up, 4);
                                    vpx_memcpy(vdp, vp, 4);
                                }
                            }
                        }
                }
                else /* totmap = 4: enhance the whole macroblock at once */
                {
                    multiframe_quality_enhance_block(16, qcurr, qprev, y_ptr,
                                                     u_ptr, v_ptr,
                                                     show->y_stride,
                                                     show->uv_stride,
                                                     yd_ptr, ud_ptr, vd_ptr,
                                                     dest->y_stride,
                                                     dest->uv_stride);
                }
            }
            else
            {
                /* Macroblock does not qualify: copy it through unchanged. */
                vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride);
                vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride);
                vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride);
            }
            /* Advance to the next macroblock column. */
            y_ptr += 16;
            u_ptr += 8;
            v_ptr += 8;
            yd_ptr += 16;
            ud_ptr += 8;
            vd_ptr += 8;
            mode_info_context++; /* step to next MB */
        }

        /* Rewind to the row start, then step down one macroblock row. */
        y_ptr += show->y_stride * 16 - 16 * cm->mb_cols;
        u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
        v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
        yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols;
        ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
        vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;

        mode_info_context++; /* Skip border mb */
    }
}

mercurial