/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <math.h>
#include "vpx_mem/vpx_mem.h"

#include "onyx_int.h"
#include "quantize.h"
#include "vp8/common/quant_common.h"

#define EXACT_QUANT
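
/* EXACT_QUANT selects the regular quantizer variant that uses the
 * quant/quant_shift pair produced by invert_quant() further down.
 * EXACT_FASTQUANT is not defined, so the simpler fast quantizer in the
 * #else branch below is the one that gets built.
 */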

#ifdef EXACT_FASTQUANT
void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
{
    int i, rc, eob;
    int zbin;
    int x, y, z, sz;
    short *coeff_ptr       = b->coeff;
    short *zbin_ptr        = b->zbin;
    short *round_ptr       = b->round;
    short *quant_ptr       = b->quant_fast;
    unsigned char *quant_shift_ptr = b->quant_shift;
    short *qcoeff_ptr      = d->qcoeff;
    short *dqcoeff_ptr     = d->dqcoeff;
    short *dequant_ptr     = d->dequant;

    vpx_memset(qcoeff_ptr, 0, 32);
    vpx_memset(dqcoeff_ptr, 0, 32);

    eob = -1;

    for (i = 0; i < 16; i++)
    {
        rc   = vp8_default_zig_zag1d[i];
        z    = coeff_ptr[rc];
        zbin = zbin_ptr[rc];

        sz = (z >> 31);       /* sign of z */
        x  = (z ^ sz) - sz;   /* x = abs(z) */

        if (x >= zbin)
        {
            x += round_ptr[rc];
            y  = ((((x * quant_ptr[rc]) >> 16) + x)
                  * quant_shift_ptr[rc]) >> 16;      /* quantize (x) */
            x  = (y ^ sz) - sz;                      /* get the sign back */
            qcoeff_ptr[rc]  = x;                     /* write to destination */
            dqcoeff_ptr[rc] = x * dequant_ptr[rc];   /* dequantized value */

            if (y)
            {
                eob = i;                             /* last nonzero coeffs */
            }
        }
    }
    *d->eob = (char)(eob + 1);
}

#else

void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
{
    int i, rc, eob;
    int x, y, z, sz;
    short *coeff_ptr   = b->coeff;
    short *round_ptr   = b->round;
    short *quant_ptr   = b->quant_fast;
    short *qcoeff_ptr  = d->qcoeff;
    short *dqcoeff_ptr = d->dqcoeff;
    short *dequant_ptr = d->dequant;

    eob = -1;
    for (i = 0; i < 16; i++)
    {
        rc = vp8_default_zig_zag1d[i];
        z  = coeff_ptr[rc];

        sz = (z >> 31);       /* sign of z */
        x  = (z ^ sz) - sz;   /* x = abs(z) */

        y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16;   /* quantize (x) */
        x = (y ^ sz) - sz;                                 /* get the sign back */
        qcoeff_ptr[rc]  = x;                               /* write to destination */
        dqcoeff_ptr[rc] = x * dequant_ptr[rc];             /* dequantized value */

        if (y)
        {
            eob = i;                                       /* last nonzero coeffs */
        }
    }
    *d->eob = (char)(eob + 1);
}

#endif
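
/* Both fast variants share the same branchless sign handling: sz = z >> 31 is
 * 0 or -1, (z ^ sz) - sz computes abs(z), and (y ^ sz) - sz restores the
 * original sign after quantization.  The quant_fast tables hold (1 << 16) /
 * step_size, so the multiply followed by ">> 16" stands in for a division by
 * the quantizer step.
 */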

#ifdef EXACT_QUANT
void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
{
    int i, rc, eob;
    int zbin;
    int x, y, z, sz;
    short *zbin_boost_ptr  = b->zrun_zbin_boost;
    short *coeff_ptr       = b->coeff;
    short *zbin_ptr        = b->zbin;
    short *round_ptr       = b->round;
    short *quant_ptr       = b->quant;
    short *quant_shift_ptr = b->quant_shift;
    short *qcoeff_ptr      = d->qcoeff;
    short *dqcoeff_ptr     = d->dqcoeff;
    short *dequant_ptr     = d->dequant;
    short zbin_oq_value    = b->zbin_extra;

    vpx_memset(qcoeff_ptr, 0, 32);
    vpx_memset(dqcoeff_ptr, 0, 32);

    eob = -1;

    for (i = 0; i < 16; i++)
    {
        rc = vp8_default_zig_zag1d[i];
        z  = coeff_ptr[rc];

        zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;

        zbin_boost_ptr++;
        sz = (z >> 31);       /* sign of z */
        x  = (z ^ sz) - sz;   /* x = abs(z) */

        if (x >= zbin)
        {
            x += round_ptr[rc];
            y  = ((((x * quant_ptr[rc]) >> 16) + x)
                  * quant_shift_ptr[rc]) >> 16;       /* quantize (x) */
            x  = (y ^ sz) - sz;                       /* get the sign back */
            qcoeff_ptr[rc]  = x;                      /* write to destination */
            dqcoeff_ptr[rc] = x * dequant_ptr[rc];    /* dequantized value */

            if (y)
            {
                eob = i;                              /* last nonzero coeffs */
                zbin_boost_ptr = b->zrun_zbin_boost;  /* reset zero runlength */
            }
        }
    }

    *d->eob = (char)(eob + 1);
}
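
/* The zero bin widens with the run of zero coefficients seen so far:
 * zbin_boost_ptr walks b->zrun_zbin_boost and is reset to the start of the
 * table whenever a nonzero coefficient is produced, so small coefficients
 * that follow long zero runs are more likely to be forced to zero.
 */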

/* Perform regular quantization, with unbiased rounding and no zero bin. */
void vp8_strict_quantize_b_c(BLOCK *b, BLOCKD *d)
{
    int i;
    int rc;
    int eob;
    int x;
    int y;
    int z;
    int sz;
    short *coeff_ptr;
    short *quant_ptr;
    short *quant_shift_ptr;
    short *qcoeff_ptr;
    short *dqcoeff_ptr;
    short *dequant_ptr;

    coeff_ptr       = b->coeff;
    quant_ptr       = b->quant;
    quant_shift_ptr = b->quant_shift;
    qcoeff_ptr      = d->qcoeff;
    dqcoeff_ptr     = d->dqcoeff;
    dequant_ptr     = d->dequant;
    eob = -1;
    vpx_memset(qcoeff_ptr, 0, 32);
    vpx_memset(dqcoeff_ptr, 0, 32);
    for (i = 0; i < 16; i++)
    {
        int dq;
        int rounding;

        /*TODO: These arrays should be stored in zig-zag order.*/
        rc = vp8_default_zig_zag1d[i];
        z  = coeff_ptr[rc];
        dq = dequant_ptr[rc];
        rounding = dq >> 1;
        /* Sign of z. */
        sz = -(z < 0);
        x  = (z + sz) ^ sz;
        x += rounding;
        if (x >= dq)
        {
            /* Quantize x. */
            y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >> 16;
            /* Put the sign back. */
            x = (y + sz) ^ sz;
            /* Save the coefficient and its dequantized value. */
            qcoeff_ptr[rc] = x;
            dqcoeff_ptr[rc] = x * dq;
            /* Remember the last non-zero coefficient. */
            if (y)
                eob = i;
        }
    }

    *d->eob = (char)(eob + 1);
}
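
/* "Unbiased" here means the rounding term is half the dequant step
 * (rounding = dq >> 1), so |z| is mapped to the nearest multiple of dq
 * instead of being pulled toward zero by a dead zone.
 */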

#else

void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
{
    int i, rc, eob;
    int zbin;
    int x, y, z, sz;
    short *zbin_boost_ptr = b->zrun_zbin_boost;
    short *coeff_ptr      = b->coeff;
    short *zbin_ptr       = b->zbin;
    short *round_ptr      = b->round;
    short *quant_ptr      = b->quant;
    short *qcoeff_ptr     = d->qcoeff;
    short *dqcoeff_ptr    = d->dqcoeff;
    short *dequant_ptr    = d->dequant;
    short zbin_oq_value   = b->zbin_extra;

    vpx_memset(qcoeff_ptr, 0, 32);
    vpx_memset(dqcoeff_ptr, 0, 32);

    eob = -1;

    for (i = 0; i < 16; i++)
    {
        rc = vp8_default_zig_zag1d[i];
        z  = coeff_ptr[rc];

        zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;

        zbin_boost_ptr++;
        sz = (z >> 31);       /* sign of z */
        x  = (z ^ sz) - sz;   /* x = abs(z) */

        if (x >= zbin)
        {
            y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16;   /* quantize (x) */
            x = (y ^ sz) - sz;                                 /* get the sign back */
            qcoeff_ptr[rc]  = x;                               /* write to destination */
            dqcoeff_ptr[rc] = x * dequant_ptr[rc];             /* dequantized value */

            if (y)
            {
                eob = i;                                  /* last nonzero coeffs */
                zbin_boost_ptr = &b->zrun_zbin_boost[0];  /* reset zrl */
            }
        }
    }

    *d->eob = (char)(eob + 1);
}

#endif

void vp8_quantize_mby_c(MACROBLOCK *x)
{
    int i;
    int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
                         && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);

    for (i = 0; i < 16; i++)
        x->quantize_b(&x->block[i], &x->e_mbd.block[i]);

    if (has_2nd_order)
        x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}

void vp8_quantize_mb_c(MACROBLOCK *x)
{
    int i;
    int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
                         && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);

    for (i = 0; i < 24 + has_2nd_order; i++)
        x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}

void vp8_quantize_mbuv_c(MACROBLOCK *x)
{
    int i;

    for (i = 16; i < 24; i++)
        x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
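
/* Block index layout per macroblock: 0-15 are the luma 4x4 blocks, 16-23 the
 * chroma blocks, and 24 is the second-order (Y2) block.  The Y2 block is
 * quantized only when the mode is neither B_PRED nor SPLITMV, which is what
 * has_2nd_order checks above.
 */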

/* The quantize_b_pair function pointer in the MACROBLOCK structure is set to
 * one of these two C functions if the corresponding optimized routine is not
 * available. The NEON optimized version currently implements the fast
 * quantization for a pair of blocks. */
void vp8_regular_quantize_b_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2)
{
    vp8_regular_quantize_b(b1, d1);
    vp8_regular_quantize_b(b2, d2);
}

void vp8_fast_quantize_b_pair_c(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2)
{
    vp8_fast_quantize_b_c(b1, d1);
    vp8_fast_quantize_b_c(b2, d2);
}

static const int qrounding_factors[129] =
{
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48
};

static const int qzbin_factors[129] =
{
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80
};

static const int qrounding_factors_y2[129] =
{
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48
};

static const int qzbin_factors_y2[129] =
{
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80
};
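
/* These factors are applied in Q7 (multiplied by the dequant value and
 * shifted right by 7), so a zbin factor of 84 corresponds to roughly 0.66 of
 * a quantizer step (80 -> 0.63 for the higher Q indices) and a rounding
 * factor of 48 to roughly 0.38 of a step.
 */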

#define EXACT_QUANT
#ifdef EXACT_QUANT
static void invert_quant(int improved_quant, short *quant,
                         short *shift, short d)
{
    if (improved_quant)
    {
        unsigned t;
        int l;
        t = d;
        for (l = 0; t > 1; l++)
            t >>= 1;
        t = 1 + (1 << (16 + l)) / d;
        *quant = (short)(t - (1 << 16));
        *shift = l;
        /* use multiplication and constant shift by 16 */
        *shift = 1 << (16 - *shift);
    }
    else
    {
        *quant = (1 << 16) / d;
        *shift = 0;
        /* use multiplication and constant shift by 16 */
        *shift = 1 << (16 - *shift);
    }
}
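
/* Worked example: d = 4 gives l = 2, t = 1 + (1 << 18) / 4 = 65537, so
 * *quant = 1 and *shift = 1 << 14.  In the quantizer this becomes
 * y = ((((x * 1) >> 16) + x) * 16384) >> 16, i.e. x / 4 for 16-bit inputs,
 * reproducing the division by d with a multiply and constant shift.
 */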

void vp8cx_init_quantizer(VP8_COMP *cpi)
{
    int i;
    int quant_val;
    int Q;

    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44,
                          44, 44};

    for (Q = 0; Q < QINDEX_RANGE; Q++)
    {
        /* dc values */
        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
        cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
        invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
                     cpi->Y1quant_shift[Q] + 0, quant_val);
        cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.Y1dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
        cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
        invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
                     cpi->Y2quant_shift[Q] + 0, quant_val);
        cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
        cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
        cpi->common.Y2dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
        cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
        invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
                     cpi->UVquant_shift[Q] + 0, quant_val);
        cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.UVdequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        /* all the ac values = ; */
        quant_val = vp8_ac_yquant(Q);
        cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val;
        invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1,
                     cpi->Y1quant_shift[Q] + 1, quant_val);
        cpi->Y1zbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->Y1round[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.Y1dequant[Q][1] = quant_val;
        cpi->zrun_zbin_boost_y1[Q][1] = (quant_val * zbin_boost[1]) >> 7;

        quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
        cpi->Y2quant_fast[Q][1] = (1 << 16) / quant_val;
        invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 1,
                     cpi->Y2quant_shift[Q] + 1, quant_val);
        cpi->Y2zbin[Q][1] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
        cpi->Y2round[Q][1] = (qrounding_factors_y2[Q] * quant_val) >> 7;
        cpi->common.Y2dequant[Q][1] = quant_val;
        cpi->zrun_zbin_boost_y2[Q][1] = (quant_val * zbin_boost[1]) >> 7;

        quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
        cpi->UVquant_fast[Q][1] = (1 << 16) / quant_val;
        invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 1,
                     cpi->UVquant_shift[Q] + 1, quant_val);
        cpi->UVzbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->UVround[Q][1] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.UVdequant[Q][1] = quant_val;
        cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7;

        for (i = 2; i < 16; i++)
        {
            cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1];
            cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1];
            cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1];
            cpi->Y1zbin[Q][i] = cpi->Y1zbin[Q][1];
            cpi->Y1round[Q][i] = cpi->Y1round[Q][1];
            cpi->zrun_zbin_boost_y1[Q][i] = (cpi->common.Y1dequant[Q][1] *
                                             zbin_boost[i]) >> 7;

            cpi->Y2quant_fast[Q][i] = cpi->Y2quant_fast[Q][1];
            cpi->Y2quant[Q][i] = cpi->Y2quant[Q][1];
            cpi->Y2quant_shift[Q][i] = cpi->Y2quant_shift[Q][1];
            cpi->Y2zbin[Q][i] = cpi->Y2zbin[Q][1];
            cpi->Y2round[Q][i] = cpi->Y2round[Q][1];
            cpi->zrun_zbin_boost_y2[Q][i] = (cpi->common.Y2dequant[Q][1] *
                                             zbin_boost[i]) >> 7;

            cpi->UVquant_fast[Q][i] = cpi->UVquant_fast[Q][1];
            cpi->UVquant[Q][i] = cpi->UVquant[Q][1];
            cpi->UVquant_shift[Q][i] = cpi->UVquant_shift[Q][1];
            cpi->UVzbin[Q][i] = cpi->UVzbin[Q][1];
            cpi->UVround[Q][i] = cpi->UVround[Q][1];
            cpi->zrun_zbin_boost_uv[Q][i] = (cpi->common.UVdequant[Q][1] *
                                             zbin_boost[i]) >> 7;
        }
    }
}
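
/* Index 0 of each table holds the DC quantizer, index 1 the AC quantizer, and
 * entries 2..15 simply copy index 1 because all AC coefficients of a block
 * share the same step size; only zrun_zbin_boost varies per position.
 */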

#else
void vp8cx_init_quantizer(VP8_COMP *cpi)
{
    int i;
    int quant_val;
    int Q;

    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};

    for (Q = 0; Q < QINDEX_RANGE; Q++)
    {
        /* dc values */
        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
        cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
        cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.Y1dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
        cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
        cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
        cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
        cpi->common.Y2dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
        cpi->UVquant[Q][0] = (1 << 16) / quant_val;
        cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.UVdequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        /* all the ac values = ; */
        for (i = 1; i < 16; i++)
        {
            int rc = vp8_default_zig_zag1d[i];

            quant_val = vp8_ac_yquant(Q);
            cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
            cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.Y1dequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
            cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
            cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
            cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
            cpi->common.Y2dequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
            cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
            cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.UVdequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
        }
    }
}
#endif

#define ZBIN_EXTRA_Y \
    (( cpi->common.Y1dequant[QIndex][1] * \
    ( x->zbin_over_quant + \
      x->zbin_mode_boost + \
      x->act_zbin_adj ) ) >> 7)

#define ZBIN_EXTRA_UV \
    (( cpi->common.UVdequant[QIndex][1] * \
    ( x->zbin_over_quant + \
      x->zbin_mode_boost + \
      x->act_zbin_adj ) ) >> 7)

#define ZBIN_EXTRA_Y2 \
    (( cpi->common.Y2dequant[QIndex][1] * \
    ( (x->zbin_over_quant / 2) + \
      x->zbin_mode_boost + \
      x->act_zbin_adj ) ) >> 7)
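
/* zbin_extra widens the zero bin per macroblock: the sum of the over-quant,
 * mode boost and activity adjustment terms is scaled by the AC dequant value
 * and reduced to Q7, matching the scaling of the static zbin tables above.
 * The Y2 block uses only half of the over-quant term.
 */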

void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
{
    int i;
    int QIndex;
    MACROBLOCKD *xd = &x->e_mbd;
    int zbin_extra;

    /* Select the baseline MB Q index. */
    if (xd->segmentation_enabled)
    {
        /* Abs Value */
        if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
            QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
        /* Delta Value */
        else
        {
            QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
            /* Clamp to valid range */
            QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
        }
    }
    else
        QIndex = cpi->common.base_qindex;

    /* This initialization should be called at least once. Use ok_to_skip to
     * decide if it is ok to skip.
     * Before encoding a frame, this function is always called with
     * ok_to_skip=0, which means no skipping of calculations. The "last"
     * values are initialized at that time.
     */
    if (!ok_to_skip || QIndex != x->q_index)
    {

        xd->dequant_y1_dc[0] = 1;
        xd->dequant_y1[0] = cpi->common.Y1dequant[QIndex][0];
        xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0];
        xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0];

        for (i = 1; i < 16; i++)
        {
            xd->dequant_y1_dc[i] =
            xd->dequant_y1[i] = cpi->common.Y1dequant[QIndex][1];
            xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1];
            xd->dequant_uv[i] = cpi->common.UVdequant[QIndex][1];
        }
#if 1
        /*TODO: Remove dequant from BLOCKD. This is a temporary solution until
         * the quantizer code uses a passed in pointer to the dequant constants.
         * This will also require modifications to the x86 and neon assembly.
         * */
        for (i = 0; i < 16; i++)
            x->e_mbd.block[i].dequant = xd->dequant_y1;
        for (i = 16; i < 24; i++)
            x->e_mbd.block[i].dequant = xd->dequant_uv;
        x->e_mbd.block[24].dequant = xd->dequant_y2;
#endif

        /* Y */
        zbin_extra = ZBIN_EXTRA_Y;

        for (i = 0; i < 16; i++)
        {
            x->block[i].quant = cpi->Y1quant[QIndex];
            x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
            x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
            x->block[i].zbin = cpi->Y1zbin[QIndex];
            x->block[i].round = cpi->Y1round[QIndex];
            x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
            x->block[i].zbin_extra = (short)zbin_extra;
        }

        /* UV */
        zbin_extra = ZBIN_EXTRA_UV;

        for (i = 16; i < 24; i++)
        {
            x->block[i].quant = cpi->UVquant[QIndex];
            x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
            x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
            x->block[i].zbin = cpi->UVzbin[QIndex];
            x->block[i].round = cpi->UVround[QIndex];
            x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
            x->block[i].zbin_extra = (short)zbin_extra;
        }

        /* Y2 */
        zbin_extra = ZBIN_EXTRA_Y2;

        x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
        x->block[24].quant = cpi->Y2quant[QIndex];
        x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
        x->block[24].zbin = cpi->Y2zbin[QIndex];
        x->block[24].round = cpi->Y2round[QIndex];
        x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
        x->block[24].zbin_extra = (short)zbin_extra;

        /* save this macroblock QIndex for vp8_update_zbin_extra() */
        x->q_index = QIndex;

        x->last_zbin_over_quant = x->zbin_over_quant;
        x->last_zbin_mode_boost = x->zbin_mode_boost;
        x->last_act_zbin_adj = x->act_zbin_adj;
    }
    else if (x->last_zbin_over_quant != x->zbin_over_quant
             || x->last_zbin_mode_boost != x->zbin_mode_boost
             || x->last_act_zbin_adj != x->act_zbin_adj)
    {
        /* Y */
        zbin_extra = ZBIN_EXTRA_Y;

        for (i = 0; i < 16; i++)
            x->block[i].zbin_extra = (short)zbin_extra;

        /* UV */
        zbin_extra = ZBIN_EXTRA_UV;

        for (i = 16; i < 24; i++)
            x->block[i].zbin_extra = (short)zbin_extra;

        /* Y2 */
        zbin_extra = ZBIN_EXTRA_Y2;
        x->block[24].zbin_extra = (short)zbin_extra;

        x->last_zbin_over_quant = x->zbin_over_quant;
        x->last_zbin_mode_boost = x->zbin_mode_boost;
        x->last_act_zbin_adj = x->act_zbin_adj;
    }
}
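
/* Three cases above: a full (re)initialization when ok_to_skip is 0 or the
 * MB QIndex changed, a cheaper refresh of only the zbin_extra values when
 * just the boost terms changed, and nothing at all otherwise.
 */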

void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
{
    int i;
    int QIndex = x->q_index;
    int zbin_extra;

    /* Y */
    zbin_extra = ZBIN_EXTRA_Y;

    for (i = 0; i < 16; i++)
        x->block[i].zbin_extra = (short)zbin_extra;

    /* UV */
    zbin_extra = ZBIN_EXTRA_UV;

    for (i = 16; i < 24; i++)
        x->block[i].zbin_extra = (short)zbin_extra;

    /* Y2 */
    zbin_extra = ZBIN_EXTRA_Y2;
    x->block[24].zbin_extra = (short)zbin_extra;
}
#undef ZBIN_EXTRA_Y
#undef ZBIN_EXTRA_UV
#undef ZBIN_EXTRA_Y2

void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
{
    /* Clear Zbin mode boost for default case */
    cpi->mb.zbin_mode_boost = 0;

    /* MB level quantizer setup */
    vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
}

void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
{
    VP8_COMMON *cm = &cpi->common;
    MACROBLOCKD *mbd = &cpi->mb.e_mbd;
    int update = 0;
    int new_delta_q;
    cm->base_qindex = Q;

    /* if any of the delta_q values are changing, the update flag has to be set */
    /* currently only y2dc_delta_q may change */

    cm->y1dc_delta_q = 0;
    cm->y2ac_delta_q = 0;
    cm->uvdc_delta_q = 0;
    cm->uvac_delta_q = 0;

    if (Q < 4)
    {
        new_delta_q = 4 - Q;
    }
    else
        new_delta_q = 0;

    update |= cm->y2dc_delta_q != new_delta_q;
    cm->y2dc_delta_q = new_delta_q;

    /* Set segment specific quantizers */
    mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
    mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
    mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
    mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];

    /* quantizer has to be reinitialized for any delta_q changes */
    if (update)
        vp8cx_init_quantizer(cpi);
}
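
/* Note: for very small base Q (Q < 4) a positive y2dc delta of 4 - Q is
 * applied, and any change in that delta triggers a full
 * vp8cx_init_quantizer() so the derived tables match the new delta.
 */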