media/libvpx/vp9/common/vp9_reconintra.c

/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_config.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/vpx_once.h"

#include "./vp9_rtcd.h"

#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_onyxc_int.h"

const TX_TYPE mode2txfm_map[MB_MODE_COUNT] = {
  DCT_DCT,    // DC
  ADST_DCT,   // V
  DCT_ADST,   // H
  DCT_DCT,    // D45
  ADST_ADST,  // D135
  ADST_DCT,   // D117
  DCT_ADST,   // D153
  DCT_ADST,   // D207
  ADST_DCT,   // D63
  ADST_ADST,  // TM
  DCT_DCT,    // NEARESTMV
  DCT_DCT,    // NEARMV
  DCT_DCT,    // ZEROMV
  DCT_DCT     // NEWMV
};

#define intra_pred_sized(type, size) \
  void vp9_##type##_predictor_##size##x##size##_c(uint8_t *dst, \
                                                  ptrdiff_t stride, \
                                                  const uint8_t *above, \
                                                  const uint8_t *left) { \
    type##_predictor(dst, stride, size, above, left); \
  }

#define intra_pred_allsizes(type) \
  intra_pred_sized(type, 4) \
  intra_pred_sized(type, 8) \
  intra_pred_sized(type, 16) \
  intra_pred_sized(type, 32)

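// For reference, intra_pred_sized(d207, 4) expands (modulo whitespace) to the
// size-specific C predictor:
//
//   void vp9_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
//                                 const uint8_t *above, const uint8_t *left) {
//     d207_predictor(dst, stride, 4, above, left);
//   }
//
// intra_pred_allsizes(d207) stamps out the 4x4, 8x8, 16x16 and 32x32 variants.
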
static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;

  // first column
  for (r = 0; r < bs - 1; ++r)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r] + left[r + 1], 1);
  dst[(bs - 1) * stride] = left[bs - 1];
  dst++;

  // second column
  for (r = 0; r < bs - 2; ++r)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r] + left[r + 1] * 2 +
                                         left[r + 2], 2);
  dst[(bs - 2) * stride] = ROUND_POWER_OF_TWO(left[bs - 2] +
                                              left[bs - 1] * 3, 2);
  dst[(bs - 1) * stride] = left[bs - 1];
  dst++;

  // rest of last row
  for (c = 0; c < bs - 2; ++c)
    dst[(bs - 1) * stride + c] = left[bs - 1];

  for (r = bs - 2; r >= 0; --r)
    for (c = 0; c < bs - 2; ++c)
      dst[r * stride + c] = dst[(r + 1) * stride + c - 2];
}
intra_pred_allsizes(d207)

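// D63_PRED (~63 degrees): predicts purely from the above row. Even rows take
// the two-tap average of adjacent above[] samples, odd rows the three-tap
// [1 2 1] filter, and the source offset (r/2) steps right by one every two
// rows. (ROUND_POWER_OF_TWO(v, n) rounds v / 2^n to nearest.)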
static INLINE void d63_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                 const uint8_t *above, const uint8_t *left) {
  int r, c;
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c)
      dst[c] = r & 1 ? ROUND_POWER_OF_TWO(above[r/2 + c] +
                                          above[r/2 + c + 1] * 2 +
                                          above[r/2 + c + 2], 2)
                     : ROUND_POWER_OF_TWO(above[r/2 + c] +
                                          above[r/2 + c + 1], 1);
    dst += stride;
  }
}
intra_pred_allsizes(d63)

static INLINE void d45_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                 const uint8_t *above, const uint8_t *left) {
  int r, c;
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c)
      dst[c] = r + c + 2 < bs * 2 ? ROUND_POWER_OF_TWO(above[r + c] +
                                                       above[r + c + 1] * 2 +
                                                       above[r + c + 2], 2)
                                  : above[bs * 2 - 1];
    dst += stride;
  }
}
intra_pred_allsizes(d45)

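// D117_PRED (~117 degrees): the first two rows are filtered copies of the
// above row, the first column is filtered from the left column, and every
// remaining pixel copies the value two rows up and one column to the left,
// propagating the border along the prediction direction.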
static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;

  // first row
  for (c = 0; c < bs; c++)
    dst[c] = ROUND_POWER_OF_TWO(above[c - 1] + above[c], 1);
  dst += stride;

  // second row
  dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
  for (c = 1; c < bs; c++)
    dst[c] = ROUND_POWER_OF_TWO(above[c - 2] + above[c - 1] * 2 + above[c], 2);
  dst += stride;

  // the rest of first col
  dst[0] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
  for (r = 3; r < bs; ++r)
    dst[(r - 2) * stride] = ROUND_POWER_OF_TWO(left[r - 3] + left[r - 2] * 2 +
                                               left[r - 1], 2);

  // the rest of the block
  for (r = 2; r < bs; ++r) {
    for (c = 1; c < bs; c++)
      dst[c] = dst[-2 * stride + c - 1];
    dst += stride;
  }
}
intra_pred_allsizes(d117)

static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;
  dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
  for (c = 1; c < bs; c++)
    dst[c] = ROUND_POWER_OF_TWO(above[c - 2] + above[c - 1] * 2 + above[c], 2);

  dst[stride] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
  for (r = 2; r < bs; ++r)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 2] + left[r - 1] * 2 +
                                         left[r], 2);

  dst += stride;
  for (r = 1; r < bs; ++r) {
    for (c = 1; c < bs; c++)
      dst[c] = dst[-stride + c - 1];
    dst += stride;
  }
}
intra_pred_allsizes(d135)

static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;
  dst[0] = ROUND_POWER_OF_TWO(above[-1] + left[0], 1);
  for (r = 1; r < bs; r++)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 1] + left[r], 1);
  dst++;

  dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
  dst[stride] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
  for (r = 2; r < bs; r++)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 2] + left[r - 1] * 2 +
                                         left[r], 2);
  dst++;

  for (c = 0; c < bs - 2; c++)
    dst[c] = ROUND_POWER_OF_TWO(above[c - 1] + above[c] * 2 + above[c + 1], 2);
  dst += stride;

  for (r = 1; r < bs; ++r) {
    for (c = 0; c < bs - 2; c++)
      dst[c] = dst[-stride + c - 2];
    dst += stride;
  }
}
intra_pred_allsizes(d153)

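// V_PRED and H_PRED are the simplest modes: vertical prediction copies the
// above row into every row of the block, horizontal prediction fills each
// row with the corresponding left-column sample.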
static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                               const uint8_t *above, const uint8_t *left) {
  int r;

  for (r = 0; r < bs; r++) {
    vpx_memcpy(dst, above, bs);
    dst += stride;
  }
}
intra_pred_allsizes(v)

static INLINE void h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                               const uint8_t *above, const uint8_t *left) {
  int r;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, left[r], bs);
    dst += stride;
  }
}
intra_pred_allsizes(h)

static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  int r, c;
  int ytop_left = above[-1];

  for (r = 0; r < bs; r++) {
    for (c = 0; c < bs; c++)
      dst[c] = clip_pixel(left[r] + above[c] - ytop_left);
    dst += stride;
  }
}
intra_pred_allsizes(tm)

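// TM_PRED ("TrueMotion") example: with left[r] = 100, above[c] = 120 and a
// top-left sample of 110, the prediction is clip_pixel(100 + 120 - 110) = 110,
// i.e. the horizontal and vertical gradients of the border are combined and
// clamped to the valid 8-bit pixel range.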
static INLINE void dc_128_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                    const uint8_t *above, const uint8_t *left) {
  int r;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, 128, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_128)

static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  int i, r, expected_dc, sum = 0;

  for (i = 0; i < bs; i++)
    sum += left[i];
  expected_dc = (sum + (bs >> 1)) / bs;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_left)

static INLINE void dc_top_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                    const uint8_t *above, const uint8_t *left) {
  int i, r, expected_dc, sum = 0;

  for (i = 0; i < bs; i++)
    sum += above[i];
  expected_dc = (sum + (bs >> 1)) / bs;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_top)

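// DC_PRED proper averages all 2 * bs border samples; adding half the divisor
// before dividing rounds to nearest, e.g. for bs = 8 and sum = 1000:
// (1000 + 8) / 16 = 63 rather than the truncated 62.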
static INLINE void dc_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  int i, r, expected_dc, sum = 0;
  const int count = 2 * bs;

  for (i = 0; i < bs; i++) {
    sum += above[i];
    sum += left[i];
  }

  expected_dc = (sum + (count >> 1)) / count;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc)
#undef intra_pred_allsizes

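// Per-mode, per-size dispatch tables filled in by init_intra_pred_fn_ptrs().
// The last index is the transform size (0..3 for 4x4, 8x8, 16x16, 32x32);
// dc_pred is additionally indexed as [left_available][up_available] so the
// right DC variant (128 / top-only / left-only / both) is picked at call time.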
typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left);

static intra_pred_fn pred[INTRA_MODES][4];
static intra_pred_fn dc_pred[2][2][4];

static void init_intra_pred_fn_ptrs(void) {
#define intra_pred_allsizes(l, type) \
  l[0] = vp9_##type##_predictor_4x4; \
  l[1] = vp9_##type##_predictor_8x8; \
  l[2] = vp9_##type##_predictor_16x16; \
  l[3] = vp9_##type##_predictor_32x32

  intra_pred_allsizes(pred[V_PRED], v);
  intra_pred_allsizes(pred[H_PRED], h);
  intra_pred_allsizes(pred[D207_PRED], d207);
  intra_pred_allsizes(pred[D45_PRED], d45);
  intra_pred_allsizes(pred[D63_PRED], d63);
  intra_pred_allsizes(pred[D117_PRED], d117);
  intra_pred_allsizes(pred[D135_PRED], d135);
  intra_pred_allsizes(pred[D153_PRED], d153);
  intra_pred_allsizes(pred[TM_PRED], tm);

  intra_pred_allsizes(dc_pred[0][0], dc_128);
  intra_pred_allsizes(dc_pred[0][1], dc_top);
  intra_pred_allsizes(dc_pred[1][0], dc_left);
  intra_pred_allsizes(dc_pred[1][1], dc);

#undef intra_pred_allsizes
}

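// build_intra_predictors() gathers the reconstructed border from ref into the
// local left_col / above_row buffers (bs = 4 << tx_size, so tx_size 0..3 maps
// to 4..32 samples) and then dispatches through the tables above. The diagram
// below shows the fill values used when a neighbour is not available.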
static void build_intra_predictors(const uint8_t *ref, int ref_stride,
                                   uint8_t *dst, int dst_stride,
                                   MB_PREDICTION_MODE mode, TX_SIZE tx_size,
                                   int up_available, int left_available,
                                   int right_available) {
  int i;
  DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64);
  DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16);
  uint8_t *above_row = above_data + 16;
  const uint8_t *const_above_row = above_row;
  const int bs = 4 << tx_size;

  // 127 127 127 .. 127 127 127 127 127 127
  // 129  A   B  ..  Y   Z
  // 129  C   D  ..  W   X
  // 129  E   F  ..  U   V
  // 129  G   H  ..  S   T   T   T   T   T
  // ..

  once(init_intra_pred_fn_ptrs);

  // left
  if (left_available) {
    for (i = 0; i < bs; i++)
      left_col[i] = ref[i * ref_stride - 1];
  } else {
    vpx_memset(left_col, 129, bs);
  }

  // above
  if (up_available) {
    const uint8_t *above_ref = ref - ref_stride;
    if (bs == 4 && right_available && left_available) {
      const_above_row = above_ref;
    } else {
      vpx_memcpy(above_row, above_ref, bs);
      if (bs == 4 && right_available)
        vpx_memcpy(above_row + bs, above_ref + bs, bs);
      else
        vpx_memset(above_row + bs, above_row[bs - 1], bs);
      above_row[-1] = left_available ? above_ref[-1] : 129;
    }
  } else {
    vpx_memset(above_row, 127, bs * 2);
    above_row[-1] = 127;
  }

  // predict
  if (mode == DC_PRED) {
    dc_pred[left_available][up_available][tx_size](dst, dst_stride,
                                                   const_above_row, left_col);
  } else {
    pred[mode][tx_size](dst, dst_stride, const_above_row, left_col);
  }
}

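// Illustrative example (assuming bwl_in is the log2 block width in 4x4 units,
// which is how its callers appear to use it): for a 16x16 block coded with
// 4x4 transforms, bwl_in = 2 and tx_size = 0, so bwl = 2 and wmask = 3.
// Sub-block 5 (raster order: row 1, column 1) then has both a top and a left
// neighbour inside the block, and a right neighbour since (5 & 3) != 3.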
void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in,
                             TX_SIZE tx_size, int mode,
                             const uint8_t *ref, int ref_stride,
                             uint8_t *dst, int dst_stride) {
  const int bwl = bwl_in - tx_size;
  const int wmask = (1 << bwl) - 1;
  const int have_top = (block_idx >> bwl) || xd->up_available;
  const int have_left = (block_idx & wmask) || xd->left_available;
  const int have_right = ((block_idx & wmask) != wmask);

  assert(bwl >= 0);
  build_intra_predictors(ref, ref_stride, dst, dst_stride, mode, tx_size,
                         have_top, have_left, have_right);
}
