/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBlurMask.h"
#include "SkMath.h"
#include "SkTemplates.h"
#include "SkEndian.h"

SkScalar SkBlurMask::ConvertRadiusToSigma(SkScalar radius) {
    // This constant approximates the scaling done in the software path's
    // "high quality" mode, in SkBlurMask::Blur() (1 / sqrt(3)).
    // Arguably it should be 1: we blur "less" than we should according to the
    // CSS and canvas specs, simply because Safari does the same. Firefox used
    // to do the same too, until 4.0, where they fixed it. So at some point we
    // should probably get rid of these scaling constants and rebaseline all
    // the blur tests.
    static const SkScalar kBLUR_SIGMA_SCALE = 0.57735f;
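    // For example (illustrative note, not part of the original source): with
    // this scale a radius of 10 maps to a sigma of about 0.57735 * 10 + 0.5 = 6.27.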

    return radius ? kBLUR_SIGMA_SCALE * radius + 0.5f : 0.0f;
}

#define UNROLL_SEPARABLE_LOOPS

/**
 * This function performs a box blur in X, of the given radius. If the
 * "transpose" parameter is true, it will transpose the pixels on write,
 * such that X and Y are swapped. Reads are always performed from contiguous
 * memory in X, for speed. The destination buffer (dst) must be at least
 * (width + leftRadius + rightRadius) * height bytes in size.
 *
 * This is what the inner loop looks like before unrolling, and with the two
 * cases broken out separately (width < diameter, width >= diameter):
 *
 *      if (width < diameter) {
 *          for (int x = 0; x < width; ++x) {
 *              sum += *right++;
 *              *dptr = (sum * scale + half) >> 24;
 *              dptr += dst_x_stride;
 *          }
 *          for (int x = width; x < diameter; ++x) {
 *              *dptr = (sum * scale + half) >> 24;
 *              dptr += dst_x_stride;
 *          }
 *          for (int x = 0; x < width; ++x) {
 *              *dptr = (sum * scale + half) >> 24;
 *              sum -= *left++;
 *              dptr += dst_x_stride;
 *          }
 *      } else {
 *          for (int x = 0; x < diameter; ++x) {
 *              sum += *right++;
 *              *dptr = (sum * scale + half) >> 24;
 *              dptr += dst_x_stride;
 *          }
 *          for (int x = diameter; x < width; ++x) {
 *              sum += *right++;
 *              *dptr = (sum * scale + half) >> 24;
 *              sum -= *left++;
 *              dptr += dst_x_stride;
 *          }
 *          for (int x = 0; x < diameter; ++x) {
 *              *dptr = (sum * scale + half) >> 24;
 *              sum -= *left++;
 *              dptr += dst_x_stride;
 *          }
 *      }
 */
static int boxBlur(const uint8_t* src, int src_y_stride, uint8_t* dst,
                   int leftRadius, int rightRadius, int width, int height,
                   bool transpose)
{
    int diameter = leftRadius + rightRadius;
    int kernelSize = diameter + 1;
    int border = SkMin32(width, diameter);
    uint32_t scale = (1 << 24) / kernelSize;
    int new_width = width + SkMax32(leftRadius, rightRadius) * 2;
    int dst_x_stride = transpose ? height : 1;
    int dst_y_stride = transpose ? 1 : new_width;
    uint32_t half = 1 << 23;
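    // Illustrative note (not in the original source): scale is the reciprocal
    // of kernelSize in 8.24 fixed point and half is 0.5 in that format, so
    // (sum * scale + half) >> 24 below is effectively a round-to-nearest
    // division of sum by kernelSize.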
    for (int y = 0; y < height; ++y) {
        uint32_t sum = 0;
        uint8_t* dptr = dst + y * dst_y_stride;
        const uint8_t* right = src + y * src_y_stride;
        const uint8_t* left = right;
        for (int x = 0; x < rightRadius - leftRadius; x++) {
            *dptr = 0;
            dptr += dst_x_stride;
        }
#define LEFT_BORDER_ITER \
            sum += *right++; \
            *dptr = (sum * scale + half) >> 24; \
            dptr += dst_x_stride;

        int x = 0;
#ifdef UNROLL_SEPARABLE_LOOPS
        for (; x < border - 16; x += 16) {
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
        }
#endif
        for (; x < border; ++x) {
            LEFT_BORDER_ITER
        }
#undef LEFT_BORDER_ITER
#define TRIVIAL_ITER \
            *dptr = (sum * scale + half) >> 24; \
            dptr += dst_x_stride;
        x = width;
#ifdef UNROLL_SEPARABLE_LOOPS
        for (; x < diameter - 16; x += 16) {
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
            TRIVIAL_ITER
        }
#endif
        for (; x < diameter; ++x) {
            TRIVIAL_ITER
        }
#undef TRIVIAL_ITER
#define CENTER_ITER \
            sum += *right++; \
            *dptr = (sum * scale + half) >> 24; \
            sum -= *left++; \
            dptr += dst_x_stride;

        x = diameter;
#ifdef UNROLL_SEPARABLE_LOOPS
        for (; x < width - 16; x += 16) {
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
        }
#endif
        for (; x < width; ++x) {
            CENTER_ITER
        }
#undef CENTER_ITER
#define RIGHT_BORDER_ITER \
            *dptr = (sum * scale + half) >> 24; \
            sum -= *left++; \
            dptr += dst_x_stride;

        x = 0;
#ifdef UNROLL_SEPARABLE_LOOPS
        for (; x < border - 16; x += 16) {
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
        }
#endif
        for (; x < border; ++x) {
            RIGHT_BORDER_ITER
        }
#undef RIGHT_BORDER_ITER
        for (int x = 0; x < leftRadius - rightRadius; ++x) {
            *dptr = 0;
            dptr += dst_x_stride;
        }
        SkASSERT(sum == 0);
    }
    return new_width;
}
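// Illustrative note (not in the original source): callers chain these passes,
// feeding the widened output (the returned new_width) back in as the next
// pass's stride and width. For example, the low quality path in
// SkBlurMask::BoxBlur below does:
//     w = boxBlur(sp, src.fRowBytes, tp, rx, rx, w, h, true);
//     h = boxBlur(tp, h, dp, ry, ry, h, w, true);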

/**
 * This variant of the box blur handles blurring of non-integer radii. It
 * keeps two running sums: an outer sum for the rounded-up kernel radius, and
 * an inner sum for the rounded-down kernel radius. For each pixel, it linearly
 * interpolates between them. In float this would be:
 *      outer_weight * outer_sum / kernelSize +
 *      (1.0 - outer_weight) * inner_sum / (kernelSize - 2)
 *
 * This is what the inner loop looks like before unrolling, and with the two
 * cases broken out separately (width < diameter, width >= diameter):
 *
 *      if (width < diameter) {
 *          for (int x = 0; x < width; x++) {
 *              inner_sum = outer_sum;
 *              outer_sum += *right++;
 *              *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
 *              dptr += dst_x_stride;
 *          }
 *          for (int x = width; x < diameter; ++x) {
 *              *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
 *              dptr += dst_x_stride;
 *          }
 *          for (int x = 0; x < width; x++) {
 *              inner_sum = outer_sum - *left++;
 *              *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
 *              dptr += dst_x_stride;
 *              outer_sum = inner_sum;
 *          }
 *      } else {
 *          for (int x = 0; x < diameter; x++) {
 *              inner_sum = outer_sum;
 *              outer_sum += *right++;
 *              *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
 *              dptr += dst_x_stride;
 *          }
 *          for (int x = diameter; x < width; ++x) {
 *              inner_sum = outer_sum - *left;
 *              outer_sum += *right++;
 *              *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
 *              dptr += dst_x_stride;
 *              outer_sum -= *left++;
 *          }
 *          for (int x = 0; x < diameter; x++) {
 *              inner_sum = outer_sum - *left++;
 *              *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
 *              dptr += dst_x_stride;
 *              outer_sum = inner_sum;
 *          }
 *      }
 *  }
 *  return new_width;
 */

static int boxBlurInterp(const uint8_t* src, int src_y_stride, uint8_t* dst,
                         int radius, int width, int height,
                         bool transpose, uint8_t outer_weight)
{
    int diameter = radius * 2;
    int kernelSize = diameter + 1;
    int border = SkMin32(width, diameter);
    int inner_weight = 255 - outer_weight;
    outer_weight += outer_weight >> 7;
    inner_weight += inner_weight >> 7;
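    // Illustrative note (not in the original source): the two shifts above bump
    // the 0..255 weights into a 0..256 range so that outer_weight + inner_weight
    // is exactly 256, making the fixed-point scales computed below an exact
    // partition of the blend.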
    uint32_t outer_scale = (outer_weight << 16) / kernelSize;
    uint32_t inner_scale = (inner_weight << 16) / (kernelSize - 2);
    uint32_t half = 1 << 23;
    int new_width = width + diameter;
    int dst_x_stride = transpose ? height : 1;
    int dst_y_stride = transpose ? 1 : new_width;
    for (int y = 0; y < height; ++y) {
        uint32_t outer_sum = 0, inner_sum = 0;
        uint8_t* dptr = dst + y * dst_y_stride;
        const uint8_t* right = src + y * src_y_stride;
        const uint8_t* left = right;
        int x = 0;

#define LEFT_BORDER_ITER \
            inner_sum = outer_sum; \
            outer_sum += *right++; \
            *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24; \
            dptr += dst_x_stride;

#ifdef UNROLL_SEPARABLE_LOOPS
        for (; x < border - 16; x += 16) {
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
            LEFT_BORDER_ITER
        }
#endif

        for (; x < border; ++x) {
            LEFT_BORDER_ITER
        }
#undef LEFT_BORDER_ITER
        for (int x = width; x < diameter; ++x) {
            *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24;
            dptr += dst_x_stride;
        }
        x = diameter;

#define CENTER_ITER \
            inner_sum = outer_sum - *left; \
            outer_sum += *right++; \
            *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24; \
            dptr += dst_x_stride; \
            outer_sum -= *left++;

#ifdef UNROLL_SEPARABLE_LOOPS
        for (; x < width - 16; x += 16) {
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
            CENTER_ITER
        }
#endif
        for (; x < width; ++x) {
            CENTER_ITER
        }
#undef CENTER_ITER

#define RIGHT_BORDER_ITER \
            inner_sum = outer_sum - *left++; \
            *dptr = (outer_sum * outer_scale + inner_sum * inner_scale + half) >> 24; \
            dptr += dst_x_stride; \
            outer_sum = inner_sum;

        x = 0;
#ifdef UNROLL_SEPARABLE_LOOPS
        for (; x < border - 16; x += 16) {
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
            RIGHT_BORDER_ITER
        }
#endif
        for (; x < border; ++x) {
            RIGHT_BORDER_ITER
        }
#undef RIGHT_BORDER_ITER
        SkASSERT(outer_sum == 0 && inner_sum == 0);
    }
    return new_width;
}

static void get_adjusted_radii(SkScalar passRadius, int *loRadius, int *hiRadius)
{
    *loRadius = *hiRadius = SkScalarCeilToInt(passRadius);
    if (SkIntToScalar(*hiRadius) - passRadius > 0.5f) {
        *loRadius = *hiRadius - 1;
    }
}
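// Worked example (illustrative, not in the original source): a passRadius of
// 2.3 rounds up to hiRadius = 3; since 3 - 2.3 = 0.7 > 0.5, loRadius drops to
// 2, and the high quality path below then mixes the two radii across its box
// blur passes.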

#include "SkColorPriv.h"

static void merge_src_with_blur(uint8_t dst[], int dstRB,
                                const uint8_t src[], int srcRB,
                                const uint8_t blur[], int blurRB,
                                int sw, int sh) {
    dstRB -= sw;
    srcRB -= sw;
    blurRB -= sw;
    while (--sh >= 0) {
        for (int x = sw - 1; x >= 0; --x) {
            *dst = SkToU8(SkAlphaMul(*blur, SkAlpha255To256(*src)));
            dst += 1;
            src += 1;
            blur += 1;
        }
        dst += dstRB;
        src += srcRB;
        blur += blurRB;
    }
}
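// Illustrative note (not in the original source): merge_src_with_blur above
// multiplies the blurred coverage by the original coverage (used for the
// inner blur style), while clamp_with_orig below composites the original back
// over the blur: kSolid_Style unions the two (s + d - s*d/255) and
// kOuter_Style knocks the original coverage back out of the blur.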

static void clamp_with_orig(uint8_t dst[], int dstRowBytes,
                            const uint8_t src[], int srcRowBytes,
                            int sw, int sh,
                            SkBlurMask::Style style) {
    int x;
    while (--sh >= 0) {
        switch (style) {
        case SkBlurMask::kSolid_Style:
            for (x = sw - 1; x >= 0; --x) {
                int s = *src;
                int d = *dst;
                *dst = SkToU8(s + d - SkMulDiv255Round(s, d));
                dst += 1;
                src += 1;
            }
            break;
        case SkBlurMask::kOuter_Style:
            for (x = sw - 1; x >= 0; --x) {
                if (*src) {
                    *dst = SkToU8(SkAlphaMul(*dst, SkAlpha255To256(255 - *src)));
                }
                dst += 1;
                src += 1;
            }
            break;
        default:
            SkDEBUGFAIL("Unexpected blur style here");
            break;
        }
        dst += dstRowBytes - sw;
        src += srcRowBytes - sw;
    }
}

///////////////////////////////////////////////////////////////////////////////

// we use a local function to wrap the class static method to work around
// a bug in gcc98
void SkMask_FreeImage(uint8_t* image);
void SkMask_FreeImage(uint8_t* image) {
    SkMask::FreeImage(image);
}

bool SkBlurMask::BoxBlur(SkMask* dst, const SkMask& src,
                         SkScalar sigma, Style style, Quality quality,
                         SkIPoint* margin) {

    if (src.fFormat != SkMask::kA8_Format) {
        return false;
    }

    // Force high quality off for small radii (performance)
    if (sigma <= SkIntToScalar(2)) {
        quality = kLow_Quality;
    }

    SkScalar passRadius;
    if (kHigh_Quality == quality) {
        // For the high quality path the 3 pass box blur kernel width is
        // 6*rad+1 while the full Gaussian width is 6*sigma.
        passRadius = sigma - (1/6.0f);
    } else {
        // For the low quality path we only attempt to cover 3*sigma of the
        // Gaussian blur area (1.5*sigma on each side). The single pass box
        // blur's kernel size is 2*rad+1.
        passRadius = 1.5f*sigma - 0.5f;
    }
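    // Illustrative derivation (not in the original source): equating support
    // widths gives 6*rad + 1 = 6*sigma, i.e. rad = sigma - 1/6, for the high
    // quality path, and 2*rad + 1 = 3*sigma, i.e. rad = 1.5*sigma - 0.5, for
    // the low quality path, which are the two expressions used above.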

    // highQuality: use three box blur passes as a cheap way
    // to approximate a Gaussian blur
    int passCount = (kHigh_Quality == quality) ? 3 : 1;

    int rx = SkScalarCeilToInt(passRadius);
    int outerWeight = 255 - SkScalarRoundToInt((SkIntToScalar(rx) - passRadius) * 255);

    SkASSERT(rx >= 0);
    SkASSERT((unsigned)outerWeight <= 255);
    if (rx <= 0) {
        return false;
    }

    int ry = rx;    // only do square blur for now

    int padx = passCount * rx;
    int pady = passCount * ry;

    if (margin) {
        margin->set(padx, pady);
    }
    dst->fBounds.set(src.fBounds.fLeft - padx, src.fBounds.fTop - pady,
                     src.fBounds.fRight + padx, src.fBounds.fBottom + pady);

    dst->fRowBytes = dst->fBounds.width();
    dst->fFormat = SkMask::kA8_Format;
    dst->fImage = NULL;

    if (src.fImage) {
        size_t dstSize = dst->computeImageSize();
        if (0 == dstSize) {
            return false;   // too big to allocate, abort
        }

        int sw = src.fBounds.width();
        int sh = src.fBounds.height();
        const uint8_t* sp = src.fImage;
        uint8_t* dp = SkMask::AllocImage(dstSize);
        SkAutoTCallVProc<uint8_t, SkMask_FreeImage> autoCall(dp);

        // build the blurry destination
        SkAutoTMalloc<uint8_t> tmpBuffer(dstSize);
        uint8_t* tp = tmpBuffer.get();
        int w = sw, h = sh;

        if (outerWeight == 255) {
            int loRadius, hiRadius;
            get_adjusted_radii(passRadius, &loRadius, &hiRadius);
            if (kHigh_Quality == quality) {
                // Do three X blurs, with a transpose on the final one.
                w = boxBlur(sp, src.fRowBytes, tp, loRadius, hiRadius, w, h, false);
                w = boxBlur(tp, w, dp, hiRadius, loRadius, w, h, false);
                w = boxBlur(dp, w, tp, hiRadius, hiRadius, w, h, true);
                // Do three Y blurs, with a transpose on the final one.
                h = boxBlur(tp, h, dp, loRadius, hiRadius, h, w, false);
                h = boxBlur(dp, h, tp, hiRadius, loRadius, h, w, false);
                h = boxBlur(tp, h, dp, hiRadius, hiRadius, h, w, true);
            } else {
                w = boxBlur(sp, src.fRowBytes, tp, rx, rx, w, h, true);
                h = boxBlur(tp, h, dp, ry, ry, h, w, true);
            }
        } else {
            if (kHigh_Quality == quality) {
                // Do three X blurs, with a transpose on the final one.
                w = boxBlurInterp(sp, src.fRowBytes, tp, rx, w, h, false, outerWeight);
                w = boxBlurInterp(tp, w, dp, rx, w, h, false, outerWeight);
                w = boxBlurInterp(dp, w, tp, rx, w, h, true, outerWeight);
                // Do three Y blurs, with a transpose on the final one.
                h = boxBlurInterp(tp, h, dp, ry, h, w, false, outerWeight);
                h = boxBlurInterp(dp, h, tp, ry, h, w, false, outerWeight);
                h = boxBlurInterp(tp, h, dp, ry, h, w, true, outerWeight);
            } else {
                w = boxBlurInterp(sp, src.fRowBytes, tp, rx, w, h, true, outerWeight);
                h = boxBlurInterp(tp, h, dp, ry, h, w, true, outerWeight);
            }
        }

        dst->fImage = dp;
        // if need be, alloc the "real" dst (same size as src) and copy/merge
        // the blur into it (applying the src)
        if (style == kInner_Style) {
            // now we allocate the "real" dst, mirror the size of src
            size_t srcSize = src.computeImageSize();
            if (0 == srcSize) {
                return false;   // too big to allocate, abort
            }
            dst->fImage = SkMask::AllocImage(srcSize);
            merge_src_with_blur(dst->fImage, src.fRowBytes,
                                sp, src.fRowBytes,
                                dp + passCount * (rx + ry * dst->fRowBytes),
                                dst->fRowBytes, sw, sh);
            SkMask::FreeImage(dp);
        } else if (style != kNormal_Style) {
            clamp_with_orig(dp + passCount * (rx + ry * dst->fRowBytes),
                            dst->fRowBytes, sp, src.fRowBytes, sw, sh, style);
        }
        (void)autoCall.detach();
    }

    if (style == kInner_Style) {
        dst->fBounds = src.fBounds; // restore trimmed bounds
        dst->fRowBytes = src.fRowBytes;
    }

    return true;
}

/*  Convolving a box with itself three times results in a piecewise
    quadratic function:

        0                          x <= -1.5
        9/8 + 3/2 x + 1/2 x^2     -1.5 < x <= -0.5
        3/4 - x^2                 -0.5 < x <=  0.5
        9/8 - 3/2 x + 1/2 x^2      0.5 < x <=  1.5
        0                          1.5 < x

    Mathematica:

    g[x_] := Piecewise [ {
        {9/8 + 3/2 x + 1/2 x^2 , -1.5 < x <= -0.5},
        {3/4 - x^2             , -0.5 < x <=  0.5},
        {9/8 - 3/2 x + 1/2 x^2 ,  0.5 < x <=  1.5}
    }, 0]

    To get the profile curve of the blurred step function at the rectangle
    edge, we evaluate the indefinite integral, which is piecewise cubic:

        0                                       x <= -1.5
        9/16 + 9/8 x + 3/4 x^2 + 1/6 x^3       -1.5 < x <= -0.5
        1/2 + 3/4 x - 1/3 x^3                  -0.5 < x <=  0.5
        7/16 + 9/8 x - 3/4 x^2 + 1/6 x^3        0.5 < x <=  1.5
        1                                       1.5 < x

    in Mathematica code:

    gi[x_] := Piecewise[ {
        { 0                                ,        x <= -1.5 },
        { 9/16 + 9/8 x + 3/4 x^2 + 1/6 x^3 , -1.5 < x <= -0.5 },
        { 1/2 + 3/4 x - 1/3 x^3            , -0.5 < x <=  0.5 },
        { 7/16 + 9/8 x - 3/4 x^2 + 1/6 x^3 ,  0.5 < x <=  1.5 }
    }, 1]
*/
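// Note (illustrative, not part of the original source): gaussianIntegral below
// evaluates the complement 1 - gi[x], so it falls from 1 at x <= -1.5 to 0 at
// x >= 1.5 rather than rising like gi[x] above.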

static float gaussianIntegral(float x) {
    if (x > 1.5f) {
        return 0.0f;
    }
    if (x < -1.5f) {
        return 1.0f;
    }

    float x2 = x*x;
    float x3 = x2*x;

    if (x > 0.5f) {
        return 0.5625f - (x3 / 6.0f - 3.0f * x2 * 0.25f + 1.125f * x);
    }
    if (x > -0.5f) {
        return 0.5f - (0.75f * x - x3 / 3.0f);
    }
    return 0.4375f + (-x3 / 6.0f - 3.0f * x2 * 0.25f - 1.125f * x);
}

/*  ComputeBlurProfile allocates and fills in an array of 8-bit (0..255)
    values for the profile signature of a blurred half-plane with the given
    blur sigma. Since we're going to be doing screened multiplications
    (i.e., 1 - (1-x)(1-y)) all the time, we actually fill in the profile
    pre-inverted (already done 255-x).

    It is the responsibility of the caller to delete the
    memory returned in profile_out.
*/

void SkBlurMask::ComputeBlurProfile(SkScalar sigma, uint8_t **profile_out) {
    int size = SkScalarCeilToInt(6*sigma);

    int center = size >> 1;
    uint8_t *profile = SkNEW_ARRAY(uint8_t, size);

    float invr = 1.f/(2*sigma);

    profile[0] = 255;
    for (int x = 1 ; x < size ; ++x) {
        float scaled_x = (center - x - .5f) * invr;
        float gi = gaussianIntegral(scaled_x);
        profile[x] = 255 - (uint8_t) (255.f * gi);
    }

    *profile_out = profile;
}

// TODO MAYBE: Maintain a profile cache to avoid recomputing this for
// commonly used radii. Consider baking some of the most common blur radii
// directly in as static data?

// Implementation adapted from Michael Herf's approach:
// http://stereopsis.com/shadowrect/

uint8_t SkBlurMask::ProfileLookup(const uint8_t *profile, int loc, int blurred_width, int sharp_width) {
    int dx = SkAbs32(((loc << 1) + 1) - blurred_width) - sharp_width; // how far are we from the original edge?
    int ox = dx >> 1;
    if (ox < 0) {
        ox = 0;
    }

    return profile[ox];
}
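// Illustrative note (not in the original source): the lookup above works in
// doubled coordinates: (loc << 1) + 1 is the doubled pixel-center position, so
// dx is halved again (dx >> 1) before it indexes the profile, and negative
// values clamp to profile[0].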

void SkBlurMask::ComputeBlurredScanline(uint8_t *pixels, const uint8_t *profile,
                                        unsigned int width, SkScalar sigma) {

    unsigned int profile_size = SkScalarCeilToInt(6*sigma);
    SkAutoTMalloc<uint8_t> horizontalScanline(width);

    unsigned int sw = width - profile_size;
    // nearest odd number less than the profile size represents the center
    // of the (2x scaled) profile
    int center = ( profile_size & ~1 ) - 1;

    int w = sw - center;

    for (unsigned int x = 0 ; x < width ; ++x) {
        if (profile_size <= sw) {
            pixels[x] = ProfileLookup(profile, x, width, w);
        } else {
            float span = float(sw)/(2*sigma);
            float giX = 1.5f - (x+.5f)/(2*sigma);
            pixels[x] = (uint8_t) (255 * (gaussianIntegral(giX) - gaussianIntegral(giX + span)));
        }
    }
}

bool SkBlurMask::BlurRect(SkScalar sigma, SkMask *dst,
                          const SkRect &src, Style style,
                          SkIPoint *margin, SkMask::CreateMode createMode) {
    int profile_size = SkScalarCeilToInt(6*sigma);

    int pad = profile_size/2;
    if (margin) {
        margin->set( pad, pad );
    }

    dst->fBounds.set(SkScalarRoundToInt(src.fLeft - pad),
                     SkScalarRoundToInt(src.fTop - pad),
                     SkScalarRoundToInt(src.fRight + pad),
                     SkScalarRoundToInt(src.fBottom + pad));

    dst->fRowBytes = dst->fBounds.width();
    dst->fFormat = SkMask::kA8_Format;
    dst->fImage = NULL;

    int sw = SkScalarFloorToInt(src.width());
    int sh = SkScalarFloorToInt(src.height());

    if (createMode == SkMask::kJustComputeBounds_CreateMode) {
        if (style == kInner_Style) {
            dst->fBounds.set(SkScalarRoundToInt(src.fLeft),
                             SkScalarRoundToInt(src.fTop),
                             SkScalarRoundToInt(src.fRight),
                             SkScalarRoundToInt(src.fBottom)); // restore trimmed bounds
            dst->fRowBytes = sw;
        }
        return true;
    }
    uint8_t *profile = NULL;

    ComputeBlurProfile(sigma, &profile);
    SkAutoTDeleteArray<uint8_t> ada(profile);

    size_t dstSize = dst->computeImageSize();
    if (0 == dstSize) {
        return false;   // too big to allocate, abort
    }

    uint8_t* dp = SkMask::AllocImage(dstSize);

    dst->fImage = dp;

    int dstHeight = dst->fBounds.height();
    int dstWidth = dst->fBounds.width();

    uint8_t *outptr = dp;

    SkAutoTMalloc<uint8_t> horizontalScanline(dstWidth);
    SkAutoTMalloc<uint8_t> verticalScanline(dstHeight);

    ComputeBlurredScanline(horizontalScanline, profile, dstWidth, sigma);
    ComputeBlurredScanline(verticalScanline, profile, dstHeight, sigma);

    for (int y = 0 ; y < dstHeight ; ++y) {
        for (int x = 0 ; x < dstWidth ; x++) {
            unsigned int maskval = SkMulDiv255Round(horizontalScanline[x], verticalScanline[y]);
            *(outptr++) = maskval;
        }
    }
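    // Illustrative note (not in the original source): the rect mask is the
    // outer product of two 1-D blurred step profiles, which is why only one
    // horizontal and one vertical scanline need to be computed above.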

    if (style == kInner_Style) {
        // now we allocate the "real" dst, mirror the size of src
        size_t srcSize = (size_t)(src.width() * src.height());
        if (0 == srcSize) {
            return false;   // too big to allocate, abort
        }
        dst->fImage = SkMask::AllocImage(srcSize);
        for (int y = 0 ; y < sh ; y++) {
            uint8_t *blur_scanline = dp + (y+pad)*dstWidth + pad;
            uint8_t *inner_scanline = dst->fImage + y*sw;
            memcpy(inner_scanline, blur_scanline, sw);
        }
        SkMask::FreeImage(dp);

        dst->fBounds.set(SkScalarRoundToInt(src.fLeft),
                         SkScalarRoundToInt(src.fTop),
                         SkScalarRoundToInt(src.fRight),
                         SkScalarRoundToInt(src.fBottom)); // restore trimmed bounds
        dst->fRowBytes = sw;

    } else if (style == kOuter_Style) {
        for (int y = pad ; y < dstHeight-pad ; y++) {
            uint8_t *dst_scanline = dp + y*dstWidth + pad;
            memset(dst_scanline, 0, sw);
        }
    } else if (style == kSolid_Style) {
        for (int y = pad ; y < dstHeight-pad ; y++) {
            uint8_t *dst_scanline = dp + y*dstWidth + pad;
            memset(dst_scanline, 0xff, sw);
        }
    }
    // The kSolid_Style loop above forces the rect interior to full coverage;
    // kNormal_Style needs no special handling for analytic rect blurs.

    return true;
}

bool SkBlurMask::BlurRRect(SkScalar sigma, SkMask *dst,
                           const SkRRect &src, Style style,
                           SkIPoint *margin, SkMask::CreateMode createMode) {
    // Temporary for now -- always fail, should cause caller to fall back
    // to old path. Plumbing just to land API and parallelize effort.

    return false;
}

// The "simple" blur is a direct implementation of separable convolution with a discrete
// gaussian kernel. It's "ground truth" in a sense; too slow to be used, but very
// useful for correctness comparisons.

bool SkBlurMask::BlurGroundTruth(SkScalar sigma, SkMask* dst, const SkMask& src,
                                 Style style, SkIPoint* margin) {

    if (src.fFormat != SkMask::kA8_Format) {
        return false;
    }

    float variance = sigma * sigma;

    int windowSize = SkScalarCeilToInt(sigma*6);
    // round window size up to nearest odd number
    windowSize |= 1;

    SkAutoTMalloc<float> gaussWindow(windowSize);

    int halfWindow = windowSize >> 1;

    gaussWindow[halfWindow] = 1;

    float windowSum = 1;
    for (int x = 1 ; x <= halfWindow ; ++x) {
        float gaussian = expf(-x*x / (2*variance));
        gaussWindow[halfWindow + x] = gaussWindow[halfWindow-x] = gaussian;
        windowSum += 2*gaussian;
    }
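    // Illustrative note (not in the original source): the window now holds the
    // unnormalized Gaussian weights exp(-x^2 / (2*sigma^2)), covering roughly
    // +/- 3*sigma around the center tap.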

    // leave the filter un-normalized for now; we will divide by the
    // normalization sum later.

    int pad = halfWindow;
    if (margin) {
        margin->set( pad, pad );
    }

    dst->fBounds = src.fBounds;
    dst->fBounds.outset(pad, pad);

    dst->fRowBytes = dst->fBounds.width();
    dst->fFormat = SkMask::kA8_Format;
    dst->fImage = NULL;

    if (src.fImage) {

        size_t dstSize = dst->computeImageSize();
        if (0 == dstSize) {
            return false;   // too big to allocate, abort
        }

        int srcWidth = src.fBounds.width();
        int srcHeight = src.fBounds.height();
        int dstWidth = dst->fBounds.width();

        const uint8_t* srcPixels = src.fImage;
        uint8_t* dstPixels = SkMask::AllocImage(dstSize);
        SkAutoTCallVProc<uint8_t, SkMask_FreeImage> autoCall(dstPixels);

        // do the actual blur. First, make a padded copy of the source.
        // use double pad so we never have to check if we're outside anything

        int padWidth = srcWidth + 4*pad;
        int padHeight = srcHeight;
        int padSize = padWidth * padHeight;

        SkAutoTMalloc<uint8_t> padPixels(padSize);
        memset(padPixels, 0, padSize);

        for (int y = 0 ; y < srcHeight; ++y) {
            uint8_t* padptr = padPixels + y * padWidth + 2*pad;
            const uint8_t* srcptr = srcPixels + y * srcWidth;
            memcpy(padptr, srcptr, srcWidth);
        }

        // blur in X, transposing the result into a temporary floating point buffer.
        // also double-pad the intermediate result so that the second blur doesn't
        // have to do extra conditionals.

        int tmpWidth = padHeight + 4*pad;
        int tmpHeight = padWidth - 2*pad;
        int tmpSize = tmpWidth * tmpHeight;

        SkAutoTMalloc<float> tmpImage(tmpSize);
        memset(tmpImage, 0, tmpSize*sizeof(tmpImage[0]));

        for (int y = 0 ; y < padHeight ; ++y) {
            uint8_t *srcScanline = padPixels + y*padWidth;
            for (int x = pad ; x < padWidth - pad ; ++x) {
                float *outPixel = tmpImage + (x-pad)*tmpWidth + y + 2*pad; // transposed output
                uint8_t *windowCenter = srcScanline + x;
                for (int i = -pad ; i <= pad ; ++i) {
                    *outPixel += gaussWindow[pad+i]*windowCenter[i];
                }
                *outPixel /= windowSum;
            }
        }

        // blur in Y; now filling in the actual desired destination. We have to do
        // the transpose again; these transposes guarantee that we read memory in
        // linear order.

        for (int y = 0 ; y < tmpHeight ; ++y) {
            float *srcScanline = tmpImage + y*tmpWidth;
            for (int x = pad ; x < tmpWidth - pad ; ++x) {
                float *windowCenter = srcScanline + x;
                float finalValue = 0;
                for (int i = -pad ; i <= pad ; ++i) {
                    finalValue += gaussWindow[pad+i]*windowCenter[i];
                }
                finalValue /= windowSum;
                uint8_t *outPixel = dstPixels + (x-pad)*dstWidth + y; // transposed output
                int integerPixel = int(finalValue + 0.5f);
                *outPixel = SkClampMax( SkClampPos(integerPixel), 255 );
            }
        }

        dst->fImage = dstPixels;
        // if need be, alloc the "real" dst (same size as src) and copy/merge
        // the blur into it (applying the src)
        if (style == kInner_Style) {
            // now we allocate the "real" dst, mirror the size of src
            size_t srcSize = src.computeImageSize();
            if (0 == srcSize) {
                return false;   // too big to allocate, abort
            }
            dst->fImage = SkMask::AllocImage(srcSize);
            merge_src_with_blur(dst->fImage, src.fRowBytes,
                                srcPixels, src.fRowBytes,
                                dstPixels + pad*dst->fRowBytes + pad,
                                dst->fRowBytes, srcWidth, srcHeight);
            SkMask::FreeImage(dstPixels);
        } else if (style != kNormal_Style) {
            clamp_with_orig(dstPixels + pad*dst->fRowBytes + pad,
                            dst->fRowBytes, srcPixels, src.fRowBytes, srcWidth, srcHeight, style);
        }
        (void)autoCall.detach();
    }

    if (style == kInner_Style) {
        dst->fBounds = src.fBounds; // restore trimmed bounds
        dst->fRowBytes = src.fRowBytes;
    }

    return true;
}