/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "SkBitmapProcState.h"
#include "SkColorPriv.h"
#include "SkFilterProc.h"
#include "SkPaint.h"
#include "SkShader.h"   // for tilemodes
#include "SkUtilsArm.h"
#include "SkBitmapScaler.h"
#include "SkMipMap.h"
#include "SkPixelRef.h"
#include "SkScaledImageCache.h"

#if !SK_ARM_NEON_IS_NONE
// These are defined in src/opts/SkBitmapProcState_arm_neon.cpp
extern const SkBitmapProcState::SampleProc16 gSkBitmapProcStateSample16_neon[];
extern const SkBitmapProcState::SampleProc32 gSkBitmapProcStateSample32_neon[];
extern void S16_D16_filter_DX_neon(const SkBitmapProcState&, const uint32_t*, int, uint16_t*);
extern void Clamp_S16_D16_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint16_t*, int);
extern void Repeat_S16_D16_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint16_t*, int);
extern void SI8_opaque_D32_filter_DX_neon(const SkBitmapProcState&, const uint32_t*, int, SkPMColor*);
extern void SI8_opaque_D32_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint32_t*, int);
extern void Clamp_SI8_opaque_D32_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint32_t*, int);
#endif

#define NAME_WRAP(x) x
#include "SkBitmapProcState_filter.h"
#include "SkBitmapProcState_procs.h"

///////////////////////////////////////////////////////////////////////////////

// true iff the matrix contains, at most, scale and translate elements
static bool matrix_only_scale_translate(const SkMatrix& m) {
    return m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask);
}
/**
 *  For the purposes of drawing bitmaps, if a matrix is "almost" a pure
 *  translate, go ahead and treat it as if it were, so that subsequent code
 *  can go fast.
 */
static bool just_trans_clamp(const SkMatrix& matrix, const SkBitmap& bitmap) {
    SkASSERT(matrix_only_scale_translate(matrix));

    if (matrix.getType() & SkMatrix::kScale_Mask) {
        SkRect src, dst;
        bitmap.getBounds(&src);

        // Can't call mapRect(), since that will fix up inverted rectangles,
        // e.g. when scale is negative, and we don't want to return true for
        // those.
        matrix.mapPoints(SkTCast<SkPoint*>(&dst),
                         SkTCast<const SkPoint*>(&src),
                         2);

        // Now round all 4 edges to device space, and then compare the device
        // width/height to the original. Note: we must map all 4 and subtract
        // rather than map the "width" and compare, since we care about the
        // phase (in pixel space) that any translate in the matrix might impart.
        SkIRect idst;
        dst.round(&idst);
        return idst.width() == bitmap.width() && idst.height() == bitmap.height();
    }
    // if we got here, we're either kTranslate_Mask or identity
    return true;
}
static bool just_trans_general(const SkMatrix& matrix) {
    SkASSERT(matrix_only_scale_translate(matrix));

    if (matrix.getType() & SkMatrix::kScale_Mask) {
        const SkScalar tol = SK_Scalar1 / 32768;

        if (!SkScalarNearlyZero(matrix[SkMatrix::kMScaleX] - SK_Scalar1, tol)) {
            return false;
        }
        if (!SkScalarNearlyZero(matrix[SkMatrix::kMScaleY] - SK_Scalar1, tol)) {
            return false;
        }
    }
    // if we got here, treat us as either kTranslate_Mask or identity
    return true;
}
///////////////////////////////////////////////////////////////////////////////

static bool valid_for_filtering(unsigned dimension) {
    // for filtering, width and height must fit in 14 bits, since we steal
    // 2 bits from each to store our 4-bit subpixel data
    return (dimension & ~0x3FFF) == 0;
}
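
// Illustrative note (not in the original source): 0x3FFF == 16383, so the check
// above rejects any dimension of 16384 or more. For example:
//
//     valid_for_filtering(16383);   // true  -- fits in 14 bits
//     valid_for_filtering(16384);   // false -- bit 14 would collide with the stolen bits
//
// The ORed width|height trick used by chooseProcs() relies on this: if either
// value has a bit set above bit 13, the OR does too, and filtering is disabled.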
static SkScalar effective_matrix_scale_sqrd(const SkMatrix& mat) {
    SkPoint v1, v2;

    v1.fX = mat.getScaleX();
    v1.fY = mat.getSkewY();

    v2.fX = mat.getSkewX();
    v2.fY = mat.getScaleY();

    return SkMaxScalar(v1.lengthSqd(), v2.lengthSqd());
}
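
// Illustrative note (not in the original source): v1 and v2 are the images of
// the unit x- and y-axes under the matrix (ignoring translation), i.e. its two
// columns, so the function returns max(|col0|^2, |col1|^2). For a plain scale
// matrix this is simply max(sx^2, sy^2); possiblyScaleImage() uses it as the
// squared "effective scale" of the inverse CTM.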
class AutoScaledCacheUnlocker {
public:
    AutoScaledCacheUnlocker(SkScaledImageCache::ID** idPtr) : fIDPtr(idPtr) {}
    ~AutoScaledCacheUnlocker() {
        if (fIDPtr && *fIDPtr) {
            SkScaledImageCache::Unlock(*fIDPtr);
            *fIDPtr = NULL;
        }
    }

    // forgets the ID, so it won't call Unlock
    void release() {
        fIDPtr = NULL;
    }

private:
    SkScaledImageCache::ID** fIDPtr;
};
#define AutoScaledCacheUnlocker(...) SK_REQUIRE_LOCAL_VAR(AutoScaledCacheUnlocker)
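
// Illustrative usage sketch (not in the original source): this is a small RAII
// guard used by possiblyScaleImage() and lockBaseBitmap() below. It is
// constructed on &fScaledCacheID at the top of the function so that every
// early return unlocks and clears the cache entry automatically; the success
// path calls release() so the entry stays locked for later use:
//
//     AutoScaledCacheUnlocker unlocker(&fScaledCacheID);
//     ...                      // early returns are now safe
//     unlocker.release();      // success: keep the cache entry locked
//     return true;
//
// The #define above turns an accidental unnamed temporary, e.g.
// "AutoScaledCacheUnlocker(&fScaledCacheID);", into a compile-time error via
// SK_REQUIRE_LOCAL_VAR.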
// TODO -- we may want to pass the clip into this function so we only scale
// the portion of the image that we're going to need. This will complicate
// the interface to the cache, but might be well worth it.

bool SkBitmapProcState::possiblyScaleImage() {
    AutoScaledCacheUnlocker unlocker(&fScaledCacheID);

    SkASSERT(NULL == fBitmap);
    SkASSERT(NULL == fScaledCacheID);

    if (fFilterLevel <= SkPaint::kLow_FilterLevel) {
        return false;
    }

    // Check to see if the transformation matrix is simple, and if we're
    // doing high quality scaling. If so, do the bitmap scale here and
    // remove the scaling component from the matrix.

    if (SkPaint::kHigh_FilterLevel == fFilterLevel &&
        fInvMatrix.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask) &&
        fOrigBitmap.config() == SkBitmap::kARGB_8888_Config) {

        SkScalar invScaleX = fInvMatrix.getScaleX();
        SkScalar invScaleY = fInvMatrix.getScaleY();

        fScaledCacheID = SkScaledImageCache::FindAndLock(fOrigBitmap,
                                                         invScaleX, invScaleY,
                                                         &fScaledBitmap);
        if (fScaledCacheID) {
            fScaledBitmap.lockPixels();
            if (!fScaledBitmap.getPixels()) {
                fScaledBitmap.unlockPixels();
                // found a purged entry (discardablememory?), release it
                SkScaledImageCache::Unlock(fScaledCacheID);
                fScaledCacheID = NULL;
                // fall through to rebuild
            }
        }

        if (NULL == fScaledCacheID) {
            int dest_width = SkScalarCeilToInt(fOrigBitmap.width() / invScaleX);
            int dest_height = SkScalarCeilToInt(fOrigBitmap.height() / invScaleY);

            // All the criteria are met; let's make a new bitmap.

            SkConvolutionProcs simd;
            sk_bzero(&simd, sizeof(simd));
            this->platformConvolutionProcs(&simd);

            if (!SkBitmapScaler::Resize(&fScaledBitmap,
                                        fOrigBitmap,
                                        SkBitmapScaler::RESIZE_BEST,
                                        dest_width,
                                        dest_height,
                                        simd,
                                        SkScaledImageCache::GetAllocator())) {
                // we failed to create fScaledBitmap, so just return and let
                // the scanline proc handle it.
                return false;
            }
            SkASSERT(NULL != fScaledBitmap.getPixels());
            fScaledCacheID = SkScaledImageCache::AddAndLock(fOrigBitmap,
                                                            invScaleX,
                                                            invScaleY,
                                                            fScaledBitmap);
            if (!fScaledCacheID) {
                fScaledBitmap.reset();
                return false;
            }
            SkASSERT(NULL != fScaledBitmap.getPixels());
        }

        SkASSERT(NULL != fScaledBitmap.getPixels());
        fBitmap = &fScaledBitmap;

        // set the inv matrix type to translate-only
        fInvMatrix.setTranslate(fInvMatrix.getTranslateX() / fInvMatrix.getScaleX(),
                                fInvMatrix.getTranslateY() / fInvMatrix.getScaleY());

        // no need for any further filtering; we just did it!
        fFilterLevel = SkPaint::kNone_FilterLevel;
        unlocker.release();
        return true;
    }
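
    // Illustrative note (not in the original source): the setTranslate() fix-up
    // above rescales the translation into the prescaled bitmap's coordinates.
    // E.g. if the CTM is "scale by 2, then translate by 10", the inverse has
    // scale 0.5 and translate -5; after substituting the 2x-larger prescaled
    // bitmap, the remaining inverse must be a pure translate of -5 / 0.5 = -10.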
    /*
     *  If High, then our special-case for scale-only did not take, and so we
     *  have to make a choice:
     *      1. fall back on mipmaps + bilerp
     *      2. fall back on scanline bicubic filter
     *  For now, we compute the "scale" value from the matrix, and have a
     *  threshold to decide when bicubic is better, and when mips are better.
     *  No doubt a fancier decision tree could be used here.
     *
     *  If Medium, then we just try to build a mipmap and select a level,
     *  setting the filter-level to kLow to signal that we just need bilerp
     *  to process the selected level.
     */

    SkScalar scaleSqd = effective_matrix_scale_sqrd(fInvMatrix);

    if (SkPaint::kHigh_FilterLevel == fFilterLevel) {
        // Set the limit at 0.25 for the CTM... if the CTM is scaling smaller
        // than this, then the mipmaps quality may be greater (certainly faster)
        // so we only keep High quality if the scale is greater than this.
        //
        // Since we're dealing with the inverse, we compare against its inverse.
        const SkScalar bicubicLimit = 4.0f;
        const SkScalar bicubicLimitSqd = bicubicLimit * bicubicLimit;
        if (scaleSqd < bicubicLimitSqd) {  // use bicubic scanline
            return false;
        }

        // else set the filter-level to Medium, since we're scaling down and
        // want to request mipmaps
        fFilterLevel = SkPaint::kMedium_FilterLevel;
    }
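
    // Illustrative note (not in the original source): bicubicLimit == 4 on the
    // inverse corresponds to a forward (CTM) scale of 1/4. With scaleSqd taken
    // from the inverse matrix:
    //
    //     CTM scale 1/2 -> inverse scale 2 -> scaleSqd 4  (< 16)  -> keep bicubic
    //     CTM scale 1/8 -> inverse scale 8 -> scaleSqd 64 (>= 16) -> switch to mips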
    SkASSERT(SkPaint::kMedium_FilterLevel == fFilterLevel);

    /**
     *  Medium quality means use a mipmap for down-scaling, and just bilerp
     *  for upscaling. Since we're examining the inverse matrix, we look for
     *  a scale > 1 to indicate down scaling by the CTM.
     */
    if (scaleSqd > SK_Scalar1) {
        const SkMipMap* mip = NULL;

        SkASSERT(NULL == fScaledCacheID);
        fScaledCacheID = SkScaledImageCache::FindAndLockMip(fOrigBitmap, &mip);
        if (!fScaledCacheID) {
            SkASSERT(NULL == mip);
            mip = SkMipMap::Build(fOrigBitmap);
            if (mip) {
                fScaledCacheID = SkScaledImageCache::AddAndLockMip(fOrigBitmap,
                                                                   mip);
                mip->unref();   // the cache took a ref
                SkASSERT(fScaledCacheID);
            }
        } else {
            SkASSERT(mip);
        }

        if (mip) {
            SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
            SkMipMap::Level level;
            if (mip->extractLevel(levelScale, &level)) {
                SkScalar invScaleFixup = level.fScale;
                fInvMatrix.postScale(invScaleFixup, invScaleFixup);

                fScaledBitmap.setConfig(fOrigBitmap.config(),
                                        level.fWidth, level.fHeight,
                                        level.fRowBytes);
                fScaledBitmap.setPixels(level.fPixels);
                fBitmap = &fScaledBitmap;
                fFilterLevel = SkPaint::kLow_FilterLevel;
                unlocker.release();
                return true;
            }
        }
    }

    return false;
}
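
// Illustrative note (not in the original source): in the Medium path above,
// levelScale = 1 / sqrt(scaleSqd) is the forward scale implied by the inverse
// CTM (e.g. a 4x downscale gives scaleSqd == 16 and levelScale == 0.25).
// extractLevel() picks a mip level for that scale and returns a fix-up factor
// in level.fScale; postScale() folds that factor back into the inverse matrix
// so the remaining kLow (bilerp) sampling addresses the chosen level rather
// than the full-resolution bitmap.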
static bool get_locked_pixels(const SkBitmap& src, int pow2, SkBitmap* dst) {
    SkPixelRef* pr = src.pixelRef();
    if (pr && pr->decodeInto(pow2, dst)) {
        return true;
    }

    /*
     *  If decodeInto() fails, it is possible that we have an old subclass that
     *  does not, or cannot, implement that. In that case we fall back to the
     *  older protocol of having the pixelRef handle the caching for us.
     */
    *dst = src;
    dst->lockPixels();
    return SkToBool(dst->getPixels());
}
bool SkBitmapProcState::lockBaseBitmap() {
    AutoScaledCacheUnlocker unlocker(&fScaledCacheID);

    SkPixelRef* pr = fOrigBitmap.pixelRef();

    SkASSERT(NULL == fScaledCacheID);

    if (pr->isLocked() || !pr->implementsDecodeInto()) {
        // fast-case, no need to look in our cache
        fScaledBitmap = fOrigBitmap;
        fScaledBitmap.lockPixels();
        if (NULL == fScaledBitmap.getPixels()) {
            return false;
        }
    } else {
        fScaledCacheID = SkScaledImageCache::FindAndLock(fOrigBitmap,
                                                         SK_Scalar1, SK_Scalar1,
                                                         &fScaledBitmap);
        if (fScaledCacheID) {
            fScaledBitmap.lockPixels();
            if (!fScaledBitmap.getPixels()) {
                fScaledBitmap.unlockPixels();
                // found a purged entry (discardablememory?), release it
                SkScaledImageCache::Unlock(fScaledCacheID);
                fScaledCacheID = NULL;
                // fall through to rebuild
            }
        }

        if (NULL == fScaledCacheID) {
            if (!get_locked_pixels(fOrigBitmap, 0, &fScaledBitmap)) {
                return false;
            }

            // TODO: if fScaled comes back at a different width/height than fOrig,
            // we need to update the matrix we are using to sample from this guy.

            fScaledCacheID = SkScaledImageCache::AddAndLock(fOrigBitmap,
                                                            SK_Scalar1, SK_Scalar1,
                                                            fScaledBitmap);
            if (!fScaledCacheID) {
                fScaledBitmap.reset();
                return false;
            }
        }
    }
    fBitmap = &fScaledBitmap;
    unlocker.release();
    return true;
}
void SkBitmapProcState::endContext() {
    SkDELETE(fBitmapFilter);
    fBitmapFilter = NULL;
    fScaledBitmap.reset();

    if (fScaledCacheID) {
        SkScaledImageCache::Unlock(fScaledCacheID);
        fScaledCacheID = NULL;
    }
}

SkBitmapProcState::~SkBitmapProcState() {
    if (fScaledCacheID) {
        SkScaledImageCache::Unlock(fScaledCacheID);
    }
    SkDELETE(fBitmapFilter);
}
bool SkBitmapProcState::chooseProcs(const SkMatrix& inv, const SkPaint& paint) {
    SkASSERT(fOrigBitmap.width() && fOrigBitmap.height());

    fBitmap = NULL;
    fInvMatrix = inv;
    fFilterLevel = paint.getFilterLevel();

    SkASSERT(NULL == fScaledCacheID);

    // possiblyScaleImage will look to see if it can rescale the image as a
    // preprocess; either by scaling up to the target size, or by selecting
    // a nearby mipmap level. If it does, it will adjust the working
    // matrix as well as the working bitmap. It may also adjust the filter
    // quality to avoid re-filtering an already perfectly scaled image.
    if (!this->possiblyScaleImage()) {
        if (!this->lockBaseBitmap()) {
            return false;
        }
    }
    // The above logic should have always assigned fBitmap, but in case it
    // didn't, we check for that now...
    if (NULL == fBitmap) {
        return false;
    }

    bool trivialMatrix = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0;
    bool clampClamp = SkShader::kClamp_TileMode == fTileModeX &&
                      SkShader::kClamp_TileMode == fTileModeY;

    if (!(clampClamp || trivialMatrix)) {
        fInvMatrix.postIDiv(fOrigBitmap.width(), fOrigBitmap.height());
    }

    // Now that all possible changes to the matrix have taken place, check
    // to see if we're really close to a no-scale matrix. If so, explicitly
    // set it to be so. Subsequent code may inspect this matrix to choose
    // a faster path in this case.

    // This code will only execute if the matrix has some scale component;
    // if it's already pure translate then we won't do this inversion.

    if (matrix_only_scale_translate(fInvMatrix)) {
        SkMatrix forward;
        if (fInvMatrix.invert(&forward)) {
            if (clampClamp ? just_trans_clamp(forward, *fBitmap)
                           : just_trans_general(forward)) {
                SkScalar tx = -SkScalarRoundToScalar(forward.getTranslateX());
                SkScalar ty = -SkScalarRoundToScalar(forward.getTranslateY());
                fInvMatrix.setTranslate(tx, ty);
            }
        }
    }

    fInvProc = fInvMatrix.getMapXYProc();
    fInvType = fInvMatrix.getType();
    fInvSx = SkScalarToFixed(fInvMatrix.getScaleX());
    fInvSxFractionalInt = SkScalarToFractionalInt(fInvMatrix.getScaleX());
    fInvKy = SkScalarToFixed(fInvMatrix.getSkewY());
    fInvKyFractionalInt = SkScalarToFractionalInt(fInvMatrix.getSkewY());

    fAlphaScale = SkAlpha255To256(paint.getAlpha());

    fShaderProc32 = NULL;
    fShaderProc16 = NULL;
    fSampleProc32 = NULL;
    fSampleProc16 = NULL;

    // recompute the triviality of the matrix here because we may have
    // changed it!

    trivialMatrix = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0;

    if (SkPaint::kHigh_FilterLevel == fFilterLevel) {
        // If this is still set, that means we wanted HQ sampling
        // but couldn't do it as a preprocess. Let's try to install
        // the scanline version of the HQ sampler. If that process fails,
        // downgrade to bilerp.

        // NOTE: Might need to be careful here in the future when we want
        // to have the platform proc have a shot at this; it's possible that
        // the chooseBitmapFilterProc will fail to install a shader but a
        // platform-specific one might succeed, so it might be premature here
        // to fall back to bilerp. This needs thought.

        if (!this->setBitmapFilterProcs()) {
            fFilterLevel = SkPaint::kLow_FilterLevel;
        }
    }

    if (SkPaint::kLow_FilterLevel == fFilterLevel) {
        // Only try bilerp if the matrix is "interesting" and
        // the image has a suitable size.

        if (fInvType <= SkMatrix::kTranslate_Mask ||
            !valid_for_filtering(fBitmap->width() | fBitmap->height())) {
            fFilterLevel = SkPaint::kNone_FilterLevel;
        }
    }

    // At this point, we know exactly what kind of sampling the per-scanline
    // shader will perform.

    fMatrixProc = this->chooseMatrixProc(trivialMatrix);
    if (NULL == fMatrixProc) {
        return false;
    }

    ///////////////////////////////////////////////////////////////////////

    // No need to do this if we're doing HQ sampling; if filter quality is
    // still set to HQ by the time we get here, then we must have installed
    // the shader procs above and can skip all this.

    if (fFilterLevel < SkPaint::kHigh_FilterLevel) {

        int index = 0;
        if (fAlphaScale < 256) {  // note: this distinction is not used for D16
            index |= 1;
        }
        if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
            index |= 2;
        }
        if (fFilterLevel > SkPaint::kNone_FilterLevel) {
            index |= 4;
        }
        // bits 3,4,5 encoding the source bitmap format
        switch (fBitmap->config()) {
            case SkBitmap::kARGB_8888_Config:
                index |= 0;
                break;
            case SkBitmap::kRGB_565_Config:
                index |= 8;
                break;
            case SkBitmap::kIndex8_Config:
                index |= 16;
                break;
            case SkBitmap::kARGB_4444_Config:
                index |= 24;
                break;
            case SkBitmap::kA8_Config:
                index |= 32;
                fPaintPMColor = SkPreMultiplyColor(paint.getColor());
                break;
            default:
                return false;
        }
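
        // Illustrative note (not in the original source): the index bits are
        //   bit 0: alpha (paint alpha < 255), bit 1: scale-only matrix (DX),
        //   bit 2: filtering on, bits 3..5: source config (in steps of 8).
        // E.g. an opaque 565 bitmap drawn with a scale-only matrix and bilerp
        // gives index = 0 | 2 | 4 | 8 = 14, which selects
        // S16_opaque_D32_filter_DX from the table below.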
#if !SK_ARM_NEON_IS_ALWAYS
        static const SampleProc32 gSkBitmapProcStateSample32[] = {
            S32_opaque_D32_nofilter_DXDY,
            S32_alpha_D32_nofilter_DXDY,
            S32_opaque_D32_nofilter_DX,
            S32_alpha_D32_nofilter_DX,
            S32_opaque_D32_filter_DXDY,
            S32_alpha_D32_filter_DXDY,
            S32_opaque_D32_filter_DX,
            S32_alpha_D32_filter_DX,

            S16_opaque_D32_nofilter_DXDY,
            S16_alpha_D32_nofilter_DXDY,
            S16_opaque_D32_nofilter_DX,
            S16_alpha_D32_nofilter_DX,
            S16_opaque_D32_filter_DXDY,
            S16_alpha_D32_filter_DXDY,
            S16_opaque_D32_filter_DX,
            S16_alpha_D32_filter_DX,

            SI8_opaque_D32_nofilter_DXDY,
            SI8_alpha_D32_nofilter_DXDY,
            SI8_opaque_D32_nofilter_DX,
            SI8_alpha_D32_nofilter_DX,
            SI8_opaque_D32_filter_DXDY,
            SI8_alpha_D32_filter_DXDY,
            SI8_opaque_D32_filter_DX,
            SI8_alpha_D32_filter_DX,

            S4444_opaque_D32_nofilter_DXDY,
            S4444_alpha_D32_nofilter_DXDY,
            S4444_opaque_D32_nofilter_DX,
            S4444_alpha_D32_nofilter_DX,
            S4444_opaque_D32_filter_DXDY,
            S4444_alpha_D32_filter_DXDY,
            S4444_opaque_D32_filter_DX,
            S4444_alpha_D32_filter_DX,

            // A8 treats alpha/opaque the same (equally efficient)
            SA8_alpha_D32_nofilter_DXDY,
            SA8_alpha_D32_nofilter_DXDY,
            SA8_alpha_D32_nofilter_DX,
            SA8_alpha_D32_nofilter_DX,
            SA8_alpha_D32_filter_DXDY,
            SA8_alpha_D32_filter_DXDY,
            SA8_alpha_D32_filter_DX,
            SA8_alpha_D32_filter_DX
        };

        static const SampleProc16 gSkBitmapProcStateSample16[] = {
            S32_D16_nofilter_DXDY,
            S32_D16_nofilter_DX,
            S32_D16_filter_DXDY,
            S32_D16_filter_DX,

            S16_D16_nofilter_DXDY,
            S16_D16_nofilter_DX,
            S16_D16_filter_DXDY,
            S16_D16_filter_DX,

            SI8_D16_nofilter_DXDY,
            SI8_D16_nofilter_DX,
            SI8_D16_filter_DXDY,
            SI8_D16_filter_DX,

            // Don't support 4444 -> 565
            NULL, NULL, NULL, NULL,
            // Don't support A8 -> 565
            NULL, NULL, NULL, NULL
        };
#endif

        fSampleProc32 = SK_ARM_NEON_WRAP(gSkBitmapProcStateSample32)[index];
        index >>= 1;    // shift away any opaque/alpha distinction
        fSampleProc16 = SK_ARM_NEON_WRAP(gSkBitmapProcStateSample16)[index];

        // our special-case shaderprocs
        if (SK_ARM_NEON_WRAP(S16_D16_filter_DX) == fSampleProc16) {
            if (clampClamp) {
                fShaderProc16 = SK_ARM_NEON_WRAP(Clamp_S16_D16_filter_DX_shaderproc);
            } else if (SkShader::kRepeat_TileMode == fTileModeX &&
                       SkShader::kRepeat_TileMode == fTileModeY) {
                fShaderProc16 = SK_ARM_NEON_WRAP(Repeat_S16_D16_filter_DX_shaderproc);
            }
        } else if (SK_ARM_NEON_WRAP(SI8_opaque_D32_filter_DX) == fSampleProc32 && clampClamp) {
            fShaderProc32 = SK_ARM_NEON_WRAP(Clamp_SI8_opaque_D32_filter_DX_shaderproc);
        }

        if (NULL == fShaderProc32) {
            fShaderProc32 = this->chooseShaderProc32();
        }
    }

    // see if our platform has any accelerated overrides
    this->platformProcs();

    return true;
}
static void Clamp_S32_D32_nofilter_trans_shaderproc(const SkBitmapProcState& s,
                                                    int x, int y,
                                                    SkPMColor* SK_RESTRICT colors,
                                                    int count) {
    SkASSERT(((s.fInvType & ~SkMatrix::kTranslate_Mask)) == 0);
    SkASSERT(s.fInvKy == 0);
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(SkPaint::kNone_FilterLevel == s.fFilterLevel);

    const int maxX = s.fBitmap->width() - 1;
    const int maxY = s.fBitmap->height() - 1;
    int ix = s.fFilterOneX + x;
    int iy = SkClampMax(s.fFilterOneY + y, maxY);
#ifdef SK_DEBUG
    {
        SkPoint pt;
        s.fInvProc(s.fInvMatrix, SkIntToScalar(x) + SK_ScalarHalf,
                   SkIntToScalar(y) + SK_ScalarHalf, &pt);
        int iy2 = SkClampMax(SkScalarFloorToInt(pt.fY), maxY);
        int ix2 = SkScalarFloorToInt(pt.fX);

        SkASSERT(iy == iy2);
        SkASSERT(ix == ix2);
    }
#endif
    const SkPMColor* row = s.fBitmap->getAddr32(0, iy);

    // clamp to the left
    if (ix < 0) {
        int n = SkMin32(-ix, count);
        sk_memset32(colors, row[0], n);
        count -= n;
        if (0 == count) {
            return;
        }
        colors += n;
        SkASSERT(-ix == n);
        ix = 0;
    }
    // copy the middle
    if (ix <= maxX) {
        int n = SkMin32(maxX - ix + 1, count);
        memcpy(colors, row + ix, n * sizeof(SkPMColor));
        count -= n;
        if (0 == count) {
            return;
        }
        colors += n;
    }
    SkASSERT(count > 0);
    // clamp to the right
    sk_memset32(colors, row[maxX], count);
}
static inline int sk_int_mod(int x, int n) {
    SkASSERT(n > 0);
    if ((unsigned)x >= (unsigned)n) {
        if (x < 0) {
            x = n + ~(~x % n);
        } else {
            x = x % n;
        }
    }
    return x;
}

static inline int sk_int_mirror(int x, int n) {
    x = sk_int_mod(x, 2 * n);
    if (x >= n) {
        x = n + ~(x - n);
    }
    return x;
}
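
// Illustrative note (not in the original source): sk_int_mod() is a floored
// modulo (the result always lands in [0, n)), and sk_int_mirror() reflects at
// the right edge. For n == 5:
//
//     sk_int_mod(-3, 5)   == 2      sk_int_mod(7, 5)    == 2
//     sk_int_mirror(4, 5) == 4      sk_int_mirror(5, 5) == 4
//     sk_int_mirror(7, 5) == 2      sk_int_mirror(9, 5) == 0
//
// n + ~(x - n) is simply (2*n - 1 - x), i.e. a reflection about the last
// valid index.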
static void Repeat_S32_D32_nofilter_trans_shaderproc(const SkBitmapProcState& s,
                                                     int x, int y,
                                                     SkPMColor* SK_RESTRICT colors,
                                                     int count) {
    SkASSERT(((s.fInvType & ~SkMatrix::kTranslate_Mask)) == 0);
    SkASSERT(s.fInvKy == 0);
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(SkPaint::kNone_FilterLevel == s.fFilterLevel);

    const int stopX = s.fBitmap->width();
    const int stopY = s.fBitmap->height();
    int ix = s.fFilterOneX + x;
    int iy = sk_int_mod(s.fFilterOneY + y, stopY);
#ifdef SK_DEBUG
    {
        SkPoint pt;
        s.fInvProc(s.fInvMatrix, SkIntToScalar(x) + SK_ScalarHalf,
                   SkIntToScalar(y) + SK_ScalarHalf, &pt);
        int iy2 = sk_int_mod(SkScalarFloorToInt(pt.fY), stopY);
        int ix2 = SkScalarFloorToInt(pt.fX);

        SkASSERT(iy == iy2);
        SkASSERT(ix == ix2);
    }
#endif
    const SkPMColor* row = s.fBitmap->getAddr32(0, iy);

    ix = sk_int_mod(ix, stopX);
    for (;;) {
        int n = SkMin32(stopX - ix, count);
        memcpy(colors, row + ix, n * sizeof(SkPMColor));
        count -= n;
        if (0 == count) {
            return;
        }
        colors += n;
        ix = 0;
    }
}
static void S32_D32_constX_shaderproc(const SkBitmapProcState& s,
                                      int x, int y,
                                      SkPMColor* SK_RESTRICT colors,
                                      int count) {
    SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) == 0);
    SkASSERT(s.fInvKy == 0);
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(1 == s.fBitmap->width());

    int iY0;
    int iY1 SK_INIT_TO_AVOID_WARNING;
    int iSubY SK_INIT_TO_AVOID_WARNING;

    if (SkPaint::kNone_FilterLevel != s.fFilterLevel) {
        SkBitmapProcState::MatrixProc mproc = s.getMatrixProc();
        uint32_t xy[2];

        mproc(s, xy, 1, x, y);

        iY0 = xy[0] >> 18;
        iY1 = xy[0] & 0x3FFF;
        iSubY = (xy[0] >> 14) & 0xF;
    } else {
        int yTemp;

        if (s.fInvType > SkMatrix::kTranslate_Mask) {
            SkPoint pt;
            s.fInvProc(s.fInvMatrix,
                       SkIntToScalar(x) + SK_ScalarHalf,
                       SkIntToScalar(y) + SK_ScalarHalf,
                       &pt);
            // When the matrix has a scale component the setup code in
            // chooseProcs multiplies the inverse matrix by the inverse of the
            // bitmap's width and height. Since this method is going to do
            // its own tiling and sampling we need to undo that here.
            if (SkShader::kClamp_TileMode != s.fTileModeX ||
                SkShader::kClamp_TileMode != s.fTileModeY) {
                yTemp = SkScalarFloorToInt(pt.fY * s.fBitmap->height());
            } else {
                yTemp = SkScalarFloorToInt(pt.fY);
            }
        } else {
            yTemp = s.fFilterOneY + y;
        }

        const int stopY = s.fBitmap->height();
        switch (s.fTileModeY) {
            case SkShader::kClamp_TileMode:
                iY0 = SkClampMax(yTemp, stopY - 1);
                break;
            case SkShader::kRepeat_TileMode:
                iY0 = sk_int_mod(yTemp, stopY);
                break;
            case SkShader::kMirror_TileMode:
            default:
                iY0 = sk_int_mirror(yTemp, stopY);
                break;
        }

#ifdef SK_DEBUG
        {
            SkPoint pt;
            s.fInvProc(s.fInvMatrix,
                       SkIntToScalar(x) + SK_ScalarHalf,
                       SkIntToScalar(y) + SK_ScalarHalf,
                       &pt);
            if (s.fInvType > SkMatrix::kTranslate_Mask &&
                (SkShader::kClamp_TileMode != s.fTileModeX ||
                 SkShader::kClamp_TileMode != s.fTileModeY)) {
                pt.fY *= s.fBitmap->height();
            }
            int iY2;

            switch (s.fTileModeY) {
                case SkShader::kClamp_TileMode:
                    iY2 = SkClampMax(SkScalarFloorToInt(pt.fY), stopY - 1);
                    break;
                case SkShader::kRepeat_TileMode:
                    iY2 = sk_int_mod(SkScalarFloorToInt(pt.fY), stopY);
                    break;
                case SkShader::kMirror_TileMode:
                default:
                    iY2 = sk_int_mirror(SkScalarFloorToInt(pt.fY), stopY);
                    break;
            }

            SkASSERT(iY0 == iY2);
        }
#endif
    }

    const SkPMColor* row0 = s.fBitmap->getAddr32(0, iY0);
    SkPMColor color;

    if (SkPaint::kNone_FilterLevel != s.fFilterLevel) {
        const SkPMColor* row1 = s.fBitmap->getAddr32(0, iY1);

        if (s.fAlphaScale < 256) {
            Filter_32_alpha(iSubY, *row0, *row1, &color, s.fAlphaScale);
        } else {
            Filter_32_opaque(iSubY, *row0, *row1, &color);
        }
    } else {
        if (s.fAlphaScale < 256) {
            color = SkAlphaMulQ(*row0, s.fAlphaScale);
        } else {
            color = *row0;
        }
    }

    sk_memset32(colors, color, count);
}
static void DoNothing_shaderproc(const SkBitmapProcState&, int x, int y,
                                 SkPMColor* SK_RESTRICT colors, int count) {
    // if we get called, the matrix is too tricky, so we just draw nothing
    sk_memset32(colors, 0, count);
}
bool SkBitmapProcState::setupForTranslate() {
    SkPoint pt;
    fInvProc(fInvMatrix, SK_ScalarHalf, SK_ScalarHalf, &pt);

    /*
     *  if the translate is larger than our ints, we can get random results, or
     *  worse, we might get 0x80000000, which wreaks havoc on us, since we can't
     *  negate it.
     */
    const SkScalar too_big = SkIntToScalar(1 << 30);
    if (SkScalarAbs(pt.fX) > too_big || SkScalarAbs(pt.fY) > too_big) {
        return false;
    }

    // Since we know we're not filtered, we re-purpose these fields to allow
    // us to go from device -> src coordinates w/ just an integer add,
    // rather than running through the inverse-matrix
    fFilterOneX = SkScalarFloorToInt(pt.fX);
    fFilterOneY = SkScalarFloorToInt(pt.fY);
    return true;
}
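
// Illustrative note (not in the original source): after setupForTranslate(),
// the pure-translate shaderprocs above map a device pixel (x, y) to source
// coordinates with a plain integer add:
//
//     int ix = s.fFilterOneX + x;   // instead of running fInvProc per pixel
//     int iy = s.fFilterOneY + y;
//
// which is exactly what Clamp_/Repeat_S32_D32_nofilter_trans_shaderproc do
// before applying their tile mode.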
SkBitmapProcState::ShaderProc32 SkBitmapProcState::chooseShaderProc32() {

    if (SkBitmap::kARGB_8888_Config != fBitmap->config()) {
        return NULL;
    }

    static const unsigned kMask = SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask;

    if (1 == fBitmap->width() && 0 == (fInvType & ~kMask)) {
        if (SkPaint::kNone_FilterLevel == fFilterLevel &&
            fInvType <= SkMatrix::kTranslate_Mask &&
            !this->setupForTranslate()) {
            return DoNothing_shaderproc;
        }
        return S32_D32_constX_shaderproc;
    }

    if (fAlphaScale < 256) {
        return NULL;
    }
    if (fInvType > SkMatrix::kTranslate_Mask) {
        return NULL;
    }
    if (SkPaint::kNone_FilterLevel != fFilterLevel) {
        return NULL;
    }

    SkShader::TileMode tx = (SkShader::TileMode)fTileModeX;
    SkShader::TileMode ty = (SkShader::TileMode)fTileModeY;

    if (SkShader::kClamp_TileMode == tx && SkShader::kClamp_TileMode == ty) {
        if (this->setupForTranslate()) {
            return Clamp_S32_D32_nofilter_trans_shaderproc;
        }
        return DoNothing_shaderproc;
    }
    if (SkShader::kRepeat_TileMode == tx && SkShader::kRepeat_TileMode == ty) {
        if (this->setupForTranslate()) {
            return Repeat_S32_D32_nofilter_trans_shaderproc;
        }
        return DoNothing_shaderproc;
    }
    return NULL;
}
///////////////////////////////////////////////////////////////////////////////

#ifdef SK_DEBUG

static void check_scale_nofilter(uint32_t bitmapXY[], int count,
                                 unsigned mx, unsigned my) {
    unsigned y = *bitmapXY++;
    SkASSERT(y < my);

    const uint16_t* xptr = reinterpret_cast<const uint16_t*>(bitmapXY);
    for (int i = 0; i < count; ++i) {
        SkASSERT(xptr[i] < mx);
    }
}

static void check_scale_filter(uint32_t bitmapXY[], int count,
                               unsigned mx, unsigned my) {
    uint32_t YY = *bitmapXY++;
    unsigned y0 = YY >> 18;
    unsigned y1 = YY & 0x3FFF;
    SkASSERT(y0 < my);
    SkASSERT(y1 < my);

    for (int i = 0; i < count; ++i) {
        uint32_t XX = bitmapXY[i];
        unsigned x0 = XX >> 18;
        unsigned x1 = XX & 0x3FFF;
        SkASSERT(x0 < mx);
        SkASSERT(x1 < mx);
    }
}

static void check_affine_nofilter(uint32_t bitmapXY[], int count,
                                  unsigned mx, unsigned my) {
    for (int i = 0; i < count; ++i) {
        uint32_t XY = bitmapXY[i];
        unsigned x = XY & 0xFFFF;
        unsigned y = XY >> 16;
        SkASSERT(x < mx);
        SkASSERT(y < my);
    }
}

static void check_affine_filter(uint32_t bitmapXY[], int count,
                                unsigned mx, unsigned my) {
    for (int i = 0; i < count; ++i) {
        uint32_t YY = *bitmapXY++;
        unsigned y0 = YY >> 18;
        unsigned y1 = YY & 0x3FFF;
        SkASSERT(y0 < my);
        SkASSERT(y1 < my);

        uint32_t XX = *bitmapXY++;
        unsigned x0 = XX >> 18;
        unsigned x1 = XX & 0x3FFF;
        SkASSERT(x0 < mx);
        SkASSERT(x1 < mx);
    }
}
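
// Illustrative note (not in the original source): the packed formats checked
// above are
//
//     nofilter: 16-bit integer coordinates ([ y:16 | x:16 ] per pixel in the
//               affine case; one shared y word plus packed 16-bit x's for scale)
//     filter:   [ c0:14 | subpixel:4 | c1:14 ] per coordinate, i.e. the two
//               neighboring texels plus a 4-bit interpolation weight
//
// e.g. a filtered word decodes as
//
//     unsigned c0  = packed >> 18;
//     unsigned sub = (packed >> 14) & 0xF;
//     unsigned c1  = packed & 0x3FFF;
//
// matching the unpacking in S32_D32_constX_shaderproc() and the 14-bit limit
// enforced by valid_for_filtering().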
void SkBitmapProcState::DebugMatrixProc(const SkBitmapProcState& state,
                                        uint32_t bitmapXY[], int count,
                                        int x, int y) {
    SkASSERT(bitmapXY);
    SkASSERT(count > 0);

    state.fMatrixProc(state, bitmapXY, count, x, y);

    void (*proc)(uint32_t bitmapXY[], int count, unsigned mx, unsigned my);

    // There are four formats possible:
    //  scale -vs- affine
    //  filter -vs- nofilter
    if (state.fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
        proc = state.fFilterLevel != SkPaint::kNone_FilterLevel ? check_scale_filter : check_scale_nofilter;
    } else {
        proc = state.fFilterLevel != SkPaint::kNone_FilterLevel ? check_affine_filter : check_affine_nofilter;
    }
    proc(bitmapXY, count, state.fBitmap->width(), state.fBitmap->height());
}

SkBitmapProcState::MatrixProc SkBitmapProcState::getMatrixProc() const {
    return DebugMatrixProc;
}

#endif
///////////////////////////////////////////////////////////////////////////////
/*
    The storage requirements for the different matrix procs are as follows,
    where each X or Y is 2 bytes, and N is the number of pixels/elements:

        scale/translate     nofilter    Y(4 bytes) + N * X
        affine/perspective  nofilter    N * (X Y)
        scale/translate     filter      Y Y + N * (X X)
        affine/perspective  filter      N * (Y Y X X)
 */
int SkBitmapProcState::maxCountForBufferSize(size_t bufferSize) const {
    int32_t size = static_cast<int32_t>(bufferSize);

    size &= ~3; // only care about 4-byte aligned chunks
    if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
        size -= 4;  // the shared Y (or YY) coordinate
        if (size < 0) {
            size = 0;
        }
        size >>= 1;
    } else {
        size >>= 2;
    }

    if (fFilterLevel != SkPaint::kNone_FilterLevel) {
        size >>= 1;
    }

    return size;
}
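
// Illustrative note (not in the original source): e.g. for a 64-byte buffer
// with a scale/translate matrix and filtering enabled:
//
//     size = 64 & ~3 = 64;  size -= 4 -> 60;  size >>= 1 -> 30;  size >>= 1 -> 15
//
// i.e. 15 pixels, consistent with the table above: 4 bytes of shared YY plus
// 4 bytes (X X) per pixel gives 4 + 15 * 4 = 64.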