gfx/skia/trunk/src/opts/SkBitmapProcState_opts_SSSE3.cpp

Sat, 03 Jan 2015 20:18:00 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Sat, 03 Jan 2015 20:18:00 +0100
branch
TOR_BUG_3246
changeset 7
129ffea94266
permissions
-rw-r--r--

Conditionally enable double key logic according to:
private browsing mode or privacy.thirdparty.isolate preference and
implement in GetCookieStringCommon and FindCookie where it counts...
With some reservations of how to convince FindCookie users to test
condition and pass a nullptr when disabling double key logic.

michael@0 1 /*
michael@0 2 * Copyright 2012 The Android Open Source Project
michael@0 3 *
michael@0 4 * Use of this source code is governed by a BSD-style license that can be
michael@0 5 * found in the LICENSE file.
michael@0 6 */
michael@0 7
michael@0 8 #include "SkBitmapProcState_opts_SSSE3.h"
michael@0 9 #include "SkPaint.h"
michael@0 10 #include "SkUtils.h"
michael@0 11
michael@0 12 /* With the exception of the Android framework we always build the SSSE3 functions
michael@0 13 * and enable the caller to determine SSSE3 support. However for the Android framework
michael@0 14 * if the device does not support SSSE3 then the compiler will not supply the required
michael@0 15 * -mssse3 option needed to build this file, so instead we provide a stub implementation.
michael@0 16 */
michael@0 17 #if !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) || SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
michael@0 18
michael@0 19 #include <tmmintrin.h> // SSSE3
michael@0 20
michael@0 21 // adding anonymous namespace seemed to force gcc to inline directly the
michael@0 22 // instantiation, instead of creating the functions
michael@0 23 // S32_generic_D32_filter_DX_SSSE3<true> and
michael@0 24 // S32_generic_D32_filter_DX_SSSE3<false> which were then called by the
michael@0 25 // external functions.
michael@0 26 namespace {
michael@0 27 // In this file, variations for alpha and non alpha versions are implemented
michael@0 28 // with a template, as it makes the code more compact and a bit easier to
michael@0 29 // maintain, while making the compiler generate the same exact code as with
michael@0 30 // two functions that only differ by a few lines.
michael@0 31
michael@0 32
// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept with argument two of
//           S32_{opaque}_D32_filter_DX methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of a XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of a XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_x_result vector of 8 bit components that will contain the
//                     (4x(x3), 4x(x2), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_x vector of 8 bit components, containing
//                        (4x(16 - x3), 4x(16 - x2), 4x(16 - x1), 4x(16 - x0))
// @param x0 output array receiving the four "left" texel indices (the top 14
//           bits of each XY word); must have room for at least 4 ints.
// @param x1 output array receiving the four "right" texel indices (the bottom
//           14 bits of each XY word); must have room for at least 4 ints.
inline void PrepareConstantsTwoPixelPairs(const uint32_t* xy,
                                          const __m128i& mask_3FFF,
                                          const __m128i& mask_000F,
                                          const __m128i& sixteen_8bit,
                                          const __m128i& mask_dist_select,
                                          __m128i* all_x_result,
                                          __m128i* sixteen_minus_x,
                                          int* x0,
                                          int* x1) {
    const __m128i xx = _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // 4 delta X
    // (x03, x02, x01, x00)
    const __m128i x0_wide = _mm_srli_epi32(xx, 18);
    // (x13, x12, x11, x10)
    const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF);

    // Spill the integer texel indices to memory so scalar code can index rows.
    _mm_storeu_si128(reinterpret_cast<__m128i *>(x0), x0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(x1), x1_wide);

    // Extract the 4-bit sub-pixel fractions (bits 14..17 of each XY word).
    __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F);

    // (4x(x3), 4x(x2), 4x(x1), 4x(x0))
    all_x = _mm_shuffle_epi8(all_x, mask_dist_select);

    *all_x_result = all_x;
    // (4x(16-x3), 4x(16-x2), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_x = _mm_sub_epi8(sixteen_8bit, all_x);
}
michael@0 78
// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept with argument two of
//           S32_{opaque}_D32_filter_DXDY methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of a XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of a XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_xy_result vector of 8 bit components that will contain the
//                      (4x(y1), 4x(y0), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_xy vector of 8 bit components, containing
//                         (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0)).
// @param xy0 output array receiving the (y10, y00, x10, x00) integer indices;
//            must have room for at least 4 ints.
// @param xy1 output array receiving the (y11, y01, x11, x01) integer indices;
//            must have room for at least 4 ints.
inline void PrepareConstantsTwoPixelPairsDXDY(const uint32_t* xy,
                                              const __m128i& mask_3FFF,
                                              const __m128i& mask_000F,
                                              const __m128i& sixteen_8bit,
                                              const __m128i& mask_dist_select,
                                              __m128i* all_xy_result,
                                              __m128i* sixteen_minus_xy,
                                              int* xy0, int* xy1) {
    const __m128i xy_wide =
        _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // (x10, y10, x00, y00)
    __m128i xy0_wide = _mm_srli_epi32(xy_wide, 18);
    // (y10, y00, x10, x00)
    xy0_wide = _mm_shuffle_epi32(xy0_wide, _MM_SHUFFLE(2, 0, 3, 1));
    // (x11, y11, x01, y01)
    __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF);
    // (y11, y01, x11, x01)
    xy1_wide = _mm_shuffle_epi32(xy1_wide, _MM_SHUFFLE(2, 0, 3, 1));

    // Spill the integer indices to memory so scalar code can index rows.
    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy0), xy0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy1), xy1_wide);

    // (x1, y1, x0, y0) — the 4-bit sub-pixel fractions of each coordinate.
    __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F);
    // (y1, y0, x1, x0)
    all_xy = _mm_shuffle_epi32(all_xy, _MM_SHUFFLE(2, 0, 3, 1));
    // (4x(y1), 4x(y0), 4x(x1), 4x(x0))
    all_xy = _mm_shuffle_epi8(all_xy, mask_dist_select);

    *all_xy_result = all_xy;
    // (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_xy = _mm_sub_epi8(sixteen_8bit, all_xy);
}
michael@0 129
// Helper used when processing one pixel pair: interleaves the four source
// pixels byte-wise into one register and performs the horizontal
// multiply-add against the x blend factors.
// @param pixel0..3 are the four input pixels
// @param scale_x vector of 8 bit components to multiply the pixel[0:3]. This
//                will contain (4x(x1, 16-x1), 4x(x0, 16-x0))
//                or (4x(x3, 16-x3), 4x(x2, 16-x2))
// @return a vector of 16 bit components containing:
// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
inline __m128i ProcessPixelPairHelper(uint32_t pixel0,
                                      uint32_t pixel1,
                                      uint32_t pixel2,
                                      uint32_t pixel3,
                                      const __m128i& scale_x) {
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    const __m128i pair01 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(pixel0),
                                             _mm_cvtsi32_si128(pixel1));
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2)
    const __m128i pair23 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(pixel2),
                                             _mm_cvtsi32_si128(pixel3));

    // Both interleaved pairs packed into a single register:
    // (Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2,
    //  Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    const __m128i all_pixels = _mm_unpacklo_epi64(pair01, pair23);

    // Multiply and sum to 16 bit components. Each lane uses a bit less than
    // 12 bits: all components are below 255, so
    // C0 * (16 - x) + C1 * x <= 255 * (16 - x) + 255 * x = 255 * 16.
    return _mm_maddubs_epi16(all_pixels, scale_x);
}
michael@0 169
// Scale back the results after multiplications to the [0:255] range, and
// scale by alpha when has_alpha is true.
// Depending on whether one set or two sets of multiplications were applied,
// the results have to be divided by 16 (shift by four) or by 256 (shift by
// eight), since every multiplication factor lies in the range [0:16].
template<bool has_alpha, int scale>
inline __m128i ScaleFourPixels(__m128i* pixels,
                               const __m128i& alpha) {
    // Renormalize: divide each 16 bit component by 2^scale (16 or 256).
    __m128i result = _mm_srli_epi16(*pixels, scale);

    if (has_alpha) {
        // Apply the alpha factor, then divide each 16 bit lane by 256.
        result = _mm_srli_epi16(_mm_mullo_epi16(result, alpha), 8);
    }

    *pixels = result;
    return result;
}
michael@0 191
michael@0 192 // Wrapper to calculate two output pixels from four input pixels. The
michael@0 193 // arguments are the same as ProcessPixelPairHelper. Technically, there are
michael@0 194 // eight input pixels, but since sub_y == 0, the factors applied to half of the
michael@0 195 // pixels is zero (sub_y), and are therefore omitted here to save on some
michael@0 196 // processing.
michael@0 197 // @param alpha when has_alpha is true, scale all resulting components by this
michael@0 198 // value.
michael@0 199 // @return a vector of 16 bit components containing:
michael@0 200 // ((Aa2 * (16 - x1) + Aa3 * x1) * alpha, ...,
michael@0 201 // (Ra0 * (16 - x0) + Ra1 * x0) * alpha) (when has_alpha is true)
michael@0 202 // otherwise
michael@0 203 // (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
michael@0 204 // In both cases, the results are renormalized (divided by 16) to match the
michael@0 205 // expected formats when storing back the results into memory.
michael@0 206 template<bool has_alpha>
michael@0 207 inline __m128i ProcessPixelPairZeroSubY(uint32_t pixel0,
michael@0 208 uint32_t pixel1,
michael@0 209 uint32_t pixel2,
michael@0 210 uint32_t pixel3,
michael@0 211 const __m128i& scale_x,
michael@0 212 const __m128i& alpha) {
michael@0 213 __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
michael@0 214 scale_x);
michael@0 215 return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
michael@0 216 }
michael@0 217
michael@0 218 // Same as ProcessPixelPairZeroSubY, expect processing one output pixel at a
michael@0 219 // time instead of two. As in the above function, only two pixels are needed
michael@0 220 // to generate a single pixel since sub_y == 0.
michael@0 221 // @return same as ProcessPixelPairZeroSubY, except that only the bottom 4
michael@0 222 // 16 bit components are set.
michael@0 223 template<bool has_alpha>
michael@0 224 inline __m128i ProcessOnePixelZeroSubY(uint32_t pixel0,
michael@0 225 uint32_t pixel1,
michael@0 226 __m128i scale_x,
michael@0 227 __m128i alpha) {
michael@0 228 __m128i a0 = _mm_cvtsi32_si128(pixel0);
michael@0 229 __m128i a1 = _mm_cvtsi32_si128(pixel1);
michael@0 230
michael@0 231 // Interleave
michael@0 232 a0 = _mm_unpacklo_epi8(a0, a1);
michael@0 233
michael@0 234 // (a0 * (16-x) + a1 * x)
michael@0 235 __m128i sum = _mm_maddubs_epi16(a0, scale_x);
michael@0 236
michael@0 237 return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
michael@0 238 }
michael@0 239
michael@0 240 // Methods when sub_y != 0
michael@0 241
michael@0 242
michael@0 243 // Same as ProcessPixelPairHelper, except that the values are scaled by y.
michael@0 244 // @param y vector of 16 bit components containing 'y' values. There are two
michael@0 245 // cases in practice, where y will contain the sub_y constant, or will
michael@0 246 // contain the 16 - sub_y constant.
michael@0 247 // @return vector of 16 bit components containing:
michael@0 248 // (y * (Aa2 * (16 - x1) + Aa3 * x1), ... , y * (Ra0 * (16 - x0) + Ra1 * x0))
michael@0 249 inline __m128i ProcessPixelPair(uint32_t pixel0,
michael@0 250 uint32_t pixel1,
michael@0 251 uint32_t pixel2,
michael@0 252 uint32_t pixel3,
michael@0 253 const __m128i& scale_x,
michael@0 254 const __m128i& y) {
michael@0 255 __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
michael@0 256 scale_x);
michael@0 257
michael@0 258 // first row times 16-y or y depending on whether 'y' represents one or
michael@0 259 // the other.
michael@0 260 // Values will be up to 255 * 16 * 16 = 65280.
michael@0 261 // (y * (Aa2 * (16 - x1) + Aa3 * x1), ... ,
michael@0 262 // y * (Ra0 * (16 - x0) + Ra1 * x0))
michael@0 263 sum = _mm_mullo_epi16(sum, y);
michael@0 264
michael@0 265 return sum;
michael@0 266 }
michael@0 267
michael@0 268 // Process two pixel pairs out of eight input pixels.
michael@0 269 // In other methods, the distinct pixels are passed one by one, but in this
michael@0 270 // case, the rows, and index offsets to the pixels into the row are passed
michael@0 271 // to generate the 8 pixels.
michael@0 272 // @param row0..1 top and bottom row where to find input pixels.
michael@0 273 // @param x0..1 offsets into the row for all eight input pixels.
michael@0 274 // @param all_y vector of 16 bit components containing the constant sub_y
michael@0 275 // @param neg_y vector of 16 bit components containing the constant 16 - sub_y
michael@0 276 // @param alpha vector of 16 bit components containing the alpha value to scale
michael@0 277 // the results by, when has_alpha is true.
michael@0 278 // @return
michael@0 279 // (alpha * ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
michael@0 280 // y * (Aa2' * (16-x1) + Aa3' * x1)),
michael@0 281 // ...
michael@0 282 // alpha * ((16-y) * (Ra0 * (16-x0) + Ra1 * x0) +
michael@0 283 // y * (Ra0' * (16-x0) + Ra1' * x0))
michael@0 284 // With the factor alpha removed when has_alpha is false.
michael@0 285 // The values are scaled back to 16 bit components, but with only the bottom
michael@0 286 // 8 bits being set.
michael@0 287 template<bool has_alpha>
michael@0 288 inline __m128i ProcessTwoPixelPairs(const uint32_t* row0,
michael@0 289 const uint32_t* row1,
michael@0 290 const int* x0,
michael@0 291 const int* x1,
michael@0 292 const __m128i& scale_x,
michael@0 293 const __m128i& all_y,
michael@0 294 const __m128i& neg_y,
michael@0 295 const __m128i& alpha) {
michael@0 296 __m128i sum0 = ProcessPixelPair(
michael@0 297 row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
michael@0 298 scale_x, neg_y);
michael@0 299 __m128i sum1 = ProcessPixelPair(
michael@0 300 row1[x0[0]], row1[x1[0]], row1[x0[1]], row1[x1[1]],
michael@0 301 scale_x, all_y);
michael@0 302
michael@0 303 // 2 samples fully summed.
michael@0 304 // ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
michael@0 305 // y * (Aa2' * (16-x1) + Aa3' * x1),
michael@0 306 // ...
michael@0 307 // (16-y) * (Ra0 * (16 - x0) + Ra1 * x0)) +
michael@0 308 // y * (Ra0' * (16-x0) + Ra1' * x0))
michael@0 309 // Each component, again can be at most 256 * 255 = 65280, so no overflow.
michael@0 310 sum0 = _mm_add_epi16(sum0, sum1);
michael@0 311
michael@0 312 return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
michael@0 313 }
michael@0 314
michael@0 315 // Similar to ProcessTwoPixelPairs except the pixel indexes.
michael@0 316 template<bool has_alpha>
michael@0 317 inline __m128i ProcessTwoPixelPairsDXDY(const uint32_t* row00,
michael@0 318 const uint32_t* row01,
michael@0 319 const uint32_t* row10,
michael@0 320 const uint32_t* row11,
michael@0 321 const int* xy0,
michael@0 322 const int* xy1,
michael@0 323 const __m128i& scale_x,
michael@0 324 const __m128i& all_y,
michael@0 325 const __m128i& neg_y,
michael@0 326 const __m128i& alpha) {
michael@0 327 // first row
michael@0 328 __m128i sum0 = ProcessPixelPair(
michael@0 329 row00[xy0[0]], row00[xy1[0]], row10[xy0[1]], row10[xy1[1]],
michael@0 330 scale_x, neg_y);
michael@0 331 // second row
michael@0 332 __m128i sum1 = ProcessPixelPair(
michael@0 333 row01[xy0[0]], row01[xy1[0]], row11[xy0[1]], row11[xy1[1]],
michael@0 334 scale_x, all_y);
michael@0 335
michael@0 336 // 2 samples fully summed.
michael@0 337 // ((16-y1) * (Aa2 * (16-x1) + Aa3 * x1) +
michael@0 338 // y0 * (Aa2' * (16-x1) + Aa3' * x1),
michael@0 339 // ...
michael@0 340 // (16-y0) * (Ra0 * (16 - x0) + Ra1 * x0)) +
michael@0 341 // y0 * (Ra0' * (16-x0) + Ra1' * x0))
michael@0 342 // Each component, again can be at most 256 * 255 = 65280, so no overflow.
michael@0 343 sum0 = _mm_add_epi16(sum0, sum1);
michael@0 344
michael@0 345 return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
michael@0 346 }
michael@0 347
michael@0 348
michael@0 349 // Same as ProcessPixelPair, except that performing the math one output pixel
michael@0 350 // at a time. This means that only the bottom four 16 bit components are set.
michael@0 351 inline __m128i ProcessOnePixel(uint32_t pixel0, uint32_t pixel1,
michael@0 352 const __m128i& scale_x, const __m128i& y) {
michael@0 353 __m128i a0 = _mm_cvtsi32_si128(pixel0);
michael@0 354 __m128i a1 = _mm_cvtsi32_si128(pixel1);
michael@0 355
michael@0 356 // Interleave
michael@0 357 // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
michael@0 358 a0 = _mm_unpacklo_epi8(a0, a1);
michael@0 359
michael@0 360 // (a0 * (16-x) + a1 * x)
michael@0 361 a0 = _mm_maddubs_epi16(a0, scale_x);
michael@0 362
michael@0 363 // scale row by y
michael@0 364 return _mm_mullo_epi16(a0, y);
michael@0 365 }
michael@0 366
michael@0 367 // Notes about the various tricks that are used in this implementation:
michael@0 368 // - specialization for sub_y == 0.
michael@0 369 // Statistically, 1/16th of the samples will have sub_y == 0. When this
michael@0 370 // happens, the math goes from:
michael@0 371 // (16 - x)*(16 - y)*a00 + x*(16 - y)*a01 + (16 - x)*y*a10 + x*y*a11
michael@0 372 // to:
michael@0 373 // (16 - x)*a00 + 16*x*a01
michael@0 374 // much simpler. The simplification makes for an easy boost in performance.
michael@0 375 // - calculating 4 output pixels at a time.
michael@0 376 // This allows loading the coefficients x0 and x1 and shuffling them to the
michael@0 377 // optimum location only once per loop, instead of twice per loop.
michael@0 378 // This also allows us to store the four pixels with a single store.
michael@0 379 // - Use of 2 special SSSE3 instructions (comparatively to the SSE2 instruction
michael@0 380 // version):
michael@0 381 // _mm_shuffle_epi8 : this allows us to spread the coefficients x[0-3] loaded
michael@0 382 // in 32 bit values to 8 bit values repeated four times.
michael@0 383 // _mm_maddubs_epi16 : this allows us to perform multiplications and additions
michael@0 384 // in one swoop of 8bit values storing the results in 16 bit values. This
michael@0 385 // instruction is actually crucial for the speed of the implementation since
michael@0 386 // as one can see in the SSE2 implementation, all inputs have to be used as
michael@0 387 // 16 bits because the results are 16 bits. This basically allows us to process
michael@0 388 // twice as many pixel components per iteration.
michael@0 389 //
michael@0 390 // As a result, this method behaves faster than the traditional SSE2. The actual
michael@0 391 // boost varies greatly on the underlying architecture.
// Bilinear filtering along X only (DY is constant for the whole span).
// @param s sampler state; supplies the source bitmap, row stride and the
//          alpha scale (fAlphaScale) asserted below.
// @param xy packed coordinates: one leading Y word (y0:14 | sub_y:4 | y1:14),
//           then one word per output pixel (x0:14 | sub_x:4 | x1:14).
// @param count number of output pixels to produce; must be > 0.
// @param colors destination for 'count' ARGB_8888 results.
template<bool has_alpha>
void S32_generic_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(s.fFilterLevel != SkPaint::kNone_FilterLevel);
    SkASSERT(s.fBitmap->config() == SkBitmap::kARGB_8888_Config);
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
        static_cast<const uint8_t*>(s.fBitmap->getPixels());
    const size_t rb = s.fBitmap->rowBytes();
    // First packed word carries the Y information for the whole span.
    const uint32_t XY = *xy++;
    const unsigned y0 = XY >> 14;
    const uint32_t* row0 =
        reinterpret_cast<const uint32_t*>(src_addr + (y0 >> 4) * rb);
    const uint32_t* row1 =
        reinterpret_cast<const uint32_t*>(src_addr + (XY & 0x3FFF) * rb);
    const unsigned sub_y = y0 & 0xF;

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8,  8,  8,  8,
                                                  4,  4,  4,  4,
                                                  0,  0,  0,  0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);
    // (0, 0, 0, 0, 0, 0, 0, 0)
    const __m128i zero = _mm_setzero_si128();

    __m128i alpha = _mm_setzero_si128();
    if (has_alpha)
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);

    if (sub_y == 0) {
        // Fast path: sub_y == 0 means the second row contributes nothing.
        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs.
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
                scale_x, alpha);

            // second pair of pixel pairs
            // (4x (x3, 16-x3), 4x (16-x2, x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[2]], row0[x1[2]], row0[x0[3]], row0[x1[3]],
                scale_x, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store all four output pixels at once.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // handle remainder
        while (count-- > 0) {
            uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            unsigned x0 = xx >> 18;
            unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // (16x(16-x))
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            __m128i sum = ProcessOnePixelZeroSubY<has_alpha>(
                row0[x0], row0[x1],
                scale_x, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum = _mm_packus_epi16(sum, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum);
        }
    } else {  // more general case, y != 0
        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(sub_y);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0, x1,
                scale_x, all_y, neg_y, alpha);

            // second pair of pixel pairs
            // (4x (x3, 16-x3), 4x (16-x2, x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0 + 2, x1 + 2,
                scale_x, all_y, neg_y, alpha);

            // Do the final packing of the two results

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store all four output pixels at once.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // Left over.
        while (count-- > 0) {
            const uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            const unsigned x0 = xx >> 18;
            const unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // 16x (16-x)
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            // (8x (x, 16-x))
            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            // first row.
            __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
            // second row.
            __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

            // Add both rows for full sample
            sum0 = _mm_add_epi16(sum0, sum1);

            sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum0 = _mm_packus_epi16(sum0, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum0);
        }
    }
}
michael@0 578
michael@0 579 /*
michael@0 580 * Similar to S32_generic_D32_filter_DX_SSSE3, we do not need to handle the
michael@0 581 * special case suby == 0 as suby is changing in every loop.
michael@0 582 */
michael@0 583 template<bool has_alpha>
michael@0 584 void S32_generic_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
michael@0 585 const uint32_t* xy,
michael@0 586 int count, uint32_t* colors) {
michael@0 587 SkASSERT(count > 0 && colors != NULL);
michael@0 588 SkASSERT(s.fFilterLevel != SkPaint::kNone_FilterLevel);
michael@0 589 SkASSERT(s.fBitmap->config() == SkBitmap::kARGB_8888_Config);
michael@0 590 if (has_alpha) {
michael@0 591 SkASSERT(s.fAlphaScale < 256);
michael@0 592 } else {
michael@0 593 SkASSERT(s.fAlphaScale == 256);
michael@0 594 }
michael@0 595
michael@0 596 const uint8_t* src_addr =
michael@0 597 static_cast<const uint8_t*>(s.fBitmap->getPixels());
michael@0 598 const size_t rb = s.fBitmap->rowBytes();
michael@0 599
michael@0 600 // vector constants
michael@0 601 const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
michael@0 602 8, 8, 8, 8,
michael@0 603 4, 4, 4, 4,
michael@0 604 0, 0, 0, 0);
michael@0 605 const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
michael@0 606 const __m128i mask_000F = _mm_set1_epi32(0x000F);
michael@0 607 const __m128i sixteen_8bit = _mm_set1_epi8(16);
michael@0 608
michael@0 609 __m128i alpha;
michael@0 610 if (has_alpha) {
michael@0 611 // 8x(alpha)
michael@0 612 alpha = _mm_set1_epi16(s.fAlphaScale);
michael@0 613 }
michael@0 614
michael@0 615 // Unroll 2x, interleave bytes, use pmaddubsw (all_x is small)
michael@0 616 while (count >= 2) {
michael@0 617 int xy0[4];
michael@0 618 int xy1[4];
michael@0 619 __m128i all_xy, sixteen_minus_xy;
michael@0 620 PrepareConstantsTwoPixelPairsDXDY(xy, mask_3FFF, mask_000F,
michael@0 621 sixteen_8bit, mask_dist_select,
michael@0 622 &all_xy, &sixteen_minus_xy, xy0, xy1);
michael@0 623
michael@0 624 // (4x(x1, 16-x1), 4x(x0, 16-x0))
michael@0 625 __m128i scale_x = _mm_unpacklo_epi8(sixteen_minus_xy, all_xy);
michael@0 626 // (4x(0, y1), 4x(0, y0))
michael@0 627 __m128i all_y = _mm_unpackhi_epi8(all_xy, _mm_setzero_si128());
michael@0 628 __m128i neg_y = _mm_sub_epi16(_mm_set1_epi16(16), all_y);
michael@0 629
michael@0 630 const uint32_t* row00 =
michael@0 631 reinterpret_cast<const uint32_t*>(src_addr + xy0[2] * rb);
michael@0 632 const uint32_t* row01 =
michael@0 633 reinterpret_cast<const uint32_t*>(src_addr + xy1[2] * rb);
michael@0 634 const uint32_t* row10 =
michael@0 635 reinterpret_cast<const uint32_t*>(src_addr + xy0[3] * rb);
michael@0 636 const uint32_t* row11 =
michael@0 637 reinterpret_cast<const uint32_t*>(src_addr + xy1[3] * rb);
michael@0 638
michael@0 639 __m128i sum0 = ProcessTwoPixelPairsDXDY<has_alpha>(
michael@0 640 row00, row01, row10, row11, xy0, xy1,
michael@0 641 scale_x, all_y, neg_y, alpha);
michael@0 642
michael@0 643 // Pack lower 4 16 bit values of sum into lower 4 bytes.
michael@0 644 sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());
michael@0 645
michael@0 646 // Extract low int and store.
michael@0 647 _mm_storel_epi64(reinterpret_cast<__m128i *>(colors), sum0);
michael@0 648
michael@0 649 xy += 4;
michael@0 650 colors += 2;
michael@0 651 count -= 2;
michael@0 652 }
michael@0 653
michael@0 654 // Handle the remainder
michael@0 655 while (count-- > 0) {
michael@0 656 uint32_t data = *xy++;
michael@0 657 unsigned y0 = data >> 14;
michael@0 658 unsigned y1 = data & 0x3FFF;
michael@0 659 unsigned subY = y0 & 0xF;
michael@0 660 y0 >>= 4;
michael@0 661
michael@0 662 data = *xy++;
michael@0 663 unsigned x0 = data >> 14;
michael@0 664 unsigned x1 = data & 0x3FFF;
michael@0 665 unsigned subX = x0 & 0xF;
michael@0 666 x0 >>= 4;
michael@0 667
michael@0 668 const uint32_t* row0 =
michael@0 669 reinterpret_cast<const uint32_t*>(src_addr + y0 * rb);
michael@0 670 const uint32_t* row1 =
michael@0 671 reinterpret_cast<const uint32_t*>(src_addr + y1 * rb);
michael@0 672
michael@0 673 // 16x(x)
michael@0 674 const __m128i all_x = _mm_set1_epi8(subX);
michael@0 675
michael@0 676 // 16x (16-x)
michael@0 677 __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);
michael@0 678
michael@0 679 // (8x (x, 16-x))
michael@0 680 scale_x = _mm_unpacklo_epi8(scale_x, all_x);
michael@0 681
michael@0 682 // 8x(16)
michael@0 683 const __m128i sixteen_16bit = _mm_set1_epi16(16);
michael@0 684
michael@0 685 // 8x (y)
michael@0 686 const __m128i all_y = _mm_set1_epi16(subY);
michael@0 687
michael@0 688 // 8x (16-y)
michael@0 689 const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);
michael@0 690
michael@0 691 // first row.
michael@0 692 __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
michael@0 693 // second row.
michael@0 694 __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);
michael@0 695
michael@0 696 // Add both rows for full sample
michael@0 697 sum0 = _mm_add_epi16(sum0, sum1);
michael@0 698
michael@0 699 sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
michael@0 700
michael@0 701 // Pack lower 4 16 bit values of sum into lower 4 bytes.
michael@0 702 sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());
michael@0 703
michael@0 704 // Extract low int and store.
michael@0 705 *colors++ = _mm_cvtsi128_si32(sum0);
michael@0 706 }
michael@0 707 }
}  // namespace
michael@0 709
// Public entry point: X-only bilinear filtering for a fully opaque source
// (fAlphaScale == 256), dispatching to the has_alpha == false template.
void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<false>(s, xy, count, colors);
}
michael@0 715
// Public entry point: X-only bilinear filtering with an extra per-span alpha
// scale (fAlphaScale < 256), dispatching to the has_alpha == true template.
void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<true>(s, xy, count, colors);
}
michael@0 721
// Public entry point: XY bilinear filtering for a fully opaque source
// (fAlphaScale == 256), dispatching to the has_alpha == false template.
void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<false>(s, xy, count, colors);
}
michael@0 727
// Public entry point: XY bilinear filtering with an extra per-span alpha
// scale (fAlphaScale < 256), dispatching to the has_alpha == true template.
void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<true>(s, xy, count, colors);
}
michael@0 733
michael@0 734 #else // !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) || SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
michael@0 735
// Stub for Android framework builds without SSSE3: this entry point must
// never be reached at runtime, so it aborts via sk_throw().
void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    sk_throw();
}
michael@0 741
// Stub for Android framework builds without SSSE3: this entry point must
// never be reached at runtime, so it aborts via sk_throw().
void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    sk_throw();
}
michael@0 747
// Stub for Android framework builds without SSSE3: this entry point must
// never be reached at runtime, so it aborts via sk_throw().
void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    sk_throw();
}
michael@0 753
// Stub for Android framework builds without SSSE3: this entry point must
// never be reached at runtime, so it aborts via sk_throw().
void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    sk_throw();
}
michael@0 759
michael@0 760 #endif

mercurial