gfx/skia/trunk/src/opts/SkBitmapProcState_opts_SSSE3.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/gfx/skia/trunk/src/opts/SkBitmapProcState_opts_SSSE3.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,760 @@
     1.4 +/*
     1.5 + * Copyright 2012 The Android Open Source Project
     1.6 + *
     1.7 + * Use of this source code is governed by a BSD-style license that can be
     1.8 + * found in the LICENSE file.
     1.9 + */
    1.10 +
    1.11 +#include "SkBitmapProcState_opts_SSSE3.h"
    1.12 +#include "SkPaint.h"
    1.13 +#include "SkUtils.h"
    1.14 +
    1.15 +/* With the exception of the Android framework we always build the SSSE3 functions
    1.16 + * and enable the caller to determine SSSE3 support.  However for the Android framework
    1.17 + * if the device does not support SSSE3 then the compiler will not supply the required
    1.18 + * -mssse3 option needed to build this file, so instead we provide a stub implementation.
    1.19 + */
    1.20 +#if !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) || SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    1.21 +
    1.22 +#include <tmmintrin.h>  // SSSE3
    1.23 +
    1.24 +// adding anonymous namespace seemed to force gcc to inline directly the
    1.25 +// instantiation, instead of creating the functions
    1.26 +// S32_generic_D32_filter_DX_SSSE3<true> and
    1.27 +// S32_generic_D32_filter_DX_SSSE3<false> which were then called by the
    1.28 +// external functions.
    1.29 +namespace {
    1.30 +// In this file, variations for alpha and non alpha versions are implemented
    1.31 +// with a template, as it makes the code more compact and a bit easier to
    1.32 +// maintain, while making the compiler generate the same exact code as with
    1.33 +// two functions that only differ by a few lines.
    1.34 +
    1.35 +
// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept with argument two of
//           S32_{opaque}_D32_filter_DX methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of a XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of a XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_x_result vector of 8 bit components that will contain the
//              (4x(x3), 4x(x2), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_x vector of 8 bit components, containing
//              (4x(16 - x3), 4x(16 - x2), 4x(16 - x1), 4x(16 - x0))
// @param x0 output array of at least four ints; receives the left-sample
//           pixel offsets (x00..x03) of the four pairs, for scalar indexing.
// @param x1 output array of at least four ints; receives the right-sample
//           pixel offsets (x10..x13) of the four pairs.
inline void PrepareConstantsTwoPixelPairs(const uint32_t* xy,
                                          const __m128i& mask_3FFF,
                                          const __m128i& mask_000F,
                                          const __m128i& sixteen_8bit,
                                          const __m128i& mask_dist_select,
                                          __m128i* all_x_result,
                                          __m128i* sixteen_minus_x,
                                          int* x0,
                                          int* x1) {
    const __m128i xx = _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // 4 delta X
    // Each 32 bit XY value packs x0:14 | sub_x:4 | x1:14.
    // (x03, x02, x01, x00)
    const __m128i x0_wide = _mm_srli_epi32(xx, 18);
    // (x13, x12, x11, x10)
    const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF);

    // Spill the 4 x0 and 4 x1 offsets to memory so they can be used as
    // scalar array indexes by the callers.
    _mm_storeu_si128(reinterpret_cast<__m128i *>(x0), x0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(x1), x1_wide);

    // Extract the 4 bit sub-pixel x coefficients: (x3, x2, x1, x0).
    __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F);

    // (4x(x3), 4x(x2), 4x(x1), 4x(x0))
    all_x = _mm_shuffle_epi8(all_x, mask_dist_select);

    *all_x_result = all_x;
    // (4x(16-x3), 4x(16-x2), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_x = _mm_sub_epi8(sixteen_8bit, all_x);
}
    1.81 +
// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept with argument two of
//           S32_{opaque}_D32_filter_DXDY methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of a XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of a XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_xy_result vector of 8 bit components that will contain the
//              (4x(y1), 4x(y0), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_xy vector of 8 bit components, containing
//              (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0)).
// @param xy0 output array of at least four ints; receives
//            (x00, x10, y00, y10): the left/top sample offsets, i.e.
//            xy0[0..1] are column offsets and xy0[2..3] are row indices.
// @param xy1 output array of at least four ints; receives
//            (x01, x11, y01, y11): the right/bottom sample offsets.
inline void PrepareConstantsTwoPixelPairsDXDY(const uint32_t* xy,
                                              const __m128i& mask_3FFF,
                                              const __m128i& mask_000F,
                                              const __m128i& sixteen_8bit,
                                              const __m128i& mask_dist_select,
                                              __m128i* all_xy_result,
                                              __m128i* sixteen_minus_xy,
                                              int* xy0, int* xy1) {
    const __m128i xy_wide =
                        _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // (x10, y10, x00, y00)
    __m128i xy0_wide = _mm_srli_epi32(xy_wide, 18);
    // (y10, y00, x10, x00)
    xy0_wide =  _mm_shuffle_epi32(xy0_wide, _MM_SHUFFLE(2, 0, 3, 1));
    // (x11, y11, x01, y01)
    __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF);
    // (y11, y01, x11, x01)
    xy1_wide = _mm_shuffle_epi32(xy1_wide, _MM_SHUFFLE(2, 0, 3, 1));

    // Spill to memory so the callers can use the values as scalar indexes.
    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy0), xy0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy1), xy1_wide);

    // (x1, y1, x0, y0)
    __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F);
    // (y1, y0, x1, x0)
    all_xy = _mm_shuffle_epi32(all_xy, _MM_SHUFFLE(2, 0, 3, 1));
    // (4x(y1), 4x(y0), 4x(x1), 4x(x0))
    all_xy = _mm_shuffle_epi8(all_xy, mask_dist_select);

    *all_xy_result = all_xy;
    // (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_xy = _mm_sub_epi8(sixteen_8bit, all_xy);
}
   1.132 +
// Helper function used when processing one pixel pair.
// @param pixel0..3 are the four input pixels, packed 8-bit-per-component
//                  32 bit values (pixel0/1 form the first pair, pixel2/3
//                  the second).
// @param scale_x vector of 8 bit components to multiply the pixel[0:3]. This
//                will contain (4x(x1, 16-x1), 4x(x0, 16-x0))
//                or (4x(x3, 16-x3), 4x(x2, 16-x2))
// @return a vector of 16 bit components containing:
// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
inline __m128i ProcessPixelPairHelper(uint32_t pixel0,
                                      uint32_t pixel1,
                                      uint32_t pixel2,
                                      uint32_t pixel3,
                                      const __m128i& scale_x) {
    __m128i a0, a1, a2, a3;
    // Load 2 pairs of pixels
    a0 = _mm_cvtsi32_si128(pixel0);
    a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave pixels.
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    a2 = _mm_cvtsi32_si128(pixel2);
    a3 = _mm_cvtsi32_si128(pixel3);
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2)
    a2 = _mm_unpacklo_epi8(a2, a3);

    // two pairs of pixel pairs, interleaved.
    // (Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2,
    //  Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi64(a0, a2);

    // multiply and sum to 16 bit components.
    // (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
    // At that point, we use up a bit less than 12 bits for each 16 bit
    // component:
    // All components are less than 255. So,
    // C0 * (16 - x) + C1 * x <= 255 * (16 - x) + 255 * x = 255 * 16.
    return _mm_maddubs_epi16(a0, scale_x);
}
   1.172 +
   1.173 +// Scale back the results after multiplications to the [0:255] range, and scale
   1.174 +// by alpha when has_alpha is true.
   1.175 +// Depending on whether one set or two sets of multiplications had been applied,
   1.176 +// the results have to be shifted by four places (dividing by 16), or shifted
   1.177 +// by eight places (dividing by 256), since each multiplication is by a quantity
   1.178 +// in the range [0:16].
   1.179 +template<bool has_alpha, int scale>
   1.180 +inline __m128i ScaleFourPixels(__m128i* pixels,
   1.181 +                               const __m128i& alpha) {
   1.182 +    // Divide each 16 bit component by 16 (or 256 depending on scale).
   1.183 +    *pixels = _mm_srli_epi16(*pixels, scale);
   1.184 +
   1.185 +    if (has_alpha) {
   1.186 +        // Multiply by alpha.
   1.187 +        *pixels = _mm_mullo_epi16(*pixels, alpha);
   1.188 +
   1.189 +        // Divide each 16 bit component by 256.
   1.190 +        *pixels = _mm_srli_epi16(*pixels, 8);
   1.191 +    }
   1.192 +    return *pixels;
   1.193 +}
   1.194 +
   1.195 +// Wrapper to calculate two output pixels from four input pixels. The
   1.196 +// arguments are the same as ProcessPixelPairHelper. Technically, there are
   1.197 +// eight input pixels, but since sub_y == 0, the factors applied to half of the
   1.198 +// pixels is zero (sub_y), and are therefore omitted here to save on some
   1.199 +// processing.
   1.200 +// @param alpha when has_alpha is true, scale all resulting components by this
   1.201 +//              value.
   1.202 +// @return a vector of 16 bit components containing:
   1.203 +// ((Aa2 * (16 - x1) + Aa3 * x1) * alpha, ...,
   1.204 +// (Ra0 * (16 - x0) + Ra1 * x0) * alpha) (when has_alpha is true)
   1.205 +// otherwise
   1.206 +// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
   1.207 +// In both cases, the results are renormalized (divided by 16) to match the
   1.208 +// expected formats when storing back the results into memory.
   1.209 +template<bool has_alpha>
   1.210 +inline __m128i ProcessPixelPairZeroSubY(uint32_t pixel0,
   1.211 +                                        uint32_t pixel1,
   1.212 +                                        uint32_t pixel2,
   1.213 +                                        uint32_t pixel3,
   1.214 +                                        const __m128i& scale_x,
   1.215 +                                        const __m128i& alpha) {
   1.216 +    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
   1.217 +                                         scale_x);
   1.218 +    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
   1.219 +}
   1.220 +
   1.221 +// Same as ProcessPixelPairZeroSubY, expect processing one output pixel at a
   1.222 +// time instead of two. As in the above function, only two pixels are needed
   1.223 +// to generate a single pixel since sub_y == 0.
   1.224 +// @return same as ProcessPixelPairZeroSubY, except that only the bottom 4
   1.225 +// 16 bit components are set.
   1.226 +template<bool has_alpha>
   1.227 +inline __m128i ProcessOnePixelZeroSubY(uint32_t pixel0,
   1.228 +                                       uint32_t pixel1,
   1.229 +                                       __m128i scale_x,
   1.230 +                                       __m128i alpha) {
   1.231 +    __m128i a0 = _mm_cvtsi32_si128(pixel0);
   1.232 +    __m128i a1 = _mm_cvtsi32_si128(pixel1);
   1.233 +
   1.234 +    // Interleave
   1.235 +    a0 = _mm_unpacklo_epi8(a0, a1);
   1.236 +
   1.237 +    // (a0 * (16-x) + a1 * x)
   1.238 +    __m128i sum = _mm_maddubs_epi16(a0, scale_x);
   1.239 +
   1.240 +    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
   1.241 +}
   1.242 +
   1.243 +// Methods when sub_y != 0
   1.244 +
   1.245 +
   1.246 +// Same as ProcessPixelPairHelper, except that the values are scaled by y.
   1.247 +// @param y vector of 16 bit components containing 'y' values. There are two
   1.248 +//        cases in practice, where y will contain the sub_y constant, or will
   1.249 +//        contain the 16 - sub_y constant.
   1.250 +// @return vector of 16 bit components containing:
   1.251 +// (y * (Aa2 * (16 - x1) + Aa3 * x1), ... , y * (Ra0 * (16 - x0) + Ra1 * x0))
   1.252 +inline __m128i ProcessPixelPair(uint32_t pixel0,
   1.253 +                                uint32_t pixel1,
   1.254 +                                uint32_t pixel2,
   1.255 +                                uint32_t pixel3,
   1.256 +                                const __m128i& scale_x,
   1.257 +                                const __m128i& y) {
   1.258 +    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
   1.259 +                                         scale_x);
   1.260 +
   1.261 +    // first row times 16-y or y depending on whether 'y' represents one or
   1.262 +    // the other.
   1.263 +    // Values will be up to 255 * 16 * 16 = 65280.
   1.264 +    // (y * (Aa2 * (16 - x1) + Aa3 * x1), ... ,
   1.265 +    //  y * (Ra0 * (16 - x0) + Ra1 * x0))
   1.266 +    sum = _mm_mullo_epi16(sum, y);
   1.267 +
   1.268 +    return sum;
   1.269 +}
   1.270 +
// Process two pixel pairs out of eight input pixels.
// In other methods, the distinct pixels are passed one by one, but in this
// case, the rows, and index offsets to the pixels into the row are passed
// to generate the 8 pixels.
// @param row0..1 top and bottom row where to find input pixels.
// @param x0..1 offsets into the row for all eight input pixels; only the
//              first two entries of each array are read here (callers pass
//              x0 + 2 / x1 + 2 for the second pair of pairs).
// @param all_y vector of 16 bit components containing the constant sub_y
// @param neg_y vector of 16 bit components containing the constant 16 - sub_y
// @param alpha vector of 16 bit components containing the alpha value to scale
//        the results by, when has_alpha is true.
// @return
// (alpha * ((16-y) * (Aa2  * (16-x1) + Aa3  * x1) +
//             y    * (Aa2' * (16-x1) + Aa3' * x1)),
// ...
//  alpha * ((16-y) * (Ra0  * (16-x0) + Ra1 * x0) +
//             y    * (Ra0' * (16-x0) + Ra1' * x0))
// With the factor alpha removed when has_alpha is false.
// The values are scaled back to 16 bit components, but with only the bottom
// 8 bits being set.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairs(const uint32_t* row0,
                                    const uint32_t* row1,
                                    const int* x0,
                                    const int* x1,
                                    const __m128i& scale_x,
                                    const __m128i& all_y,
                                    const __m128i& neg_y,
                                    const __m128i& alpha) {
    // Top row, weighted by 16 - sub_y.
    __m128i sum0 = ProcessPixelPair(
        row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
        scale_x, neg_y);
    // Bottom row, weighted by sub_y.
    __m128i sum1 = ProcessPixelPair(
        row1[x0[0]], row1[x1[0]], row1[x0[1]], row1[x1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y * (Aa2' * (16-x1) + Aa3' * x1),
    // ...
    //  (16-y) * (Ra0 * (16 - x0) + Ra1 * x0)) +
    //  y * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    // Two multiplication rounds -> renormalize by dividing by 256.
    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}
   1.317 +
// Similar to ProcessTwoPixelPairs except the pixel indexes: in the DXDY case
// each of the two output pixels has its own top and bottom row, and its own
// sub-pixel y coefficient (y0 for the first output pixel, y1 for the second).
// @param row00/row01 top and bottom rows for the first output pixel.
// @param row10/row11 top and bottom rows for the second output pixel.
// @param xy0/xy1 left/right column offsets as produced by
//                PrepareConstantsTwoPixelPairsDXDY (only entries 0 and 1
//                are read here).
// @param all_y vector of 16 bit components containing (4x(y1), 4x(y0)).
// @param neg_y vector of 16 bit components containing (4x(16-y1), 4x(16-y0)).
// @param alpha scales the result when has_alpha is true.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairsDXDY(const uint32_t* row00,
                                        const uint32_t* row01,
                                        const uint32_t* row10,
                                        const uint32_t* row11,
                                        const int* xy0,
                                        const int* xy1,
                                        const __m128i& scale_x,
                                        const __m128i& all_y,
                                        const __m128i& neg_y,
                                        const __m128i& alpha) {
    // first row
    __m128i sum0 = ProcessPixelPair(
        row00[xy0[0]], row00[xy1[0]], row10[xy0[1]], row10[xy1[1]],
        scale_x, neg_y);
    // second row
    __m128i sum1 = ProcessPixelPair(
        row01[xy0[0]], row01[xy1[0]], row11[xy0[1]], row11[xy1[1]],
        scale_x, all_y);

    // 2 samples fully summed, each output pixel weighted by its own y:
    // ((16-y1) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y1 * (Aa2' * (16-x1) + Aa3' * x1),
    // ...
    //  (16-y0) * (Ra0 * (16 - x0) + Ra1 * x0)) +
    //  y0 * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    // Two multiplication rounds -> renormalize by dividing by 256.
    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}
   1.350 +
   1.351 +
// Same as ProcessPixelPair, except that performing the math one output pixel
// at a time. This means that only the bottom four 16 bit components are set.
// @param scale_x vector of 8 bit components containing (8x(x, 16-x)).
// @param y vector of 16 bit components holding either sub_y or 16 - sub_y.
inline __m128i ProcessOnePixel(uint32_t pixel0, uint32_t pixel1,
                               const __m128i& scale_x, const __m128i& y) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    a0 = _mm_maddubs_epi16(a0, scale_x);

    // scale row by y
    return _mm_mullo_epi16(a0, y);
}
   1.369 +
// Notes about the various tricks that are used in this implementation:
// - specialization for sub_y == 0.
// Statistically, 1/16th of the samples will have sub_y == 0. When this
// happens, the math goes from:
// (16 - x)*(16 - y)*a00 + x*(16 - y)*a01 + (16 - x)*y*a10 + x*y*a11
// to:
// (16 - x)*a00 + 16*x*a01
// much simpler. The simplification makes for an easy boost in performance.
// - calculating 4 output pixels at a time.
//  This allows loading the coefficients x0 and x1 and shuffling them to the
// optimum location only once per loop, instead of twice per loop.
// This also allows us to store the four pixels with a single store.
// - Use of 2 special SSSE3 instructions (comparatively to the SSE2 instruction
// version):
// _mm_shuffle_epi8 : this allows us to spread the coefficients x[0-3] loaded
// in 32 bit values to 8 bit values repeated four times.
// _mm_maddubs_epi16 : this allows us to perform multiplications and additions
// in one swoop of 8bit values storing the results in 16 bit values. This
// instruction is actually crucial for the speed of the implementation since
// as one can see in the SSE2 implementation, all inputs have to be used as
// 16 bits because the results are 16 bits. This basically allows us to process
// twice as many pixel components per iteration.
//
// As a result, this method behaves faster than the traditional SSE2. The actual
// boost varies greatly on the underlying architecture.
//
// Bilinearly filter 'count' source pixels along X (single source row pair)
// into 'colors'. 'xy' holds one packed Y value followed by 'count' packed
// X values (x0:14 | sub_x:4 | x1:14). When has_alpha is true, every output
// component is additionally scaled by s.fAlphaScale.
template<bool has_alpha>
void S32_generic_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(s.fFilterLevel != SkPaint::kNone_FilterLevel);
    SkASSERT(s.fBitmap->config() == SkBitmap::kARGB_8888_Config);
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
            static_cast<const uint8_t*>(s.fBitmap->getPixels());
    const size_t rb = s.fBitmap->rowBytes();
    // First 32 bit value packs y0:14 | sub_y:4 | y1:14.
    const uint32_t XY = *xy++;
    const unsigned y0 = XY >> 14;
    const uint32_t* row0 =
            reinterpret_cast<const uint32_t*>(src_addr + (y0 >> 4) * rb);
    const uint32_t* row1 =
            reinterpret_cast<const uint32_t*>(src_addr + (XY & 0x3FFF) * rb);
    const unsigned sub_y = y0 & 0xF;

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8,  8,  8,  8,
                                                  4,  4,  4,  4,
                                                  0,  0,  0,  0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);
    // (0, 0, 0, 0, 0, 0, 0, 0)
    const __m128i zero = _mm_setzero_si128();

    __m128i alpha = _mm_setzero_si128();
    if (has_alpha)
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);

    if (sub_y == 0) {
        // Fast path: the second row contributes nothing (see notes above).
        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs.
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
                scale_x, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[2]], row0[x1[2]], row0[x0[3]], row0[x1[3]],
                scale_x, alpha);

            // Pack the 16 bit components of both sums into 8 bit values,
            // yielding 4 packed output pixels.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store the 4 pixels with a single write.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // handle remainder
        while (count-- > 0) {
            uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            unsigned x0 = xx >> 18;
            unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // (16x(16-x))
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            // (8x(x, 16-x))
            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            __m128i sum = ProcessOnePixelZeroSubY<has_alpha>(
                row0[x0], row0[x1],
                scale_x, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum = _mm_packus_epi16(sum, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum);
        }
    } else {  // more general case, y != 0
        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(sub_y);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0, x1,
                scale_x, all_y, neg_y, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0 + 2, x1 + 2,
                scale_x, all_y, neg_y, alpha);

            // Do the final packing of the two results

            // Pack the 16 bit components of both sums into 8 bit values,
            // yielding 4 packed output pixels.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store the 4 pixels with a single write.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // Left over.
        while (count-- > 0) {
            const uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            const unsigned x0 = xx >> 18;
            const unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // 16x (16-x)
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            // (8x (x, 16-x))
            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            // first row.
            __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
            // second row.
            __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

            // Add both rows for full sample
            sum0 = _mm_add_epi16(sum0, sum1);

            // Two multiplication rounds -> divide by 256.
            sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum0 = _mm_packus_epi16(sum0, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum0);
        }
    }
}
   1.581 +
/*
 * Broadly similar to S32_generic_D32_filter_DX_SSSE3, but there is no
 * specialization for the sub_y == 0 case here, because sub_y changes on
 * every iteration of the loop.
 */
   1.586 +template<bool has_alpha>
   1.587 +void S32_generic_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
   1.588 +                                       const uint32_t* xy,
   1.589 +                                       int count, uint32_t* colors) {
   1.590 +    SkASSERT(count > 0 && colors != NULL);
   1.591 +    SkASSERT(s.fFilterLevel != SkPaint::kNone_FilterLevel);
   1.592 +    SkASSERT(s.fBitmap->config() == SkBitmap::kARGB_8888_Config);
   1.593 +    if (has_alpha) {
   1.594 +        SkASSERT(s.fAlphaScale < 256);
   1.595 +    } else {
   1.596 +        SkASSERT(s.fAlphaScale == 256);
   1.597 +    }
   1.598 +
   1.599 +    const uint8_t* src_addr =
   1.600 +                        static_cast<const uint8_t*>(s.fBitmap->getPixels());
   1.601 +    const size_t rb = s.fBitmap->rowBytes();
   1.602 +
   1.603 +    // vector constants
   1.604 +    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
   1.605 +                                                  8,  8,  8,  8,
   1.606 +                                                  4,  4,  4,  4,
   1.607 +                                                  0,  0,  0,  0);
   1.608 +    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
   1.609 +    const __m128i mask_000F = _mm_set1_epi32(0x000F);
   1.610 +    const __m128i sixteen_8bit = _mm_set1_epi8(16);
   1.611 +
   1.612 +    __m128i alpha;
   1.613 +    if (has_alpha) {
   1.614 +        // 8x(alpha)
   1.615 +        alpha = _mm_set1_epi16(s.fAlphaScale);
   1.616 +    }
   1.617 +
   1.618 +    // Unroll 2x, interleave bytes, use pmaddubsw (all_x is small)
   1.619 +    while (count >= 2) {
   1.620 +        int xy0[4];
   1.621 +        int xy1[4];
   1.622 +        __m128i all_xy, sixteen_minus_xy;
   1.623 +        PrepareConstantsTwoPixelPairsDXDY(xy, mask_3FFF, mask_000F,
   1.624 +                                          sixteen_8bit, mask_dist_select,
   1.625 +                                         &all_xy, &sixteen_minus_xy, xy0, xy1);
   1.626 +
   1.627 +        // (4x(x1, 16-x1), 4x(x0, 16-x0))
   1.628 +        __m128i scale_x = _mm_unpacklo_epi8(sixteen_minus_xy, all_xy);
   1.629 +        // (4x(0, y1), 4x(0, y0))
   1.630 +        __m128i all_y = _mm_unpackhi_epi8(all_xy, _mm_setzero_si128());
   1.631 +        __m128i neg_y = _mm_sub_epi16(_mm_set1_epi16(16), all_y);
   1.632 +
   1.633 +        const uint32_t* row00 =
   1.634 +                    reinterpret_cast<const uint32_t*>(src_addr + xy0[2] * rb);
   1.635 +        const uint32_t* row01 =
   1.636 +                    reinterpret_cast<const uint32_t*>(src_addr + xy1[2] * rb);
   1.637 +        const uint32_t* row10 =
   1.638 +                    reinterpret_cast<const uint32_t*>(src_addr + xy0[3] * rb);
   1.639 +        const uint32_t* row11 =
   1.640 +                    reinterpret_cast<const uint32_t*>(src_addr + xy1[3] * rb);
   1.641 +
   1.642 +        __m128i sum0 = ProcessTwoPixelPairsDXDY<has_alpha>(
   1.643 +                                        row00, row01, row10, row11, xy0, xy1,
   1.644 +                                        scale_x, all_y, neg_y, alpha);
   1.645 +
   1.646 +        // Pack lower 4 16 bit values of sum into lower 4 bytes.
   1.647 +        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());
   1.648 +
   1.649 +        // Extract low int and store.
   1.650 +        _mm_storel_epi64(reinterpret_cast<__m128i *>(colors), sum0);
   1.651 +
   1.652 +        xy += 4;
   1.653 +        colors += 2;
   1.654 +        count -= 2;
   1.655 +    }
   1.656 +
   1.657 +    // Handle the remainder
   1.658 +    while (count-- > 0) {
   1.659 +        uint32_t data = *xy++;
   1.660 +        unsigned y0 = data >> 14;
   1.661 +        unsigned y1 = data & 0x3FFF;
   1.662 +        unsigned subY = y0 & 0xF;
   1.663 +        y0 >>= 4;
   1.664 +
   1.665 +        data = *xy++;
   1.666 +        unsigned x0 = data >> 14;
   1.667 +        unsigned x1 = data & 0x3FFF;
   1.668 +        unsigned subX = x0 & 0xF;
   1.669 +        x0 >>= 4;
   1.670 +
   1.671 +        const uint32_t* row0 =
   1.672 +                        reinterpret_cast<const uint32_t*>(src_addr + y0 * rb);
   1.673 +        const uint32_t* row1 =
   1.674 +                        reinterpret_cast<const uint32_t*>(src_addr + y1 * rb);
   1.675 +
   1.676 +        // 16x(x)
   1.677 +        const __m128i all_x = _mm_set1_epi8(subX);
   1.678 +
   1.679 +        // 16x (16-x)
   1.680 +        __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);
   1.681 +
   1.682 +        // (8x (x, 16-x))
   1.683 +        scale_x = _mm_unpacklo_epi8(scale_x, all_x);
   1.684 +
   1.685 +        // 8x(16)
   1.686 +        const __m128i sixteen_16bit = _mm_set1_epi16(16);
   1.687 +
   1.688 +        // 8x (y)
   1.689 +        const __m128i all_y = _mm_set1_epi16(subY);
   1.690 +
   1.691 +        // 8x (16-y)
   1.692 +        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);
   1.693 +
   1.694 +        // first row.
   1.695 +        __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
   1.696 +        // second row.
   1.697 +        __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);
   1.698 +
   1.699 +        // Add both rows for full sample
   1.700 +        sum0 = _mm_add_epi16(sum0, sum1);
   1.701 +
   1.702 +        sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
   1.703 +
   1.704 +        // Pack lower 4 16 bit values of sum into lower 4 bytes.
   1.705 +        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());
   1.706 +
   1.707 +        // Extract low int and store.
   1.708 +        *colors++ = _mm_cvtsi128_si32(sum0);
   1.709 +    }
   1.710 +}
}  // namespace
   1.712 +
   1.713 +void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
   1.714 +                                    const uint32_t* xy,
   1.715 +                                    int count, uint32_t* colors) {
   1.716 +    S32_generic_D32_filter_DX_SSSE3<false>(s, xy, count, colors);
   1.717 +}
   1.718 +
   1.719 +void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
   1.720 +                                   const uint32_t* xy,
   1.721 +                                   int count, uint32_t* colors) {
   1.722 +    S32_generic_D32_filter_DX_SSSE3<true>(s, xy, count, colors);
   1.723 +}
   1.724 +
   1.725 +void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
   1.726 +                                    const uint32_t* xy,
   1.727 +                                    int count, uint32_t* colors) {
   1.728 +    S32_generic_D32_filter_DXDY_SSSE3<false>(s, xy, count, colors);
   1.729 +}
   1.730 +
   1.731 +void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
   1.732 +                                   const uint32_t* xy,
   1.733 +                                   int count, uint32_t* colors) {
   1.734 +    S32_generic_D32_filter_DXDY_SSSE3<true>(s, xy, count, colors);
   1.735 +}
   1.736 +
   1.737 +#else // !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) || SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
   1.738 +
   1.739 +void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
   1.740 +                                    const uint32_t* xy,
   1.741 +                                    int count, uint32_t* colors) {
   1.742 +    sk_throw();
   1.743 +}
   1.744 +
   1.745 +void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
   1.746 +                                   const uint32_t* xy,
   1.747 +                                   int count, uint32_t* colors) {
   1.748 +    sk_throw();
   1.749 +}
   1.750 +
   1.751 +void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
   1.752 +                                    const uint32_t* xy,
   1.753 +                                    int count, uint32_t* colors) {
   1.754 +    sk_throw();
   1.755 +}
   1.756 +
   1.757 +void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
   1.758 +                                   const uint32_t* xy,
   1.759 +                                   int count, uint32_t* colors) {
   1.760 +    sk_throw();
   1.761 +}
   1.762 +
   1.763 +#endif

mercurial