/*
 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vp8/encoder/denoising.h"
#include "vp8/common/reconinter.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8_rtcd.h"

#include <emmintrin.h>
#include "vpx_ports/emmintrin_compat.h"

union sum_union {
    __m128i v;
    signed char e[16];
};

int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
                             YV12_BUFFER_CONFIG *running_avg,
                             MACROBLOCK *signal, unsigned int motion_magnitude,
                             int y_offset, int uv_offset)
{
    unsigned char *sig = signal->thismb;
    int sig_stride = 16;
    unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
    int mc_avg_y_stride = mc_running_avg->y_stride;
    unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
    int avg_y_stride = running_avg->y_stride;
    int r;
    __m128i acc_diff = _mm_setzero_si128();
    const __m128i k_0 = _mm_setzero_si128();
    const __m128i k_4 = _mm_set1_epi8(4);
    const __m128i k_8 = _mm_set1_epi8(8);
    const __m128i k_16 = _mm_set1_epi8(16);
    /* Modify each level's adjustment according to motion_magnitude. */
    const __m128i l3 = _mm_set1_epi8(
        (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 : 6);
    /* Difference between level 3 and level 2 is 2. */
    const __m128i l32 = _mm_set1_epi8(2);
    /* Difference between level 2 and level 1 is 1. */
    const __m128i l21 = _mm_set1_epi8(1);

    for (r = 0; r < 16; ++r)
    {
        /* Calculate differences */
        const __m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0]));
        const __m128i v_mc_running_avg_y = _mm_loadu_si128(
            (__m128i *)(&mc_running_avg_y[0]));
        __m128i v_running_avg_y;
        const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
        const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
        /* Obtain the sign. FF if diff is negative. */
        const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
        /* Clamp absolute difference to 16 to be used to get mask. Doing this
         * allows us to use _mm_cmpgt_epi8, which operates on signed byte.
         */
        const __m128i clamped_absdiff = _mm_min_epu8(
            _mm_or_si128(pdiff, ndiff), k_16);
        /* Get masks for l2 l1 and l0 adjustments */
        const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff);
        const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff);
        const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff);
        /* Get adjustments for l2, l1, and l0 */
        __m128i adj2 = _mm_and_si128(mask2, l32);
        const __m128i adj1 = _mm_and_si128(mask1, l21);
        const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
        __m128i adj, padj, nadj;

        /* Combine the adjustments and get absolute adjustments. */
        adj2 = _mm_add_epi8(adj2, adj1);
        adj = _mm_sub_epi8(l3, adj2);
        adj = _mm_andnot_si128(mask0, adj);
        adj = _mm_or_si128(adj, adj0);

        /* Restore the sign and get positive and negative adjustments. */
        padj = _mm_andnot_si128(diff_sign, adj);
        nadj = _mm_and_si128(diff_sign, adj);

        /* Calculate filtered value. */
        v_running_avg_y = _mm_adds_epu8(v_sig, padj);
        v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
        _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);

        /* Adjustments <=7, and each element in acc_diff can fit in signed
         * char.
         */
        acc_diff = _mm_adds_epi8(acc_diff, padj);
        acc_diff = _mm_subs_epi8(acc_diff, nadj);

        /* Update pointers for next iteration. */
        sig += sig_stride;
        mc_running_avg_y += mc_avg_y_stride;
        running_avg_y += avg_y_stride;
    }

    {
        /* Compute the sum of all pixel differences of this MB. */
        union sum_union s;
        int sum_diff = 0;
        s.v = acc_diff;
        sum_diff = s.e[0] + s.e[1] + s.e[2] + s.e[3] + s.e[4] + s.e[5]
                 + s.e[6] + s.e[7] + s.e[8] + s.e[9] + s.e[10] + s.e[11]
                 + s.e[12] + s.e[13] + s.e[14] + s.e[15];

        if (abs(sum_diff) > SUM_DIFF_THRESHOLD)
        {
            return COPY_BLOCK;
        }
    }

    vp8_copy_mem16x16(running_avg->y_buffer + y_offset, avg_y_stride,
                      signal->thismb, sig_stride);
    return FILTER_BLOCK;
}
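/* For reference, a scalar sketch of the per-pixel update that the SSE2 loop
 * above performs. This helper is illustrative only and not part of the
 * original file (the name denoise_pixel_scalar is hypothetical); it assumes
 * MOTION_MAGNITUDE_THRESHOLD from vp8/encoder/denoising.h, included above.
 * Kept out of the build with #if 0.
 */
#if 0
static unsigned char denoise_pixel_scalar(unsigned char sig,
                                          unsigned char mc_avg,
                                          unsigned int motion_magnitude)
{
    const int l3 = (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 : 6;
    const int diff = (int)mc_avg - (int)sig;
    int absdiff = diff < 0 ? -diff : diff;
    int adj;

    /* Clamp the absolute difference to 16, as the SIMD code does. */
    if (absdiff > 16) absdiff = 16;

    if (absdiff < 4)        adj = absdiff;  /* level 0: take the full difference */
    else if (absdiff < 8)   adj = l3 - 3;   /* level 1 */
    else if (absdiff < 16)  adj = l3 - 2;   /* level 2 */
    else                    adj = l3;       /* level 3 */

    /* Move the signal toward the motion-compensated running average. */
    return (unsigned char)(diff > 0 ? sig + adj : sig - adj);
}
#endif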