/***********************************************************************
Copyright (c) 2006-2011, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "main_FIX.h"
#include "tuning_parameters.h"

/* Head room for correlations */
#define LTP_CORRS_HEAD_ROOM                             2

void silk_fit_LTP(
    opus_int32 LTP_coefs_Q16[ LTP_ORDER ],
    opus_int16 LTP_coefs_Q14[ LTP_ORDER ]
);

void silk_find_LTP_FIX(
    opus_int16          b_Q14[ MAX_NB_SUBFR * LTP_ORDER ],              /* O    LTP coefs                                */
    opus_int32          WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ],   /* O    Weight for LTP quantization              */
    opus_int            *LTPredCodGain_Q7,                              /* O    LTP coding gain                          */
    const opus_int16    r_lpc[],                                        /* I    residual signal after LPC analysis + state for first 10 ms */
    const opus_int      lag[ MAX_NB_SUBFR ],                            /* I    LTP lags                                 */
    const opus_int32    Wght_Q15[ MAX_NB_SUBFR ],                       /* I    weights                                  */
    const opus_int      subfr_length,                                   /* I    subframe length                          */
    const opus_int      nb_subfr,                                       /* I    number of subframes                      */
    const opus_int      mem_offset,                                     /* I    number of samples in LTP memory          */
    opus_int            corr_rshifts[ MAX_NB_SUBFR ]                    /* O    right shifts applied to correlations     */
)
{
    opus_int   i, k, lshift;
    const opus_int16 *r_ptr, *lag_ptr;
    opus_int16 *b_Q14_ptr;

    opus_int32 regu;
    opus_int32 *WLTP_ptr;
    opus_int32 b_Q16[ LTP_ORDER ], delta_b_Q14[ LTP_ORDER ], d_Q14[ MAX_NB_SUBFR ], nrg[ MAX_NB_SUBFR ], g_Q26;
    opus_int32 w[ MAX_NB_SUBFR ], WLTP_max, max_abs_d_Q14, max_w_bits;

    opus_int32 temp32, denom32;
    opus_int   extra_shifts;
    opus_int   rr_shifts, maxRshifts, maxRshifts_wxtra, LZs;
    opus_int32 LPC_res_nrg, LPC_LTP_res_nrg, div_Q16;
    opus_int32 Rr[ LTP_ORDER ], rr[ MAX_NB_SUBFR ];
    opus_int32 wd, m_Q12;

    b_Q14_ptr = b_Q14;
    WLTP_ptr  = WLTP;
    r_ptr     = &r_lpc[ mem_offset ];
    for( k = 0; k < nb_subfr; k++ ) {
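        /* Point lag_ptr at the pitch-lagged input; the extra LTP_ORDER / 2 samples center the LTP filter taps on the lag */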
|
        lag_ptr = r_ptr - ( lag[ k ] + LTP_ORDER / 2 );

        silk_sum_sqr_shift( &rr[ k ], &rr_shifts, r_ptr, subfr_length );    /* rr[ k ] in Q( -rr_shifts ) */

        /* Assure headroom */
        LZs = silk_CLZ32( rr[ k ] );
        if( LZs < LTP_CORRS_HEAD_ROOM ) {
            rr[ k ] = silk_RSHIFT_ROUND( rr[ k ], LTP_CORRS_HEAD_ROOM - LZs );
            rr_shifts += ( LTP_CORRS_HEAD_ROOM - LZs );
        }
        corr_rshifts[ k ] = rr_shifts;
        silk_corrMatrix_FIX( lag_ptr, subfr_length, LTP_ORDER, LTP_CORRS_HEAD_ROOM, WLTP_ptr, &corr_rshifts[ k ] );    /* WLTP_fix_ptr in Q( -corr_rshifts[ k ] ) */

        /* The correlation vector always has lower max abs value than rr and/or RR so head room is assured */
        silk_corrVector_FIX( lag_ptr, r_ptr, subfr_length, LTP_ORDER, Rr, corr_rshifts[ k ] );    /* Rr_fix_ptr in Q( -corr_rshifts[ k ] ) */
        if( corr_rshifts[ k ] > rr_shifts ) {
            rr[ k ] = silk_RSHIFT( rr[ k ], corr_rshifts[ k ] - rr_shifts );    /* rr[ k ] in Q( -corr_rshifts[ k ] ) */
        }
        silk_assert( rr[ k ] >= 0 );

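        /* Regularize before solving: accumulate a small fraction ( LTP_DAMPING / 3 ) of the residual energy and of the first and
           last diagonal entries of WLTP, and add it to the diagonal so the normal equations below stay well conditioned */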
|
        regu = 1;
        regu = silk_SMLAWB( regu, rr[ k ], SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
        regu = silk_SMLAWB( regu, matrix_ptr( WLTP_ptr, 0, 0, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
        regu = silk_SMLAWB( regu, matrix_ptr( WLTP_ptr, LTP_ORDER-1, LTP_ORDER-1, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
        silk_regularize_correlations_FIX( WLTP_ptr, &rr[ k ], regu, LTP_ORDER );

        silk_solve_LDL_FIX( WLTP_ptr, LTP_ORDER, Rr, b_Q16 );    /* WLTP_fix_ptr and Rr_fix_ptr both in Q( -corr_rshifts[ k ] ) */

        /* Limit and store in Q14 */
        silk_fit_LTP( b_Q16, b_Q14_ptr );

        /* Calculate residual energy */
        nrg[ k ] = silk_residual_energy16_covar_FIX( b_Q14_ptr, WLTP_ptr, Rr, rr[ k ], LTP_ORDER, 14 );    /* nrg_fix in Q( -corr_rshifts[ k ] ) */

        /* temp = Wght[ k ] / ( nrg[ k ] * Wght[ k ] + 0.01f * subfr_length ); */
        extra_shifts = silk_min_int( corr_rshifts[ k ], LTP_CORRS_HEAD_ROOM );
        denom32 = silk_LSHIFT_SAT32( silk_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 + extra_shifts ) +                       /* Q( -corr_rshifts[ k ] + extra_shifts ) */
            silk_RSHIFT( silk_SMULWB( (opus_int32)subfr_length, 655 ), corr_rshifts[ k ] - extra_shifts );              /* Q( -corr_rshifts[ k ] + extra_shifts ) */
        denom32 = silk_max( denom32, 1 );
        silk_assert( ((opus_int64)Wght_Q15[ k ] << 16 ) < silk_int32_MAX );                                             /* Wght always < 0.5 in Q0 */
        temp32 = silk_DIV32( silk_LSHIFT( (opus_int32)Wght_Q15[ k ], 16 ), denom32 );                                   /* Q( 15 + 16 + corr_rshifts[ k ] - extra_shifts ) */
        temp32 = silk_RSHIFT( temp32, 31 + corr_rshifts[ k ] - extra_shifts - 26 );                                     /* Q26 */

        /* Limit temp such that the below scaling never wraps around */
        WLTP_max = 0;
        for( i = 0; i < LTP_ORDER * LTP_ORDER; i++ ) {
            WLTP_max = silk_max( WLTP_ptr[ i ], WLTP_max );
        }
        lshift = silk_CLZ32( WLTP_max ) - 1 - 3;    /* keep 3 bits free for vq_nearest_neighbor_fix */
        silk_assert( 26 - 18 + lshift >= 0 );
        if( 26 - 18 + lshift < 31 ) {
            temp32 = silk_min_32( temp32, silk_LSHIFT( (opus_int32)1, 26 - 18 + lshift ) );
        }

        silk_scale_vector32_Q26_lshift_18( WLTP_ptr, temp32, LTP_ORDER * LTP_ORDER );    /* WLTP_ptr in Q( 18 - corr_rshifts[ k ] ) */

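        /* The center diagonal entry of the scaled weight matrix is kept as a per-subframe weight for the smoothing stage below */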
|
        w[ k ] = matrix_ptr( WLTP_ptr, LTP_ORDER/2, LTP_ORDER/2, LTP_ORDER );    /* w in Q( 18 - corr_rshifts[ k ] ) */
        silk_assert( w[ k ] >= 0 );

        r_ptr     += subfr_length;
        b_Q14_ptr += LTP_ORDER;
        WLTP_ptr  += LTP_ORDER * LTP_ORDER;
    }

    maxRshifts = 0;
    for( k = 0; k < nb_subfr; k++ ) {
        maxRshifts = silk_max_int( corr_rshifts[ k ], maxRshifts );
    }

    /* Compute LTP coding gain */
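    /* The gain is the ratio of weighted residual energies before and after LTP; silk_lin2log() returns log2 in Q7, the
       subtraction of ( 16 << 7 ) removes the Q16 scaling of div_Q16, and the factor 3 converts log2 to roughly dB
       ( 10*log10(x) ~= 3*log2(x) ), so the result is approximately the prediction gain in dB, Q7 */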
|
    if( LTPredCodGain_Q7 != NULL ) {
        LPC_LTP_res_nrg = 0;
        LPC_res_nrg     = 0;
        silk_assert( LTP_CORRS_HEAD_ROOM >= 2 );    /* Check that no overflow will happen when adding */
        for( k = 0; k < nb_subfr; k++ ) {
            LPC_res_nrg     = silk_ADD32( LPC_res_nrg,     silk_RSHIFT( silk_ADD32( silk_SMULWB(  rr[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) );    /* Q( -maxRshifts ) */
            LPC_LTP_res_nrg = silk_ADD32( LPC_LTP_res_nrg, silk_RSHIFT( silk_ADD32( silk_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) );    /* Q( -maxRshifts ) */
        }
        LPC_LTP_res_nrg = silk_max( LPC_LTP_res_nrg, 1 );    /* avoid division by zero */

        div_Q16 = silk_DIV32_varQ( LPC_res_nrg, LPC_LTP_res_nrg, 16 );
        *LTPredCodGain_Q7 = ( opus_int )silk_SMULBB( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) );

        silk_assert( *LTPredCodGain_Q7 == ( opus_int )silk_SAT16( silk_MUL( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) ) ) );
    }

    /* smoothing */
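    /* Each subframe's coefficient sum d is pulled towards the weighted average m over all subframes; subframes with a
       small weight w[ k ] are pulled harder, with the overall strength set by LTP_SMOOTHING */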
|
    /* d = sum( B, 1 ); */
    b_Q14_ptr = b_Q14;
    for( k = 0; k < nb_subfr; k++ ) {
        d_Q14[ k ] = 0;
        for( i = 0; i < LTP_ORDER; i++ ) {
            d_Q14[ k ] += b_Q14_ptr[ i ];
        }
        b_Q14_ptr += LTP_ORDER;
    }

    /* m = ( w * d' ) / ( sum( w ) + 1e-3 ); */

    /* Find maximum absolute value of d_Q14 and the bits used by w in Q0 */
    max_abs_d_Q14 = 0;
    max_w_bits    = 0;
    for( k = 0; k < nb_subfr; k++ ) {
        max_abs_d_Q14 = silk_max_32( max_abs_d_Q14, silk_abs( d_Q14[ k ] ) );
        /* w[ k ] is in Q( 18 - corr_rshifts[ k ] ) */
        /* Find bits needed in Q( 18 - maxRshifts ) */
        max_w_bits = silk_max_32( max_w_bits, 32 - silk_CLZ32( w[ k ] ) + corr_rshifts[ k ] - maxRshifts );
    }

    /* max_abs_d_Q14 = (5 << 15); worst case, i.e. LTP_ORDER * -silk_int16_MIN */
    silk_assert( max_abs_d_Q14 <= ( 5 << 15 ) );

    /* How many bits are needed for w*d' in Q( 18 - maxRshifts ) in the worst case, of all d_Q14's being equal to max_abs_d_Q14 */
    extra_shifts = max_w_bits + 32 - silk_CLZ32( max_abs_d_Q14 ) - 14;

    /* Subtract what we have available: bits in the output variable plus maxRshifts */
    extra_shifts -= ( 32 - 1 - 2 + maxRshifts );    /* Keep sign bit free as well as 2 bits for accumulation */
    extra_shifts = silk_max_int( extra_shifts, 0 );

    maxRshifts_wxtra = maxRshifts + extra_shifts;

    temp32 = silk_RSHIFT( 262, maxRshifts + extra_shifts ) + 1;    /* 1e-3f in Q( 18 - (maxRshifts + extra_shifts) ) */
    wd = 0;
    for( k = 0; k < nb_subfr; k++ ) {
        /* w has at least 2 bits of headroom so no overflow should happen */
        temp32 = silk_ADD32( temp32, silk_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ) );                                           /* Q( 18 - maxRshifts_wxtra ) */
        wd     = silk_ADD32( wd, silk_LSHIFT( silk_SMULWW( silk_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ), d_Q14[ k ] ), 2 ) );  /* Q( 18 - maxRshifts_wxtra ) */
    }
    m_Q12 = silk_DIV32_varQ( wd, temp32, 12 );

    b_Q14_ptr = b_Q14;
    for( k = 0; k < nb_subfr; k++ ) {
        /* w_fix[ k ] from Q( 18 - corr_rshifts[ k ] ) to Q( 16 ) */
        if( 2 - corr_rshifts[ k ] > 0 ) {
            temp32 = silk_RSHIFT( w[ k ], 2 - corr_rshifts[ k ] );
        } else {
            temp32 = silk_LSHIFT_SAT32( w[ k ], corr_rshifts[ k ] - 2 );
        }

        g_Q26 = silk_MUL(
            silk_DIV32(
                SILK_FIX_CONST( LTP_SMOOTHING, 26 ),
                silk_RSHIFT( SILK_FIX_CONST( LTP_SMOOTHING, 26 ), 10 ) + temp32 ),                          /* Q10 */
            silk_LSHIFT_SAT32( silk_SUB_SAT32( (opus_int32)m_Q12, silk_RSHIFT( d_Q14[ k ], 2 ) ), 4 ) );    /* Q16 */

        temp32 = 0;
        for( i = 0; i < LTP_ORDER; i++ ) {
            delta_b_Q14[ i ] = silk_max_16( b_Q14_ptr[ i ], 1638 );    /* 1638_Q14 = 0.1_Q0 */
            temp32 += delta_b_Q14[ i ];                                /* Q14 */
        }
        temp32 = silk_DIV32( g_Q26, temp32 );    /* Q14 -> Q12 */
        for( i = 0; i < LTP_ORDER; i++ ) {
            b_Q14_ptr[ i ] = silk_LIMIT_32( (opus_int32)b_Q14_ptr[ i ] + silk_SMULWB( silk_LSHIFT_SAT32( temp32, 4 ), delta_b_Q14[ i ] ), -16000, 28000 );
        }
        b_Q14_ptr += LTP_ORDER;
    }
}

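/* Convert the Q16 solution of the normal equations to the Q14 coefficients used by the LTP quantizer,
   rounding the 2-bit downshift and saturating to the 16-bit range */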
|
void silk_fit_LTP(
    opus_int32 LTP_coefs_Q16[ LTP_ORDER ],
    opus_int16 LTP_coefs_Q14[ LTP_ORDER ]
)
{
    opus_int i;

    for( i = 0; i < LTP_ORDER; i++ ) {
        LTP_coefs_Q14[ i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( LTP_coefs_Q16[ i ], 2 ) );
    }
}