|
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
|
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
|
3 /* This Source Code Form is subject to the terms of the Mozilla Public |
|
4 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
6 |
|
7 #include "ConvolverNode.h" |
|
8 #include "mozilla/dom/ConvolverNodeBinding.h" |
|
9 #include "AudioNodeEngine.h" |
|
10 #include "AudioNodeStream.h" |
|
11 #include "blink/Reverb.h" |
|
12 #include "PlayingRefChangeHandler.h" |
|
13 |
|
14 namespace mozilla { |
|
15 namespace dom { |
|
16 |
|
// Cycle-collection boilerplate: in addition to everything AudioNode already
// traverses/unlinks, also collect mBuffer (the impulse-response AudioBuffer),
// which can participate in JS-visible reference cycles.
NS_IMPL_CYCLE_COLLECTION_INHERITED(ConvolverNode, AudioNode, mBuffer)

// ConvolverNode exposes no interfaces beyond AudioNode's; forward
// QueryInterface straight up to the base class.
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(ConvolverNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

// Reuse AudioNode's AddRef/Release implementations.
NS_IMPL_ADDREF_INHERITED(ConvolverNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(ConvolverNode, AudioNode)
|
24 |
|
25 class ConvolverNodeEngine : public AudioNodeEngine |
|
26 { |
|
27 typedef PlayingRefChangeHandler PlayingRefChanged; |
|
28 public: |
|
29 ConvolverNodeEngine(AudioNode* aNode, bool aNormalize) |
|
30 : AudioNodeEngine(aNode) |
|
31 , mBufferLength(0) |
|
32 , mLeftOverData(INT32_MIN) |
|
33 , mSampleRate(0.0f) |
|
34 , mUseBackgroundThreads(!aNode->Context()->IsOffline()) |
|
35 , mNormalize(aNormalize) |
|
36 { |
|
37 } |
|
38 |
|
39 enum Parameters { |
|
40 BUFFER_LENGTH, |
|
41 SAMPLE_RATE, |
|
42 NORMALIZE |
|
43 }; |
|
44 virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) MOZ_OVERRIDE |
|
45 { |
|
46 switch (aIndex) { |
|
47 case BUFFER_LENGTH: |
|
48 // BUFFER_LENGTH is the first parameter that we set when setting a new buffer, |
|
49 // so we should be careful to invalidate the rest of our state here. |
|
50 mBuffer = nullptr; |
|
51 mSampleRate = 0.0f; |
|
52 mBufferLength = aParam; |
|
53 mLeftOverData = INT32_MIN; |
|
54 break; |
|
55 case SAMPLE_RATE: |
|
56 mSampleRate = aParam; |
|
57 break; |
|
58 case NORMALIZE: |
|
59 mNormalize = !!aParam; |
|
60 break; |
|
61 default: |
|
62 NS_ERROR("Bad ConvolverNodeEngine Int32Parameter"); |
|
63 } |
|
64 } |
|
65 virtual void SetDoubleParameter(uint32_t aIndex, double aParam) MOZ_OVERRIDE |
|
66 { |
|
67 switch (aIndex) { |
|
68 case SAMPLE_RATE: |
|
69 mSampleRate = aParam; |
|
70 AdjustReverb(); |
|
71 break; |
|
72 default: |
|
73 NS_ERROR("Bad ConvolverNodeEngine DoubleParameter"); |
|
74 } |
|
75 } |
|
76 virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) |
|
77 { |
|
78 mBuffer = aBuffer; |
|
79 AdjustReverb(); |
|
80 } |
|
81 |
|
82 void AdjustReverb() |
|
83 { |
|
84 // Note about empirical tuning (this is copied from Blink) |
|
85 // The maximum FFT size affects reverb performance and accuracy. |
|
86 // If the reverb is single-threaded and processes entirely in the real-time audio thread, |
|
87 // it's important not to make this too high. In this case 8192 is a good value. |
|
88 // But, the Reverb object is multi-threaded, so we want this as high as possible without losing too much accuracy. |
|
89 // Very large FFTs will have worse phase errors. Given these constraints 32768 is a good compromise. |
|
90 const size_t MaxFFTSize = 32768; |
|
91 |
|
92 if (!mBuffer || !mBufferLength || !mSampleRate) { |
|
93 mReverb = nullptr; |
|
94 mLeftOverData = INT32_MIN; |
|
95 return; |
|
96 } |
|
97 |
|
98 mReverb = new WebCore::Reverb(mBuffer, mBufferLength, |
|
99 WEBAUDIO_BLOCK_SIZE, |
|
100 MaxFFTSize, 2, mUseBackgroundThreads, |
|
101 mNormalize, mSampleRate); |
|
102 } |
|
103 |
|
104 virtual void ProcessBlock(AudioNodeStream* aStream, |
|
105 const AudioChunk& aInput, |
|
106 AudioChunk* aOutput, |
|
107 bool* aFinished) |
|
108 { |
|
109 if (!mReverb) { |
|
110 *aOutput = aInput; |
|
111 return; |
|
112 } |
|
113 |
|
114 AudioChunk input = aInput; |
|
115 if (aInput.IsNull()) { |
|
116 if (mLeftOverData > 0) { |
|
117 mLeftOverData -= WEBAUDIO_BLOCK_SIZE; |
|
118 AllocateAudioBlock(1, &input); |
|
119 WriteZeroesToAudioBlock(&input, 0, WEBAUDIO_BLOCK_SIZE); |
|
120 } else { |
|
121 if (mLeftOverData != INT32_MIN) { |
|
122 mLeftOverData = INT32_MIN; |
|
123 nsRefPtr<PlayingRefChanged> refchanged = |
|
124 new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE); |
|
125 aStream->Graph()-> |
|
126 DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); |
|
127 } |
|
128 aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); |
|
129 return; |
|
130 } |
|
131 } else { |
|
132 if (aInput.mVolume != 1.0f) { |
|
133 // Pre-multiply the input's volume |
|
134 uint32_t numChannels = aInput.mChannelData.Length(); |
|
135 AllocateAudioBlock(numChannels, &input); |
|
136 for (uint32_t i = 0; i < numChannels; ++i) { |
|
137 const float* src = static_cast<const float*>(aInput.mChannelData[i]); |
|
138 float* dest = static_cast<float*>(const_cast<void*>(input.mChannelData[i])); |
|
139 AudioBlockCopyChannelWithScale(src, aInput.mVolume, dest); |
|
140 } |
|
141 } |
|
142 |
|
143 if (mLeftOverData <= 0) { |
|
144 nsRefPtr<PlayingRefChanged> refchanged = |
|
145 new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF); |
|
146 aStream->Graph()-> |
|
147 DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); |
|
148 } |
|
149 mLeftOverData = mBufferLength; |
|
150 MOZ_ASSERT(mLeftOverData > 0); |
|
151 } |
|
152 AllocateAudioBlock(2, aOutput); |
|
153 |
|
154 mReverb->process(&input, aOutput, WEBAUDIO_BLOCK_SIZE); |
|
155 } |
|
156 |
|
157 virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE |
|
158 { |
|
159 size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); |
|
160 if (mBuffer && !mBuffer->IsShared()) { |
|
161 amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); |
|
162 } |
|
163 |
|
164 if (mReverb) { |
|
165 amount += mReverb->sizeOfIncludingThis(aMallocSizeOf); |
|
166 } |
|
167 |
|
168 return amount; |
|
169 } |
|
170 |
|
171 virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE |
|
172 { |
|
173 return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
|
174 } |
|
175 |
|
176 private: |
|
177 nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer; |
|
178 nsAutoPtr<WebCore::Reverb> mReverb; |
|
179 int32_t mBufferLength; |
|
180 int32_t mLeftOverData; |
|
181 float mSampleRate; |
|
182 bool mUseBackgroundThreads; |
|
183 bool mNormalize; |
|
184 }; |
|
185 |
|
186 ConvolverNode::ConvolverNode(AudioContext* aContext) |
|
187 : AudioNode(aContext, |
|
188 2, |
|
189 ChannelCountMode::Clamped_max, |
|
190 ChannelInterpretation::Speakers) |
|
191 , mNormalize(true) |
|
192 { |
|
193 ConvolverNodeEngine* engine = new ConvolverNodeEngine(this, mNormalize); |
|
194 mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM); |
|
195 } |
|
196 |
|
197 size_t |
|
198 ConvolverNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
|
199 { |
|
200 size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); |
|
201 if (mBuffer) { |
|
202 // NB: mBuffer might be shared with the associated engine, by convention |
|
203 // the AudioNode will report. |
|
204 amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); |
|
205 } |
|
206 return amount; |
|
207 } |
|
208 |
|
209 size_t |
|
210 ConvolverNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const |
|
211 { |
|
212 return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
|
213 } |
|
214 |
|
// Create the JS reflector for this node via the generated WebIDL binding.
JSObject*
ConvolverNode::WrapObject(JSContext* aCx)
{
  return ConvolverNodeBinding::Wrap(aCx, this);
}
|
220 |
|
221 void |
|
222 ConvolverNode::SetBuffer(JSContext* aCx, AudioBuffer* aBuffer, ErrorResult& aRv) |
|
223 { |
|
224 if (aBuffer) { |
|
225 switch (aBuffer->NumberOfChannels()) { |
|
226 case 1: |
|
227 case 2: |
|
228 case 4: |
|
229 // Supported number of channels |
|
230 break; |
|
231 default: |
|
232 aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR); |
|
233 return; |
|
234 } |
|
235 } |
|
236 |
|
237 mBuffer = aBuffer; |
|
238 |
|
239 // Send the buffer to the stream |
|
240 AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get()); |
|
241 MOZ_ASSERT(ns, "Why don't we have a stream here?"); |
|
242 if (mBuffer) { |
|
243 uint32_t length = mBuffer->Length(); |
|
244 nsRefPtr<ThreadSharedFloatArrayBufferList> data = |
|
245 mBuffer->GetThreadSharedChannelsForRate(aCx); |
|
246 if (data && length < WEBAUDIO_BLOCK_SIZE) { |
|
247 // For very small impulse response buffers, we need to pad the |
|
248 // buffer with 0 to make sure that the Reverb implementation |
|
249 // has enough data to compute FFTs from. |
|
250 length = WEBAUDIO_BLOCK_SIZE; |
|
251 nsRefPtr<ThreadSharedFloatArrayBufferList> paddedBuffer = |
|
252 new ThreadSharedFloatArrayBufferList(data->GetChannels()); |
|
253 float* channelData = (float*) malloc(sizeof(float) * length * data->GetChannels()); |
|
254 for (uint32_t i = 0; i < data->GetChannels(); ++i) { |
|
255 PodCopy(channelData + length * i, data->GetData(i), mBuffer->Length()); |
|
256 PodZero(channelData + length * i + mBuffer->Length(), WEBAUDIO_BLOCK_SIZE - mBuffer->Length()); |
|
257 paddedBuffer->SetData(i, (i == 0) ? channelData : nullptr, channelData); |
|
258 } |
|
259 data = paddedBuffer; |
|
260 } |
|
261 SendInt32ParameterToStream(ConvolverNodeEngine::BUFFER_LENGTH, length); |
|
262 SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE, |
|
263 mBuffer->SampleRate()); |
|
264 ns->SetBuffer(data.forget()); |
|
265 } else { |
|
266 ns->SetBuffer(nullptr); |
|
267 } |
|
268 } |
|
269 |
|
270 void |
|
271 ConvolverNode::SetNormalize(bool aNormalize) |
|
272 { |
|
273 mNormalize = aNormalize; |
|
274 SendInt32ParameterToStream(ConvolverNodeEngine::NORMALIZE, aNormalize); |
|
275 } |
|
276 |
|
277 } |
|
278 } |
|
279 |