|
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
|
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
|
3 /* This Source Code Form is subject to the terms of the Mozilla Public |
|
4 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
6 |
|
7 #include "AudioBufferSourceNode.h" |
|
8 #include "mozilla/dom/AudioBufferSourceNodeBinding.h" |
|
9 #include "mozilla/dom/AudioParam.h" |
|
10 #include "nsMathUtils.h" |
|
11 #include "AudioNodeEngine.h" |
|
12 #include "AudioNodeStream.h" |
|
13 #include "AudioDestinationNode.h" |
|
14 #include "AudioParamTimeline.h" |
|
15 #include "speex/speex_resampler.h" |
|
16 #include <limits> |
|
17 |
|
18 namespace mozilla { |
|
19 namespace dom { |
|
20 |
|
// Cycle-collection and XPCOM refcounting boilerplate for
// AudioBufferSourceNode.  The node participates in the cycle collector
// because it holds strong references (mBuffer, mPlaybackRate) that can form
// reference cycles through the AudioContext graph.
NS_IMPL_CYCLE_COLLECTION_CLASS(AudioBufferSourceNode)

// Unlink: drop our strong references and detach from the graph so the
// collector can reclaim the cycle.
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioBufferSourceNode)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mBuffer)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPlaybackRate)
  if (tmp->Context()) {
    // AudioNode's Unlink implementation disconnects us from the graph
    // too, but we need to do this right here to make sure that
    // UnregisterAudioBufferSourceNode can properly untangle us from
    // the possibly connected PannerNodes.
    tmp->DisconnectFromGraph();
    tmp->Context()->UnregisterAudioBufferSourceNode(tmp);
  }
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)

// Traverse: report the strong references we own to the cycle collector.
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioBufferSourceNode, AudioNode)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mBuffer)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPlaybackRate)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

// QueryInterface simply defers to AudioNode; no extra interfaces here.
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

// AddRef/Release are inherited from AudioNode's refcount.
NS_IMPL_ADDREF_INHERITED(AudioBufferSourceNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioBufferSourceNode, AudioNode)
|
46 |
|
/**
 * Media-thread playback engine for AudioBufferSourceNode.
 * Nothing is played until a non-null buffer has been set (via
 * AudioNodeStream::SetBuffer) and a non-zero mBufferEnd has been set (via
 * AudioNodeStream::SetInt32Parameter).
 *
 * All methods below (other than the constructor and SetSourceStream) run on
 * the media graph thread; parameters arrive via the Set*Parameter overrides.
 */
class AudioBufferSourceNodeEngine : public AudioNodeEngine
{
public:
  explicit AudioBufferSourceNodeEngine(AudioNode* aNode,
                                       AudioDestinationNode* aDestination) :
    AudioNodeEngine(aNode),
    mStart(0.0), mBeginProcessing(0),
    mStop(TRACK_TICKS_MAX),
    mResampler(nullptr), mRemainingResamplerTail(0),
    mBufferEnd(0),
    mLoopStart(0), mLoopEnd(0),
    mBufferSampleRate(0), mBufferPosition(0), mChannels(0),
    mDopplerShift(1.0f),
    mDestination(static_cast<AudioNodeStream*>(aDestination->Stream())),
    mPlaybackRateTimeline(1.0f), mLoop(false)
  // NOTE(review): mSource is deliberately not initialized here; it must be
  // set via SetSourceStream() before any of the parameter setters run —
  // confirm the caller (the node constructor) guarantees this ordering.
  {}

  ~AudioBufferSourceNodeEngine()
  {
    // The speex resampler is a raw C handle; release it explicitly.
    if (mResampler) {
      speex_resampler_destroy(mResampler);
    }
  }

  // Records the stream this engine produces output for.  Must be called
  // before any parameters are set (see NOTE on the constructor).
  void SetSourceStream(AudioNodeStream* aSource)
  {
    mSource = aSource;
  }

  // Receives the playbackRate AudioParam timeline and converts its event
  // times from destination-stream time to this stream's tick domain.
  virtual void SetTimelineParameter(uint32_t aIndex,
                                    const dom::AudioParamTimeline& aValue,
                                    TrackRate aSampleRate) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case AudioBufferSourceNode::PLAYBACKRATE:
      mPlaybackRateTimeline = aValue;
      WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, mSource, mDestination);
      break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter");
    }
  }
  // Receives stop() time already converted to this stream's ticks.
  virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam)
  {
    switch (aIndex) {
    case AudioBufferSourceNode::STOP: mStop = aParam; break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter");
    }
  }
  virtual void SetDoubleParameter(uint32_t aIndex, double aParam)
  {
    switch (aIndex) {
    case AudioBufferSourceNode::START:
      MOZ_ASSERT(!mStart, "Another START?");
      // Convert the destination-time start point into this stream's
      // (fractional) tick position.
      mStart = mSource->TimeFromDestinationTime(mDestination, aParam) *
               mSource->SampleRate();
      // Round to nearest
      mBeginProcessing = mStart + 0.5;
      break;
    case AudioBufferSourceNode::DOPPLERSHIFT:
      // `aParam == aParam` filters out NaN; non-positive or NaN shifts fall
      // back to the neutral value 1.0.
      mDopplerShift = aParam > 0 && aParam == aParam ? aParam : 1.0;
      break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine double parameter.");
    };
  }
  virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam)
  {
    switch (aIndex) {
    case AudioBufferSourceNode::SAMPLE_RATE: mBufferSampleRate = aParam; break;
    case AudioBufferSourceNode::BUFFERSTART:
      // Only honor the initial offset if playback hasn't advanced yet.
      if (mBufferPosition == 0) {
        mBufferPosition = aParam;
      }
      break;
    case AudioBufferSourceNode::BUFFEREND: mBufferEnd = aParam; break;
    case AudioBufferSourceNode::LOOP: mLoop = !!aParam; break;
    case AudioBufferSourceNode::LOOPSTART: mLoopStart = aParam; break;
    case AudioBufferSourceNode::LOOPEND: mLoopEnd = aParam; break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine Int32Parameter");
    }
  }
  // Takes ownership of the (thread-shared) channel data to play.
  virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
  {
    mBuffer = aBuffer;
  }

  // True once the resampler has consumed its first input samples;
  // mBeginProcessing is set to the sentinel -TRACK_TICKS_MAX at that point
  // (see CopyFromInputBufferWithResampling).
  bool BegunResampling()
  {
    return mBeginProcessing == -TRACK_TICKS_MAX;
  }

  // (Re)configure the speex resampler for the given output rate and channel
  // count, destroying/recreating it only when necessary, and recompute
  // mBeginProcessing to account for the resampler's filter latency.
  void UpdateResampler(int32_t aOutRate, uint32_t aChannels)
  {
    if (mResampler &&
        (aChannels != mChannels ||
         // If the resampler has begun, then it will have moved
         // mBufferPosition to after the samples it has read, but it hasn't
         // output its buffered samples.  Keep using the resampler, even if
         // the rates now match, so that this latent segment is output.
         (aOutRate == mBufferSampleRate && !BegunResampling()))) {
      speex_resampler_destroy(mResampler);
      mResampler = nullptr;
      mRemainingResamplerTail = 0;
      // Back to the non-resampling start estimate (round to nearest tick).
      mBeginProcessing = mStart + 0.5;
    }

    // Rates match and no resampler is pending: plain copy path.
    if (aOutRate == mBufferSampleRate && !mResampler) {
      return;
    }

    if (!mResampler) {
      mChannels = aChannels;
      mResampler = speex_resampler_init(mChannels, mBufferSampleRate, aOutRate,
                                        SPEEX_RESAMPLER_QUALITY_DEFAULT,
                                        nullptr);
    } else {
      uint32_t currentOutSampleRate, currentInSampleRate;
      speex_resampler_get_rate(mResampler, &currentInSampleRate,
                               &currentOutSampleRate);
      if (currentOutSampleRate == static_cast<uint32_t>(aOutRate)) {
        return;
      }
      speex_resampler_set_rate(mResampler, currentInSampleRate, aOutRate);
    }

    if (!BegunResampling()) {
      // Low pass filter effects from the resampler mean that samples before
      // the start time are influenced by resampling the buffer.  The input
      // latency indicates half the filter width.
      int64_t inputLatency = speex_resampler_get_input_latency(mResampler);
      uint32_t ratioNum, ratioDen;
      speex_resampler_get_ratio(mResampler, &ratioNum, &ratioDen);
      // The output subsample resolution supported in aligning the resampler
      // is ratioNum.  First round the start time to the nearest subsample.
      int64_t subsample = mStart * ratioNum + 0.5;
      // Now include the leading effects of the filter, and round *up* to the
      // next whole tick, because there is no effect on samples outside the
      // filter width.
      mBeginProcessing =
        (subsample - inputLatency * ratioDen + ratioNum - 1) / ratioNum;
    }
  }

  // Borrow a full buffer of size WEBAUDIO_BLOCK_SIZE from the source buffer
  // at offset aSourceOffset.  This avoids copying memory.
  void BorrowFromInputBuffer(AudioChunk* aOutput,
                             uint32_t aChannels)
  {
    aOutput->mDuration = WEBAUDIO_BLOCK_SIZE;
    aOutput->mBuffer = mBuffer;
    aOutput->mChannelData.SetLength(aChannels);
    for (uint32_t i = 0; i < aChannels; ++i) {
      aOutput->mChannelData[i] = mBuffer->GetData(i) + mBufferPosition;
    }
    aOutput->mVolume = 1.0f;
    aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32;
  }

  // Copy aNumberOfFrames frames from the source buffer at offset aSourceOffset
  // and put it at offset aBufferOffset in the destination buffer.
  void CopyFromInputBuffer(AudioChunk* aOutput,
                           uint32_t aChannels,
                           uintptr_t aOffsetWithinBlock,
                           uint32_t aNumberOfFrames) {
    for (uint32_t i = 0; i < aChannels; ++i) {
      // mChannelData is stored const; cast away constness to write into the
      // block we allocated ourselves in CopyFromBuffer.
      float* baseChannelData = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i]));
      memcpy(baseChannelData + aOffsetWithinBlock,
             mBuffer->GetData(i) + mBufferPosition,
             aNumberOfFrames * sizeof(float));
    }
  }

  // Resamples input data to an output buffer, according to |mBufferSampleRate| and
  // the playbackRate.
  // The number of frames consumed/produced depends on the amount of space
  // remaining in both the input and output buffer, and the playback rate (that
  // is, the ratio between the output samplerate and the input samplerate).
  void CopyFromInputBufferWithResampling(AudioNodeStream* aStream,
                                         AudioChunk* aOutput,
                                         uint32_t aChannels,
                                         uint32_t* aOffsetWithinBlock,
                                         TrackTicks* aCurrentPosition,
                                         int32_t aBufferMax) {
    // TODO: adjust for mStop (see bug 913854 comment 9).
    uint32_t availableInOutputBuffer =
      WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock;
    SpeexResamplerState* resampler = mResampler;
    MOZ_ASSERT(aChannels > 0);

    if (mBufferPosition < aBufferMax) {
      // Still have real input samples to feed the resampler.
      uint32_t availableInInputBuffer = aBufferMax - mBufferPosition;
      uint32_t ratioNum, ratioDen;
      speex_resampler_get_ratio(resampler, &ratioNum, &ratioDen);
      // Limit the number of input samples copied and possibly
      // format-converted for resampling by estimating how many will be used.
      // This may be a little small if still filling the resampler with
      // initial data, but we'll get called again and it will work out.
      uint32_t inputLimit = availableInOutputBuffer * ratioNum / ratioDen + 10;
      if (!BegunResampling()) {
        // First time the resampler is used.
        uint32_t inputLatency = speex_resampler_get_input_latency(resampler);
        inputLimit += inputLatency;
        // If starting after mStart, then play from the beginning of the
        // buffer, but correct for input latency.  If starting before mStart,
        // then align the resampler so that the time corresponding to the
        // first input sample is mStart.
        uint32_t skipFracNum = inputLatency * ratioDen;
        double leadTicks = mStart - *aCurrentPosition;
        if (leadTicks > 0.0) {
          // Round to nearest output subsample supported by the resampler at
          // these rates.
          skipFracNum -= leadTicks * ratioNum + 0.5;
          MOZ_ASSERT(skipFracNum < INT32_MAX, "mBeginProcessing is wrong?");
        }
        speex_resampler_set_skip_frac_num(resampler, skipFracNum);

        // Sentinel: BegunResampling() is now true.
        mBeginProcessing = -TRACK_TICKS_MAX;
      }
      inputLimit = std::min(inputLimit, availableInInputBuffer);

      // Process each channel; the shared position/offset bookkeeping is done
      // only after the last channel, using that channel's in/out counts
      // (speex consumes/produces the same counts for every channel).
      for (uint32_t i = 0; true; ) {
        uint32_t inSamples = inputLimit;
        const float* inputData = mBuffer->GetData(i) + mBufferPosition;

        uint32_t outSamples = availableInOutputBuffer;
        float* outputData =
          static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])) +
          *aOffsetWithinBlock;

        WebAudioUtils::SpeexResamplerProcess(resampler, i,
                                             inputData, &inSamples,
                                             outputData, &outSamples);
        if (++i == aChannels) {
          mBufferPosition += inSamples;
          MOZ_ASSERT(mBufferPosition <= mBufferEnd || mLoop);
          *aOffsetWithinBlock += outSamples;
          *aCurrentPosition += outSamples;
          if (inSamples == availableInInputBuffer && !mLoop) {
            // We'll feed in enough zeros to empty out the resampler's memory.
            // This handles the output latency as well as capturing the low
            // pass effects of the resample filter.
            mRemainingResamplerTail =
              2 * speex_resampler_get_input_latency(resampler) - 1;
          }
          return;
        }
      }
    } else {
      // Input exhausted: drain the resampler's internal state (the "tail").
      for (uint32_t i = 0; true; ) {
        uint32_t inSamples = mRemainingResamplerTail;
        uint32_t outSamples = availableInOutputBuffer;
        float* outputData =
          static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])) +
          *aOffsetWithinBlock;

        // AudioDataValue* for aIn selects the function that does not try to
        // copy and format-convert input data.
        WebAudioUtils::SpeexResamplerProcess(resampler, i,
                         static_cast<AudioDataValue*>(nullptr), &inSamples,
                         outputData, &outSamples);
        if (++i == aChannels) {
          mRemainingResamplerTail -= inSamples;
          MOZ_ASSERT(mRemainingResamplerTail >= 0);
          *aOffsetWithinBlock += outSamples;
          *aCurrentPosition += outSamples;
          break;
        }
      }
    }
  }

  /**
   * Fill aOutput with as many zero frames as we can, and advance
   * aOffsetWithinBlock and aCurrentPosition based on how many frames we write.
   * This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or
   * aCurrentPosition past aMaxPos.  This function knows when it needs to
   * allocate the output buffer, and also optimizes the case where it can avoid
   * memory allocations.
   */
  void FillWithZeroes(AudioChunk* aOutput,
                      uint32_t aChannels,
                      uint32_t* aOffsetWithinBlock,
                      TrackTicks* aCurrentPosition,
                      TrackTicks aMaxPos)
  {
    MOZ_ASSERT(*aCurrentPosition < aMaxPos);
    uint32_t numFrames =
      std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                           aMaxPos - *aCurrentPosition);
    if (numFrames == WEBAUDIO_BLOCK_SIZE) {
      // A whole-block of silence can be represented without allocation.
      aOutput->SetNull(numFrames);
    } else {
      if (*aOffsetWithinBlock == 0) {
        AllocateAudioBlock(aChannels, aOutput);
      }
      WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames);
    }
    *aOffsetWithinBlock += numFrames;
    *aCurrentPosition += numFrames;
  }

  /**
   * Copy as many frames as possible from the source buffer to aOutput, and
   * advance aOffsetWithinBlock and aCurrentPosition based on how many frames
   * we write.  This will never advance aOffsetWithinBlock past
   * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop.  It takes data from
   * the buffer at aBufferOffset, and never takes more data than aBufferMax.
   * This function knows when it needs to allocate the output buffer, and also
   * optimizes the case where it can avoid memory allocations.
   */
  void CopyFromBuffer(AudioNodeStream* aStream,
                      AudioChunk* aOutput,
                      uint32_t aChannels,
                      uint32_t* aOffsetWithinBlock,
                      TrackTicks* aCurrentPosition,
                      int32_t aBufferMax)
  {
    MOZ_ASSERT(*aCurrentPosition < mStop);
    uint32_t numFrames =
      std::min(std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                                    aBufferMax - mBufferPosition),
               mStop - *aCurrentPosition);
    if (numFrames == WEBAUDIO_BLOCK_SIZE && !mResampler) {
      // Fast path: whole block, same rate — share the buffer, no copy.
      MOZ_ASSERT(mBufferPosition < aBufferMax);
      BorrowFromInputBuffer(aOutput, aChannels);
      *aOffsetWithinBlock += numFrames;
      *aCurrentPosition += numFrames;
      mBufferPosition += numFrames;
    } else {
      if (*aOffsetWithinBlock == 0) {
        AllocateAudioBlock(aChannels, aOutput);
      }
      if (!mResampler) {
        MOZ_ASSERT(mBufferPosition < aBufferMax);
        CopyFromInputBuffer(aOutput, aChannels, *aOffsetWithinBlock, numFrames);
        *aOffsetWithinBlock += numFrames;
        *aCurrentPosition += numFrames;
        mBufferPosition += numFrames;
      } else {
        // The resampler updates the positions itself.
        CopyFromInputBufferWithResampling(aStream, aOutput, aChannels, aOffsetWithinBlock, aCurrentPosition, aBufferMax);
      }
    }
  }

  // Combine playbackRate and doppler shift into the effective output rate the
  // resampler should produce; falls back to mBufferSampleRate if the result
  // truncates to zero (e.g. absurdly large rate product).
  int32_t ComputeFinalOutSampleRate(float aPlaybackRate)
  {
    // Make sure the playback rate and the doppler shift are something
    // our resampler can work with.
    int32_t rate = WebAudioUtils::
      TruncateFloatToInt<int32_t>(mSource->SampleRate() /
                                  (aPlaybackRate * mDopplerShift));
    return rate ? rate : mBufferSampleRate;
  }

  // Sample playbackRate (k-rate: once per block) and reconfigure the
  // resampler if the effective output rate changed.
  void UpdateSampleRateIfNeeded(uint32_t aChannels)
  {
    float playbackRate;

    if (mPlaybackRateTimeline.HasSimpleValue()) {
      playbackRate = mPlaybackRateTimeline.GetValue();
    } else {
      playbackRate = mPlaybackRateTimeline.GetValueAtTime(mSource->GetCurrentPosition());
    }
    // Guard against non-positive and NaN values (NaN != NaN).
    if (playbackRate <= 0 || playbackRate != playbackRate) {
      playbackRate = 1.0f;
    }

    int32_t outRate = ComputeFinalOutSampleRate(playbackRate);
    UpdateResampler(outRate, aChannels);
  }

  // Produce one WEBAUDIO_BLOCK_SIZE block of output: silence before start /
  // after stop, buffer data (possibly looped and/or resampled) in between.
  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished)
  {
    if (!mBuffer || !mBufferEnd) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    uint32_t channels = mBuffer->GetChannels();
    if (!channels) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    // WebKit treats the playbackRate as a k-rate parameter in their code,
    // despite the spec saying that it should be an a-rate parameter.  We treat
    // it as k-rate.  Spec bug: https://www.w3.org/Bugs/Public/show_bug.cgi?id=21592
    UpdateSampleRateIfNeeded(channels);

    uint32_t written = 0;
    TrackTicks streamPosition = aStream->GetCurrentPosition();
    while (written < WEBAUDIO_BLOCK_SIZE) {
      if (mStop != TRACK_TICKS_MAX &&
          streamPosition >= mStop) {
        FillWithZeroes(aOutput, channels, &written, &streamPosition, TRACK_TICKS_MAX);
        continue;
      }
      if (streamPosition < mBeginProcessing) {
        FillWithZeroes(aOutput, channels, &written, &streamPosition,
                       mBeginProcessing);
        continue;
      }
      if (mLoop) {
        // mLoopEnd can become less than mBufferPosition when a LOOPEND engine
        // parameter is received after "loopend" is changed on the node or a
        // new buffer with lower samplerate is set.
        if (mBufferPosition >= mLoopEnd) {
          mBufferPosition = mLoopStart;
        }
        CopyFromBuffer(aStream, aOutput, channels, &written, &streamPosition, mLoopEnd);
      } else {
        if (mBufferPosition < mBufferEnd || mRemainingResamplerTail) {
          CopyFromBuffer(aStream, aOutput, channels, &written, &streamPosition, mBufferEnd);
        } else {
          FillWithZeroes(aOutput, channels, &written, &streamPosition, TRACK_TICKS_MAX);
        }
      }
    }

    // We've finished if we've gone past mStop, or if we're past mDuration when
    // looping is disabled.
    if (streamPosition >= mStop ||
        (!mLoop && mBufferPosition >= mBufferEnd && !mRemainingResamplerTail)) {
      *aFinished = true;
    }
  }

  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    // Not owned:
    // - mBuffer - shared w/ AudioNode
    // - mPlaybackRateTimeline - shared w/ AudioNode

    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);

    // NB: We need to modify speex if we want the full memory picture, internal
    // fields that need measuring noted below.
    // - mResampler->mem
    // - mResampler->sinc_table
    // - mResampler->last_sample
    // - mResampler->magic_samples
    // - mResampler->samp_frac_num
    amount += aMallocSizeOf(mResampler);

    return amount;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  double mStart; // including the fractional position between ticks
  // Low pass filter effects from the resampler mean that samples before the
  // start time are influenced by resampling the buffer.  mBeginProcessing
  // includes the extent of this filter.  The special value of -TRACK_TICKS_MAX
  // indicates that the resampler has begun processing.
  TrackTicks mBeginProcessing;
  // Stream tick at/after which output is silence (stop() time).
  TrackTicks mStop;
  // The channel data to play; shared with the main thread.
  nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  SpeexResamplerState* mResampler;
  // mRemainingResamplerTail, like mBufferPosition, and
  // mBufferEnd, is measured in input buffer samples.
  int mRemainingResamplerTail;
  int32_t mBufferEnd;
  int32_t mLoopStart;
  int32_t mLoopEnd;
  int32_t mBufferSampleRate;
  int32_t mBufferPosition;
  uint32_t mChannels;
  float mDopplerShift;
  AudioNodeStream* mDestination;
  // Set by SetSourceStream(); not owned.
  AudioNodeStream* mSource;
  AudioParamTimeline mPlaybackRateTimeline;
  bool mLoop;
};
|
535 |
|
// Constructs the node with 2 output channels (max/speakers interpretation,
// per the Web Audio defaults used here), creates its media-graph engine and
// source stream, and registers for main-thread stream-state notifications
// (used to fire the "ended" event).
AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Max,
              ChannelInterpretation::Speakers)
  , mLoopStart(0.0)
  , mLoopEnd(0.0)
  // mOffset and mDuration are initialized in Start().
  , mPlaybackRate(new AudioParam(MOZ_THIS_IN_INITIALIZER_LIST(),
                                 SendPlaybackRateToStream, 1.0f))
  , mLoop(false)
  , mStartCalled(false)
  , mStopped(false)
{
  // The engine is owned by the stream once CreateAudioNodeStream takes it.
  AudioBufferSourceNodeEngine* engine = new AudioBufferSourceNodeEngine(this, aContext->Destination());
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::SOURCE_STREAM);
  // Give the engine its source stream before any parameters can be sent.
  engine->SetSourceStream(static_cast<AudioNodeStream*>(mStream.get()));
  mStream->AddMainThreadListener(this);
}
|
555 |
|
556 AudioBufferSourceNode::~AudioBufferSourceNode() |
|
557 { |
|
558 if (Context()) { |
|
559 Context()->UnregisterAudioBufferSourceNode(this); |
|
560 } |
|
561 } |
|
562 |
|
563 size_t |
|
564 AudioBufferSourceNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
|
565 { |
|
566 size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); |
|
567 if (mBuffer) { |
|
568 amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); |
|
569 } |
|
570 |
|
571 amount += mPlaybackRate->SizeOfIncludingThis(aMallocSizeOf); |
|
572 return amount; |
|
573 } |
|
574 |
|
575 size_t |
|
576 AudioBufferSourceNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const |
|
577 { |
|
578 return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
|
579 } |
|
580 |
|
581 JSObject* |
|
582 AudioBufferSourceNode::WrapObject(JSContext* aCx) |
|
583 { |
|
584 return AudioBufferSourceNodeBinding::Wrap(aCx, this); |
|
585 } |
|
586 |
|
587 void |
|
588 AudioBufferSourceNode::Start(double aWhen, double aOffset, |
|
589 const Optional<double>& aDuration, ErrorResult& aRv) |
|
590 { |
|
591 if (!WebAudioUtils::IsTimeValid(aWhen) || |
|
592 (aDuration.WasPassed() && !WebAudioUtils::IsTimeValid(aDuration.Value()))) { |
|
593 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
|
594 return; |
|
595 } |
|
596 |
|
597 if (mStartCalled) { |
|
598 aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); |
|
599 return; |
|
600 } |
|
601 mStartCalled = true; |
|
602 |
|
603 AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get()); |
|
604 if (!ns) { |
|
605 // Nothing to play, or we're already dead for some reason |
|
606 return; |
|
607 } |
|
608 |
|
609 // Remember our arguments so that we can use them when we get a new buffer. |
|
610 mOffset = aOffset; |
|
611 mDuration = aDuration.WasPassed() ? aDuration.Value() |
|
612 : std::numeric_limits<double>::min(); |
|
613 // We can't send these parameters without a buffer because we don't know the |
|
614 // buffer's sample rate or length. |
|
615 if (mBuffer) { |
|
616 SendOffsetAndDurationParametersToStream(ns); |
|
617 } |
|
618 |
|
619 // Don't set parameter unnecessarily |
|
620 if (aWhen > 0.0) { |
|
621 ns->SetDoubleParameter(START, mContext->DOMTimeToStreamTime(aWhen)); |
|
622 } |
|
623 } |
|
624 |
|
625 void |
|
626 AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx) |
|
627 { |
|
628 AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get()); |
|
629 MOZ_ASSERT(ns, "Why don't we have a stream here?"); |
|
630 |
|
631 if (mBuffer) { |
|
632 float rate = mBuffer->SampleRate(); |
|
633 nsRefPtr<ThreadSharedFloatArrayBufferList> data = |
|
634 mBuffer->GetThreadSharedChannelsForRate(aCx); |
|
635 ns->SetBuffer(data.forget()); |
|
636 ns->SetInt32Parameter(SAMPLE_RATE, rate); |
|
637 |
|
638 if (mStartCalled) { |
|
639 SendOffsetAndDurationParametersToStream(ns); |
|
640 } |
|
641 } else { |
|
642 ns->SetBuffer(nullptr); |
|
643 |
|
644 MarkInactive(); |
|
645 } |
|
646 } |
|
647 |
|
648 void |
|
649 AudioBufferSourceNode::SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream) |
|
650 { |
|
651 NS_ASSERTION(mBuffer && mStartCalled, |
|
652 "Only call this when we have a buffer and start() has been called"); |
|
653 |
|
654 float rate = mBuffer->SampleRate(); |
|
655 int32_t bufferEnd = mBuffer->Length(); |
|
656 int32_t offsetSamples = std::max(0, NS_lround(mOffset * rate)); |
|
657 |
|
658 // Don't set parameter unnecessarily |
|
659 if (offsetSamples > 0) { |
|
660 aStream->SetInt32Parameter(BUFFERSTART, offsetSamples); |
|
661 } |
|
662 |
|
663 if (mDuration != std::numeric_limits<double>::min()) { |
|
664 bufferEnd = std::min(bufferEnd, |
|
665 offsetSamples + NS_lround(mDuration * rate)); |
|
666 } |
|
667 aStream->SetInt32Parameter(BUFFEREND, bufferEnd); |
|
668 |
|
669 MarkActive(); |
|
670 } |
|
671 |
|
672 void |
|
673 AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv) |
|
674 { |
|
675 if (!WebAudioUtils::IsTimeValid(aWhen)) { |
|
676 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
|
677 return; |
|
678 } |
|
679 |
|
680 if (!mStartCalled) { |
|
681 aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); |
|
682 return; |
|
683 } |
|
684 |
|
685 AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get()); |
|
686 if (!ns || !Context()) { |
|
687 // We've already stopped and had our stream shut down |
|
688 return; |
|
689 } |
|
690 |
|
691 ns->SetStreamTimeParameter(STOP, Context(), std::max(0.0, aWhen)); |
|
692 } |
|
693 |
|
694 void |
|
695 AudioBufferSourceNode::NotifyMainThreadStateChanged() |
|
696 { |
|
697 if (mStream->IsFinished()) { |
|
698 class EndedEventDispatcher : public nsRunnable |
|
699 { |
|
700 public: |
|
701 explicit EndedEventDispatcher(AudioBufferSourceNode* aNode) |
|
702 : mNode(aNode) {} |
|
703 NS_IMETHODIMP Run() |
|
704 { |
|
705 // If it's not safe to run scripts right now, schedule this to run later |
|
706 if (!nsContentUtils::IsSafeToRunScript()) { |
|
707 nsContentUtils::AddScriptRunner(this); |
|
708 return NS_OK; |
|
709 } |
|
710 |
|
711 mNode->DispatchTrustedEvent(NS_LITERAL_STRING("ended")); |
|
712 return NS_OK; |
|
713 } |
|
714 private: |
|
715 nsRefPtr<AudioBufferSourceNode> mNode; |
|
716 }; |
|
717 if (!mStopped) { |
|
718 // Only dispatch the ended event once |
|
719 NS_DispatchToMainThread(new EndedEventDispatcher(this)); |
|
720 mStopped = true; |
|
721 } |
|
722 |
|
723 // Drop the playing reference |
|
724 // Warning: The below line might delete this. |
|
725 MarkInactive(); |
|
726 } |
|
727 } |
|
728 |
|
729 void |
|
730 AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode) |
|
731 { |
|
732 AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode); |
|
733 SendTimelineParameterToStream(This, PLAYBACKRATE, *This->mPlaybackRate); |
|
734 } |
|
735 |
|
// Forwards a doppler shift factor (computed by a connected PannerNode,
// presumably — confirm against the caller) to the engine; values <= 0 or NaN
// are sanitized to 1.0 on the engine side.
void
AudioBufferSourceNode::SendDopplerShiftToStream(double aDopplerShift)
{
  SendDoubleParameterToStream(DOPPLERSHIFT, aDopplerShift);
}
|
741 |
|
742 void |
|
743 AudioBufferSourceNode::SendLoopParametersToStream() |
|
744 { |
|
745 // Don't compute and set the loop parameters unnecessarily |
|
746 if (mLoop && mBuffer) { |
|
747 float rate = mBuffer->SampleRate(); |
|
748 double length = (double(mBuffer->Length()) / mBuffer->SampleRate()); |
|
749 double actualLoopStart, actualLoopEnd; |
|
750 if (mLoopStart >= 0.0 && mLoopEnd > 0.0 && |
|
751 mLoopStart < mLoopEnd) { |
|
752 MOZ_ASSERT(mLoopStart != 0.0 || mLoopEnd != 0.0); |
|
753 actualLoopStart = (mLoopStart > length) ? 0.0 : mLoopStart; |
|
754 actualLoopEnd = std::min(mLoopEnd, length); |
|
755 } else { |
|
756 actualLoopStart = 0.0; |
|
757 actualLoopEnd = length; |
|
758 } |
|
759 int32_t loopStartTicks = NS_lround(actualLoopStart * rate); |
|
760 int32_t loopEndTicks = NS_lround(actualLoopEnd * rate); |
|
761 if (loopStartTicks < loopEndTicks) { |
|
762 SendInt32ParameterToStream(LOOPSTART, loopStartTicks); |
|
763 SendInt32ParameterToStream(LOOPEND, loopEndTicks); |
|
764 SendInt32ParameterToStream(LOOP, 1); |
|
765 } else { |
|
766 // Be explicit about looping not happening if the offsets make |
|
767 // looping impossible. |
|
768 SendInt32ParameterToStream(LOOP, 0); |
|
769 } |
|
770 } else if (!mLoop) { |
|
771 SendInt32ParameterToStream(LOOP, 0); |
|
772 } |
|
773 } |
|
774 |
|
775 } |
|
776 } |