/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ScriptProcessorNode.h"
#include "mozilla/dom/ScriptProcessorNodeBinding.h"
#include "AudioBuffer.h"
#include "AudioDestinationNode.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioProcessingEvent.h"
#include "WebAudioUtils.h"
#include "nsCxPusher.h"
#include "mozilla/Mutex.h"
#include "mozilla/PodOperations.h"
#include <deque>

namespace mozilla {
namespace dom {

// The maximum latency, in seconds, that we can live with before dropping
// buffers.
static const float MAX_LATENCY_S = 0.5;

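// ScriptProcessorNode adds no XPCOM interfaces beyond the ones AudioNode
// already implements, hence the INHERITED0 variant of the macro.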
NS_IMPL_ISUPPORTS_INHERITED0(ScriptProcessorNode, AudioNode)

// This class manages a queue of output buffers shared between
// the main thread and the Media Stream Graph thread.
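// The main thread produces buffers in FinishProducingOutputBuffer() and the
// graph thread consumes them in GetOutputBuffer().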
class SharedBuffers
{
private:
  class OutputQueue
  {
  public:
    explicit OutputQueue(const char* aName)
      : mMutex(aName)
    {}

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
    {
      mMutex.AssertCurrentThreadOwns();

      size_t amount = 0;
      for (size_t i = 0; i < mBufferList.size(); i++) {
        amount += mBufferList[i].SizeOfExcludingThis(aMallocSizeOf, false);
      }

      return amount;
    }

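    // Expose the lock so that callers can hold it over several queue calls.
    // The const_cast lets const methods hand out the (logically mutable)
    // mutex.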
    Mutex& Lock() const { return const_cast<OutputQueue*>(this)->mMutex; }

    size_t ReadyToConsume() const
    {
      mMutex.AssertCurrentThreadOwns();
      MOZ_ASSERT(!NS_IsMainThread());
      return mBufferList.size();
    }

    // Produces one buffer.
    AudioChunk& Produce()
    {
      mMutex.AssertCurrentThreadOwns();
      MOZ_ASSERT(NS_IsMainThread());
      mBufferList.push_back(AudioChunk());
      return mBufferList.back();
    }

    // Consumes one buffer.
    AudioChunk Consume()
    {
      mMutex.AssertCurrentThreadOwns();
      MOZ_ASSERT(!NS_IsMainThread());
      MOZ_ASSERT(ReadyToConsume() > 0);
      AudioChunk front = mBufferList.front();
      mBufferList.pop_front();
      return front;
    }

    // Empties the buffer queue.
    void Clear()
    {
      mMutex.AssertCurrentThreadOwns();
      mBufferList.clear();
    }

  private:
    typedef std::deque<AudioChunk> BufferList;

    // Synchronizes access to mBufferList. Note that it's the responsibility
    // of the callers to perform the required locking, and we assert that
    // every time we access mBufferList.
    Mutex mMutex;
    // The list representing the queue.
    BufferList mBufferList;
  };

public:
  SharedBuffers(float aSampleRate)
    : mOutputQueue("SharedBuffers::outputQueue")
    , mDelaySoFar(TRACK_TICKS_MAX)
    , mSampleRate(aSampleRate)
    , mLatency(0.0)
    , mDroppingBuffers(false)
  {
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
  {
    size_t amount = aMallocSizeOf(this);

    {
      MutexAutoLock lock(mOutputQueue.Lock());
      amount += mOutputQueue.SizeOfExcludingThis(aMallocSizeOf);
    }

    return amount;
  }

  // main thread
  void FinishProducingOutputBuffer(ThreadSharedFloatArrayBufferList* aBuffer,
                                   uint32_t aBufferSize)
  {
    MOZ_ASSERT(NS_IsMainThread());

    TimeStamp now = TimeStamp::Now();

    if (mLastEventTime.IsNull()) {
      mLastEventTime = now;
    } else {
      // When the main thread is blocked, and all the events are processed in
      // a burst after the main thread unblocks, the |(now - mLastEventTime)|
      // interval will be very short. |latency - bufferDuration| will be
      // negative, effectively moving mLatency back to a smaller and smaller
      // value, until it crosses zero, at which point we stop dropping buffers
      // and resume normal operation. This does not work if, at the same time,
      // the MSG thread was also slowed down, so if the latency on the MSG
      // thread is normal, and we are still dropping buffers, and mLatency is
      // still more than twice the duration of a buffer, we reset it and stop
      // dropping buffers.
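      // As a concrete example: at 44100 Hz with the default 4096-frame
      // buffer, bufferDuration is about 93 ms, so MAX_LATENCY_S (0.5 s)
      // corresponds to falling roughly five buffers behind before we start
      // dropping.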
      float latency = (now - mLastEventTime).ToSeconds();
      float bufferDuration = aBufferSize / mSampleRate;
      mLatency += latency - bufferDuration;
      mLastEventTime = now;
      if (mLatency > MAX_LATENCY_S ||
          (mDroppingBuffers && mLatency > 0.0 &&
           fabs(latency - bufferDuration) < bufferDuration)) {
        mDroppingBuffers = true;
        return;
      } else {
        if (mDroppingBuffers) {
          mLatency = 0;
        }
        mDroppingBuffers = false;
      }
    }

    MutexAutoLock lock(mOutputQueue.Lock());
    for (uint32_t offset = 0; offset < aBufferSize; offset += WEBAUDIO_BLOCK_SIZE) {
      AudioChunk& chunk = mOutputQueue.Produce();
      if (aBuffer) {
        chunk.mDuration = WEBAUDIO_BLOCK_SIZE;
        chunk.mBuffer = aBuffer;
        chunk.mChannelData.SetLength(aBuffer->GetChannels());
        for (uint32_t i = 0; i < aBuffer->GetChannels(); ++i) {
          chunk.mChannelData[i] = aBuffer->GetData(i) + offset;
        }
        chunk.mVolume = 1.0f;
        chunk.mBufferFormat = AUDIO_FORMAT_FLOAT32;
      } else {
        chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  // graph thread
  AudioChunk GetOutputBuffer()
  {
    MOZ_ASSERT(!NS_IsMainThread());
    AudioChunk buffer;

    {
      MutexAutoLock lock(mOutputQueue.Lock());
      if (mOutputQueue.ReadyToConsume() > 0) {
        if (mDelaySoFar == TRACK_TICKS_MAX) {
          mDelaySoFar = 0;
        }
        buffer = mOutputQueue.Consume();
      } else {
        // If we're out of buffers to consume, just output silence.
        buffer.SetNull(WEBAUDIO_BLOCK_SIZE);
        if (mDelaySoFar != TRACK_TICKS_MAX) {
          // Remember the delay that we just hit.
          mDelaySoFar += WEBAUDIO_BLOCK_SIZE;
        }
      }
    }

    return buffer;
  }

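  // graph thread
  // Returns how long the graph thread has had to pad with silence while
  // waiting for a buffer from the main thread, or zero if no buffer has been
  // received yet.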
  TrackTicks DelaySoFar() const
  {
    MOZ_ASSERT(!NS_IsMainThread());
    return mDelaySoFar == TRACK_TICKS_MAX ? 0 : mDelaySoFar;
  }

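  // graph thread
  // Drops all queued output and returns to the initial state; called when
  // the node becomes disconnected.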
  void Reset()
  {
    MOZ_ASSERT(!NS_IsMainThread());
    mDelaySoFar = TRACK_TICKS_MAX;
    mLatency = 0.0f;
    {
      MutexAutoLock lock(mOutputQueue.Lock());
      mOutputQueue.Clear();
    }
    mLastEventTime = TimeStamp();
  }

private:
  OutputQueue mOutputQueue;
  // How much delay we've seen so far. This measures the amount of delay
  // caused by the main thread lagging behind in producing output buffers.
  // TRACK_TICKS_MAX means that we have not received our first buffer yet.
  TrackTicks mDelaySoFar;
  // The sample rate of the context.
  float mSampleRate;
  // This is the latency caused by the buffering. If this grows too high, we
  // will drop buffers until it is acceptable.
  float mLatency;
  // This is the time at which we last produced a buffer, to detect if the
  // main thread has been blocked.
  TimeStamp mLastEventTime;
  // True if we should be dropping buffers.
  bool mDroppingBuffers;
};

class ScriptProcessorNodeEngine : public AudioNodeEngine
{
public:
  typedef nsAutoTArray<nsAutoArrayPtr<float>, 2> InputChannels;

  ScriptProcessorNodeEngine(ScriptProcessorNode* aNode,
                            AudioDestinationNode* aDestination,
                            uint32_t aBufferSize,
                            uint32_t aNumberOfInputChannels)
    : AudioNodeEngine(aNode)
    , mSharedBuffers(aNode->GetSharedBuffers())
    , mSource(nullptr)
    , mDestination(static_cast<AudioNodeStream*>(aDestination->Stream()))
    , mBufferSize(aBufferSize)
    , mInputWriteIndex(0)
    , mSeenNonSilenceInput(false)
  {
    mInputChannels.SetLength(aNumberOfInputChannels);
    AllocateInputBlock();
  }

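  // Called by the ScriptProcessorNode constructor once the node's stream
  // exists; the engine cannot know its own stream at construction time.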
  void SetSourceStream(AudioNodeStream* aSource)
  {
    mSource = aSource;
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished) MOZ_OVERRIDE
  {
    MutexAutoLock lock(NodeMutex());

    // If our node is dead, just output silence.
    if (!Node()) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    // This node is not connected to anything. Per spec, we don't fire the
    // onaudioprocess event. We also want to clear out the input and output
    // buffer queue, and output a null buffer.
    if (!(aStream->ConsumerCount() ||
          aStream->AsProcessedStream()->InputPortCount())) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      mSharedBuffers->Reset();
      mSeenNonSilenceInput = false;
      mInputWriteIndex = 0;
      return;
    }

    // First, record our input buffer.
    for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
      if (aInput.IsNull()) {
        PodZero(mInputChannels[i] + mInputWriteIndex,
                aInput.GetDuration());
      } else {
        mSeenNonSilenceInput = true;
        MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
        MOZ_ASSERT(aInput.mChannelData.Length() == mInputChannels.Length());
        AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                       aInput.mVolume,
                                       mInputChannels[i] + mInputWriteIndex);
      }
    }
    mInputWriteIndex += aInput.GetDuration();

    // Now, see if we have data to output.
    // Note that we need to do this before sending the buffer to the main
    // thread so that our delay time is updated.
    *aOutput = mSharedBuffers->GetOutputBuffer();

    if (mInputWriteIndex >= mBufferSize) {
      SendBuffersToMainThread(aStream);
      mInputWriteIndex -= mBufferSize;
      mSeenNonSilenceInput = false;
      AllocateInputBlock();
    }
  }

  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    // Not owned:
    // - mSharedBuffers
    // - mSource (probably)
    // - mDestination (probably)
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    amount += mInputChannels.SizeOfExcludingThis(aMallocSizeOf);
    for (size_t i = 0; i < mInputChannels.Length(); i++) {
      amount += mInputChannels[i].SizeOfExcludingThis(aMallocSizeOf);
    }

    return amount;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
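  // (Re)allocate any input channel arrays whose ownership was transferred to
  // the main thread by SendBuffersToMainThread(); entries we still own are
  // reused as-is.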
  void AllocateInputBlock()
  {
    for (unsigned i = 0; i < mInputChannels.Length(); ++i) {
      if (!mInputChannels[i]) {
        mInputChannels[i] = new float[mBufferSize];
      }
    }
  }

  void SendBuffersToMainThread(AudioNodeStream* aStream)
  {
    MOZ_ASSERT(!NS_IsMainThread());

    // We now have a full input buffer ready to be sent to the main thread.
    TrackTicks playbackTick = mSource->GetCurrentPosition();
    // Add the duration of the current sample.
    playbackTick += WEBAUDIO_BLOCK_SIZE;
    // Add the delay caused by the main thread.
    playbackTick += mSharedBuffers->DelaySoFar();
    // Compute the playback time in the coordinate system of the destination.
    // FIXME: bug 970773
    double playbackTime =
      mSource->DestinationTimeFromTicks(mDestination, playbackTick);

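    // Runnable that delivers the recorded input to the main thread, fires
    // onaudioprocess, and forwards whatever output the script produced to
    // the shared buffer queue.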
    class Command : public nsRunnable
    {
    public:
      Command(AudioNodeStream* aStream,
              InputChannels& aInputChannels,
              double aPlaybackTime,
              bool aNullInput)
        : mStream(aStream)
        , mPlaybackTime(aPlaybackTime)
        , mNullInput(aNullInput)
      {
        mInputChannels.SetLength(aInputChannels.Length());
        if (!aNullInput) {
          for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
            mInputChannels[i] = aInputChannels[i].forget();
          }
        }
      }

      NS_IMETHODIMP Run()
      {
        // If it's not safe to run scripts right now, schedule this to run later.
        if (!nsContentUtils::IsSafeToRunScript()) {
          nsContentUtils::AddScriptRunner(this);
          return NS_OK;
        }

        nsRefPtr<ScriptProcessorNode> node;
        {
          // No need to keep holding the lock for the whole duration of this
          // function, since we're holding a strong reference to it, so if
          // we can obtain the reference, we will hold the node alive in
          // this function.
          MutexAutoLock lock(mStream->Engine()->NodeMutex());
          node = static_cast<ScriptProcessorNode*>(mStream->Engine()->Node());
        }
        if (!node || !node->Context()) {
          return NS_OK;
        }

        AutoPushJSContext cx(node->Context()->GetJSContext());
        if (cx) {
          // Create the input buffer.
          nsRefPtr<AudioBuffer> inputBuffer;
          if (!mNullInput) {
            ErrorResult rv;
            inputBuffer =
              AudioBuffer::Create(node->Context(), mInputChannels.Length(),
                                  node->BufferSize(),
                                  node->Context()->SampleRate(), cx, rv);
            if (rv.Failed()) {
              return NS_OK;
            }
            // Put the channel data inside it.
            for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
              inputBuffer->SetRawChannelContents(cx, i, mInputChannels[i]);
            }
          }

          // Ask content to produce data in the output buffer.
          // Note that we always avoid creating the output buffer here, and we
          // try to avoid creating the input buffer as well. The
          // AudioProcessingEvent class knows how to lazily create them if
          // needed once the script tries to access them. Otherwise, we may be
          // able to get away without creating them!
          nsRefPtr<AudioProcessingEvent> event = new AudioProcessingEvent(node, nullptr, nullptr);
          event->InitEvent(inputBuffer,
                           mInputChannels.Length(),
                           mPlaybackTime);
          node->DispatchTrustedEvent(event);

          // Steal the output buffers if they have been set. Don't create a
          // buffer if it hasn't been used to return output;
          // FinishProducingOutputBuffer() will optimize output = null.
          // GetThreadSharedChannelsForRate() may also return null after OOM.
          nsRefPtr<ThreadSharedFloatArrayBufferList> output;
          if (event->HasOutputBuffer()) {
            ErrorResult rv;
            AudioBuffer* buffer = event->GetOutputBuffer(rv);
            // HasOutputBuffer() returning true means that GetOutputBuffer()
            // will not fail.
            MOZ_ASSERT(!rv.Failed());
            output = buffer->GetThreadSharedChannelsForRate(cx);
          }

          // Append it to our output buffer queue.
          node->GetSharedBuffers()->FinishProducingOutputBuffer(output, node->BufferSize());
        }
        return NS_OK;
      }

    private:
      nsRefPtr<AudioNodeStream> mStream;
      InputChannels mInputChannels;
      double mPlaybackTime;
      bool mNullInput;
    };

    NS_DispatchToMainThread(new Command(aStream, mInputChannels,
                                        playbackTime,
                                        !mSeenNonSilenceInput));
  }

  friend class ScriptProcessorNode;

  SharedBuffers* mSharedBuffers;
  AudioNodeStream* mSource;
  AudioNodeStream* mDestination;
  InputChannels mInputChannels;
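  // The number of frames to accumulate before an onaudioprocess event is
  // dispatched.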
  const uint32_t mBufferSize;
  // The write index into the current input buffer.
  uint32_t mInputWriteIndex;
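  // True if any block recorded into the current input buffer was non-silent.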
  bool mSeenNonSilenceInput;
};

ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
                                         uint32_t aBufferSize,
                                         uint32_t aNumberOfInputChannels,
                                         uint32_t aNumberOfOutputChannels)
  : AudioNode(aContext,
              aNumberOfInputChannels,
              mozilla::dom::ChannelCountMode::Explicit,
              mozilla::dom::ChannelInterpretation::Speakers)
  , mSharedBuffers(new SharedBuffers(aContext->SampleRate()))
  , mBufferSize(aBufferSize ?
                  aBufferSize : // respect what the web developer requested
                  4096)         // choose our own buffer size -- 4096 frames for now
  , mNumberOfOutputChannels(aNumberOfOutputChannels)
{
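  // Per the Web Audio spec, an explicit bufferSize must be a power of two
  // between 256 and 16384; all of those are multiples of WEBAUDIO_BLOCK_SIZE
  // (128), which is what the engine actually relies on.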
  MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size");
  ScriptProcessorNodeEngine* engine =
    new ScriptProcessorNodeEngine(this,
                                  aContext->Destination(),
                                  BufferSize(),
                                  aNumberOfInputChannels);
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
  engine->SetSourceStream(static_cast<AudioNodeStream*>(mStream.get()));
}

ScriptProcessorNode::~ScriptProcessorNode()
{
}

size_t
ScriptProcessorNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  amount += mSharedBuffers->SizeOfIncludingThis(aMallocSizeOf);
  return amount;
}

size_t
ScriptProcessorNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
ScriptProcessorNode::WrapObject(JSContext* aCx)
{
  return ScriptProcessorNodeBinding::Wrap(aCx, this);
}

} // namespace dom
} // namespace mozilla