|
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
|
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
|
3 /* This Source Code Form is subject to the terms of the Mozilla Public |
|
4 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
6 |
|
7 #include "AudioContext.h" |
|
8 |
|
9 #include "nsPIDOMWindow.h" |
|
10 #include "mozilla/ErrorResult.h" |
|
11 #include "mozilla/dom/AnalyserNode.h" |
|
12 #include "mozilla/dom/AudioContextBinding.h" |
|
13 #include "mozilla/dom/HTMLMediaElement.h" |
|
14 #include "mozilla/dom/OfflineAudioContextBinding.h" |
|
15 #include "mozilla/dom/OwningNonNull.h" |
|
16 #include "MediaStreamGraph.h" |
|
17 #include "AudioDestinationNode.h" |
|
18 #include "AudioBufferSourceNode.h" |
|
19 #include "AudioBuffer.h" |
|
20 #include "GainNode.h" |
|
21 #include "MediaElementAudioSourceNode.h" |
|
22 #include "MediaStreamAudioSourceNode.h" |
|
23 #include "DelayNode.h" |
|
24 #include "PannerNode.h" |
|
25 #include "AudioListener.h" |
|
26 #include "DynamicsCompressorNode.h" |
|
27 #include "BiquadFilterNode.h" |
|
28 #include "ScriptProcessorNode.h" |
|
29 #include "ChannelMergerNode.h" |
|
30 #include "ChannelSplitterNode.h" |
|
31 #include "MediaStreamAudioDestinationNode.h" |
|
32 #include "WaveShaperNode.h" |
|
33 #include "PeriodicWave.h" |
|
34 #include "ConvolverNode.h" |
|
35 #include "OscillatorNode.h" |
|
36 #include "nsNetUtil.h" |
|
37 #include "AudioStream.h" |
|
38 |
|
39 namespace mozilla { |
|
40 namespace dom { |
|
41 |
|
// Cycle-collection participation for AudioContext.
NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)

NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
// Only a context that has not started (per the traverse block below, that
// means an offline context) may drop its active-node references during
// unlink; a started context keeps them alive until Shutdown() clears them.
if (!tmp->mIsStarted) {
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
}
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper)
|
51 |
|
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
                                                  DOMEventTargetHelper)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
// mActiveNodes is only reported to the cycle collector while the context
// has not started; the assertion documents that only offline contexts can
// be in that state (online contexts are started at construction).
if (!tmp->mIsStarted) {
  MOZ_ASSERT(tmp->mIsOffline,
             "Online AudioContexts should always be started");
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes)
}
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
|
62 |
|
// Refcounting and QueryInterface boilerplate, all inherited from
// DOMEventTargetHelper.
NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioContext)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
|
67 |
|
68 static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate) |
|
69 { |
|
70 if (aIsOffline) { |
|
71 return aSampleRate; |
|
72 } else { |
|
73 AudioStream::InitPreferredSampleRate(); |
|
74 return static_cast<float>(AudioStream::PreferredSampleRate()); |
|
75 } |
|
76 } |
|
77 |
|
// Shared constructor for both online and offline contexts.
// aNumberOfChannels/aLength/aSampleRate are only meaningful for offline
// rendering; online contexts derive their rate from the audio backend
// (see GetSampleRateForAudioContext).
AudioContext::AudioContext(nsPIDOMWindow* aWindow,
                           bool aIsOffline,
                           AudioChannel aChannel,
                           uint32_t aNumberOfChannels,
                           uint32_t aLength,
                           float aSampleRate)
  : DOMEventTargetHelper(aWindow)
  , mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate))
  , mNumberOfChannels(aNumberOfChannels)
  , mNodeCount(0)
  , mIsOffline(aIsOffline)
  // Online contexts count as "started" from birth; offline contexts only
  // start when StartRendering() is called.
  , mIsStarted(!aIsOffline)
  , mIsShutDown(false)
{
  aWindow->AddAudioContext(this);

  // Note: AudioDestinationNode needs an AudioContext that must already be
  // bound to the window.
  mDestination = new AudioDestinationNode(this, aIsOffline, aChannel,
                                          aNumberOfChannels, aLength, aSampleRate);
  // We skip calling SetIsOnlyNodeForContext during mDestination's constructor,
  // because we can only call SetIsOnlyNodeForContext after mDestination has
  // been set up.
  mDestination->SetIsOnlyNodeForContext(true);
}
|
103 |
|
AudioContext::~AudioContext()
{
  // The owning window may already be gone during shutdown; only deregister
  // from it when it is still alive.
  nsPIDOMWindow* window = GetOwner();
  if (window) {
    window->RemoveAudioContext(this);
  }

  // Balances the RegisterWeakMemoryReporter() calls made in the static
  // Constructor() overloads.
  UnregisterWeakMemoryReporter(this);
}
|
113 |
|
114 JSObject* |
|
115 AudioContext::WrapObject(JSContext* aCx) |
|
116 { |
|
117 if (mIsOffline) { |
|
118 return OfflineAudioContextBinding::Wrap(aCx, this); |
|
119 } else { |
|
120 return AudioContextBinding::Wrap(aCx, this); |
|
121 } |
|
122 } |
|
123 |
|
124 /* static */ already_AddRefed<AudioContext> |
|
125 AudioContext::Constructor(const GlobalObject& aGlobal, |
|
126 ErrorResult& aRv) |
|
127 { |
|
128 nsCOMPtr<nsPIDOMWindow> window = do_QueryInterface(aGlobal.GetAsSupports()); |
|
129 if (!window) { |
|
130 aRv.Throw(NS_ERROR_FAILURE); |
|
131 return nullptr; |
|
132 } |
|
133 |
|
134 nsRefPtr<AudioContext> object = new AudioContext(window, false); |
|
135 |
|
136 RegisterWeakMemoryReporter(object); |
|
137 |
|
138 return object.forget(); |
|
139 } |
|
140 |
|
141 /* static */ already_AddRefed<AudioContext> |
|
142 AudioContext::Constructor(const GlobalObject& aGlobal, |
|
143 AudioChannel aChannel, |
|
144 ErrorResult& aRv) |
|
145 { |
|
146 nsCOMPtr<nsPIDOMWindow> window = do_QueryInterface(aGlobal.GetAsSupports()); |
|
147 if (!window) { |
|
148 aRv.Throw(NS_ERROR_FAILURE); |
|
149 return nullptr; |
|
150 } |
|
151 |
|
152 nsRefPtr<AudioContext> object = new AudioContext(window, false, aChannel); |
|
153 |
|
154 RegisterWeakMemoryReporter(object); |
|
155 |
|
156 return object.forget(); |
|
157 } |
|
158 |
|
159 /* static */ already_AddRefed<AudioContext> |
|
160 AudioContext::Constructor(const GlobalObject& aGlobal, |
|
161 uint32_t aNumberOfChannels, |
|
162 uint32_t aLength, |
|
163 float aSampleRate, |
|
164 ErrorResult& aRv) |
|
165 { |
|
166 nsCOMPtr<nsPIDOMWindow> window = do_QueryInterface(aGlobal.GetAsSupports()); |
|
167 if (!window) { |
|
168 aRv.Throw(NS_ERROR_FAILURE); |
|
169 return nullptr; |
|
170 } |
|
171 |
|
172 if (aNumberOfChannels == 0 || |
|
173 aNumberOfChannels > WebAudioUtils::MaxChannelCount || |
|
174 aLength == 0 || |
|
175 aSampleRate < WebAudioUtils::MinSampleRate || |
|
176 aSampleRate > WebAudioUtils::MaxSampleRate) { |
|
177 // The DOM binding protects us against infinity and NaN |
|
178 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
|
179 return nullptr; |
|
180 } |
|
181 |
|
182 nsRefPtr<AudioContext> object = new AudioContext(window, |
|
183 true, |
|
184 AudioChannel::Normal, |
|
185 aNumberOfChannels, |
|
186 aLength, |
|
187 aSampleRate); |
|
188 |
|
189 RegisterWeakMemoryReporter(object); |
|
190 |
|
191 return object.forget(); |
|
192 } |
|
193 |
|
194 already_AddRefed<AudioBufferSourceNode> |
|
195 AudioContext::CreateBufferSource() |
|
196 { |
|
197 nsRefPtr<AudioBufferSourceNode> bufferNode = |
|
198 new AudioBufferSourceNode(this); |
|
199 return bufferNode.forget(); |
|
200 } |
|
201 |
|
202 already_AddRefed<AudioBuffer> |
|
203 AudioContext::CreateBuffer(JSContext* aJSContext, uint32_t aNumberOfChannels, |
|
204 uint32_t aLength, float aSampleRate, |
|
205 ErrorResult& aRv) |
|
206 { |
|
207 if (!aNumberOfChannels) { |
|
208 aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); |
|
209 return nullptr; |
|
210 } |
|
211 |
|
212 return AudioBuffer::Create(this, aNumberOfChannels, aLength, |
|
213 aSampleRate, aJSContext, aRv); |
|
214 } |
|
215 |
|
namespace {

// A ScriptProcessorNode buffer size is valid when it is 0 (meaning "let the
// implementation choose") or a power of two in the inclusive range
// [256, 16384] — i.e. 256, 512, 1024, 2048, 4096, 8192 or 16384.
bool IsValidBufferSize(uint32_t aBufferSize) {
  if (aBufferSize == 0) {
    return true;
  }
  const bool powerOfTwo = (aBufferSize & (aBufferSize - 1)) == 0;
  return powerOfTwo && aBufferSize >= 256 && aBufferSize <= 16384;
}

}
|
235 |
|
236 already_AddRefed<MediaStreamAudioDestinationNode> |
|
237 AudioContext::CreateMediaStreamDestination(ErrorResult& aRv) |
|
238 { |
|
239 if (mIsOffline) { |
|
240 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
|
241 return nullptr; |
|
242 } |
|
243 |
|
244 nsRefPtr<MediaStreamAudioDestinationNode> node = |
|
245 new MediaStreamAudioDestinationNode(this); |
|
246 return node.forget(); |
|
247 } |
|
248 |
|
249 already_AddRefed<ScriptProcessorNode> |
|
250 AudioContext::CreateScriptProcessor(uint32_t aBufferSize, |
|
251 uint32_t aNumberOfInputChannels, |
|
252 uint32_t aNumberOfOutputChannels, |
|
253 ErrorResult& aRv) |
|
254 { |
|
255 if ((aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) || |
|
256 aNumberOfInputChannels > WebAudioUtils::MaxChannelCount || |
|
257 aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount || |
|
258 !IsValidBufferSize(aBufferSize)) { |
|
259 aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); |
|
260 return nullptr; |
|
261 } |
|
262 |
|
263 nsRefPtr<ScriptProcessorNode> scriptProcessor = |
|
264 new ScriptProcessorNode(this, aBufferSize, aNumberOfInputChannels, |
|
265 aNumberOfOutputChannels); |
|
266 return scriptProcessor.forget(); |
|
267 } |
|
268 |
|
269 already_AddRefed<AnalyserNode> |
|
270 AudioContext::CreateAnalyser() |
|
271 { |
|
272 nsRefPtr<AnalyserNode> analyserNode = new AnalyserNode(this); |
|
273 return analyserNode.forget(); |
|
274 } |
|
275 |
|
276 already_AddRefed<MediaElementAudioSourceNode> |
|
277 AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement, |
|
278 ErrorResult& aRv) |
|
279 { |
|
280 if (mIsOffline) { |
|
281 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
|
282 return nullptr; |
|
283 } |
|
284 nsRefPtr<DOMMediaStream> stream = aMediaElement.MozCaptureStream(aRv); |
|
285 if (aRv.Failed()) { |
|
286 return nullptr; |
|
287 } |
|
288 nsRefPtr<MediaElementAudioSourceNode> mediaElementAudioSourceNode = |
|
289 new MediaElementAudioSourceNode(this, stream); |
|
290 return mediaElementAudioSourceNode.forget(); |
|
291 } |
|
292 |
|
293 already_AddRefed<MediaStreamAudioSourceNode> |
|
294 AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream, |
|
295 ErrorResult& aRv) |
|
296 { |
|
297 if (mIsOffline) { |
|
298 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
|
299 return nullptr; |
|
300 } |
|
301 nsRefPtr<MediaStreamAudioSourceNode> mediaStreamAudioSourceNode = |
|
302 new MediaStreamAudioSourceNode(this, &aMediaStream); |
|
303 return mediaStreamAudioSourceNode.forget(); |
|
304 } |
|
305 |
|
306 already_AddRefed<GainNode> |
|
307 AudioContext::CreateGain() |
|
308 { |
|
309 nsRefPtr<GainNode> gainNode = new GainNode(this); |
|
310 return gainNode.forget(); |
|
311 } |
|
312 |
|
313 already_AddRefed<WaveShaperNode> |
|
314 AudioContext::CreateWaveShaper() |
|
315 { |
|
316 nsRefPtr<WaveShaperNode> waveShaperNode = new WaveShaperNode(this); |
|
317 return waveShaperNode.forget(); |
|
318 } |
|
319 |
|
320 already_AddRefed<DelayNode> |
|
321 AudioContext::CreateDelay(double aMaxDelayTime, ErrorResult& aRv) |
|
322 { |
|
323 if (aMaxDelayTime > 0. && aMaxDelayTime < 180.) { |
|
324 nsRefPtr<DelayNode> delayNode = new DelayNode(this, aMaxDelayTime); |
|
325 return delayNode.forget(); |
|
326 } |
|
327 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
|
328 return nullptr; |
|
329 } |
|
330 |
|
331 already_AddRefed<PannerNode> |
|
332 AudioContext::CreatePanner() |
|
333 { |
|
334 nsRefPtr<PannerNode> pannerNode = new PannerNode(this); |
|
335 mPannerNodes.PutEntry(pannerNode); |
|
336 return pannerNode.forget(); |
|
337 } |
|
338 |
|
339 already_AddRefed<ConvolverNode> |
|
340 AudioContext::CreateConvolver() |
|
341 { |
|
342 nsRefPtr<ConvolverNode> convolverNode = new ConvolverNode(this); |
|
343 return convolverNode.forget(); |
|
344 } |
|
345 |
|
346 already_AddRefed<ChannelSplitterNode> |
|
347 AudioContext::CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv) |
|
348 { |
|
349 if (aNumberOfOutputs == 0 || |
|
350 aNumberOfOutputs > WebAudioUtils::MaxChannelCount) { |
|
351 aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); |
|
352 return nullptr; |
|
353 } |
|
354 |
|
355 nsRefPtr<ChannelSplitterNode> splitterNode = |
|
356 new ChannelSplitterNode(this, aNumberOfOutputs); |
|
357 return splitterNode.forget(); |
|
358 } |
|
359 |
|
360 already_AddRefed<ChannelMergerNode> |
|
361 AudioContext::CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv) |
|
362 { |
|
363 if (aNumberOfInputs == 0 || |
|
364 aNumberOfInputs > WebAudioUtils::MaxChannelCount) { |
|
365 aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); |
|
366 return nullptr; |
|
367 } |
|
368 |
|
369 nsRefPtr<ChannelMergerNode> mergerNode = |
|
370 new ChannelMergerNode(this, aNumberOfInputs); |
|
371 return mergerNode.forget(); |
|
372 } |
|
373 |
|
374 already_AddRefed<DynamicsCompressorNode> |
|
375 AudioContext::CreateDynamicsCompressor() |
|
376 { |
|
377 nsRefPtr<DynamicsCompressorNode> compressorNode = |
|
378 new DynamicsCompressorNode(this); |
|
379 return compressorNode.forget(); |
|
380 } |
|
381 |
|
382 already_AddRefed<BiquadFilterNode> |
|
383 AudioContext::CreateBiquadFilter() |
|
384 { |
|
385 nsRefPtr<BiquadFilterNode> filterNode = |
|
386 new BiquadFilterNode(this); |
|
387 return filterNode.forget(); |
|
388 } |
|
389 |
|
390 already_AddRefed<OscillatorNode> |
|
391 AudioContext::CreateOscillator() |
|
392 { |
|
393 nsRefPtr<OscillatorNode> oscillatorNode = |
|
394 new OscillatorNode(this); |
|
395 return oscillatorNode.forget(); |
|
396 } |
|
397 |
|
398 already_AddRefed<PeriodicWave> |
|
399 AudioContext::CreatePeriodicWave(const Float32Array& aRealData, |
|
400 const Float32Array& aImagData, |
|
401 ErrorResult& aRv) |
|
402 { |
|
403 aRealData.ComputeLengthAndData(); |
|
404 aImagData.ComputeLengthAndData(); |
|
405 |
|
406 if (aRealData.Length() != aImagData.Length() || |
|
407 aRealData.Length() == 0 || |
|
408 aRealData.Length() > 4096) { |
|
409 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
|
410 return nullptr; |
|
411 } |
|
412 |
|
413 nsRefPtr<PeriodicWave> periodicWave = |
|
414 new PeriodicWave(this, aRealData.Data(), aImagData.Data(), |
|
415 aImagData.Length(), aRv); |
|
416 if (aRv.Failed()) { |
|
417 return nullptr; |
|
418 } |
|
419 return periodicWave.forget(); |
|
420 } |
|
421 |
|
// Return the context's AudioListener, creating it lazily on first access.
AudioListener*
AudioContext::Listener()
{
  if (!mListener) {
    mListener = new AudioListener(this);
  }
  return mListener;
}
|
430 |
|
// Asynchronously decode the audio data in aBuffer, invoking
// aSuccessCallback with the resulting AudioBuffer or the optional
// aFailureCallback on error. Takes ownership of aBuffer's contents,
// leaving the JS-visible ArrayBuffer neutered.
void
AudioContext::DecodeAudioData(const ArrayBuffer& aBuffer,
                              DecodeSuccessCallback& aSuccessCallback,
                              const Optional<OwningNonNull<DecodeErrorCallback> >& aFailureCallback)
{
  AutoJSAPI jsapi;
  JSContext* cx = jsapi.cx();
  JSAutoCompartment ac(cx, aBuffer.Obj());

  aBuffer.ComputeLengthAndData();

  // Neuter the array buffer: steal its contents so content scripts cannot
  // mutate the bytes while the decoder works on them.
  size_t length = aBuffer.Length();
  JS::RootedObject obj(cx, aBuffer.Obj());

  uint8_t* data = static_cast<uint8_t*>(JS_StealArrayBufferContents(cx, obj));

  // Sniff the content of the media.
  // Failed type sniffing will be handled by AsyncDecodeMedia.
  nsAutoCString contentType;
  NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr, data, length, contentType);

  // The failure callback is optional in the WebIDL signature.
  nsRefPtr<DecodeErrorCallback> failureCallback;
  if (aFailureCallback.WasPassed()) {
    failureCallback = &aFailureCallback.Value();
  }
  nsRefPtr<WebAudioDecodeJob> job(
    new WebAudioDecodeJob(contentType, this,
                          &aSuccessCallback, failureCallback));
  mDecoder.AsyncDecodeMedia(contentType.get(), data, length, *job);
  // Transfer the ownership to mDecodeJobs
  mDecodeJobs.AppendElement(job);
}
|
464 |
|
// Drop a finished/canceled decode job; releases the reference that
// DecodeAudioData() stored in mDecodeJobs.
void
AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob)
{
  mDecodeJobs.RemoveElement(aDecodeJob);
}
|
470 |
|
471 void |
|
472 AudioContext::RegisterActiveNode(AudioNode* aNode) |
|
473 { |
|
474 if (!mIsShutDown) { |
|
475 mActiveNodes.PutEntry(aNode); |
|
476 } |
|
477 } |
|
478 |
|
// Release the strong reference taken by RegisterActiveNode(). Removing a
// node that was never registered is a no-op.
void
AudioContext::UnregisterActiveNode(AudioNode* aNode)
{
  mActiveNodes.RemoveEntry(aNode);
}
|
484 |
|
// Called when an AudioBufferSourceNode goes away. aNode itself is unused
// here; the hook only triggers a refresh of every PannerNode's connected
// sources, since the departed node may have fed one of them.
void
AudioContext::UnregisterAudioBufferSourceNode(AudioBufferSourceNode* aNode)
{
  UpdatePannerSource();
}
|
490 |
|
491 void |
|
492 AudioContext::UnregisterPannerNode(PannerNode* aNode) |
|
493 { |
|
494 mPannerNodes.RemoveEntry(aNode); |
|
495 if (mListener) { |
|
496 mListener->UnregisterPannerNode(aNode); |
|
497 } |
|
498 } |
|
499 |
|
// PLDHash enumeration callback used by UpdatePannerSource(): ask each
// tracked PannerNode to recompute its connected sources. Always continues
// the enumeration. aData is unused.
static PLDHashOperator
FindConnectedSourcesOn(nsPtrHashKey<PannerNode>* aEntry, void* aData)
{
  aEntry->GetKey()->FindConnectedSources();
  return PL_DHASH_NEXT;
}
|
506 |
|
// Refresh the connected-source set of every live PannerNode (see
// FindConnectedSourcesOn above).
void
AudioContext::UpdatePannerSource()
{
  mPannerNodes.EnumerateEntries(FindConnectedSourcesOn, nullptr);
}
|
512 |
|
513 uint32_t |
|
514 AudioContext::MaxChannelCount() const |
|
515 { |
|
516 return mIsOffline ? mNumberOfChannels : AudioStream::MaxNumberOfChannels(); |
|
517 } |
|
518 |
|
// The MediaStreamGraph this context renders into, reached through the
// destination node's stream. Assumes the destination and its stream exist.
MediaStreamGraph*
AudioContext::Graph() const
{
  return Destination()->Stream()->Graph();
}
|
524 |
|
525 MediaStream* |
|
526 AudioContext::DestinationStream() const |
|
527 { |
|
528 if (Destination()) { |
|
529 return Destination()->Stream(); |
|
530 } |
|
531 return nullptr; |
|
532 } |
|
533 |
|
// The context's playback clock in seconds: the destination stream's
// current media time plus any extra time accumulated by the destination
// (see ExtraCurrentTime()).
double
AudioContext::CurrentTime() const
{
  return MediaTimeToSeconds(Destination()->Stream()->GetCurrentTime()) +
      ExtraCurrentTime();
}
|
540 |
|
// Tear the context down when its window goes away. After this call no new
// active nodes are registered (see RegisterActiveNode).
void
AudioContext::Shutdown()
{
  mIsShutDown = true;

  // We mute rather than suspending, because the delay between the ::Shutdown
  // call and the CC would make us overbuffer in the MediaStreamGraph.
  // See bug 936784 for details.
  if (!mIsOffline) {
    Mute();
  }

  mDecoder.Shutdown();

  // Release references to active nodes.
  // Active AudioNodes don't unregister in destructors, at which point the
  // Node is already unregistered.
  mActiveNodes.Clear();

  // For offline contexts, we can destroy the MediaStreamGraph at this point.
  if (mIsOffline && mDestination) {
    mDestination->OfflineShutdown();
  }
}
|
565 |
|
566 void |
|
567 AudioContext::Suspend() |
|
568 { |
|
569 MediaStream* ds = DestinationStream(); |
|
570 if (ds) { |
|
571 ds->ChangeExplicitBlockerCount(1); |
|
572 } |
|
573 } |
|
574 |
|
575 void |
|
576 AudioContext::Resume() |
|
577 { |
|
578 MediaStream* ds = DestinationStream(); |
|
579 if (ds) { |
|
580 ds->ChangeExplicitBlockerCount(-1); |
|
581 } |
|
582 } |
|
583 |
|
// Track how many nodes belong to this context so the destination can be
// told when it is the only node left (it uses that to throttle itself).
void
AudioContext::UpdateNodeCount(int32_t aDelta)
{
  // Snapshot taken before applying the delta: the very first node added
  // (the destination itself) must not trigger the notification below.
  bool firstNode = mNodeCount == 0;
  mNodeCount += aDelta;
  MOZ_ASSERT(mNodeCount >= 0);
  // mDestinationNode may be null when we're destroying nodes unlinked by CC
  if (!firstNode && mDestination) {
    mDestination->SetIsOnlyNodeForContext(mNodeCount == 1);
  }
}
|
595 |
|
596 JSContext* |
|
597 AudioContext::GetJSContext() const |
|
598 { |
|
599 MOZ_ASSERT(NS_IsMainThread()); |
|
600 |
|
601 nsCOMPtr<nsIScriptGlobalObject> scriptGlobal = |
|
602 do_QueryInterface(GetParentObject()); |
|
603 if (!scriptGlobal) { |
|
604 return nullptr; |
|
605 } |
|
606 nsIScriptContext* scriptContext = scriptGlobal->GetContext(); |
|
607 if (!scriptContext) { |
|
608 return nullptr; |
|
609 } |
|
610 return scriptContext->GetNativeContext(); |
|
611 } |
|
612 |
|
// Kick off offline rendering. May only be called once per
// OfflineAudioContext; a second call throws InvalidStateError.
void
AudioContext::StartRendering(ErrorResult& aRv)
{
  MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext");
  if (mIsStarted) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }

  mIsStarted = true;
  mDestination->StartRendering();
}
|
625 |
|
// Silence the (real-time) destination's output; used by Shutdown().
// Only meaningful for online contexts.
void
AudioContext::Mute() const
{
  MOZ_ASSERT(!mIsOffline);
  if (mDestination) {
    mDestination->Mute();
  }
}
|
634 |
|
// Undo Mute() on the (real-time) destination's output.
// Only meaningful for online contexts.
void
AudioContext::Unmute() const
{
  MOZ_ASSERT(!mIsOffline);
  if (mDestination) {
    mDestination->Unmute();
  }
}
|
643 |
|
// The audio channel type is owned by the destination node; forward to it.
AudioChannel
AudioContext::MozAudioChannelType() const
{
  return mDestination->MozAudioChannelType();
}
|
649 |
|
// Forward the channel-type setter to the destination node; failures are
// reported through aRv.
void
AudioContext::SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv)
{
  mDestination->SetMozAudioChannelType(aValue, aRv);
}
|
655 |
|
656 size_t |
|
657 AudioContext::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const |
|
658 { |
|
659 // AudioNodes are tracked separately because we do not want the AudioContext |
|
660 // to track all of the AudioNodes it creates, so we wouldn't be able to |
|
661 // traverse them from here. |
|
662 |
|
663 size_t amount = aMallocSizeOf(this); |
|
664 if (mListener) { |
|
665 amount += mListener->SizeOfIncludingThis(aMallocSizeOf); |
|
666 } |
|
667 amount += mDecoder.SizeOfExcludingThis(aMallocSizeOf); |
|
668 amount += mDecodeJobs.SizeOfExcludingThis(aMallocSizeOf); |
|
669 for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) { |
|
670 amount += mDecodeJobs[i]->SizeOfExcludingThis(aMallocSizeOf); |
|
671 } |
|
672 amount += mActiveNodes.SizeOfExcludingThis(nullptr, aMallocSizeOf); |
|
673 amount += mPannerNodes.SizeOfExcludingThis(nullptr, aMallocSizeOf); |
|
674 return amount; |
|
675 } |
|
676 |
|
// nsIMemoryReporter entry point: report this context's heap usage under
// explicit/webaudio/audiocontext.
NS_IMETHODIMP
AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData)
{
  int64_t amount = SizeOfIncludingThis(MallocSizeOf);
  return MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
                            amount, "Memory used by AudioContext objects (Web Audio).");
}
|
685 |
|
// Extra time, in seconds, that the destination adds on top of the stream
// clock; folded into CurrentTime().
double
AudioContext::ExtraCurrentTime() const
{
  return mDestination->ExtraCurrentTime();
}
|
691 |
|
692 } |
|
693 } |