/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioNodeStream.h"

#include "MediaStreamGraphImpl.h"
#include "AudioNodeEngine.h"
#include "ThreeDPoint.h"
#include "AudioChannelFormat.h"
#include "AudioParamTimeline.h"
#include "AudioContext.h"

using namespace mozilla::dom;

namespace mozilla {

/**
 * An AudioNodeStream produces a single audio track with ID
 * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate
 * for regular audio contexts, and the rate requested by the web content
 * for offline audio contexts.
 * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
 * Note: This must be a different value than MEDIA_STREAM_DEST_TRACK_ID
 */

AudioNodeStream::~AudioNodeStream()
{
  MOZ_COUNT_DTOR(AudioNodeStream);
}

size_t
AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = 0;

  // Not reported:
  // - mEngine

  amount += ProcessedMediaStream::SizeOfExcludingThis(aMallocSizeOf);
  amount += mLastChunks.SizeOfExcludingThis(aMallocSizeOf);
  for (size_t i = 0; i < mLastChunks.Length(); i++) {
    // NB: This is currently unshared only as there are instances of
    //     double reporting in DMD otherwise.
    amount += mLastChunks[i].SizeOfExcludingThisIfUnshared(aMallocSizeOf);
  }

  return amount;
}

size_t
AudioNodeStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

void
AudioNodeStream::SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                               AudioNodeSizes& aUsage) const
{
  // Explicitly separate out the stream memory.
  aUsage.mStream = SizeOfIncludingThis(aMallocSizeOf);

  if (mEngine) {
    // This will fill out the rest of |aUsage|.
    mEngine->SizeOfIncludingThis(aMallocSizeOf, aUsage);
  }
}
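
// Note on the setters below: each Set*Parameter() call runs on the main
// thread, wraps its arguments in a ControlMessage subclass, and appends the
// message to the graph via GraphImpl()->AppendMessage(). The engine/stream
// state is only mutated later, on the MediaStreamGraph thread, when the
// message's Run() method executes.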
void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                                        double aStreamTime)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
            double aStreamTime)
      : ControlMessage(aStream), mStreamTime(aStreamTime),
        mRelativeToStream(aRelativeToStream), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    }
    double mStreamTime;
    MediaStream* mRelativeToStream;
    uint32_t mIndex;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex,
                                         aContext->DestinationStream(),
                                         aContext->DOMTimeToStreamTime(aStreamTime)));
}

void
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                            double aStreamTime)
{
  TrackTicks ticks = TicksFromDestinationTime(aRelativeToStream, aStreamTime);
  mEngine->SetStreamTimeParameter(aIndex, ticks);
}

void
AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetDoubleParameter(mIndex, mValue);
    }
    double mValue;
    uint32_t mIndex;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}

void
AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetInt32Parameter(mIndex, mValue);
    }
    int32_t mValue;
    uint32_t mIndex;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}

void
AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
                                      const AudioParamTimeline& aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioParamTimeline& aValue)
      : ControlMessage(aStream),
        mValue(aValue),
        mSampleRate(aStream->SampleRate()),
        mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetTimelineParameter(mIndex, mValue, mSampleRate);
    }
    AudioParamTimeline mValue;
    TrackRate mSampleRate;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}

void
AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetThreeDPointParameter(mIndex, mValue);
    }
    ThreeDPoint mValue;
    uint32_t mIndex;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}

void
AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer)
      : ControlMessage(aStream), mBuffer(aBuffer) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetBuffer(mBuffer.forget());
    }
    nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aBuffer));
}

void
AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            nsTArray<float>& aData)
      : ControlMessage(aStream)
    {
      mData.SwapElements(aData);
    }
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData);
    }
    nsTArray<float> mData;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aData));
}

void
AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                            ChannelCountMode aChannelCountMode,
                                            ChannelInterpretation aChannelInterpretation)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            uint32_t aNumberOfChannels,
            ChannelCountMode aChannelCountMode,
            ChannelInterpretation aChannelInterpretation)
      : ControlMessage(aStream),
        mNumberOfChannels(aNumberOfChannels),
        mChannelCountMode(aChannelCountMode),
        mChannelInterpretation(aChannelInterpretation)
    {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
                                         mChannelInterpretation);
    }
    uint32_t mNumberOfChannels;
    ChannelCountMode mChannelCountMode;
    ChannelInterpretation mChannelInterpretation;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aNumberOfChannels,
                                         aChannelCountMode,
                                         aChannelInterpretation));
}

void
AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                                ChannelCountMode aChannelCountMode,
                                                ChannelInterpretation aChannelInterpretation)
{
  // Make sure that we're not clobbering any significant bits by fitting these
  // values in 16 bits.
  MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX);
  MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX);

  mNumberOfInputChannels = aNumberOfChannels;
  mChannelCountMode = aChannelCountMode;
  mChannelInterpretation = aChannelInterpretation;
}
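
// Example: with mNumberOfInputChannels == 2 and five channels computed from
// the inputs, Explicit yields 2, Clamped_max yields min(5, 2) == 2, and Max
// keeps the computed 5.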
uint32_t
AudioNodeStream::ComputedNumberOfChannels(uint32_t aInputChannelCount)
{
  switch (mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the channel count we've calculated from inputs, and just use
    // mNumberOfInputChannels.
    return mNumberOfInputChannels;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    return std::min(aInputChannelCount, mNumberOfInputChannels);
  default:
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    return aInputChannelCount;
  }
}
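
// Collects the most recent output chunk of every input connected to port
// aPortIndex, picks an output channel count for the mix, and accumulates the
// chunks into aTmpChunk (a single WEBAUDIO_BLOCK_SIZE block).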
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    // It is possible for mLastChunks to be empty here, because `a` might be
    // an AudioNodeStream that has not been scheduled yet, because it is
    // further down the graph _but_ as a connection to this node. Because we
    // enforce the presence of at least one DelayNode, with at least one block
    // of delay, and because the output of a DelayNode when it has been fed
    // less than `delayTime` amount of audio is silence, we can simply
    // continue here, because this input would not influence the output of
    // this node. Next iteration, a->mLastChunks.IsEmpty() will be false, and
    // everything will work as usual.
    if (a->mLastChunks.IsEmpty()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
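
// Mixes one input chunk into aBlock: the chunk is first up/down-mixed to
// aBlock's channel count, then each channel is copied (for the first input)
// or added (for later inputs), scaled by the chunk's mVolume.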
void
AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
                                      AudioChunk* aBlock,
                                      nsTArray<float>* aDownmixBuffer)
{
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
  UpMixDownMixChunk(&aChunk, aBlock->mChannelData.Length(), channels, *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* inputData = static_cast<const float*>(channels[c]);
    float* outputData = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c]));
    if (inputData) {
      if (aInputIndex == 0) {
        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
      } else {
        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
      }
    } else {
      if (aInputIndex == 0) {
        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
}
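
// Adjusts aChunk's channel list to aOutputChannelCount. With the Speakers
// interpretation this performs a proper up-mix or down-mix; otherwise missing
// channels are padded with silence and extra channels are dropped.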
void
AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const void*>& aOutputChannels,
                                   nsTArray<float>& aDownmixBuffer)
{
  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};

  aOutputChannels.AppendElements(aChunk->mChannelData);
  if (aOutputChannels.Length() < aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, nullptr);
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill up the remaining aOutputChannels by zeros
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(silenceChannel);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Drop the remaining aOutputChannels
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
                                       aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
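
// Per-block processing entry point, called on the graph thread: it obtains
// one input block per engine input port, runs the engine over them, and then
// appends the result to the output track via AdvanceOutputSegment().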
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  EnsureTrack(AUDIO_TRACK, mSampleRate);
  // No more tracks will be coming
  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);

  uint16_t outputCount = std::max(uint16_t(1), mEngine->OutputCount());
  mLastChunks.SetLength(outputCount);

  // Consider this stream blocked if it has already finished output. Normally
  // mBlocked would reflect this, but due to rounding errors our audio track may
  // appear to extend slightly beyond aFrom, so we might not be blocked yet.
  bool blocked = mFinished || mBlocked.GetAt(aFrom);
  // If the stream has finished at this time, it will be blocked.
  if (mMuted || blocked) {
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    OutputChunks inputChunks;
    inputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(inputChunks[i], i);
    }
    bool finished = false;
    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
      mEngine->ProcessBlock(this, inputChunks[0], &mLastChunks[0], &finished);
    } else {
      mEngine->ProcessBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
    }
    for (uint16_t i = 0; i < outputCount; ++i) {
      NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                   "Invalid WebAudio chunk size");
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
    }

    if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
      for (uint32_t i = 0; i < outputCount; ++i) {
        mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  if (!blocked) {
    // Don't output anything while blocked
    AdvanceOutputSegment();
    if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) {
      // This stream was finished the last time that we looked at it, and all
      // of the depending streams have finished their output as well, so now
      // it's time to mark this stream as finished.
      FinishOutput();
    }
  }
}
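
// Appends the first output chunk to the stream's audio track (real data only
// for EXTERNAL_STREAM kinds, null data otherwise) and notifies listeners of
// the newly queued audio.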
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK, mSampleRate);
  AudioSegment* segment = track->Get<AudioSegment>();

  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                mSampleRate, segment->GetDuration(), 0,
                                tmpSegment);
  }
}

TrackTicks
AudioNodeStream::GetCurrentPosition()
{
  return EnsureTrack(AUDIO_TRACK, mSampleRate)->Get<AudioSegment>()->GetDuration();
}

void
AudioNodeStream::FinishOutput()
{
  if (IsFinishedOnGraphThread()) {
    return;
  }

  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK, mSampleRate);
  track->SetEnded();
  FinishOnGraphThread();

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                mSampleRate,
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
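
// Converts a time in seconds relative to aDestination's stream into the
// equivalent time relative to this stream, going through graph time and
// preserving the fractional part that MediaTime cannot represent.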
double
AudioNodeStream::TimeFromDestinationTime(AudioNodeStream* aDestination,
                                         double aSeconds)
{
  MOZ_ASSERT(aDestination->SampleRate() == SampleRate());

  double destinationSeconds = std::max(0.0, aSeconds);
  StreamTime streamTime = SecondsToMediaTime(destinationSeconds);
  // MediaTime does not have the resolution of double
  double offset = destinationSeconds - MediaTimeToSeconds(streamTime);

  GraphTime graphTime = aDestination->StreamTimeToGraphTime(streamTime);
  StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
  double thisSeconds = MediaTimeToSeconds(thisStreamTime) + offset;
  MOZ_ASSERT(thisSeconds >= 0.0);
  return thisSeconds;
}

TrackTicks
AudioNodeStream::TicksFromDestinationTime(MediaStream* aDestination,
                                          double aSeconds)
{
  AudioNodeStream* destination = aDestination->AsAudioNodeStream();
  MOZ_ASSERT(destination);

  double thisSeconds = TimeFromDestinationTime(destination, aSeconds);
  // Round to nearest
  TrackTicks ticks = thisSeconds * SampleRate() + 0.5;
  return ticks;
}

double
AudioNodeStream::DestinationTimeFromTicks(AudioNodeStream* aDestination,
                                          TrackTicks aPosition)
{
  MOZ_ASSERT(SampleRate() == aDestination->SampleRate());

  StreamTime sourceTime = TicksToTimeRoundDown(SampleRate(), aPosition);
  GraphTime graphTime = StreamTimeToGraphTime(sourceTime);
  StreamTime destinationTime = aDestination->GraphTimeToStreamTimeOptimistic(graphTime);
  return MediaTimeToSeconds(destinationTime);
}

}