/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ScriptProcessorNode.h"
#include "mozilla/dom/ScriptProcessorNodeBinding.h"
#include "AudioBuffer.h"
#include "AudioDestinationNode.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioProcessingEvent.h"
#include "WebAudioUtils.h"
#include "nsCxPusher.h"
#include "mozilla/Mutex.h"
#include "mozilla/PodOperations.h"
#include <deque>

namespace mozilla {
namespace dom {

// The maximum latency, in seconds, that we can live with before dropping
// buffers.
static const float MAX_LATENCY_S = 0.5;

NS_IMPL_ISUPPORTS_INHERITED0(ScriptProcessorNode, AudioNode)

// This class manages a queue of output buffers shared between
// the main thread and the Media Stream Graph thread.
class SharedBuffers
{
private:
  class OutputQueue
  {
  public:
    explicit OutputQueue(const char* aName)
      : mMutex(aName)
    {}

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
    {
      mMutex.AssertCurrentThreadOwns();

      size_t amount = 0;
      for (size_t i = 0; i < mBufferList.size(); i++) {
        amount += mBufferList[i].SizeOfExcludingThis(aMallocSizeOf, false);
      }

      return amount;
    }

    Mutex& Lock() const { return const_cast<OutputQueue*>(this)->mMutex; }

    size_t ReadyToConsume() const
    {
      mMutex.AssertCurrentThreadOwns();
      MOZ_ASSERT(!NS_IsMainThread());
      return mBufferList.size();
    }

    // Produce one buffer
    AudioChunk& Produce()
    {
      mMutex.AssertCurrentThreadOwns();
      MOZ_ASSERT(NS_IsMainThread());
      mBufferList.push_back(AudioChunk());
      return mBufferList.back();
    }

    // Consumes one buffer.
    AudioChunk Consume()
    {
      mMutex.AssertCurrentThreadOwns();
      MOZ_ASSERT(!NS_IsMainThread());
      MOZ_ASSERT(ReadyToConsume() > 0);
      AudioChunk front = mBufferList.front();
      mBufferList.pop_front();
      return front;
    }

    // Empties the buffer queue.
    void Clear()
    {
      mMutex.AssertCurrentThreadOwns();
      mBufferList.clear();
    }

  private:
    typedef std::deque<AudioChunk> BufferList;

    // Synchronizes access to mBufferList. Note that it's the responsibility
    // of the callers to perform the required locking, and we assert that every
    // time we access mBufferList.
    Mutex mMutex;
    // The list representing the queue.
    BufferList mBufferList;
  };
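  // Illustrative sketch (not shipped code): the locking discipline that
  // OutputQueue expects of its callers, with `queue` as a hypothetical
  // instance. Every access, on either thread, must hold the queue's lock:
  //
  //   // main thread (producer)
  //   {
  //     MutexAutoLock lock(queue.Lock());
  //     AudioChunk& chunk = queue.Produce();
  //     chunk.SetNull(WEBAUDIO_BLOCK_SIZE);  // or fill in real channel data
  //   }
  //
  //   // graph thread (consumer)
  //   {
  //     MutexAutoLock lock(queue.Lock());
  //     if (queue.ReadyToConsume() > 0) {
  //       AudioChunk chunk = queue.Consume();
  //     }
  //   }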
public:
  SharedBuffers(float aSampleRate)
    : mOutputQueue("SharedBuffers::outputQueue")
    , mDelaySoFar(TRACK_TICKS_MAX)
    , mSampleRate(aSampleRate)
    , mLatency(0.0)
    , mDroppingBuffers(false)
  {
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
  {
    size_t amount = aMallocSizeOf(this);

    {
      MutexAutoLock lock(mOutputQueue.Lock());
      amount += mOutputQueue.SizeOfExcludingThis(aMallocSizeOf);
    }

    return amount;
  }

  // main thread
  void FinishProducingOutputBuffer(ThreadSharedFloatArrayBufferList* aBuffer,
                                   uint32_t aBufferSize)
  {
    MOZ_ASSERT(NS_IsMainThread());

    TimeStamp now = TimeStamp::Now();

    if (mLastEventTime.IsNull()) {
      mLastEventTime = now;
    } else {
      // When the main thread is blocked, and all the events are processed in a
      // burst after the main thread unblocks, the |(now - mLastEventTime)|
      // interval will be very short. |latency - bufferDuration| will be
      // negative, effectively moving mLatency back toward smaller and smaller
      // values, until it crosses zero, at which point we stop dropping buffers
      // and resume normal operation. This does not work if, at the same time,
      // the MSG thread was also slowed down, so if the latency on the MSG
      // thread is normal, and we are still dropping buffers, and mLatency is
      // still more than twice the duration of a buffer, we reset it and stop
      // dropping buffers.
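      // Worked example (illustrative numbers): with a 4096-frame buffer at
      // 44100 Hz, bufferDuration is ~0.093 s. If the main thread stalls and
      // the next event fires 0.7 s after the previous one, mLatency grows by
      // 0.7 - 0.093 ~= 0.607 s, crossing MAX_LATENCY_S (0.5 s), so we start
      // dropping. During the subsequent burst, events arrive almost
      // back-to-back, so each one subtracts ~0.093 s from mLatency until it
      // crosses zero and normal operation resumes.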
      float latency = (now - mLastEventTime).ToSeconds();
      float bufferDuration = aBufferSize / mSampleRate;
      mLatency += latency - bufferDuration;
      mLastEventTime = now;
      if (mLatency > MAX_LATENCY_S ||
          (mDroppingBuffers && mLatency > 0.0 &&
           fabs(latency - bufferDuration) < bufferDuration)) {
        mDroppingBuffers = true;
        return;
      } else {
        if (mDroppingBuffers) {
          mLatency = 0;
        }
        mDroppingBuffers = false;
      }
    }

    MutexAutoLock lock(mOutputQueue.Lock());
    for (uint32_t offset = 0; offset < aBufferSize; offset += WEBAUDIO_BLOCK_SIZE) {
      AudioChunk& chunk = mOutputQueue.Produce();
      if (aBuffer) {
        chunk.mDuration = WEBAUDIO_BLOCK_SIZE;
        chunk.mBuffer = aBuffer;
        chunk.mChannelData.SetLength(aBuffer->GetChannels());
        for (uint32_t i = 0; i < aBuffer->GetChannels(); ++i) {
          chunk.mChannelData[i] = aBuffer->GetData(i) + offset;
        }
        chunk.mVolume = 1.0f;
        chunk.mBufferFormat = AUDIO_FORMAT_FLOAT32;
      } else {
        chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  // graph thread
  AudioChunk GetOutputBuffer()
  {
    MOZ_ASSERT(!NS_IsMainThread());
    AudioChunk buffer;

    {
      MutexAutoLock lock(mOutputQueue.Lock());
      if (mOutputQueue.ReadyToConsume() > 0) {
        if (mDelaySoFar == TRACK_TICKS_MAX) {
          mDelaySoFar = 0;
        }
        buffer = mOutputQueue.Consume();
      } else {
        // If we're out of buffers to consume, just output silence
        buffer.SetNull(WEBAUDIO_BLOCK_SIZE);
        if (mDelaySoFar != TRACK_TICKS_MAX) {
          // Remember the delay that we just hit
          mDelaySoFar += WEBAUDIO_BLOCK_SIZE;
        }
      }
    }

    return buffer;
  }

  TrackTicks DelaySoFar() const
  {
    MOZ_ASSERT(!NS_IsMainThread());
    return mDelaySoFar == TRACK_TICKS_MAX ? 0 : mDelaySoFar;
  }

  void Reset()
  {
    MOZ_ASSERT(!NS_IsMainThread());
    mDelaySoFar = TRACK_TICKS_MAX;
    mLatency = 0.0f;
    {
      MutexAutoLock lock(mOutputQueue.Lock());
      mOutputQueue.Clear();
    }
    mLastEventTime = TimeStamp();
  }

private:
  OutputQueue mOutputQueue;
  // How much delay we've seen so far. This measures the amount of delay
  // caused by the main thread lagging behind in producing output buffers.
  // TRACK_TICKS_MAX means that we have not received our first buffer yet.
  TrackTicks mDelaySoFar;
  // The sample rate of the context.
  float mSampleRate;
  // This is the latency caused by the buffering. If this grows too high, we
  // will drop buffers until it is acceptable.
  float mLatency;
  // This is the time at which we last produced a buffer, to detect if the main
  // thread has been blocked.
  TimeStamp mLastEventTime;
  // True if we should be dropping buffers.
  bool mDroppingBuffers;
};
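// Illustrative round trip through SharedBuffers (a sketch, not shipped code;
// `sb` is a hypothetical instance):
//
//   // main thread: after the onaudioprocess handler returns, enqueue the
//   // processed buffer (or nullptr to enqueue silence), sliced internally
//   // into WEBAUDIO_BLOCK_SIZE chunks:
//   sb.FinishProducingOutputBuffer(threadSharedBuffer, bufferSize);
//
//   // graph thread: pull one chunk per iteration; while the main thread is
//   // still catching up this yields silence and grows DelaySoFar():
//   AudioChunk chunk = sb.GetOutputBuffer();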
class ScriptProcessorNodeEngine : public AudioNodeEngine
{
public:
  typedef nsAutoTArray<nsAutoArrayPtr<float>, 2> InputChannels;

  ScriptProcessorNodeEngine(ScriptProcessorNode* aNode,
                            AudioDestinationNode* aDestination,
                            uint32_t aBufferSize,
                            uint32_t aNumberOfInputChannels)
    : AudioNodeEngine(aNode)
    , mSharedBuffers(aNode->GetSharedBuffers())
    , mSource(nullptr)
    , mDestination(static_cast<AudioNodeStream*>(aDestination->Stream()))
    , mBufferSize(aBufferSize)
    , mInputWriteIndex(0)
    , mSeenNonSilenceInput(false)
  {
    mInputChannels.SetLength(aNumberOfInputChannels);
    AllocateInputBlock();
  }

  void SetSourceStream(AudioNodeStream* aSource)
  {
    mSource = aSource;
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished) MOZ_OVERRIDE
  {
    MutexAutoLock lock(NodeMutex());

    // If our node is dead, just output silence.
    if (!Node()) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    // This node is not connected to anything. Per spec, we don't fire the
    // onaudioprocess event. We also want to clear out the input and output
    // buffer queue, and output a null buffer.
    if (!(aStream->ConsumerCount() ||
          aStream->AsProcessedStream()->InputPortCount())) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      mSharedBuffers->Reset();
      mSeenNonSilenceInput = false;
      mInputWriteIndex = 0;
      return;
    }

    // First, record our input buffer
    for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
      if (aInput.IsNull()) {
        PodZero(mInputChannels[i] + mInputWriteIndex,
                aInput.GetDuration());
      } else {
        mSeenNonSilenceInput = true;
        MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
        MOZ_ASSERT(aInput.mChannelData.Length() == mInputChannels.Length());
        AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                       aInput.mVolume,
                                       mInputChannels[i] + mInputWriteIndex);
      }
    }
    mInputWriteIndex += aInput.GetDuration();

    // Now, see if we have data to output.
    // Note that we need to do this before sending the buffer to the main
    // thread so that our delay time is updated.
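    // Illustrative timing: with WEBAUDIO_BLOCK_SIZE == 128 and the default
    // 4096-frame buffer, mInputWriteIndex reaches mBufferSize once every 32
    // calls to ProcessBlock, and only then is a buffer shipped to the main
    // thread below.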
    *aOutput = mSharedBuffers->GetOutputBuffer();

    if (mInputWriteIndex >= mBufferSize) {
      SendBuffersToMainThread(aStream);
      mInputWriteIndex -= mBufferSize;
      mSeenNonSilenceInput = false;
      AllocateInputBlock();
    }
  }

  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    // Not owned:
    // - mSharedBuffers
    // - mSource (probably)
    // - mDestination (probably)
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    amount += mInputChannels.SizeOfExcludingThis(aMallocSizeOf);
    for (size_t i = 0; i < mInputChannels.Length(); i++) {
      amount += mInputChannels[i].SizeOfExcludingThis(aMallocSizeOf);
    }

    return amount;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
  void AllocateInputBlock()
  {
    for (unsigned i = 0; i < mInputChannels.Length(); ++i) {
      if (!mInputChannels[i]) {
        mInputChannels[i] = new float[mBufferSize];
      }
    }
  }

  void SendBuffersToMainThread(AudioNodeStream* aStream)
  {
    MOZ_ASSERT(!NS_IsMainThread());

    // We now have a full input buffer ready to be sent to the main thread.
    TrackTicks playbackTick = mSource->GetCurrentPosition();
    // Add the duration of the current sample
    playbackTick += WEBAUDIO_BLOCK_SIZE;
    // Add the delay caused by the main thread
    playbackTick += mSharedBuffers->DelaySoFar();
    // Compute the playback time in the coordinate system of the destination
    // FIXME: bug 970773
    double playbackTime =
      mSource->DestinationTimeFromTicks(mDestination, playbackTick);
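    // Worked example (illustrative numbers): if the source stream is at tick
    // 8192, this block contributes 128 ticks, and the main thread has lagged
    // by two blocks (DelaySoFar() == 256), then playbackTick is 8576, and
    // playbackTime is that tick converted into the destination's clock.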
    class Command : public nsRunnable
    {
    public:
      Command(AudioNodeStream* aStream,
              InputChannels& aInputChannels,
              double aPlaybackTime,
              bool aNullInput)
        : mStream(aStream)
        , mPlaybackTime(aPlaybackTime)
        , mNullInput(aNullInput)
      {
        mInputChannels.SetLength(aInputChannels.Length());
        if (!aNullInput) {
          for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
            mInputChannels[i] = aInputChannels[i].forget();
          }
        }
      }

      NS_IMETHODIMP Run()
      {
        // If it's not safe to run scripts right now, schedule this to run later
        if (!nsContentUtils::IsSafeToRunScript()) {
          nsContentUtils::AddScriptRunner(this);
          return NS_OK;
        }

        nsRefPtr<ScriptProcessorNode> node;
        {
          // No need to keep holding the lock for the whole duration of this
          // function, since we're holding a strong reference to the node, so
          // if we can obtain the reference, we will hold the node alive in
          // this function.
          MutexAutoLock lock(mStream->Engine()->NodeMutex());
          node = static_cast<ScriptProcessorNode*>(mStream->Engine()->Node());
        }
        if (!node || !node->Context()) {
          return NS_OK;
        }

        AutoPushJSContext cx(node->Context()->GetJSContext());
        if (cx) {
          // Create the input buffer
          nsRefPtr<AudioBuffer> inputBuffer;
          if (!mNullInput) {
            ErrorResult rv;
            inputBuffer =
              AudioBuffer::Create(node->Context(), mInputChannels.Length(),
                                  node->BufferSize(),
                                  node->Context()->SampleRate(), cx, rv);
            if (rv.Failed()) {
              return NS_OK;
            }
            // Put the channel data inside it
            for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
              inputBuffer->SetRawChannelContents(cx, i, mInputChannels[i]);
            }
          }

          // Ask content to produce data in the output buffer.
          // Note that we always avoid creating the output buffer here, and we
          // try to avoid creating the input buffer as well. The
          // AudioProcessingEvent class knows how to lazily create them if
          // needed once the script tries to access them. Otherwise, we may be
          // able to get away without creating them!
          nsRefPtr<AudioProcessingEvent> event =
            new AudioProcessingEvent(node, nullptr, nullptr);
          event->InitEvent(inputBuffer,
                           mInputChannels.Length(),
                           mPlaybackTime);
          node->DispatchTrustedEvent(event);

          // Steal the output buffers if they have been set. Don't create a
          // buffer if it hasn't been used to return output;
          // FinishProducingOutputBuffer() will optimize output = null.
          // GetThreadSharedChannelsForRate() may also return null after OOM.
          nsRefPtr<ThreadSharedFloatArrayBufferList> output;
          if (event->HasOutputBuffer()) {
            ErrorResult rv;
            AudioBuffer* buffer = event->GetOutputBuffer(rv);
            // HasOutputBuffer() returning true means that GetOutputBuffer()
            // will not fail.
            MOZ_ASSERT(!rv.Failed());
            output = buffer->GetThreadSharedChannelsForRate(cx);
          }

          // Append it to our output buffer queue
          node->GetSharedBuffers()->FinishProducingOutputBuffer(output, node->BufferSize());
        }
        return NS_OK;
      }
    private:
      nsRefPtr<AudioNodeStream> mStream;
      InputChannels mInputChannels;
      double mPlaybackTime;
      bool mNullInput;
    };

    NS_DispatchToMainThread(new Command(aStream, mInputChannels,
                                        playbackTime,
                                        !mSeenNonSilenceInput));
  }

  friend class ScriptProcessorNode;

  SharedBuffers* mSharedBuffers;
  AudioNodeStream* mSource;
  AudioNodeStream* mDestination;
  InputChannels mInputChannels;
  const uint32_t mBufferSize;
  // The write index into the current input buffer
  uint32_t mInputWriteIndex;
  bool mSeenNonSilenceInput;
};
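// Illustrative construction path (a sketch, not shipped code; in practice
// argument validation happens in AudioContext::CreateScriptProcessor before
// this constructor runs):
//
//   nsRefPtr<ScriptProcessorNode> node =
//     new ScriptProcessorNode(context, 4096,
//                             /* aNumberOfInputChannels = */ 2,
//                             /* aNumberOfOutputChannels = */ 2);
//
// From then on the engine runs ProcessBlock() on the graph thread and
// dispatches one AudioProcessingEvent to the main thread per 4096 frames of
// input.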
ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
                                         uint32_t aBufferSize,
                                         uint32_t aNumberOfInputChannels,
                                         uint32_t aNumberOfOutputChannels)
  : AudioNode(aContext,
              aNumberOfInputChannels,
              mozilla::dom::ChannelCountMode::Explicit,
              mozilla::dom::ChannelInterpretation::Speakers)
  , mSharedBuffers(new SharedBuffers(aContext->SampleRate()))
  , mBufferSize(aBufferSize ?
                  aBufferSize : // respect what the web developer requested
                  4096)         // choose our own buffer size -- 4096 frames for now
  , mNumberOfOutputChannels(aNumberOfOutputChannels)
{
  MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size");
  ScriptProcessorNodeEngine* engine =
    new ScriptProcessorNodeEngine(this,
                                  aContext->Destination(),
                                  BufferSize(),
                                  aNumberOfInputChannels);
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
  engine->SetSourceStream(static_cast<AudioNodeStream*>(mStream.get()));
}

ScriptProcessorNode::~ScriptProcessorNode()
{
}

size_t
ScriptProcessorNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  amount += mSharedBuffers->SizeOfIncludingThis(aMallocSizeOf);
  return amount;
}

size_t
ScriptProcessorNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
ScriptProcessorNode::WrapObject(JSContext* aCx)
{
  return ScriptProcessorNodeBinding::Wrap(aCx, this);
}

} // namespace dom
} // namespace mozilla