/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/dom/AnalyserNode.h"
#include "mozilla/dom/AnalyserNodeBinding.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "mozilla/Mutex.h"
#include "mozilla/PodOperations.h"

namespace mozilla {
namespace dom {

NS_IMPL_ISUPPORTS_INHERITED0(AnalyserNode, AudioNode)

class AnalyserNodeEngine : public AudioNodeEngine
{
  class TransferBuffer : public nsRunnable
  {
  public:
    TransferBuffer(AudioNodeStream* aStream,
                   const AudioChunk& aChunk)
      : mStream(aStream)
      , mChunk(aChunk)
    {
    }

    NS_IMETHOD Run()
    {
      nsRefPtr<AnalyserNode> node;
      {
        // No need to keep holding the lock for the whole duration of this
        // function: we take a strong reference to the node, so if we manage
        // to obtain it here, it stays alive for the rest of this function.
        MutexAutoLock lock(mStream->Engine()->NodeMutex());
        node = static_cast<AnalyserNode*>(mStream->Engine()->Node());
      }
      if (node) {
        node->AppendChunk(mChunk);
      }
      return NS_OK;
    }

  private:
    nsRefPtr<AudioNodeStream> mStream;
    AudioChunk mChunk;
  };

public:
  explicit AnalyserNodeEngine(AnalyserNode* aNode)
    : AudioNodeEngine(aNode)
  {
    MOZ_ASSERT(NS_IsMainThread());
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished) MOZ_OVERRIDE
  {
    *aOutput = aInput;

    MutexAutoLock lock(NodeMutex());

    if (Node() &&
        aInput.mChannelData.Length() > 0) {
      nsRefPtr<TransferBuffer> transfer = new TransferBuffer(aStream, aInput);
      NS_DispatchToMainThread(transfer);
    }
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }
};
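// How the pieces above fit together: ProcessBlock() runs on the media graph
// thread and passes its input straight through to the output.  Whenever the
// input block actually carries channel data, it also dispatches a
// TransferBuffer runnable to the main thread, which hands the block to
// AnalyserNode::AppendChunk() below.  The analysis getters therefore always
// operate on the most recently transferred FftSize() samples.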
AnalyserNode::AnalyserNode(AudioContext* aContext)
  : AudioNode(aContext,
              1,
              ChannelCountMode::Explicit,
              ChannelInterpretation::Speakers)
  , mAnalysisBlock(2048)
  , mMinDecibels(-100.)
  , mMaxDecibels(-30.)
  , mSmoothingTimeConstant(.8)
  , mWriteIndex(0)
{
  mStream = aContext->Graph()->CreateAudioNodeStream(new AnalyserNodeEngine(this),
                                                     MediaStreamGraph::INTERNAL_STREAM);
  AllocateBuffer();
}

size_t
AnalyserNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  amount += mAnalysisBlock.SizeOfExcludingThis(aMallocSizeOf);
  amount += mBuffer.SizeOfExcludingThis(aMallocSizeOf);
  amount += mOutputBuffer.SizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

size_t
AnalyserNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
AnalyserNode::WrapObject(JSContext* aCx)
{
  return AnalyserNodeBinding::Wrap(aCx, this);
}

void
AnalyserNode::SetFftSize(uint32_t aValue, ErrorResult& aRv)
{
  // Disallow values that are not a power of 2 or are outside the [32, 2048] range
  if (aValue < 32 ||
      aValue > 2048 ||
      (aValue & (aValue - 1)) != 0) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  if (FftSize() != aValue) {
    mAnalysisBlock.SetFFTSize(aValue);
    AllocateBuffer();
  }
}

void
AnalyserNode::SetMinDecibels(double aValue, ErrorResult& aRv)
{
  if (aValue >= mMaxDecibels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mMinDecibels = aValue;
}

void
AnalyserNode::SetMaxDecibels(double aValue, ErrorResult& aRv)
{
  if (aValue <= mMinDecibels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mMaxDecibels = aValue;
}

void
AnalyserNode::SetSmoothingTimeConstant(double aValue, ErrorResult& aRv)
{
  if (aValue < 0 || aValue > 1) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mSmoothingTimeConstant = aValue;
}
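// The frequency-data getters below expose the smoothed magnitude spectrum
// computed by FFTAnalysis().  GetFloatFrequencyData() reports each bin in
// decibels, while GetByteFrequencyData() maps the [mMinDecibels, mMaxDecibels]
// range linearly onto [0, UCHAR_MAX] and clamps.  For example, with the
// default range of [-100, -30] dB a bin at -65 dB lands at the midpoint and
// is reported as 127.  (ConvertLinearToDecibels is assumed to compute
// 20 * log10(linear), using mMinDecibels for a zero magnitude.)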
void
AnalyserNode::GetFloatFrequencyData(const Float32Array& aArray)
{
  if (!FFTAnalysis()) {
    // Might fail to allocate memory
    return;
  }

  aArray.ComputeLengthAndData();

  float* buffer = aArray.Data();
  uint32_t length = std::min(aArray.Length(), mOutputBuffer.Length());

  for (uint32_t i = 0; i < length; ++i) {
    buffer[i] = WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i], mMinDecibels);
  }
}

void
AnalyserNode::GetByteFrequencyData(const Uint8Array& aArray)
{
  if (!FFTAnalysis()) {
    // Might fail to allocate memory
    return;
  }

  const double rangeScaleFactor = 1.0 / (mMaxDecibels - mMinDecibels);

  aArray.ComputeLengthAndData();

  unsigned char* buffer = aArray.Data();
  uint32_t length = std::min(aArray.Length(), mOutputBuffer.Length());

  for (uint32_t i = 0; i < length; ++i) {
    const double decibels = WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i], mMinDecibels);
    // scale down the value to the range of [0, UCHAR_MAX]
    const double scaled = std::max(0.0, std::min(double(UCHAR_MAX),
                                                 UCHAR_MAX * (decibels - mMinDecibels) * rangeScaleFactor));
    buffer[i] = static_cast<unsigned char>(scaled);
  }
}

void
AnalyserNode::GetFloatTimeDomainData(const Float32Array& aArray)
{
  aArray.ComputeLengthAndData();

  float* buffer = aArray.Data();
  uint32_t length = std::min(aArray.Length(), mBuffer.Length());

  for (uint32_t i = 0; i < length; ++i) {
    buffer[i] = mBuffer[(i + mWriteIndex) % mBuffer.Length()];
  }
}

void
AnalyserNode::GetByteTimeDomainData(const Uint8Array& aArray)
{
  aArray.ComputeLengthAndData();

  unsigned char* buffer = aArray.Data();
  uint32_t length = std::min(aArray.Length(), mBuffer.Length());

  for (uint32_t i = 0; i < length; ++i) {
    const float value = mBuffer[(i + mWriteIndex) % mBuffer.Length()];
    // scale the value to the range of [0, UCHAR_MAX]
    const float scaled = std::max(0.0f, std::min(float(UCHAR_MAX),
                                                 128.0f * (value + 1.0f)));
    buffer[i] = static_cast<unsigned char>(scaled);
  }
}
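// FFTAnalysis() below linearizes the circular buffer so that the oldest
// sample comes first, applies a Blackman window (alpha = 0.16), runs the FFT,
// and scales each bin magnitude by 1 / fftSize.  The per-bin result is then
// blended with the previous spectrum as an exponential moving average:
//
//   mOutputBuffer[i] = k * mOutputBuffer[i] + (1 - k) * |X[i]| / fftSize
//
// where k = mSmoothingTimeConstant, so k = 0 disables smoothing and values
// close to 1 make the reported spectrum change slowly.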
bool
AnalyserNode::FFTAnalysis()
{
  float* inputBuffer;
  bool allocated = false;
  if (mWriteIndex == 0) {
    inputBuffer = mBuffer.Elements();
  } else {
    // The buffer is circular; copy it out in time order (oldest sample first)
    // before windowing.
    inputBuffer = static_cast<float*>(moz_malloc(FftSize() * sizeof(float)));
    if (!inputBuffer) {
      return false;
    }
    memcpy(inputBuffer, mBuffer.Elements() + mWriteIndex, sizeof(float) * (FftSize() - mWriteIndex));
    memcpy(inputBuffer + FftSize() - mWriteIndex, mBuffer.Elements(), sizeof(float) * mWriteIndex);
    allocated = true;
  }

  ApplyBlackmanWindow(inputBuffer, FftSize());

  mAnalysisBlock.PerformFFT(inputBuffer);

  // Normalize so that an input sine wave at 0dBfs registers as 0dBfs (undo FFT scaling factor).
  const double magnitudeScale = 1.0 / FftSize();

  for (uint32_t i = 0; i < mOutputBuffer.Length(); ++i) {
    double scalarMagnitude = NS_hypot(mAnalysisBlock.RealData(i),
                                      mAnalysisBlock.ImagData(i)) *
                             magnitudeScale;
    mOutputBuffer[i] = mSmoothingTimeConstant * mOutputBuffer[i] +
                       (1.0 - mSmoothingTimeConstant) * scalarMagnitude;
  }

  if (allocated) {
    moz_free(inputBuffer);
  }
  return true;
}

void
AnalyserNode::ApplyBlackmanWindow(float* aBuffer, uint32_t aSize)
{
  double alpha = 0.16;
  double a0 = 0.5 * (1.0 - alpha);
  double a1 = 0.5;
  double a2 = 0.5 * alpha;

  for (uint32_t i = 0; i < aSize; ++i) {
    double x = double(i) / aSize;
    double window = a0 - a1 * cos(2 * M_PI * x) + a2 * cos(4 * M_PI * x);
    aBuffer[i] *= window;
  }
}

bool
AnalyserNode::AllocateBuffer()
{
  bool result = true;
  if (mBuffer.Length() != FftSize()) {
    result = mBuffer.SetLength(FftSize());
    if (result) {
      memset(mBuffer.Elements(), 0, sizeof(float) * FftSize());
      mWriteIndex = 0;

      result = mOutputBuffer.SetLength(FrequencyBinCount());
      if (result) {
        memset(mOutputBuffer.Elements(), 0, sizeof(float) * FrequencyBinCount());
      }
    }
  }
  return result;
}

void
AnalyserNode::AppendChunk(const AudioChunk& aChunk)
{
  const uint32_t bufferSize = mBuffer.Length();
  const uint32_t channelCount = aChunk.mChannelData.Length();
  uint32_t chunkDuration = aChunk.mDuration;
  MOZ_ASSERT((bufferSize & (bufferSize - 1)) == 0); // Must be a power of two!
  MOZ_ASSERT(channelCount > 0);
  MOZ_ASSERT(chunkDuration == WEBAUDIO_BLOCK_SIZE);

  if (chunkDuration > bufferSize) {
    // Copy at most bufferSize samples.
    chunkDuration = bufferSize;
  }

  // Down-mix all channels into the write position: sum them, then scale by
  // 1 / channelCount.
  PodCopy(mBuffer.Elements() + mWriteIndex, static_cast<const float*>(aChunk.mChannelData[0]), chunkDuration);
  for (uint32_t i = 1; i < channelCount; ++i) {
    AudioBlockAddChannelWithScale(static_cast<const float*>(aChunk.mChannelData[i]), 1.0f,
                                  mBuffer.Elements() + mWriteIndex);
  }
  if (channelCount > 1) {
    AudioBlockInPlaceScale(mBuffer.Elements() + mWriteIndex,
                           1.0f / aChunk.mChannelData.Length());
  }
  mWriteIndex += chunkDuration;
  MOZ_ASSERT(mWriteIndex <= bufferSize);
  if (mWriteIndex >= bufferSize) {
    mWriteIndex = 0;
  }
}

} // namespace dom
} // namespace mozilla
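// Worked example of the circular buffer maintained by AppendChunk(): with the
// default fftSize of 2048 and 128-sample blocks (WEBAUDIO_BLOCK_SIZE), each
// call copies one down-mixed block at mWriteIndex and advances it by 128, so
// after 16 blocks mWriteIndex reaches 2048 and wraps back to 0.  The
// time-domain getters read starting at mWriteIndex, which is why they return
// samples in oldest-to-newest order.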