/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioBufferSourceNode.h"
#include "mozilla/dom/AudioBufferSourceNodeBinding.h"
#include "mozilla/dom/AudioParam.h"
#include "nsMathUtils.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioDestinationNode.h"
#include "AudioParamTimeline.h"
#include "speex/speex_resampler.h"
#include <limits>

namespace mozilla {
namespace dom {

NS_IMPL_CYCLE_COLLECTION_CLASS(AudioBufferSourceNode)

NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioBufferSourceNode)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mBuffer)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPlaybackRate)
  if (tmp->Context()) {
    // AudioNode's Unlink implementation disconnects us from the graph
    // too, but we need to do this right here to make sure that
    // UnregisterAudioBufferSourceNode can properly untangle us from
    // the possibly connected PannerNodes.
    tmp->DisconnectFromGraph();
    tmp->Context()->UnregisterAudioBufferSourceNode(tmp);
  }
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)

NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioBufferSourceNode, AudioNode)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mBuffer)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPlaybackRate)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(AudioBufferSourceNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioBufferSourceNode, AudioNode)

/**
 * Media-thread playback engine for AudioBufferSourceNode.
 * Nothing is played until a non-null buffer has been set (via
 * AudioNodeStream::SetBuffer) and a non-zero mBufferEnd has been set (via
 * AudioNodeStream::SetInt32Parameter).
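 *
 * The main-thread half of this class (below) drives the engine roughly as
 * follows: SendBufferParameterToStream() forwards the sample data and its
 * SAMPLE_RATE, SendOffsetAndDurationParametersToStream() forwards
 * BUFFERSTART/BUFFEREND computed from the offset and duration passed to
 * start(), and Start()/Stop() forward the START and STOP times.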
 */
class AudioBufferSourceNodeEngine : public AudioNodeEngine
{
public:
  explicit AudioBufferSourceNodeEngine(AudioNode* aNode,
                                       AudioDestinationNode* aDestination) :
    AudioNodeEngine(aNode),
    mStart(0.0), mBeginProcessing(0),
    mStop(TRACK_TICKS_MAX),
    mResampler(nullptr), mRemainingResamplerTail(0),
    mBufferEnd(0),
    mLoopStart(0), mLoopEnd(0),
    mBufferSampleRate(0), mBufferPosition(0), mChannels(0),
    mDopplerShift(1.0f),
    mDestination(static_cast<AudioNodeStream*>(aDestination->Stream())),
    mPlaybackRateTimeline(1.0f), mLoop(false)
  {}

  ~AudioBufferSourceNodeEngine()
  {
    if (mResampler) {
      speex_resampler_destroy(mResampler);
    }
  }

  void SetSourceStream(AudioNodeStream* aSource)
  {
    mSource = aSource;
  }

  virtual void SetTimelineParameter(uint32_t aIndex,
                                    const dom::AudioParamTimeline& aValue,
                                    TrackRate aSampleRate) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case AudioBufferSourceNode::PLAYBACKRATE:
      mPlaybackRateTimeline = aValue;
      WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, mSource, mDestination);
      break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter");
    }
  }
  virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam)
  {
    switch (aIndex) {
    case AudioBufferSourceNode::STOP: mStop = aParam; break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter");
    }
  }
  virtual void SetDoubleParameter(uint32_t aIndex, double aParam)
  {
    switch (aIndex) {
    case AudioBufferSourceNode::START:
      MOZ_ASSERT(!mStart, "Another START?");
      mStart = mSource->TimeFromDestinationTime(mDestination, aParam) *
               mSource->SampleRate();
      // Round to nearest
      mBeginProcessing = mStart + 0.5;
      break;
    case AudioBufferSourceNode::DOPPLERSHIFT:
      // Ignore non-positive or NaN values (aParam == aParam is false for NaN).
      mDopplerShift = aParam > 0 && aParam == aParam ?
                      aParam : 1.0;
      break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine double parameter.");
    };
  }
  virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam)
  {
    switch (aIndex) {
    case AudioBufferSourceNode::SAMPLE_RATE: mBufferSampleRate = aParam; break;
    case AudioBufferSourceNode::BUFFERSTART:
      if (mBufferPosition == 0) {
        mBufferPosition = aParam;
      }
      break;
    case AudioBufferSourceNode::BUFFEREND: mBufferEnd = aParam; break;
    case AudioBufferSourceNode::LOOP: mLoop = !!aParam; break;
    case AudioBufferSourceNode::LOOPSTART: mLoopStart = aParam; break;
    case AudioBufferSourceNode::LOOPEND: mLoopEnd = aParam; break;
    default:
      NS_ERROR("Bad AudioBufferSourceNodeEngine Int32Parameter");
    }
  }
  virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
  {
    mBuffer = aBuffer;
  }

  bool BegunResampling()
  {
    return mBeginProcessing == -TRACK_TICKS_MAX;
  }

  void UpdateResampler(int32_t aOutRate, uint32_t aChannels)
  {
    if (mResampler &&
        (aChannels != mChannels ||
         // If the resampler has begun, then it will have moved
         // mBufferPosition to after the samples it has read, but it hasn't
         // output its buffered samples.  Keep using the resampler, even if
         // the rates now match, so that this latent segment is output.
         (aOutRate == mBufferSampleRate && !BegunResampling()))) {
      speex_resampler_destroy(mResampler);
      mResampler = nullptr;
      mRemainingResamplerTail = 0;
      mBeginProcessing = mStart + 0.5;
    }

    if (aOutRate == mBufferSampleRate && !mResampler) {
      return;
    }

    if (!mResampler) {
      mChannels = aChannels;
      mResampler = speex_resampler_init(mChannels, mBufferSampleRate, aOutRate,
                                        SPEEX_RESAMPLER_QUALITY_DEFAULT,
                                        nullptr);
    } else {
      uint32_t currentOutSampleRate, currentInSampleRate;
      speex_resampler_get_rate(mResampler, &currentInSampleRate,
                               &currentOutSampleRate);
      if (currentOutSampleRate == static_cast<uint32_t>(aOutRate)) {
        return;
      }
      speex_resampler_set_rate(mResampler, currentInSampleRate, aOutRate);
    }

    if (!BegunResampling()) {
      // Low pass filter effects from the resampler mean that samples before
      // the start time are influenced by resampling the buffer.  The input
      // latency indicates half the filter width.
      int64_t inputLatency = speex_resampler_get_input_latency(mResampler);
      uint32_t ratioNum, ratioDen;
      speex_resampler_get_ratio(mResampler, &ratioNum, &ratioDen);
      // The output subsample resolution supported in aligning the resampler
      // is ratioNum.  First round the start time to the nearest subsample.
      int64_t subsample = mStart * ratioNum + 0.5;
      // Now include the leading effects of the filter, and round *up* to the
      // next whole tick, because there is no effect on samples outside the
      // filter width.
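      // As an illustration (hypothetical numbers): for a buffer at half the
      // output rate, the in:out ratio reduces to ratioNum = 1, ratioDen = 2.
      // With an input latency of 64 input samples (= 128 subsample units)
      // and mStart = 1000.25 output ticks, subsample = 1000 and the
      // expression below yields mBeginProcessing = 872, the first whole tick
      // that the leading filter taps can influence.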
      mBeginProcessing =
        (subsample - inputLatency * ratioDen + ratioNum - 1) / ratioNum;
    }
  }

  // Borrow a full block of WEBAUDIO_BLOCK_SIZE frames from the source buffer
  // at the current mBufferPosition.  This avoids copying memory.
  void BorrowFromInputBuffer(AudioChunk* aOutput,
                             uint32_t aChannels)
  {
    aOutput->mDuration = WEBAUDIO_BLOCK_SIZE;
    aOutput->mBuffer = mBuffer;
    aOutput->mChannelData.SetLength(aChannels);
    for (uint32_t i = 0; i < aChannels; ++i) {
      aOutput->mChannelData[i] = mBuffer->GetData(i) + mBufferPosition;
    }
    aOutput->mVolume = 1.0f;
    aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32;
  }

  // Copy aNumberOfFrames frames from the source buffer at the current
  // mBufferPosition and put them at offset aOffsetWithinBlock in the
  // destination block.
  void CopyFromInputBuffer(AudioChunk* aOutput,
                           uint32_t aChannels,
                           uintptr_t aOffsetWithinBlock,
                           uint32_t aNumberOfFrames) {
    for (uint32_t i = 0; i < aChannels; ++i) {
      float* baseChannelData =
        static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i]));
      memcpy(baseChannelData + aOffsetWithinBlock,
             mBuffer->GetData(i) + mBufferPosition,
             aNumberOfFrames * sizeof(float));
    }
  }

  // Resamples input data to an output buffer, according to |mBufferSampleRate|
  // and the playbackRate.
  // The number of frames consumed/produced depends on the amount of space
  // remaining in both the input and output buffer, and the playback rate (that
  // is, the ratio between the output samplerate and the input samplerate).
  void CopyFromInputBufferWithResampling(AudioNodeStream* aStream,
                                         AudioChunk* aOutput,
                                         uint32_t aChannels,
                                         uint32_t* aOffsetWithinBlock,
                                         TrackTicks* aCurrentPosition,
                                         int32_t aBufferMax) {
    // TODO: adjust for mStop (see bug 913854 comment 9).
    uint32_t availableInOutputBuffer =
      WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock;
    SpeexResamplerState* resampler = mResampler;
    MOZ_ASSERT(aChannels > 0);

    if (mBufferPosition < aBufferMax) {
      uint32_t availableInInputBuffer = aBufferMax - mBufferPosition;
      uint32_t ratioNum, ratioDen;
      speex_resampler_get_ratio(resampler, &ratioNum, &ratioDen);
      // Limit the number of input samples copied and possibly
      // format-converted for resampling by estimating how many will be used.
      // This may be a little small if still filling the resampler with
      // initial data, but we'll get called again and it will work out.
      uint32_t inputLimit = availableInOutputBuffer * ratioNum / ratioDen + 10;
      if (!BegunResampling()) {
        // First time the resampler is used.
        uint32_t inputLatency = speex_resampler_get_input_latency(resampler);
        inputLimit += inputLatency;
        // If starting after mStart, then play from the beginning of the
        // buffer, but correct for input latency.  If starting before mStart,
        // then align the resampler so that the time corresponding to the
        // first input sample is mStart.
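        // Both terms below use the subsample units from UpdateResampler():
        // one input sample counts as ratioDen units and one output tick as
        // ratioNum units, so the latency correction and the lead time can be
        // combined directly before being handed to the resampler.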
        uint32_t skipFracNum = inputLatency * ratioDen;
        double leadTicks = mStart - *aCurrentPosition;
        if (leadTicks > 0.0) {
          // Round to nearest output subsample supported by the resampler at
          // these rates.
          skipFracNum -= leadTicks * ratioNum + 0.5;
          MOZ_ASSERT(skipFracNum < INT32_MAX, "mBeginProcessing is wrong?");
        }
        speex_resampler_set_skip_frac_num(resampler, skipFracNum);

        mBeginProcessing = -TRACK_TICKS_MAX;
      }
      inputLimit = std::min(inputLimit, availableInInputBuffer);

      for (uint32_t i = 0; true; ) {
        uint32_t inSamples = inputLimit;
        const float* inputData = mBuffer->GetData(i) + mBufferPosition;

        uint32_t outSamples = availableInOutputBuffer;
        float* outputData =
          static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])) +
          *aOffsetWithinBlock;

        WebAudioUtils::SpeexResamplerProcess(resampler, i,
                                             inputData, &inSamples,
                                             outputData, &outSamples);
        if (++i == aChannels) {
          mBufferPosition += inSamples;
          MOZ_ASSERT(mBufferPosition <= mBufferEnd || mLoop);
          *aOffsetWithinBlock += outSamples;
          *aCurrentPosition += outSamples;
          if (inSamples == availableInInputBuffer && !mLoop) {
            // We'll feed in enough zeros to empty out the resampler's memory.
            // This handles the output latency as well as capturing the low
            // pass effects of the resample filter.
            mRemainingResamplerTail =
              2 * speex_resampler_get_input_latency(resampler) - 1;
          }
          return;
        }
      }
    } else {
      for (uint32_t i = 0; true; ) {
        uint32_t inSamples = mRemainingResamplerTail;
        uint32_t outSamples = availableInOutputBuffer;
        float* outputData =
          static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])) +
          *aOffsetWithinBlock;

        // Passing AudioDataValue* for aIn selects the overload that does not
        // try to copy and format-convert input data.
        WebAudioUtils::SpeexResamplerProcess(resampler, i,
                                             static_cast<AudioDataValue*>(nullptr),
                                             &inSamples,
                                             outputData, &outSamples);
        if (++i == aChannels) {
          mRemainingResamplerTail -= inSamples;
          MOZ_ASSERT(mRemainingResamplerTail >= 0);
          *aOffsetWithinBlock += outSamples;
          *aCurrentPosition += outSamples;
          break;
        }
      }
    }
  }

  /**
   * Fill aOutput with as many zero frames as we can, and advance
   * aOffsetWithinBlock and aCurrentPosition based on how many frames we write.
   * This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or
   * aCurrentPosition past aMaxPos.  This function knows when it needs to
   * allocate the output buffer, and also optimizes the case where it can avoid
   * memory allocations.
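   *
   * In particular, an entirely silent block is represented with
   * AudioChunk::SetNull (no allocation), while a partial block allocates once
   * and zero-fills only the requested range.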
   */
  void FillWithZeroes(AudioChunk* aOutput,
                      uint32_t aChannels,
                      uint32_t* aOffsetWithinBlock,
                      TrackTicks* aCurrentPosition,
                      TrackTicks aMaxPos)
  {
    MOZ_ASSERT(*aCurrentPosition < aMaxPos);
    uint32_t numFrames =
      std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                           aMaxPos - *aCurrentPosition);
    if (numFrames == WEBAUDIO_BLOCK_SIZE) {
      aOutput->SetNull(numFrames);
    } else {
      if (*aOffsetWithinBlock == 0) {
        AllocateAudioBlock(aChannels, aOutput);
      }
      WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames);
    }
    *aOffsetWithinBlock += numFrames;
    *aCurrentPosition += numFrames;
  }

  /**
   * Copy as many frames as possible from the source buffer to aOutput, and
   * advance aOffsetWithinBlock and aCurrentPosition based on how many frames
   * we write.  This will never advance aOffsetWithinBlock past
   * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop.  It takes data from
   * the buffer at the current mBufferPosition, and never takes more data than
   * aBufferMax.  This function knows when it needs to allocate the output
   * buffer, and also optimizes the case where it can avoid memory allocations.
   */
  void CopyFromBuffer(AudioNodeStream* aStream,
                      AudioChunk* aOutput,
                      uint32_t aChannels,
                      uint32_t* aOffsetWithinBlock,
                      TrackTicks* aCurrentPosition,
                      int32_t aBufferMax)
  {
    MOZ_ASSERT(*aCurrentPosition < mStop);
    uint32_t numFrames =
      std::min<TrackTicks>(std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                                                aBufferMax - mBufferPosition),
                           mStop - *aCurrentPosition);
    if (numFrames == WEBAUDIO_BLOCK_SIZE && !mResampler) {
      MOZ_ASSERT(mBufferPosition < aBufferMax);
      BorrowFromInputBuffer(aOutput, aChannels);
      *aOffsetWithinBlock += numFrames;
      *aCurrentPosition += numFrames;
      mBufferPosition += numFrames;
    } else {
      if (*aOffsetWithinBlock == 0) {
        AllocateAudioBlock(aChannels, aOutput);
      }
      if (!mResampler) {
        MOZ_ASSERT(mBufferPosition < aBufferMax);
        CopyFromInputBuffer(aOutput, aChannels, *aOffsetWithinBlock, numFrames);
        *aOffsetWithinBlock += numFrames;
        *aCurrentPosition += numFrames;
        mBufferPosition += numFrames;
      } else {
        CopyFromInputBufferWithResampling(aStream, aOutput, aChannels,
                                          aOffsetWithinBlock, aCurrentPosition,
                                          aBufferMax);
      }
    }
  }

  int32_t ComputeFinalOutSampleRate(float aPlaybackRate)
  {
    // Make sure the playback rate and the doppler shift are something
    // our resampler can work with.
    int32_t rate = WebAudioUtils::
      TruncateFloatToInt<int32_t>(mSource->SampleRate() /
                                  (aPlaybackRate * mDopplerShift));
    return rate ?
           rate : mBufferSampleRate;
  }

  void UpdateSampleRateIfNeeded(uint32_t aChannels)
  {
    float playbackRate;

    if (mPlaybackRateTimeline.HasSimpleValue()) {
      playbackRate = mPlaybackRateTimeline.GetValue();
    } else {
      playbackRate = mPlaybackRateTimeline.GetValueAtTime(mSource->GetCurrentPosition());
    }
    // Fall back to the default rate for non-positive or NaN values.
    if (playbackRate <= 0 || playbackRate != playbackRate) {
      playbackRate = 1.0f;
    }

    int32_t outRate = ComputeFinalOutSampleRate(playbackRate);
    UpdateResampler(outRate, aChannels);
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished)
  {
    if (!mBuffer || !mBufferEnd) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    uint32_t channels = mBuffer->GetChannels();
    if (!channels) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    // WebKit treats the playbackRate as a k-rate parameter in their code,
    // despite the spec saying that it should be an a-rate parameter.  We
    // treat it as k-rate.  Spec bug:
    // https://www.w3.org/Bugs/Public/show_bug.cgi?id=21592
    UpdateSampleRateIfNeeded(channels);

    uint32_t written = 0;
    TrackTicks streamPosition = aStream->GetCurrentPosition();
    while (written < WEBAUDIO_BLOCK_SIZE) {
      if (mStop != TRACK_TICKS_MAX &&
          streamPosition >= mStop) {
        FillWithZeroes(aOutput, channels, &written, &streamPosition,
                       TRACK_TICKS_MAX);
        continue;
      }
      if (streamPosition < mBeginProcessing) {
        FillWithZeroes(aOutput, channels, &written, &streamPosition,
                       mBeginProcessing);
        continue;
      }
      if (mLoop) {
        // mLoopEnd can become less than mBufferPosition when a LOOPEND engine
        // parameter is received after "loopend" is changed on the node or a
        // new buffer with lower samplerate is set.
        if (mBufferPosition >= mLoopEnd) {
          mBufferPosition = mLoopStart;
        }
        CopyFromBuffer(aStream, aOutput, channels, &written, &streamPosition,
                       mLoopEnd);
      } else {
        if (mBufferPosition < mBufferEnd || mRemainingResamplerTail) {
          CopyFromBuffer(aStream, aOutput, channels, &written, &streamPosition,
                         mBufferEnd);
        } else {
          FillWithZeroes(aOutput, channels, &written, &streamPosition,
                         TRACK_TICKS_MAX);
        }
      }
    }

    // We've finished if we've gone past mStop, or if we've reached the end of
    // the buffer (mBufferEnd) with looping disabled.
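    // A non-zero mRemainingResamplerTail keeps the node unfinished until the
    // resampler's buffered tail has drained, so the end of the sound is not
    // clipped.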
    if (streamPosition >= mStop ||
        (!mLoop && mBufferPosition >= mBufferEnd && !mRemainingResamplerTail)) {
      *aFinished = true;
    }
  }

  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    // Not owned:
    // - mBuffer - shared w/ AudioNode
    // - mPlaybackRateTimeline - shared w/ AudioNode

    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);

    // NB: We need to modify speex if we want the full memory picture, internal
    //     fields that need measuring noted below.
    // - mResampler->mem
    // - mResampler->sinc_table
    // - mResampler->last_sample
    // - mResampler->magic_samples
    // - mResampler->samp_frac_num
    amount += aMallocSizeOf(mResampler);

    return amount;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  double mStart; // including the fractional position between ticks
  // Low pass filter effects from the resampler mean that samples before the
  // start time are influenced by resampling the buffer.  mBeginProcessing
  // includes the extent of this filter.  The special value of -TRACK_TICKS_MAX
  // indicates that the resampler has begun processing.
  TrackTicks mBeginProcessing;
  TrackTicks mStop;
  nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  SpeexResamplerState* mResampler;
  // mRemainingResamplerTail, like mBufferPosition and mBufferEnd, is measured
  // in input buffer samples.
  int mRemainingResamplerTail;
  int32_t mBufferEnd;
  int32_t mLoopStart;
  int32_t mLoopEnd;
  int32_t mBufferSampleRate;
  int32_t mBufferPosition;
  uint32_t mChannels;
  float mDopplerShift;
  AudioNodeStream* mDestination;
  AudioNodeStream* mSource;
  AudioParamTimeline mPlaybackRateTimeline;
  bool mLoop;
};

AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Max,
              ChannelInterpretation::Speakers)
  , mLoopStart(0.0)
  , mLoopEnd(0.0)
  // mOffset and mDuration are initialized in Start().
  , mPlaybackRate(new AudioParam(MOZ_THIS_IN_INITIALIZER_LIST(),
                                 SendPlaybackRateToStream, 1.0f))
  , mLoop(false)
  , mStartCalled(false)
  , mStopped(false)
{
  AudioBufferSourceNodeEngine* engine =
    new AudioBufferSourceNodeEngine(this, aContext->Destination());
  mStream = aContext->Graph()->CreateAudioNodeStream(engine,
                                                     MediaStreamGraph::SOURCE_STREAM);
  engine->SetSourceStream(static_cast<AudioNodeStream*>(mStream.get()));
  mStream->AddMainThreadListener(this);
}

AudioBufferSourceNode::~AudioBufferSourceNode()
{
  if (Context()) {
    Context()->UnregisterAudioBufferSourceNode(this);
  }
}

size_t
AudioBufferSourceNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  if (mBuffer) {
    amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
  }

  amount += mPlaybackRate->SizeOfIncludingThis(aMallocSizeOf);
  return amount;
}

size_t
AudioBufferSourceNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
AudioBufferSourceNode::WrapObject(JSContext* aCx)
{
  return AudioBufferSourceNodeBinding::Wrap(aCx, this);
}

void
AudioBufferSourceNode::Start(double aWhen, double aOffset,
                             const Optional<double>& aDuration, ErrorResult& aRv)
{
  if (!WebAudioUtils::IsTimeValid(aWhen) ||
      (aDuration.WasPassed() && !WebAudioUtils::IsTimeValid(aDuration.Value()))) {
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return;
  }

  if (mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }
  mStartCalled = true;

  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  if (!ns) {
    // Nothing to play, or we're already dead for some reason
    return;
  }

  // Remember our arguments so that we can use them when we get a new buffer.
  mOffset = aOffset;
  mDuration = aDuration.WasPassed() ? aDuration.Value()
                                    : std::numeric_limits<double>::min();
  // We can't send these parameters without a buffer because we don't know the
  // buffer's sample rate or length.
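  // Note that start() may legitimately run before a buffer has been assigned
  // to the node; in that case SendBufferParameterToStream() (below) forwards
  // the remembered offset and duration once the buffer arrives.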
  if (mBuffer) {
    SendOffsetAndDurationParametersToStream(ns);
  }

  // Don't set parameter unnecessarily
  if (aWhen > 0.0) {
    ns->SetDoubleParameter(START, mContext->DOMTimeToStreamTime(aWhen));
  }
}

void
AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx)
{
  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  MOZ_ASSERT(ns, "Why don't we have a stream here?");

  if (mBuffer) {
    float rate = mBuffer->SampleRate();
    nsRefPtr<ThreadSharedFloatArrayBufferList> data =
      mBuffer->GetThreadSharedChannelsForRate(aCx);
    ns->SetBuffer(data.forget());
    ns->SetInt32Parameter(SAMPLE_RATE, rate);

    if (mStartCalled) {
      SendOffsetAndDurationParametersToStream(ns);
    }
  } else {
    ns->SetBuffer(nullptr);

    MarkInactive();
  }
}

void
AudioBufferSourceNode::SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream)
{
  NS_ASSERTION(mBuffer && mStartCalled,
               "Only call this when we have a buffer and start() has been called");

  float rate = mBuffer->SampleRate();
  int32_t bufferEnd = mBuffer->Length();
  int32_t offsetSamples = std::max(0, NS_lround(mOffset * rate));

  // Don't set parameter unnecessarily
  if (offsetSamples > 0) {
    aStream->SetInt32Parameter(BUFFERSTART, offsetSamples);
  }

  if (mDuration != std::numeric_limits<double>::min()) {
    bufferEnd = std::min(bufferEnd,
                         offsetSamples + NS_lround(mDuration * rate));
  }
  aStream->SetInt32Parameter(BUFFEREND, bufferEnd);

  MarkActive();
}

void
AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv)
{
  if (!WebAudioUtils::IsTimeValid(aWhen)) {
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return;
  }

  if (!mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }

  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  if (!ns || !Context()) {
    // We've already stopped and had our stream shut down
    return;
  }

  ns->SetStreamTimeParameter(STOP, Context(), std::max(0.0, aWhen));
}

void
AudioBufferSourceNode::NotifyMainThreadStateChanged()
{
  if (mStream->IsFinished()) {
    class EndedEventDispatcher : public nsRunnable
    {
    public:
      explicit EndedEventDispatcher(AudioBufferSourceNode* aNode)
        : mNode(aNode) {}
      NS_IMETHODIMP Run()
      {
        // If it's not safe to run scripts right now, schedule this to run later
        if (!nsContentUtils::IsSafeToRunScript()) {
          nsContentUtils::AddScriptRunner(this);
          return NS_OK;
        }

        mNode->DispatchTrustedEvent(NS_LITERAL_STRING("ended"));
        return NS_OK;
      }
    private:
      nsRefPtr<AudioBufferSourceNode> mNode;
    };
    if (!mStopped) {
      // Only dispatch the ended event once
      NS_DispatchToMainThread(new EndedEventDispatcher(this));
      mStopped = true;
    }

    // Drop the playing reference
    // Warning: The below line might delete this.
    MarkInactive();
  }
}

void
AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode)
{
  AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
  SendTimelineParameterToStream(This, PLAYBACKRATE, *This->mPlaybackRate);
}

void
AudioBufferSourceNode::SendDopplerShiftToStream(double aDopplerShift)
{
  SendDoubleParameterToStream(DOPPLERSHIFT, aDopplerShift);
}

void
AudioBufferSourceNode::SendLoopParametersToStream()
{
  // Don't compute and set the loop parameters unnecessarily
  if (mLoop && mBuffer) {
    float rate = mBuffer->SampleRate();
    double length = (double(mBuffer->Length()) / mBuffer->SampleRate());
    double actualLoopStart, actualLoopEnd;
    if (mLoopStart >= 0.0 && mLoopEnd > 0.0 &&
        mLoopStart < mLoopEnd) {
      MOZ_ASSERT(mLoopStart != 0.0 || mLoopEnd != 0.0);
      actualLoopStart = (mLoopStart > length) ? 0.0 : mLoopStart;
      actualLoopEnd = std::min(mLoopEnd, length);
    } else {
      actualLoopStart = 0.0;
      actualLoopEnd = length;
    }
    int32_t loopStartTicks = NS_lround(actualLoopStart * rate);
    int32_t loopEndTicks = NS_lround(actualLoopEnd * rate);
    if (loopStartTicks < loopEndTicks) {
      SendInt32ParameterToStream(LOOPSTART, loopStartTicks);
      SendInt32ParameterToStream(LOOPEND, loopEndTicks);
      SendInt32ParameterToStream(LOOP, 1);
    } else {
      // Be explicit about looping not happening if the offsets make
      // looping impossible.
      SendInt32ParameterToStream(LOOP, 0);
    }
  } else if (!mLoop) {
    SendInt32ParameterToStream(LOOP, 0);
  }
}

}
}