Thu, 22 Jan 2015 13:21:57 +0100
Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
michael@0 | 1 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
michael@0 | 3 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "ConvolverNode.h" |
michael@0 | 8 | #include "mozilla/dom/ConvolverNodeBinding.h" |
michael@0 | 9 | #include "AudioNodeEngine.h" |
michael@0 | 10 | #include "AudioNodeStream.h" |
michael@0 | 11 | #include "blink/Reverb.h" |
michael@0 | 12 | #include "PlayingRefChangeHandler.h" |
michael@0 | 13 | |
michael@0 | 14 | namespace mozilla { |
michael@0 | 15 | namespace dom { |
michael@0 | 16 | |
// XPCOM cycle-collection / refcounting boilerplate for ConvolverNode.
// Beyond what AudioNode already traces, the only extra cycle-collected
// member is mBuffer (the impulse-response AudioBuffer, which can hold JS
// objects and therefore participate in cycles).
NS_IMPL_CYCLE_COLLECTION_INHERITED(ConvolverNode, AudioNode, mBuffer)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(ConvolverNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(ConvolverNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(ConvolverNode, AudioNode)
// Audio-thread engine for ConvolverNode.  Owns the Blink Reverb object that
// performs the actual FFT-based convolution, and tracks how much reverb
// "tail" remains after the input goes silent so the node can keep itself
// alive (via main-thread playing refs) exactly as long as it still produces
// audible output.
class ConvolverNodeEngine : public AudioNodeEngine
{
  typedef PlayingRefChangeHandler PlayingRefChanged;
public:
  ConvolverNodeEngine(AudioNode* aNode, bool aNormalize)
    : AudioNodeEngine(aNode)
    , mBufferLength(0)
    // INT32_MIN is a sentinel meaning "no tail pending / release already
    // dispatched" (see ProcessBlock).
    , mLeftOverData(INT32_MIN)
    , mSampleRate(0.0f)
    // Offline (rendering) contexts process as fast as possible, so the
    // reverb must not defer work to background threads there.
    , mUseBackgroundThreads(!aNode->Context()->IsOffline())
    , mNormalize(aNormalize)
  {
  }

  // Parameter indices used by the main-thread ConvolverNode when messaging
  // this engine through SendInt32/DoubleParameterToStream().
  enum Parameters {
    BUFFER_LENGTH,
    SAMPLE_RATE,
    NORMALIZE
  };
  virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case BUFFER_LENGTH:
      // BUFFER_LENGTH is the first parameter that we set when setting a new buffer,
      // so we should be careful to invalidate the rest of our state here.
      mBuffer = nullptr;
      mSampleRate = 0.0f;
      mBufferLength = aParam;
      mLeftOverData = INT32_MIN;
      break;
    case SAMPLE_RATE:
      // NOTE(review): unlike the double-parameter path below, this does not
      // call AdjustReverb(); confirm the sender always follows up with a
      // message that rebuilds the reverb (e.g. SetBuffer).
      mSampleRate = aParam;
      break;
    case NORMALIZE:
      mNormalize = !!aParam;
      break;
    default:
      NS_ERROR("Bad ConvolverNodeEngine Int32Parameter");
    }
  }
  virtual void SetDoubleParameter(uint32_t aIndex, double aParam) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case SAMPLE_RATE:
      mSampleRate = aParam;
      // A sample-rate change invalidates the reverb's precomputed state.
      AdjustReverb();
      break;
    default:
      NS_ERROR("Bad ConvolverNodeEngine DoubleParameter");
    }
  }
  // Takes ownership of the (possibly zero-padded) impulse-response channel
  // data sent over from the main thread, then rebuilds the reverb.
  virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
  {
    mBuffer = aBuffer;
    AdjustReverb();
  }

  // (Re)create mReverb from the current buffer/length/sample-rate, or drop
  // it entirely if any of those are missing/zero.
  void AdjustReverb()
  {
    // Note about empirical tuning (this is copied from Blink)
    // The maximum FFT size affects reverb performance and accuracy.
    // If the reverb is single-threaded and processes entirely in the real-time audio thread,
    // it's important not to make this too high.  In this case 8192 is a good value.
    // But, the Reverb object is multi-threaded, so we want this as high as possible without losing too much accuracy.
    // Very large FFTs will have worse phase errors. Given these constraints 32768 is a good compromise.
    const size_t MaxFFTSize = 32768;

    if (!mBuffer || !mBufferLength || !mSampleRate) {
      mReverb = nullptr;
      mLeftOverData = INT32_MIN;
      return;
    }

    // The "2" limits the number of response channels the Reverb will use;
    // it matches the stereo output block allocated in ProcessBlock.
    mReverb = new WebCore::Reverb(mBuffer, mBufferLength,
                                  WEBAUDIO_BLOCK_SIZE,
                                  MaxFFTSize, 2, mUseBackgroundThreads,
                                  mNormalize, mSampleRate);
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished)
  {
    // No impulse response configured: behave as a pass-through.
    if (!mReverb) {
      *aOutput = aInput;
      return;
    }

    AudioChunk input = aInput;
    if (aInput.IsNull()) {
      if (mLeftOverData > 0) {
        // Input went silent but the reverb tail is still ringing: feed the
        // reverb an explicit block of silence and count the tail down.
        mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
        AllocateAudioBlock(1, &input);
        WriteZeroesToAudioBlock(&input, 0, WEBAUDIO_BLOCK_SIZE);
      } else {
        if (mLeftOverData != INT32_MIN) {
          // Tail fully played out: release the self-reference that kept the
          // node alive, and mark the release done with the sentinel so we
          // only dispatch it once.
          mLeftOverData = INT32_MIN;
          nsRefPtr<PlayingRefChanged> refchanged =
            new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE);
          aStream->Graph()->
            DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
        }
        aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
        return;
      }
    } else {
      if (aInput.mVolume != 1.0f) {
        // Pre-multiply the input's volume
        uint32_t numChannels = aInput.mChannelData.Length();
        AllocateAudioBlock(numChannels, &input);
        for (uint32_t i = 0; i < numChannels; ++i) {
          const float* src = static_cast<const float*>(aInput.mChannelData[i]);
          float* dest = static_cast<float*>(const_cast<void*>(input.mChannelData[i]));
          AudioBlockCopyChannelWithScale(src, aInput.mVolume, dest);
        }
      }

      // First audible block (or first after the tail ran out): take a
      // self-reference so the node survives while it still has output.
      if (mLeftOverData <= 0) {
        nsRefPtr<PlayingRefChanged> refchanged =
          new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
        aStream->Graph()->
          DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
      }
      // The tail lasts at most as long as the impulse response itself.
      mLeftOverData = mBufferLength;
      MOZ_ASSERT(mLeftOverData > 0);
    }
    // Output is allocated stereo, matching the 2 response channels passed
    // to the Reverb constructor in AdjustReverb().
    AllocateAudioBlock(2, aOutput);

    mReverb->process(&input, aOutput, WEBAUDIO_BLOCK_SIZE);
  }

  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    // When mBuffer is shared, the AudioNode side reports it instead (see
    // ConvolverNode::SizeOfExcludingThis) to avoid double-counting.
    if (mBuffer && !mBuffer->IsShared()) {
      amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
    }

    if (mReverb) {
      amount += mReverb->sizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
  nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer; // impulse-response channel data
  nsAutoPtr<WebCore::Reverb> mReverb;                  // null until buffer+rate are valid
  int32_t mBufferLength;                               // impulse length in frames
  int32_t mLeftOverData;                               // remaining tail frames; INT32_MIN = released
  float mSampleRate;                                   // 0.0f = not yet known
  bool mUseBackgroundThreads;
  bool mNormalize;
};
michael@0 | 185 | |
michael@0 | 186 | ConvolverNode::ConvolverNode(AudioContext* aContext) |
michael@0 | 187 | : AudioNode(aContext, |
michael@0 | 188 | 2, |
michael@0 | 189 | ChannelCountMode::Clamped_max, |
michael@0 | 190 | ChannelInterpretation::Speakers) |
michael@0 | 191 | , mNormalize(true) |
michael@0 | 192 | { |
michael@0 | 193 | ConvolverNodeEngine* engine = new ConvolverNodeEngine(this, mNormalize); |
michael@0 | 194 | mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM); |
michael@0 | 195 | } |
michael@0 | 196 | |
michael@0 | 197 | size_t |
michael@0 | 198 | ConvolverNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
michael@0 | 199 | { |
michael@0 | 200 | size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 201 | if (mBuffer) { |
michael@0 | 202 | // NB: mBuffer might be shared with the associated engine, by convention |
michael@0 | 203 | // the AudioNode will report. |
michael@0 | 204 | amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); |
michael@0 | 205 | } |
michael@0 | 206 | return amount; |
michael@0 | 207 | } |
michael@0 | 208 | |
michael@0 | 209 | size_t |
michael@0 | 210 | ConvolverNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const |
michael@0 | 211 | { |
michael@0 | 212 | return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 213 | } |
michael@0 | 214 | |
// WebIDL boilerplate: reflect this node into JS via the generated binding.
JSObject*
ConvolverNode::WrapObject(JSContext* aCx)
{
  return ConvolverNodeBinding::Wrap(aCx, this);
}
michael@0 | 220 | |
michael@0 | 221 | void |
michael@0 | 222 | ConvolverNode::SetBuffer(JSContext* aCx, AudioBuffer* aBuffer, ErrorResult& aRv) |
michael@0 | 223 | { |
michael@0 | 224 | if (aBuffer) { |
michael@0 | 225 | switch (aBuffer->NumberOfChannels()) { |
michael@0 | 226 | case 1: |
michael@0 | 227 | case 2: |
michael@0 | 228 | case 4: |
michael@0 | 229 | // Supported number of channels |
michael@0 | 230 | break; |
michael@0 | 231 | default: |
michael@0 | 232 | aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR); |
michael@0 | 233 | return; |
michael@0 | 234 | } |
michael@0 | 235 | } |
michael@0 | 236 | |
michael@0 | 237 | mBuffer = aBuffer; |
michael@0 | 238 | |
michael@0 | 239 | // Send the buffer to the stream |
michael@0 | 240 | AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get()); |
michael@0 | 241 | MOZ_ASSERT(ns, "Why don't we have a stream here?"); |
michael@0 | 242 | if (mBuffer) { |
michael@0 | 243 | uint32_t length = mBuffer->Length(); |
michael@0 | 244 | nsRefPtr<ThreadSharedFloatArrayBufferList> data = |
michael@0 | 245 | mBuffer->GetThreadSharedChannelsForRate(aCx); |
michael@0 | 246 | if (data && length < WEBAUDIO_BLOCK_SIZE) { |
michael@0 | 247 | // For very small impulse response buffers, we need to pad the |
michael@0 | 248 | // buffer with 0 to make sure that the Reverb implementation |
michael@0 | 249 | // has enough data to compute FFTs from. |
michael@0 | 250 | length = WEBAUDIO_BLOCK_SIZE; |
michael@0 | 251 | nsRefPtr<ThreadSharedFloatArrayBufferList> paddedBuffer = |
michael@0 | 252 | new ThreadSharedFloatArrayBufferList(data->GetChannels()); |
michael@0 | 253 | float* channelData = (float*) malloc(sizeof(float) * length * data->GetChannels()); |
michael@0 | 254 | for (uint32_t i = 0; i < data->GetChannels(); ++i) { |
michael@0 | 255 | PodCopy(channelData + length * i, data->GetData(i), mBuffer->Length()); |
michael@0 | 256 | PodZero(channelData + length * i + mBuffer->Length(), WEBAUDIO_BLOCK_SIZE - mBuffer->Length()); |
michael@0 | 257 | paddedBuffer->SetData(i, (i == 0) ? channelData : nullptr, channelData); |
michael@0 | 258 | } |
michael@0 | 259 | data = paddedBuffer; |
michael@0 | 260 | } |
michael@0 | 261 | SendInt32ParameterToStream(ConvolverNodeEngine::BUFFER_LENGTH, length); |
michael@0 | 262 | SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE, |
michael@0 | 263 | mBuffer->SampleRate()); |
michael@0 | 264 | ns->SetBuffer(data.forget()); |
michael@0 | 265 | } else { |
michael@0 | 266 | ns->SetBuffer(nullptr); |
michael@0 | 267 | } |
michael@0 | 268 | } |
michael@0 | 269 | |
michael@0 | 270 | void |
michael@0 | 271 | ConvolverNode::SetNormalize(bool aNormalize) |
michael@0 | 272 | { |
michael@0 | 273 | mNormalize = aNormalize; |
michael@0 | 274 | SendInt32ParameterToStream(ConvolverNodeEngine::NORMALIZE, aNormalize); |
michael@0 | 275 | } |
michael@0 | 276 | |
michael@0 | 277 | } |
michael@0 | 278 | } |
michael@0 | 279 |