Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/dom/AnalyserNode.h"
#include "mozilla/dom/AnalyserNodeBinding.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "mozilla/Mutex.h"
#include "mozilla/PodOperations.h"

namespace mozilla {
namespace dom {

NS_IMPL_ISUPPORTS_INHERITED0(AnalyserNode, AudioNode)

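// The engine below runs on the MediaStreamGraph (audio processing) thread.
// Each processed block is forwarded to the main thread via the TransferBuffer
// runnable, where AnalyserNode::AppendChunk() copies it into the node's
// circular time-domain buffer.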
class AnalyserNodeEngine : public AudioNodeEngine
{
  class TransferBuffer : public nsRunnable
  {
  public:
    TransferBuffer(AudioNodeStream* aStream,
                   const AudioChunk& aChunk)
      : mStream(aStream)
      , mChunk(aChunk)
    {
    }

    NS_IMETHOD Run()
    {
      nsRefPtr<AnalyserNode> node;
      {
        // No need to keep holding the lock for the whole duration of this
        // function: once we have obtained a strong reference to the node
        // here, that reference keeps it alive for the rest of this function.
        MutexAutoLock lock(mStream->Engine()->NodeMutex());
        node = static_cast<AnalyserNode*>(mStream->Engine()->Node());
      }
      if (node) {
        node->AppendChunk(mChunk);
      }
      return NS_OK;
    }

  private:
    nsRefPtr<AudioNodeStream> mStream;
    AudioChunk mChunk;
  };

public:
  explicit AnalyserNodeEngine(AnalyserNode* aNode)
    : AudioNodeEngine(aNode)
  {
    MOZ_ASSERT(NS_IsMainThread());
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished) MOZ_OVERRIDE
  {
    *aOutput = aInput;

    MutexAutoLock lock(NodeMutex());

    if (Node() &&
        aInput.mChannelData.Length() > 0) {
      nsRefPtr<TransferBuffer> transfer = new TransferBuffer(aStream, aInput);
      NS_DispatchToMainThread(transfer);
    }
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }
};

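// The default values below (fftSize = 2048, minDecibels = -100,
// maxDecibels = -30, smoothingTimeConstant = 0.8) match the AnalyserNode
// defaults in the Web Audio API.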
AnalyserNode::AnalyserNode(AudioContext* aContext)
  : AudioNode(aContext,
              1,
              ChannelCountMode::Explicit,
              ChannelInterpretation::Speakers)
  , mAnalysisBlock(2048)
  , mMinDecibels(-100.)
  , mMaxDecibels(-30.)
  , mSmoothingTimeConstant(.8)
  , mWriteIndex(0)
{
  mStream = aContext->Graph()->CreateAudioNodeStream(new AnalyserNodeEngine(this),
                                                     MediaStreamGraph::INTERNAL_STREAM);
  AllocateBuffer();
}

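// Rough sketch of how this node is driven from content JS (illustrative only,
// not part of this file); the WebIDL bindings route these calls into the
// setters and getters below:
//
//   var ctx = new AudioContext();
//   var analyser = ctx.createAnalyser();                  // AnalyserNode::AnalyserNode
//   analyser.fftSize = 1024;                              // SetFftSize()
//   var bins = new Uint8Array(analyser.frequencyBinCount);
//   analyser.getByteFrequencyData(bins);                  // GetByteFrequencyData()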
size_t
AnalyserNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  amount += mAnalysisBlock.SizeOfExcludingThis(aMallocSizeOf);
  amount += mBuffer.SizeOfExcludingThis(aMallocSizeOf);
  amount += mOutputBuffer.SizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

size_t
AnalyserNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
AnalyserNode::WrapObject(JSContext* aCx)
{
  return AnalyserNodeBinding::Wrap(aCx, this);
}

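// Note: (aValue & (aValue - 1)) == 0 holds exactly when aValue is zero or a
// power of two, so together with the range test below only powers of two in
// [32, 2048] are accepted.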
void
AnalyserNode::SetFftSize(uint32_t aValue, ErrorResult& aRv)
{
  // Disallow values that are not a power of 2 or that fall outside the
  // [32, 2048] range.
  if (aValue < 32 ||
      aValue > 2048 ||
      (aValue & (aValue - 1)) != 0) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  if (FftSize() != aValue) {
    mAnalysisBlock.SetFFTSize(aValue);
    AllocateBuffer();
  }
}

void
AnalyserNode::SetMinDecibels(double aValue, ErrorResult& aRv)
{
  if (aValue >= mMaxDecibels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mMinDecibels = aValue;
}

void
AnalyserNode::SetMaxDecibels(double aValue, ErrorResult& aRv)
{
  if (aValue <= mMinDecibels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mMaxDecibels = aValue;
}

void
AnalyserNode::SetSmoothingTimeConstant(double aValue, ErrorResult& aRv)
{
  if (aValue < 0 || aValue > 1) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mSmoothingTimeConstant = aValue;
}

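// Frequency-domain getters: FFTAnalysis() refreshes mOutputBuffer with the
// smoothed linear magnitudes; WebAudioUtils::ConvertLinearToDecibels then maps
// each magnitude onto a dB scale (roughly 20 * log10(magnitude), bounded below
// by mMinDecibels) before the result is copied into the caller's array.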
void
AnalyserNode::GetFloatFrequencyData(const Float32Array& aArray)
{
  if (!FFTAnalysis()) {
    // Might fail to allocate memory
    return;
  }

  aArray.ComputeLengthAndData();

  float* buffer = aArray.Data();
  uint32_t length = std::min(aArray.Length(), mOutputBuffer.Length());

  for (uint32_t i = 0; i < length; ++i) {
    buffer[i] = WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i], mMinDecibels);
  }
}

void
AnalyserNode::GetByteFrequencyData(const Uint8Array& aArray)
{
  if (!FFTAnalysis()) {
    // Might fail to allocate memory
    return;
  }

  const double rangeScaleFactor = 1.0 / (mMaxDecibels - mMinDecibels);

  aArray.ComputeLengthAndData();

  unsigned char* buffer = aArray.Data();
  uint32_t length = std::min(aArray.Length(), mOutputBuffer.Length());

  for (uint32_t i = 0; i < length; ++i) {
    const double decibels = WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i], mMinDecibels);
    // scale down the value to the range of [0, UCHAR_MAX]
    const double scaled = std::max(0.0, std::min(double(UCHAR_MAX),
                                                 UCHAR_MAX * (decibels - mMinDecibels) * rangeScaleFactor));
    buffer[i] = static_cast<unsigned char>(scaled);
  }
}

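// Time-domain getters: mBuffer is a circular buffer and mWriteIndex is the
// position of the next write, i.e. the oldest sample, so reads start at
// mWriteIndex and wrap around via the modulo below.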
void
AnalyserNode::GetFloatTimeDomainData(const Float32Array& aArray)
{
  aArray.ComputeLengthAndData();

  float* buffer = aArray.Data();
  uint32_t length = std::min(aArray.Length(), mBuffer.Length());

  for (uint32_t i = 0; i < length; ++i) {
    buffer[i] = mBuffer[(i + mWriteIndex) % mBuffer.Length()];
  }
}

void
AnalyserNode::GetByteTimeDomainData(const Uint8Array& aArray)
{
  aArray.ComputeLengthAndData();

  unsigned char* buffer = aArray.Data();
  uint32_t length = std::min(aArray.Length(), mBuffer.Length());

  for (uint32_t i = 0; i < length; ++i) {
    const float value = mBuffer[(i + mWriteIndex) % mBuffer.Length()];
    // scale the value to the range of [0, UCHAR_MAX]
    const float scaled = std::max(0.0f, std::min(float(UCHAR_MAX),
                                                 128.0f * (value + 1.0f)));
    buffer[i] = static_cast<unsigned char>(scaled);
  }
}

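// FFTAnalysis() produces the data served by the frequency-domain getters:
//   1. Unroll the circular buffer into a time-ordered block of FftSize()
//      samples (a temporary copy is only needed when mWriteIndex != 0).
//   2. Apply a Blackman window to reduce spectral leakage.
//   3. Run the FFT and normalize each magnitude by 1 / FftSize().
//   4. Smooth each bin over time:
//        mOutputBuffer[i] = k * mOutputBuffer[i] + (1 - k) * magnitude
//      with k = mSmoothingTimeConstant.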
bool
AnalyserNode::FFTAnalysis()
{
  float* inputBuffer;
  bool allocated = false;
  if (mWriteIndex == 0) {
    inputBuffer = mBuffer.Elements();
  } else {
    inputBuffer = static_cast<float*>(moz_malloc(FftSize() * sizeof(float)));
    if (!inputBuffer) {
      return false;
    }
    memcpy(inputBuffer, mBuffer.Elements() + mWriteIndex, sizeof(float) * (FftSize() - mWriteIndex));
    memcpy(inputBuffer + FftSize() - mWriteIndex, mBuffer.Elements(), sizeof(float) * mWriteIndex);
    allocated = true;
  }

  ApplyBlackmanWindow(inputBuffer, FftSize());

  mAnalysisBlock.PerformFFT(inputBuffer);

  // Normalize so that an input sine wave at 0dBfs registers as 0dBfs (undo FFT scaling factor).
  const double magnitudeScale = 1.0 / FftSize();

  for (uint32_t i = 0; i < mOutputBuffer.Length(); ++i) {
    double scalarMagnitude = NS_hypot(mAnalysisBlock.RealData(i),
                                      mAnalysisBlock.ImagData(i)) *
                             magnitudeScale;
    mOutputBuffer[i] = mSmoothingTimeConstant * mOutputBuffer[i] +
                       (1.0 - mSmoothingTimeConstant) * scalarMagnitude;
  }

  if (allocated) {
    moz_free(inputBuffer);
  }
  return true;
}

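// Standard Blackman window with alpha = 0.16:
//   w(n) = a0 - a1 * cos(2*pi*n/N) + a2 * cos(4*pi*n/N)
// where a0 = (1 - alpha) / 2, a1 = 1/2, a2 = alpha / 2.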
void
AnalyserNode::ApplyBlackmanWindow(float* aBuffer, uint32_t aSize)
{
  double alpha = 0.16;
  double a0 = 0.5 * (1.0 - alpha);
  double a1 = 0.5;
  double a2 = 0.5 * alpha;

  for (uint32_t i = 0; i < aSize; ++i) {
    double x = double(i) / aSize;
    double window = a0 - a1 * cos(2 * M_PI * x) + a2 * cos(4 * M_PI * x);
    aBuffer[i] *= window;
  }
}

bool
AnalyserNode::AllocateBuffer()
{
  bool result = true;
  if (mBuffer.Length() != FftSize()) {
    result = mBuffer.SetLength(FftSize());
    if (result) {
      memset(mBuffer.Elements(), 0, sizeof(float) * FftSize());
      mWriteIndex = 0;

      result = mOutputBuffer.SetLength(FrequencyBinCount());
      if (result) {
        memset(mOutputBuffer.Elements(), 0, sizeof(float) * FrequencyBinCount());
      }
    }
  }
  return result;
}

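// AppendChunk() is called on the main thread from TransferBuffer::Run().  It
// down-mixes the incoming block to mono by summing all channels and dividing
// by the channel count, writes the result at mWriteIndex, and then advances
// (and wraps) the write index.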
void
AnalyserNode::AppendChunk(const AudioChunk& aChunk)
{
  const uint32_t bufferSize = mBuffer.Length();
  const uint32_t channelCount = aChunk.mChannelData.Length();
  uint32_t chunkDuration = aChunk.mDuration;
  MOZ_ASSERT((bufferSize & (bufferSize - 1)) == 0); // Must be a power of two!
  MOZ_ASSERT(channelCount > 0);
  MOZ_ASSERT(chunkDuration == WEBAUDIO_BLOCK_SIZE);

  if (chunkDuration > bufferSize) {
    // Copy at most bufferSize samples.
    chunkDuration = bufferSize;
  }

  PodCopy(mBuffer.Elements() + mWriteIndex, static_cast<const float*>(aChunk.mChannelData[0]), chunkDuration);
  for (uint32_t i = 1; i < channelCount; ++i) {
    AudioBlockAddChannelWithScale(static_cast<const float*>(aChunk.mChannelData[i]), 1.0f,
                                  mBuffer.Elements() + mWriteIndex);
  }
  if (channelCount > 1) {
    AudioBlockInPlaceScale(mBuffer.Elements() + mWriteIndex,
                           1.0f / aChunk.mChannelData.Length());
  }
  mWriteIndex += chunkDuration;
  MOZ_ASSERT(mWriteIndex <= bufferSize);
  if (mWriteIndex >= bufferSize) {
    mWriteIndex = 0;
  }
}

} // namespace dom
} // namespace mozilla