/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "PannerNode.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioListener.h"
#include "AudioBufferSourceNode.h"
#include "PlayingRefChangeHandler.h"
#include "blink/HRTFPanner.h"
#include "blink/HRTFDatabaseLoader.h"

using WebCore::HRTFDatabaseLoader;
using WebCore::HRTFPanner;

namespace mozilla {
namespace dom {

using namespace std;

NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode)

NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(PannerNode)
  if (tmp->Context()) {
    tmp->Context()->UnregisterPannerNode(tmp);
  }
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)

NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(PannerNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(PannerNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(PannerNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(PannerNode, AudioNode)

class PannerNodeEngine : public AudioNodeEngine
{
public:
  explicit PannerNodeEngine(AudioNode* aNode)
    : AudioNodeEngine(aNode)
    // Please keep these default values consistent with PannerNode::PannerNode below.
    , mPanningModelFunction(&PannerNodeEngine::HRTFPanningFunction)
    , mDistanceModelFunction(&PannerNodeEngine::InverseGainFunction)
    , mPosition()
    , mOrientation(1., 0., 0.)
    , mVelocity()
    , mRefDistance(1.)
    , mMaxDistance(10000.)
    , mRolloffFactor(1.)
    , mConeInnerAngle(360.)
    , mConeOuterAngle(360.)
    , mConeOuterGain(0.)
    // These will be initialized when a PannerNode is created, so just initialize them
    // to some dummy values here.
    , mListenerDopplerFactor(0.)
    , mListenerSpeedOfSound(0.)
    , mLeftOverData(INT_MIN)
  {
    // HRTFDatabaseLoader needs to be fetched on the main thread.
    TemporaryRef<HRTFDatabaseLoader> loader =
      HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(aNode->Context()->SampleRate());
    mHRTFPanner = new HRTFPanner(aNode->Context()->SampleRate(), loader);
  }

  virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case PannerNode::PANNING_MODEL:
      switch (PanningModelType(aParam)) {
      case PanningModelType::Equalpower:
        mPanningModelFunction = &PannerNodeEngine::EqualPowerPanningFunction;
        break;
      case PanningModelType::HRTF:
        mPanningModelFunction = &PannerNodeEngine::HRTFPanningFunction;
        break;
      default:
        NS_NOTREACHED("We should never see the alternate names here");
        break;
      }
      break;
    case PannerNode::DISTANCE_MODEL:
      switch (DistanceModelType(aParam)) {
      case DistanceModelType::Inverse:
        mDistanceModelFunction = &PannerNodeEngine::InverseGainFunction;
        break;
      case DistanceModelType::Linear:
        mDistanceModelFunction = &PannerNodeEngine::LinearGainFunction;
        break;
      case DistanceModelType::Exponential:
        mDistanceModelFunction = &PannerNodeEngine::ExponentialGainFunction;
        break;
      default:
        NS_NOTREACHED("We should never see the alternate names here");
        break;
      }
      break;
    default:
      NS_ERROR("Bad PannerNodeEngine Int32Parameter");
    }
  }
  virtual void SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aParam) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case PannerNode::LISTENER_POSITION: mListenerPosition = aParam; break;
    case PannerNode::LISTENER_FRONT_VECTOR: mListenerFrontVector = aParam; break;
    case PannerNode::LISTENER_RIGHT_VECTOR: mListenerRightVector = aParam; break;
    case PannerNode::LISTENER_VELOCITY: mListenerVelocity = aParam; break;
    case PannerNode::POSITION: mPosition = aParam; break;
    case PannerNode::ORIENTATION: mOrientation = aParam; break;
    case PannerNode::VELOCITY: mVelocity = aParam; break;
    default:
      NS_ERROR("Bad PannerNodeEngine ThreeDPointParameter");
    }
  }
  virtual void SetDoubleParameter(uint32_t aIndex, double aParam) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case PannerNode::LISTENER_DOPPLER_FACTOR: mListenerDopplerFactor = aParam; break;
    case PannerNode::LISTENER_SPEED_OF_SOUND: mListenerSpeedOfSound = aParam; break;
    case PannerNode::REF_DISTANCE: mRefDistance = aParam; break;
    case PannerNode::MAX_DISTANCE: mMaxDistance = aParam; break;
    case PannerNode::ROLLOFF_FACTOR: mRolloffFactor = aParam; break;
    case PannerNode::CONE_INNER_ANGLE: mConeInnerAngle = aParam; break;
    case PannerNode::CONE_OUTER_ANGLE: mConeOuterAngle = aParam; break;
    case PannerNode::CONE_OUTER_GAIN: mConeOuterGain = aParam; break;
    default:
      NS_ERROR("Bad PannerNodeEngine DoubleParameter");
    }
  }

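  // Note on tail handling: while the HRTF model is active and input is
  // flowing, a playing reference is added for the stream (dispatched to the
  // main thread via PlayingRefChangeHandler) and mLeftOverData is kept at
  // maxTailFrames(). Once the input goes silent, we keep rendering for that
  // many frames so the convolution tail of the HRTF kernels can play out,
  // then release the reference and reset the panner. A value of INT_MIN
  // means no tail-time reference is currently held.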
  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool *aFinished) MOZ_OVERRIDE
  {
    if (aInput.IsNull()) {
      // mLeftOverData != INT_MIN means that the panning model was HRTF and a
      // tail-time reference was added. Even if the model is now equalpower,
      // the reference will need to be removed.
      if (mLeftOverData > 0 &&
          mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
        mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
      } else {
        if (mLeftOverData != INT_MIN) {
          mLeftOverData = INT_MIN;
          mHRTFPanner->reset();

          nsRefPtr<PlayingRefChangeHandler> refchanged =
            new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE);
          aStream->Graph()->
            DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
        }
        *aOutput = aInput;
        return;
      }
    } else if (mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
      if (mLeftOverData == INT_MIN) {
        nsRefPtr<PlayingRefChangeHandler> refchanged =
          new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF);
        aStream->Graph()->
          DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
      }
      mLeftOverData = mHRTFPanner->maxTailFrames();
    }

    (this->*mPanningModelFunction)(aInput, aOutput);
  }

  void ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation);
  float ComputeConeGain();
  // Compute how much the distance contributes to the gain reduction.
  float ComputeDistanceGain();

  void GainMonoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
                        float aGainL, float aGainR);
  void GainStereoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
                          float aGainL, float aGainR, double aAzimuth);

  void EqualPowerPanningFunction(const AudioChunk& aInput, AudioChunk* aOutput);
  void HRTFPanningFunction(const AudioChunk& aInput, AudioChunk* aOutput);

  float LinearGainFunction(float aDistance);
  float InverseGainFunction(float aDistance);
  float ExponentialGainFunction(float aDistance);

  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    if (mHRTFPanner) {
      amount += mHRTFPanner->sizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  nsAutoPtr<HRTFPanner> mHRTFPanner;
  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioChunk& aInput, AudioChunk* aOutput);
  PanningModelFunction mPanningModelFunction;
  typedef float (PannerNodeEngine::*DistanceModelFunction)(float aDistance);
  DistanceModelFunction mDistanceModelFunction;
  ThreeDPoint mPosition;
  ThreeDPoint mOrientation;
  ThreeDPoint mVelocity;
  double mRefDistance;
  double mMaxDistance;
  double mRolloffFactor;
  double mConeInnerAngle;
  double mConeOuterAngle;
  double mConeOuterGain;
  ThreeDPoint mListenerPosition;
  ThreeDPoint mListenerFrontVector;
  ThreeDPoint mListenerRightVector;
  ThreeDPoint mListenerVelocity;
  double mListenerDopplerFactor;
  double mListenerSpeedOfSound;
  int mLeftOverData;
};

PannerNode::PannerNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Clamped_max,
              ChannelInterpretation::Speakers)
  // Please keep these default values consistent with PannerNodeEngine::PannerNodeEngine above.
  , mPanningModel(PanningModelType::HRTF)
  , mDistanceModel(DistanceModelType::Inverse)
  , mPosition()
  , mOrientation(1., 0., 0.)
  , mVelocity()
  , mRefDistance(1.)
  , mMaxDistance(10000.)
  , mRolloffFactor(1.)
  , mConeInnerAngle(360.)
  , mConeOuterAngle(360.)
  , mConeOuterGain(0.)
{
  mStream = aContext->Graph()->CreateAudioNodeStream(new PannerNodeEngine(this),
                                                     MediaStreamGraph::INTERNAL_STREAM);
  // We should register once we have set up our stream and engine.
  Context()->Listener()->RegisterPannerNode(this);
}

PannerNode::~PannerNode()
{
  if (Context()) {
    Context()->UnregisterPannerNode(this);
  }
}

size_t
PannerNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  amount += mSources.SizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

size_t
PannerNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
PannerNode::WrapObject(JSContext* aCx)
{
  return PannerNodeBinding::Wrap(aCx, this);
}

void PannerNode::DestroyMediaStream()
{
  if (Context()) {
    Context()->UnregisterPannerNode(this);
  }
  AudioNode::DestroyMediaStream();
}

// These three functions are described in the spec.
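// As an illustration, with the default refDistance = 1 and rolloffFactor = 1,
// a source at distance 10 yields an inverse-model gain of
// 1 / (1 + 1 * (10 - 1)) = 0.1 and an exponential-model gain of
// pow(10 / 1, -1) = 0.1, while the linear model interpolates from 1 at
// refDistance down to 0 at maxDistance.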
float
PannerNodeEngine::LinearGainFunction(float aDistance)
{
  return 1 - mRolloffFactor * (aDistance - mRefDistance) / (mMaxDistance - mRefDistance);
}

float
PannerNodeEngine::InverseGainFunction(float aDistance)
{
  return mRefDistance / (mRefDistance + mRolloffFactor * (aDistance - mRefDistance));
}

float
PannerNodeEngine::ExponentialGainFunction(float aDistance)
{
  return pow(aDistance / mRefDistance, -mRolloffFactor);
}

void
PannerNodeEngine::HRTFPanningFunction(const AudioChunk& aInput,
                                      AudioChunk* aOutput)
{
  // The output of this node is always stereo, no matter what the inputs are.
  AllocateAudioBlock(2, aOutput);

  float azimuth, elevation;
  ComputeAzimuthAndElevation(azimuth, elevation);

  AudioChunk input = aInput;
  // Gain is applied before the delay and convolution of the HRTF.
  input.mVolume *= ComputeConeGain() * ComputeDistanceGain();

  mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
}

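// Equal-power panning: for a normalized azimuth x in [0, 1] the gains below
// are gainL = cos(x * pi/2) and gainR = sin(x * pi/2), so that
// gainL^2 + gainR^2 == 1 and the overall power stays constant as the source
// moves from full left (x == 0) to full right (x == 1).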
void
PannerNodeEngine::EqualPowerPanningFunction(const AudioChunk& aInput,
                                            AudioChunk* aOutput)
{
  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
  int inputChannels = aInput.mChannelData.Length();

  // If the source and the listener are at the same spot, and no cone gain is
  // specified, this node is a no-op.
  if (mListenerPosition == mPosition &&
      mConeInnerAngle == 360 &&
      mConeOuterAngle == 360) {
    *aOutput = aInput;
    return;
  }

  // The output of this node is always stereo, no matter what the inputs are.
  AllocateAudioBlock(2, aOutput);

  ComputeAzimuthAndElevation(azimuth, elevation);
  coneGain = ComputeConeGain();

  // The following algorithm is described in the spec.
  // Clamp azimuth to the [-180, 180] range.
  azimuth = min(180.f, max(-180.f, azimuth));

  // Wrap around so that the azimuth ends up in the [-90, 90] range.
  if (azimuth < -90.f) {
    azimuth = -180.f - azimuth;
  } else if (azimuth > 90.f) {
    azimuth = 180.f - azimuth;
  }

  // Normalize the value in the [0, 1] range.
  if (inputChannels == 1) {
    normalizedAzimuth = (azimuth + 90.f) / 180.f;
  } else {
    if (azimuth <= 0) {
      normalizedAzimuth = (azimuth + 90.f) / 90.f;
    } else {
      normalizedAzimuth = azimuth / 90.f;
    }
  }

  distanceGain = ComputeDistanceGain();

  // Actually compute the left and right gain.
  gainL = cos(0.5 * M_PI * normalizedAzimuth);
  gainR = sin(0.5 * M_PI * normalizedAzimuth);

  // Compute the output.
  if (inputChannels == 1) {
    GainMonoToStereo(aInput, aOutput, gainL, gainR);
  } else {
    GainStereoToStereo(aInput, aOutput, gainL, gainR, azimuth);
  }

  aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
}

void
PannerNodeEngine::GainMonoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
                                   float aGainL, float aGainR)
{
  float* outputL = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[0]));
  float* outputR = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[1]));
  const float* input = static_cast<float*>(const_cast<void*>(aInput.mChannelData[0]));

  AudioBlockPanMonoToStereo(input, aGainL, aGainR, outputL, outputR);
}

void
PannerNodeEngine::GainStereoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
                                     float aGainL, float aGainR, double aAzimuth)
{
  float* outputL = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[0]));
  float* outputR = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[1]));
  const float* inputL = static_cast<float*>(const_cast<void*>(aInput.mChannelData[0]));
  const float* inputR = static_cast<float*>(const_cast<void*>(aInput.mChannelData[1]));

  AudioBlockPanStereoToStereo(inputL, inputR, aGainL, aGainR, aAzimuth <= 0, outputL, outputR);
}

// This algorithm is specified in the webaudio spec.
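// The resulting azimuth is expressed in degrees relative to the listener's
// front vector: 0 is straight ahead, +90 is due right and -90 is due left.
// The elevation is the angle, in degrees, above (positive) or below
// (negative) the listener's horizontal plane.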
void
PannerNodeEngine::ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation)
{
  ThreeDPoint sourceListener = mPosition - mListenerPosition;

  if (sourceListener.IsZero()) {
    aAzimuth = 0.0;
    aElevation = 0.0;
    return;
  }

  sourceListener.Normalize();

  // Project the source-listener vector on the x-z plane.
  const ThreeDPoint& listenerFront = mListenerFrontVector;
  const ThreeDPoint& listenerRight = mListenerRightVector;
  ThreeDPoint up = listenerRight.CrossProduct(listenerFront);

  double upProjection = sourceListener.DotProduct(up);
  aElevation = 90 - 180 * acos(upProjection) / M_PI;

  if (aElevation > 90) {
    aElevation = 180 - aElevation;
  } else if (aElevation < -90) {
    aElevation = -180 - aElevation;
  }

  ThreeDPoint projectedSource = sourceListener - up * upProjection;
  if (projectedSource.IsZero()) {
    // The source-listener direction is straight up or down.
    aAzimuth = 0.0;
    return;
  }
  projectedSource.Normalize();

  // Actually compute the angle, and convert to degrees.
  double projection = projectedSource.DotProduct(listenerRight);
  aAzimuth = 180 * acos(projection) / M_PI;

  // Compute whether the source is in front of or behind the listener.
  double frontBack = projectedSource.DotProduct(listenerFront);
  if (frontBack < 0) {
    aAzimuth = 360 - aAzimuth;
  }
  // Rotate the azimuth so it is relative to the listener front vector instead
  // of the right vector.
  if ((aAzimuth >= 0) && (aAzimuth <= 270)) {
    aAzimuth = 90 - aAzimuth;
  } else {
    aAzimuth = 450 - aAzimuth;
  }
}

// This algorithm is described in the WebAudio spec.
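// Between the inner and outer cone the gain is interpolated linearly. For
// example, with coneInnerAngle = 90, coneOuterAngle = 180 and
// coneOuterGain = 0.25, a listener sitting 60 degrees off the source's
// orientation axis gives x = (60 - 45) / (90 - 45) = 1/3 and a gain of
// (1 - 1/3) + 0.25 * (1/3) = 0.75.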
float
PannerNodeEngine::ComputeConeGain()
{
  // Omnidirectional source
  if (mOrientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) {
    return 1;
  }

  // Normalized source-listener vector
  ThreeDPoint sourceToListener = mListenerPosition - mPosition;
  sourceToListener.Normalize();

  // Angle between the source orientation vector and the source-listener vector
  double dotProduct = sourceToListener.DotProduct(mOrientation);
  double angle = 180 * acos(dotProduct) / M_PI;
  double absAngle = fabs(angle);

  // Divide by 2 here since the API uses the entire angle (not the half-angle).
  double absInnerAngle = fabs(mConeInnerAngle) / 2;
  double absOuterAngle = fabs(mConeOuterAngle) / 2;
  double gain = 1;

  if (absAngle <= absInnerAngle) {
    // No attenuation
    gain = 1;
  } else if (absAngle >= absOuterAngle) {
    // Max attenuation
    gain = mConeOuterGain;
  } else {
    // Between inner and outer cones:
    // as we go from inner to outer, x goes from 0 to 1.
    double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle);
    gain = (1 - x) + mConeOuterGain * x;
  }

  return gain;
}

float
PannerNodeEngine::ComputeDistanceGain()
{
  ThreeDPoint distanceVec = mPosition - mListenerPosition;
  float distance = sqrt(distanceVec.DotProduct(distanceVec));
  return (this->*mDistanceModelFunction)(distance);
}

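// Doppler shift, as described by this generation of the Web Audio spec:
// with c = SpeedOfSound(), k = DopplerFactor(), and vListener / vSource the
// (negated) projections of the listener and source velocities onto the
// source-listener axis, each clamped to at most c / k, the playback rate of
// connected sources is scaled by
//
//   shift = (c - k * vListener) / (c - k * vSource)
//
// and the result is limited to [0.125, 16], i.e. at most three octaves down
// and four octaves up.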
float
PannerNode::ComputeDopplerShift()
{
  double dopplerShift = 1.0; // Initialize to default value

  AudioListener* listener = Context()->Listener();

  if (listener->DopplerFactor() > 0) {
    // Don't bother if both source and listener have no velocity.
    if (!mVelocity.IsZero() || !listener->Velocity().IsZero()) {
      // Calculate the source to listener vector.
      ThreeDPoint sourceToListener = mPosition - listener->Velocity();

      double sourceListenerMagnitude = sourceToListener.Magnitude();

      double listenerProjection = sourceToListener.DotProduct(listener->Velocity()) / sourceListenerMagnitude;
      double sourceProjection = sourceToListener.DotProduct(mVelocity) / sourceListenerMagnitude;

      listenerProjection = -listenerProjection;
      sourceProjection = -sourceProjection;

      double scaledSpeedOfSound = listener->SpeedOfSound() / listener->DopplerFactor();
      listenerProjection = min(listenerProjection, scaledSpeedOfSound);
      sourceProjection = min(sourceProjection, scaledSpeedOfSound);

      dopplerShift = ((listener->SpeedOfSound() - listener->DopplerFactor() * listenerProjection) /
                      (listener->SpeedOfSound() - listener->DopplerFactor() * sourceProjection));

      WebAudioUtils::FixNaN(dopplerShift); // Avoid illegal values

      // Limit the pitch shifting to 4 octaves up and 3 octaves down.
      dopplerShift = min(dopplerShift, 16.);
      dopplerShift = max(dopplerShift, 0.125);
    }
  }

  return dopplerShift;
}

void
PannerNode::FindConnectedSources()
{
  mSources.Clear();
  std::set<AudioNode*> cycleSet;
  FindConnectedSources(this, mSources, cycleSet);
}

void
PannerNode::FindConnectedSources(AudioNode* aNode,
                                 nsTArray<AudioBufferSourceNode*>& aSources,
                                 std::set<AudioNode*>& aNodesSeen)
{
  if (!aNode) {
    return;
  }

  const nsTArray<InputNode>& inputNodes = aNode->InputNodes();

  for (unsigned i = 0; i < inputNodes.Length(); i++) {
    // Return if we find a node that we have seen already.
    if (aNodesSeen.find(inputNodes[i].mInputNode) != aNodesSeen.end()) {
      return;
    }
    aNodesSeen.insert(inputNodes[i].mInputNode);
    // Recurse
    FindConnectedSources(inputNodes[i].mInputNode, aSources, aNodesSeen);

    // Check if this node is an AudioBufferSourceNode
    AudioBufferSourceNode* node = inputNodes[i].mInputNode->AsAudioBufferSourceNode();
    if (node) {
      aSources.AppendElement(node);
    }
  }
}

void
PannerNode::SendDopplerToSourcesIfNeeded()
{
  // Don't bother sending the doppler shift if both the source and the listener
  // are not moving, because the doppler shift is going to be 1.0.
  if (!(Context()->Listener()->Velocity().IsZero() && mVelocity.IsZero())) {
    for (uint32_t i = 0; i < mSources.Length(); i++) {
      mSources[i]->SendDopplerShiftToStream(ComputeDopplerShift());
    }
  }
}

}
}