/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "PannerNode.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioListener.h"
#include "AudioBufferSourceNode.h"
#include "PlayingRefChangeHandler.h"
#include "blink/HRTFPanner.h"
#include "blink/HRTFDatabaseLoader.h"

using WebCore::HRTFDatabaseLoader;
using WebCore::HRTFPanner;

namespace mozilla {
namespace dom {

using namespace std;

NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode)

NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(PannerNode)
  if (tmp->Context()) {
    tmp->Context()->UnregisterPannerNode(tmp);
  }
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)

NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(PannerNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(PannerNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(PannerNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(PannerNode, AudioNode)

class PannerNodeEngine : public AudioNodeEngine
{
public:
  explicit PannerNodeEngine(AudioNode* aNode)
    : AudioNodeEngine(aNode)
    // Please keep these default values consistent with PannerNode::PannerNode below.
    , mPanningModelFunction(&PannerNodeEngine::HRTFPanningFunction)
    , mDistanceModelFunction(&PannerNodeEngine::InverseGainFunction)
    , mPosition()
    , mOrientation(1., 0., 0.)
    , mVelocity()
    , mRefDistance(1.)
    , mMaxDistance(10000.)
    , mRolloffFactor(1.)
    , mConeInnerAngle(360.)
    , mConeOuterAngle(360.)
    , mConeOuterGain(0.)
    // These will be initialized when a PannerNode is created, so just initialize them
    // to some dummy values here.
    , mListenerDopplerFactor(0.)
    , mListenerSpeedOfSound(0.)
    , mLeftOverData(INT_MIN)
  {
    // HRTFDatabaseLoader needs to be fetched on the main thread.
    TemporaryRef<HRTFDatabaseLoader> loader =
      HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(aNode->Context()->SampleRate());
    mHRTFPanner = new HRTFPanner(aNode->Context()->SampleRate(), loader);
  }

  virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case PannerNode::PANNING_MODEL:
      switch (PanningModelType(aParam)) {
        case PanningModelType::Equalpower:
          mPanningModelFunction = &PannerNodeEngine::EqualPowerPanningFunction;
          break;
        case PanningModelType::HRTF:
          mPanningModelFunction = &PannerNodeEngine::HRTFPanningFunction;
          break;
        default:
          NS_NOTREACHED("We should never see the alternate names here");
          break;
      }
      break;
    case PannerNode::DISTANCE_MODEL:
      switch (DistanceModelType(aParam)) {
        case DistanceModelType::Inverse:
          mDistanceModelFunction = &PannerNodeEngine::InverseGainFunction;
          break;
        case DistanceModelType::Linear:
          mDistanceModelFunction = &PannerNodeEngine::LinearGainFunction;
          break;
        case DistanceModelType::Exponential:
          mDistanceModelFunction = &PannerNodeEngine::ExponentialGainFunction;
          break;
        default:
          NS_NOTREACHED("We should never see the alternate names here");
          break;
      }
      break;
    default:
      NS_ERROR("Bad PannerNodeEngine Int32Parameter");
    }
  }
  virtual void SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aParam) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case PannerNode::LISTENER_POSITION: mListenerPosition = aParam; break;
    case PannerNode::LISTENER_FRONT_VECTOR: mListenerFrontVector = aParam; break;
    case PannerNode::LISTENER_RIGHT_VECTOR: mListenerRightVector = aParam; break;
    case PannerNode::LISTENER_VELOCITY: mListenerVelocity = aParam; break;
    case PannerNode::POSITION: mPosition = aParam; break;
    case PannerNode::ORIENTATION: mOrientation = aParam; break;
    case PannerNode::VELOCITY: mVelocity = aParam; break;
    default:
      NS_ERROR("Bad PannerNodeEngine ThreeDPointParameter");
    }
  }
  virtual void SetDoubleParameter(uint32_t aIndex, double aParam) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case PannerNode::LISTENER_DOPPLER_FACTOR: mListenerDopplerFactor = aParam; break;
    case PannerNode::LISTENER_SPEED_OF_SOUND: mListenerSpeedOfSound = aParam; break;
    case PannerNode::REF_DISTANCE: mRefDistance = aParam; break;
    case PannerNode::MAX_DISTANCE: mMaxDistance = aParam; break;
    case PannerNode::ROLLOFF_FACTOR: mRolloffFactor = aParam; break;
    case PannerNode::CONE_INNER_ANGLE: mConeInnerAngle = aParam; break;
    case PannerNode::CONE_OUTER_ANGLE: mConeOuterAngle = aParam; break;
    case PannerNode::CONE_OUTER_GAIN: mConeOuterGain = aParam; break;
    default:
      NS_ERROR("Bad PannerNodeEngine DoubleParameter");
    }
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished) MOZ_OVERRIDE
  {
    if (aInput.IsNull()) {
      // mLeftOverData != INT_MIN means that the panning model was HRTF and a
      // tail-time reference was added.  Even if the model is now equalpower,
      // the reference will need to be removed.
      if (mLeftOverData > 0 &&
          mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
        mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
      } else {
        if (mLeftOverData != INT_MIN) {
          mLeftOverData = INT_MIN;
          mHRTFPanner->reset();

          nsRefPtr<PlayingRefChangeHandler> refchanged =
            new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE);
          aStream->Graph()->
            DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
        }
        *aOutput = aInput;
        return;
      }
    } else if (mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
      if (mLeftOverData == INT_MIN) {
        nsRefPtr<PlayingRefChangeHandler> refchanged =
          new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF);
        aStream->Graph()->
          DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
      }
      mLeftOverData = mHRTFPanner->maxTailFrames();
    }

    (this->*mPanningModelFunction)(aInput, aOutput);
  }

  void ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation);
  float ComputeConeGain();
  // Compute how much the distance contributes to the gain reduction.
  float ComputeDistanceGain();

  void GainMonoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
                        float aGainL, float aGainR);
  void GainStereoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
                          float aGainL, float aGainR, double aAzimuth);

  void EqualPowerPanningFunction(const AudioChunk& aInput, AudioChunk* aOutput);
  void HRTFPanningFunction(const AudioChunk& aInput, AudioChunk* aOutput);

  float LinearGainFunction(float aDistance);
  float InverseGainFunction(float aDistance);
  float ExponentialGainFunction(float aDistance);

  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    if (mHRTFPanner) {
      amount += mHRTFPanner->sizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  nsAutoPtr<HRTFPanner> mHRTFPanner;
  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioChunk& aInput, AudioChunk* aOutput);
  PanningModelFunction mPanningModelFunction;
  typedef float (PannerNodeEngine::*DistanceModelFunction)(float aDistance);
  DistanceModelFunction mDistanceModelFunction;
  ThreeDPoint mPosition;
  ThreeDPoint mOrientation;
  ThreeDPoint mVelocity;
  double mRefDistance;
  double mMaxDistance;
  double mRolloffFactor;
  double mConeInnerAngle;
  double mConeOuterAngle;
  double mConeOuterGain;
  ThreeDPoint mListenerPosition;
  ThreeDPoint mListenerFrontVector;
  ThreeDPoint mListenerRightVector;
  ThreeDPoint mListenerVelocity;
  double mListenerDopplerFactor;
  double mListenerSpeedOfSound;
  int mLeftOverData;
};

PannerNode::PannerNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Clamped_max,
              ChannelInterpretation::Speakers)
  // Please keep these default values consistent with PannerNodeEngine::PannerNodeEngine above.
  , mPanningModel(PanningModelType::HRTF)
  , mDistanceModel(DistanceModelType::Inverse)
  , mPosition()
  , mOrientation(1., 0., 0.)
  , mVelocity()
  , mRefDistance(1.)
  , mMaxDistance(10000.)
  , mRolloffFactor(1.)
  , mConeInnerAngle(360.)
  , mConeOuterAngle(360.)
  , mConeOuterGain(0.)
{
  mStream = aContext->Graph()->CreateAudioNodeStream(new PannerNodeEngine(this),
                                                     MediaStreamGraph::INTERNAL_STREAM);
  // We should register once we have set up our stream and engine.
  Context()->Listener()->RegisterPannerNode(this);
}

PannerNode::~PannerNode()
{
  if (Context()) {
    Context()->UnregisterPannerNode(this);
  }
}

size_t
PannerNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  amount += mSources.SizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

size_t
PannerNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
PannerNode::WrapObject(JSContext* aCx)
{
  return PannerNodeBinding::Wrap(aCx, this);
}

void PannerNode::DestroyMediaStream()
{
  if (Context()) {
    Context()->UnregisterPannerNode(this);
  }
  AudioNode::DestroyMediaStream();
}

// These three functions are described in the spec.
float
PannerNodeEngine::LinearGainFunction(float aDistance)
{
  return 1 - mRolloffFactor * (aDistance - mRefDistance) / (mMaxDistance - mRefDistance);
}

float
PannerNodeEngine::InverseGainFunction(float aDistance)
{
  return mRefDistance / (mRefDistance + mRolloffFactor * (aDistance - mRefDistance));
}

float
PannerNodeEngine::ExponentialGainFunction(float aDistance)
{
  return pow(aDistance / mRefDistance, -mRolloffFactor);
}

void
PannerNodeEngine::HRTFPanningFunction(const AudioChunk& aInput,
                                      AudioChunk* aOutput)
{
  // The output of this node is always stereo, no matter what the inputs are.
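  // (HRTF panning convolves the input with a pair of head-related impulse
  // responses selected from the loaded database according to the computed
  // azimuth/elevation; see blink/HRTFPanner.h for the details.)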
  AllocateAudioBlock(2, aOutput);

  float azimuth, elevation;
  ComputeAzimuthAndElevation(azimuth, elevation);

  AudioChunk input = aInput;
  // Gain is applied before the delay and convolution of the HRTF.
  input.mVolume *= ComputeConeGain() * ComputeDistanceGain();

  mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
}

void
PannerNodeEngine::EqualPowerPanningFunction(const AudioChunk& aInput,
                                            AudioChunk* aOutput)
{
  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
  int inputChannels = aInput.mChannelData.Length();

  // If the source and the listener are in the same spot, and no cone gain is
  // specified, this node is a no-op.
  if (mListenerPosition == mPosition &&
      mConeInnerAngle == 360 &&
      mConeOuterAngle == 360) {
    *aOutput = aInput;
    return;
  }

  // The output of this node is always stereo, no matter what the inputs are.
  AllocateAudioBlock(2, aOutput);

  ComputeAzimuthAndElevation(azimuth, elevation);
  coneGain = ComputeConeGain();

  // The following algorithm is described in the spec.
  // Clamp azimuth to the [-180, 180] range.
  azimuth = min(180.f, max(-180.f, azimuth));

  // Wrap around into the [-90, 90] range.
  if (azimuth < -90.f) {
    azimuth = -180.f - azimuth;
  } else if (azimuth > 90) {
    azimuth = 180.f - azimuth;
  }

  // Normalize the value into the [0, 1] range.
  if (inputChannels == 1) {
    normalizedAzimuth = (azimuth + 90.f) / 180.f;
  } else {
    if (azimuth <= 0) {
      normalizedAzimuth = (azimuth + 90.f) / 90.f;
    } else {
      normalizedAzimuth = azimuth / 90.f;
    }
  }

  distanceGain = ComputeDistanceGain();

  // Actually compute the left and right gain.
  gainL = cos(0.5 * M_PI * normalizedAzimuth);
  gainR = sin(0.5 * M_PI * normalizedAzimuth);
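  // The cos/sin pair above is an equal-power law: gainL*gainL + gainR*gainR == 1
  // for any normalizedAzimuth in [0, 1], so the total power stays constant as
  // the balance moves between the two channels.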
  // Compute the output.
  if (inputChannels == 1) {
    GainMonoToStereo(aInput, aOutput, gainL, gainR);
  } else {
    GainStereoToStereo(aInput, aOutput, gainL, gainR, azimuth);
  }

  aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
}

void
PannerNodeEngine::GainMonoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
                                   float aGainL, float aGainR)
{
  float* outputL = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[0]));
  float* outputR = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[1]));
  const float* input = static_cast<float*>(const_cast<void*>(aInput.mChannelData[0]));

  AudioBlockPanMonoToStereo(input, aGainL, aGainR, outputL, outputR);
}

void
PannerNodeEngine::GainStereoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
                                     float aGainL, float aGainR, double aAzimuth)
{
  float* outputL = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[0]));
  float* outputR = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[1]));
  const float* inputL = static_cast<float*>(const_cast<void*>(aInput.mChannelData[0]));
  const float* inputR = static_cast<float*>(const_cast<void*>(aInput.mChannelData[1]));

  AudioBlockPanStereoToStereo(inputL, inputR, aGainL, aGainR, aAzimuth <= 0, outputL, outputR);
}

// This algorithm is specified in the WebAudio spec.
void
PannerNodeEngine::ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation)
{
  ThreeDPoint sourceListener = mPosition - mListenerPosition;

  if (sourceListener.IsZero()) {
    aAzimuth = 0.0;
    aElevation = 0.0;
    return;
  }

  sourceListener.Normalize();

  // Project the source-listener vector on the x-z plane.
  const ThreeDPoint& listenerFront = mListenerFrontVector;
  const ThreeDPoint& listenerRight = mListenerRightVector;
  ThreeDPoint up = listenerRight.CrossProduct(listenerFront);

  double upProjection = sourceListener.DotProduct(up);
  aElevation = 90 - 180 * acos(upProjection) / M_PI;

  if (aElevation > 90) {
    aElevation = 180 - aElevation;
  } else if (aElevation < -90) {
    aElevation = -180 - aElevation;
  }

  ThreeDPoint projectedSource = sourceListener - up * upProjection;
  if (projectedSource.IsZero()) {
    // The source - listener direction is straight up or down.
    aAzimuth = 0.0;
    return;
  }
  projectedSource.Normalize();

  // Actually compute the angle, and convert to degrees.
  double projection = projectedSource.DotProduct(listenerRight);
  aAzimuth = 180 * acos(projection) / M_PI;

  // Compute whether the source is in front of or behind the listener.
  double frontBack = projectedSource.DotProduct(listenerFront);
  if (frontBack < 0) {
    aAzimuth = 360 - aAzimuth;
  }
  // Rotate the azimuth so it is relative to the listener front vector instead
  // of the right vector.
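  // At this point aAzimuth is measured from listenerRight in [0, 360).  The
  // remapping below makes 0 degrees point straight ahead, +90 to the
  // listener's right, -90 to the left and +/-180 behind, which is the
  // convention the panning functions above expect.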
  if ((aAzimuth >= 0) && (aAzimuth <= 270)) {
    aAzimuth = 90 - aAzimuth;
  } else {
    aAzimuth = 450 - aAzimuth;
  }
}

// This algorithm is described in the WebAudio spec.
float
PannerNodeEngine::ComputeConeGain()
{
  // Omnidirectional source
  if (mOrientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) {
    return 1;
  }

  // Normalized source-listener vector
  ThreeDPoint sourceToListener = mListenerPosition - mPosition;
  sourceToListener.Normalize();

  // Angle between the source orientation vector and the source-listener vector
  double dotProduct = sourceToListener.DotProduct(mOrientation);
  double angle = 180 * acos(dotProduct) / M_PI;
  double absAngle = fabs(angle);

  // Divide by 2 here since API is entire angle (not half-angle)
  double absInnerAngle = fabs(mConeInnerAngle) / 2;
  double absOuterAngle = fabs(mConeOuterAngle) / 2;
  double gain = 1;

  if (absAngle <= absInnerAngle) {
    // No attenuation
    gain = 1;
  } else if (absAngle >= absOuterAngle) {
    // Max attenuation
    gain = mConeOuterGain;
  } else {
    // Between inner and outer cones
    // inner -> outer, x goes from 0 -> 1
    double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle);
    gain = (1 - x) + mConeOuterGain * x;
  }

  return gain;
}

float
PannerNodeEngine::ComputeDistanceGain()
{
  ThreeDPoint distanceVec = mPosition - mListenerPosition;
  float distance = sqrt(distanceVec.DotProduct(distanceVec));
  return (this->*mDistanceModelFunction)(distance);
}

float
PannerNode::ComputeDopplerShift()
{
  double dopplerShift = 1.0; // Initialize to default value

  AudioListener* listener = Context()->Listener();

  if (listener->DopplerFactor() > 0) {
    // Don't bother if both source and listener have no velocity.
    if (!mVelocity.IsZero() || !listener->Velocity().IsZero()) {
      // Calculate the source to listener vector.
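      // The shift computed below follows the (now-deprecated) doppler section
      // of the Web Audio spec:
      //   dopplerShift = (speedOfSound - dopplerFactor * listenerProjection) /
      //                  (speedOfSound - dopplerFactor * sourceProjection)
      // where the projections are the listener and source velocities projected
      // onto the source-to-listener direction.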
      // NOTE: the spec derives this vector from the listener *position*; the
      // listener velocity is used here.
      ThreeDPoint sourceToListener = mPosition - listener->Velocity();

      double sourceListenerMagnitude = sourceToListener.Magnitude();

      double listenerProjection = sourceToListener.DotProduct(listener->Velocity()) / sourceListenerMagnitude;
      double sourceProjection = sourceToListener.DotProduct(mVelocity) / sourceListenerMagnitude;

      listenerProjection = -listenerProjection;
      sourceProjection = -sourceProjection;

      double scaledSpeedOfSound = listener->SpeedOfSound() / listener->DopplerFactor();
      listenerProjection = min(listenerProjection, scaledSpeedOfSound);
      sourceProjection = min(sourceProjection, scaledSpeedOfSound);

      dopplerShift = ((listener->SpeedOfSound() - listener->DopplerFactor() * listenerProjection) /
                      (listener->SpeedOfSound() - listener->DopplerFactor() * sourceProjection));

      WebAudioUtils::FixNaN(dopplerShift); // Avoid illegal values

      // Limit the pitch shifting to 4 octaves up and 3 octaves down.
      dopplerShift = min(dopplerShift, 16.);
      dopplerShift = max(dopplerShift, 0.125);
    }
  }

  return dopplerShift;
}

void
PannerNode::FindConnectedSources()
{
  mSources.Clear();
  std::set<AudioNode*> cycleSet;
  FindConnectedSources(this, mSources, cycleSet);
}

void
PannerNode::FindConnectedSources(AudioNode* aNode,
                                 nsTArray<AudioBufferSourceNode*>& aSources,
                                 std::set<AudioNode*>& aNodesSeen)
{
  if (!aNode) {
    return;
  }

  const nsTArray<AudioNode::InputNode>& inputNodes = aNode->InputNodes();

  for (unsigned i = 0; i < inputNodes.Length(); i++) {
    // Return if we find a node that we have seen already.
    if (aNodesSeen.find(inputNodes[i].mInputNode) != aNodesSeen.end()) {
      return;
    }
    aNodesSeen.insert(inputNodes[i].mInputNode);
    // Recurse.
    FindConnectedSources(inputNodes[i].mInputNode, aSources, aNodesSeen);

    // Check if this node is an AudioBufferSourceNode.
    AudioBufferSourceNode* node = inputNodes[i].mInputNode->AsAudioBufferSourceNode();
    if (node) {
      aSources.AppendElement(node);
    }
  }
}

void
PannerNode::SendDopplerToSourcesIfNeeded()
{
  // Don't bother sending the doppler shift if neither the source nor the
  // listener is moving, because the doppler shift is then always 1.0.
  if (!(Context()->Listener()->Velocity().IsZero() && mVelocity.IsZero())) {
    for (uint32_t i = 0; i < mSources.Length(); i++) {
      mSources[i]->SendDopplerShiftToStream(ComputeDopplerShift());
    }
  }
}

}
}