content/media/webaudio/PannerNode.cpp

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/webaudio/PannerNode.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,597 @@
     1.4 +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
     1.5 +/* vim:set ts=2 sw=2 sts=2 et cindent: */
     1.6 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#include "PannerNode.h"
    1.11 +#include "AudioNodeEngine.h"
    1.12 +#include "AudioNodeStream.h"
    1.13 +#include "AudioListener.h"
    1.14 +#include "AudioBufferSourceNode.h"
    1.15 +#include "PlayingRefChangeHandler.h"
    1.16 +#include "blink/HRTFPanner.h"
    1.17 +#include "blink/HRTFDatabaseLoader.h"
    1.18 +
    1.19 +using WebCore::HRTFDatabaseLoader;
    1.20 +using WebCore::HRTFPanner;
    1.21 +
    1.22 +namespace mozilla {
    1.23 +namespace dom {
    1.24 +
    1.25 +using namespace std;
    1.26 +
    1.27 +NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode)
    1.28 +
    1.29 +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(PannerNode)
    1.30 +  if (tmp->Context()) {
    1.31 +    tmp->Context()->UnregisterPannerNode(tmp);
    1.32 +  }
    1.33 +NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)
    1.34 +
    1.35 +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(PannerNode, AudioNode)
    1.36 +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
    1.37 +
    1.38 +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(PannerNode)
    1.39 +NS_INTERFACE_MAP_END_INHERITING(AudioNode)
    1.40 +
    1.41 +NS_IMPL_ADDREF_INHERITED(PannerNode, AudioNode)
    1.42 +NS_IMPL_RELEASE_INHERITED(PannerNode, AudioNode)
    1.43 +
    1.44 +class PannerNodeEngine : public AudioNodeEngine
    1.45 +{
    1.46 +public:
    1.47 +  explicit PannerNodeEngine(AudioNode* aNode)
    1.48 +    : AudioNodeEngine(aNode)
    1.49 +    // Please keep these default values consistent with PannerNode::PannerNode below.
    1.50 +    , mPanningModelFunction(&PannerNodeEngine::HRTFPanningFunction)
    1.51 +    , mDistanceModelFunction(&PannerNodeEngine::InverseGainFunction)
    1.52 +    , mPosition()
    1.53 +    , mOrientation(1., 0., 0.)
    1.54 +    , mVelocity()
    1.55 +    , mRefDistance(1.)
    1.56 +    , mMaxDistance(10000.)
    1.57 +    , mRolloffFactor(1.)
    1.58 +    , mConeInnerAngle(360.)
    1.59 +    , mConeOuterAngle(360.)
    1.60 +    , mConeOuterGain(0.)
    1.61 +    // These will be initialized when a PannerNode is created, so just initialize them
    1.62 +    // to some dummy values here.
    1.63 +    , mListenerDopplerFactor(0.)
    1.64 +    , mListenerSpeedOfSound(0.)
    1.65 +    , mLeftOverData(INT_MIN)
    1.66 +  {
    1.67 +    // HRTFDatabaseLoader needs to be fetched on the main thread.
    1.68 +    TemporaryRef<HRTFDatabaseLoader> loader =
    1.69 +      HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(aNode->Context()->SampleRate());
    1.70 +    mHRTFPanner = new HRTFPanner(aNode->Context()->SampleRate(), loader);
    1.71 +  }
    1.72 +
    1.73 +  virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) MOZ_OVERRIDE
    1.74 +  {
    1.75 +    switch (aIndex) {
    1.76 +    case PannerNode::PANNING_MODEL:
    1.77 +      switch (PanningModelType(aParam)) {
    1.78 +        case PanningModelType::Equalpower:
    1.79 +          mPanningModelFunction = &PannerNodeEngine::EqualPowerPanningFunction;
    1.80 +          break;
    1.81 +        case PanningModelType::HRTF:
    1.82 +          mPanningModelFunction = &PannerNodeEngine::HRTFPanningFunction;
    1.83 +          break;
    1.84 +        default:
    1.85 +          NS_NOTREACHED("We should never see the alternate names here");
    1.86 +          break;
    1.87 +      }
    1.88 +      break;
    1.89 +    case PannerNode::DISTANCE_MODEL:
    1.90 +      switch (DistanceModelType(aParam)) {
    1.91 +        case DistanceModelType::Inverse:
    1.92 +          mDistanceModelFunction = &PannerNodeEngine::InverseGainFunction;
    1.93 +          break;
    1.94 +        case DistanceModelType::Linear:
    1.95 +          mDistanceModelFunction = &PannerNodeEngine::LinearGainFunction;
    1.96 +          break;
    1.97 +        case DistanceModelType::Exponential:
    1.98 +          mDistanceModelFunction = &PannerNodeEngine::ExponentialGainFunction;
    1.99 +          break;
   1.100 +        default:
   1.101 +          NS_NOTREACHED("We should never see the alternate names here");
   1.102 +          break;
   1.103 +      }
   1.104 +      break;
   1.105 +    default:
   1.106 +      NS_ERROR("Bad PannerNodeEngine Int32Parameter");
   1.107 +    }
   1.108 +  }
   1.109 +  virtual void SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aParam) MOZ_OVERRIDE
   1.110 +  {
   1.111 +    switch (aIndex) {
   1.112 +    case PannerNode::LISTENER_POSITION: mListenerPosition = aParam; break;
   1.113 +    case PannerNode::LISTENER_FRONT_VECTOR: mListenerFrontVector = aParam; break;
   1.114 +    case PannerNode::LISTENER_RIGHT_VECTOR: mListenerRightVector = aParam; break;
   1.115 +    case PannerNode::LISTENER_VELOCITY: mListenerVelocity = aParam; break;
   1.116 +    case PannerNode::POSITION: mPosition = aParam; break;
   1.117 +    case PannerNode::ORIENTATION: mOrientation = aParam; break;
   1.118 +    case PannerNode::VELOCITY: mVelocity = aParam; break;
   1.119 +    default:
   1.120 +      NS_ERROR("Bad PannerNodeEngine ThreeDPointParameter");
   1.121 +    }
   1.122 +  }
   1.123 +  virtual void SetDoubleParameter(uint32_t aIndex, double aParam) MOZ_OVERRIDE
   1.124 +  {
   1.125 +    switch (aIndex) {
   1.126 +    case PannerNode::LISTENER_DOPPLER_FACTOR: mListenerDopplerFactor = aParam; break;
   1.127 +    case PannerNode::LISTENER_SPEED_OF_SOUND: mListenerSpeedOfSound = aParam; break;
   1.128 +    case PannerNode::REF_DISTANCE: mRefDistance = aParam; break;
   1.129 +    case PannerNode::MAX_DISTANCE: mMaxDistance = aParam; break;
   1.130 +    case PannerNode::ROLLOFF_FACTOR: mRolloffFactor = aParam; break;
   1.131 +    case PannerNode::CONE_INNER_ANGLE: mConeInnerAngle = aParam; break;
   1.132 +    case PannerNode::CONE_OUTER_ANGLE: mConeOuterAngle = aParam; break;
   1.133 +    case PannerNode::CONE_OUTER_GAIN: mConeOuterGain = aParam; break;
   1.134 +    default:
   1.135 +      NS_ERROR("Bad PannerNodeEngine DoubleParameter");
   1.136 +    }
   1.137 +  }
   1.138 +
   1.139 +  virtual void ProcessBlock(AudioNodeStream* aStream,
   1.140 +                            const AudioChunk& aInput,
   1.141 +                            AudioChunk* aOutput,
   1.142 +                            bool *aFinished) MOZ_OVERRIDE
   1.143 +  {
   1.144 +    if (aInput.IsNull()) {
   1.145 +      // mLeftOverData != INT_MIN means that the panning model was HRTF and a
   1.146 +      // tail-time reference was added.  Even if the model is now equalpower,
   1.147 +      // the reference will need to be removed.
   1.148 +      if (mLeftOverData > 0 &&
   1.149 +          mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
   1.150 +        mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
   1.151 +      } else {
   1.152 +        if (mLeftOverData != INT_MIN) {
   1.153 +          mLeftOverData = INT_MIN;
   1.154 +          mHRTFPanner->reset();
   1.155 +
   1.156 +          nsRefPtr<PlayingRefChangeHandler> refchanged =
   1.157 +            new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE);
   1.158 +          aStream->Graph()->
   1.159 +            DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
   1.160 +        }
   1.161 +        *aOutput = aInput;
   1.162 +        return;
   1.163 +      }
   1.164 +    } else if (mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
   1.165 +      if (mLeftOverData == INT_MIN) {
   1.166 +        nsRefPtr<PlayingRefChangeHandler> refchanged =
   1.167 +          new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF);
   1.168 +        aStream->Graph()->
   1.169 +          DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
   1.170 +      }
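          +      // Remember how many frames of tail output the HRTF panner can still
          +      // produce once the input becomes null; the null-input path above counts
          +      // this down one block at a time.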
   1.171 +      mLeftOverData = mHRTFPanner->maxTailFrames();
   1.172 +    }
   1.173 +
   1.174 +    (this->*mPanningModelFunction)(aInput, aOutput);
   1.175 +  }
   1.176 +
   1.177 +  void ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation);
   1.178 +  float ComputeConeGain();
   1.179 +  // Compute how much the distance contributes to the gain reduction.
   1.180 +  float ComputeDistanceGain();
   1.181 +
   1.182 +  void GainMonoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
   1.183 +                        float aGainL, float aGainR);
   1.184 +  void GainStereoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
   1.185 +                          float aGainL, float aGainR, double aAzimuth);
   1.186 +
   1.187 +  void EqualPowerPanningFunction(const AudioChunk& aInput, AudioChunk* aOutput);
   1.188 +  void HRTFPanningFunction(const AudioChunk& aInput, AudioChunk* aOutput);
   1.189 +
   1.190 +  float LinearGainFunction(float aDistance);
   1.191 +  float InverseGainFunction(float aDistance);
   1.192 +  float ExponentialGainFunction(float aDistance);
   1.193 +
   1.194 +  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
   1.195 +  {
   1.196 +    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
   1.197 +    if (mHRTFPanner) {
   1.198 +      amount += mHRTFPanner->sizeOfIncludingThis(aMallocSizeOf);
   1.199 +    }
   1.200 +
   1.201 +    return amount;
   1.202 +  }
   1.203 +
   1.204 +  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
   1.205 +  {
   1.206 +    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   1.207 +  }
   1.208 +
   1.209 +  nsAutoPtr<HRTFPanner> mHRTFPanner;
   1.210 +  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioChunk& aInput, AudioChunk* aOutput);
   1.211 +  PanningModelFunction mPanningModelFunction;
   1.212 +  typedef float (PannerNodeEngine::*DistanceModelFunction)(float aDistance);
   1.213 +  DistanceModelFunction mDistanceModelFunction;
   1.214 +  ThreeDPoint mPosition;
   1.215 +  ThreeDPoint mOrientation;
   1.216 +  ThreeDPoint mVelocity;
   1.217 +  double mRefDistance;
   1.218 +  double mMaxDistance;
   1.219 +  double mRolloffFactor;
   1.220 +  double mConeInnerAngle;
   1.221 +  double mConeOuterAngle;
   1.222 +  double mConeOuterGain;
   1.223 +  ThreeDPoint mListenerPosition;
   1.224 +  ThreeDPoint mListenerFrontVector;
   1.225 +  ThreeDPoint mListenerRightVector;
   1.226 +  ThreeDPoint mListenerVelocity;
   1.227 +  double mListenerDopplerFactor;
   1.228 +  double mListenerSpeedOfSound;
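          +  // Number of frames of HRTF tail output still owed once the input goes
          +  // null; INT_MIN means that no tail-time reference is currently held.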
   1.229 +  int mLeftOverData;
   1.230 +};
   1.231 +
   1.232 +PannerNode::PannerNode(AudioContext* aContext)
   1.233 +  : AudioNode(aContext,
   1.234 +              2,
   1.235 +              ChannelCountMode::Clamped_max,
   1.236 +              ChannelInterpretation::Speakers)
   1.237 +  // Please keep these default values consistent with PannerNodeEngine::PannerNodeEngine above.
   1.238 +  , mPanningModel(PanningModelType::HRTF)
   1.239 +  , mDistanceModel(DistanceModelType::Inverse)
   1.240 +  , mPosition()
   1.241 +  , mOrientation(1., 0., 0.)
   1.242 +  , mVelocity()
   1.243 +  , mRefDistance(1.)
   1.244 +  , mMaxDistance(10000.)
   1.245 +  , mRolloffFactor(1.)
   1.246 +  , mConeInnerAngle(360.)
   1.247 +  , mConeOuterAngle(360.)
   1.248 +  , mConeOuterGain(0.)
   1.249 +{
   1.250 +  mStream = aContext->Graph()->CreateAudioNodeStream(new PannerNodeEngine(this),
   1.251 +                                                     MediaStreamGraph::INTERNAL_STREAM);
   1.252 +  // We should register once we have set up our stream and engine.
   1.253 +  Context()->Listener()->RegisterPannerNode(this);
   1.254 +}
   1.255 +
   1.256 +PannerNode::~PannerNode()
   1.257 +{
   1.258 +  if (Context()) {
   1.259 +    Context()->UnregisterPannerNode(this);
   1.260 +  }
   1.261 +}
   1.262 +
   1.263 +size_t
   1.264 +PannerNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
   1.265 +{
   1.266 +  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
   1.267 +  amount += mSources.SizeOfExcludingThis(aMallocSizeOf);
   1.268 +  return amount;
   1.269 +}
   1.270 +
   1.271 +size_t
   1.272 +PannerNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
   1.273 +{
   1.274 +  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   1.275 +}
   1.276 +
   1.277 +JSObject*
   1.278 +PannerNode::WrapObject(JSContext* aCx)
   1.279 +{
   1.280 +  return PannerNodeBinding::Wrap(aCx, this);
   1.281 +}
   1.282 +
   1.283 +void PannerNode::DestroyMediaStream()
   1.284 +{
   1.285 +  if (Context()) {
   1.286 +    Context()->UnregisterPannerNode(this);
   1.287 +  }
   1.288 +  AudioNode::DestroyMediaStream();
   1.289 +}
   1.290 +
    1.291 +// The following three distance gain functions are described in the Web Audio spec:
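          +//   Linear:      1 - rolloffFactor * (distance - refDistance) / (maxDistance - refDistance)
          +//   Inverse:     refDistance / (refDistance + rolloffFactor * (distance - refDistance))
          +//   Exponential: pow(distance / refDistance, -rolloffFactor)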
   1.292 +float
   1.293 +PannerNodeEngine::LinearGainFunction(float aDistance)
   1.294 +{
   1.295 +  return 1 - mRolloffFactor * (aDistance - mRefDistance) / (mMaxDistance - mRefDistance);
   1.296 +}
   1.297 +
   1.298 +float
   1.299 +PannerNodeEngine::InverseGainFunction(float aDistance)
   1.300 +{
   1.301 +  return mRefDistance / (mRefDistance + mRolloffFactor * (aDistance - mRefDistance));
   1.302 +}
   1.303 +
   1.304 +float
   1.305 +PannerNodeEngine::ExponentialGainFunction(float aDistance)
   1.306 +{
   1.307 +  return pow(aDistance / mRefDistance, -mRolloffFactor);
   1.308 +}
   1.309 +
   1.310 +void
   1.311 +PannerNodeEngine::HRTFPanningFunction(const AudioChunk& aInput,
   1.312 +                                      AudioChunk* aOutput)
   1.313 +{
   1.314 +  // The output of this node is always stereo, no matter what the inputs are.
   1.315 +  AllocateAudioBlock(2, aOutput);
   1.316 +
   1.317 +  float azimuth, elevation;
   1.318 +  ComputeAzimuthAndElevation(azimuth, elevation);
   1.319 +
   1.320 +  AudioChunk input = aInput;
   1.321 +  // Gain is applied before the delay and convolution of the HRTF
   1.322 +  input.mVolume *= ComputeConeGain() * ComputeDistanceGain();
   1.323 +
   1.324 +  mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
   1.325 +}
   1.326 +
   1.327 +void
   1.328 +PannerNodeEngine::EqualPowerPanningFunction(const AudioChunk& aInput,
   1.329 +                                            AudioChunk* aOutput)
   1.330 +{
   1.331 +  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
   1.332 +  int inputChannels = aInput.mChannelData.Length();
   1.333 +
    1.334 +  // If the source and the listener are at the same position, and no cone gain
    1.335 +  // is specified, this node is a no-op.
   1.336 +  if (mListenerPosition == mPosition &&
   1.337 +      mConeInnerAngle == 360 &&
   1.338 +      mConeOuterAngle == 360) {
   1.339 +    *aOutput = aInput;
   1.340 +    return;
   1.341 +  }
   1.342 +
   1.343 +  // The output of this node is always stereo, no matter what the inputs are.
   1.344 +  AllocateAudioBlock(2, aOutput);
   1.345 +
   1.346 +  ComputeAzimuthAndElevation(azimuth, elevation);
   1.347 +  coneGain = ComputeConeGain();
   1.348 +
   1.349 +  // The following algorithm is described in the spec.
    1.350 +  // Clamp azimuth to the [-180, 180] range.
   1.351 +  azimuth = min(180.f, max(-180.f, azimuth));
   1.352 +
    1.353 +  // Wrap around so that the azimuth ends up in the [-90, 90] range.
   1.354 +  if (azimuth < -90.f) {
   1.355 +    azimuth = -180.f - azimuth;
    1.356 +  } else if (azimuth > 90.f) {
   1.357 +    azimuth = 180.f - azimuth;
   1.358 +  }
   1.359 +
   1.360 +  // Normalize the value in the [0, 1] range.
   1.361 +  if (inputChannels == 1) {
   1.362 +    normalizedAzimuth = (azimuth + 90.f) / 180.f;
   1.363 +  } else {
   1.364 +    if (azimuth <= 0) {
   1.365 +      normalizedAzimuth = (azimuth + 90.f) / 90.f;
   1.366 +    } else {
   1.367 +      normalizedAzimuth = azimuth / 90.f;
   1.368 +    }
   1.369 +  }
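          +  // For stereo input the two half-ranges [-90, 0] and [0, 90] are mapped onto
          +  // [0, 1] separately; the sign of the azimuth is then used by
          +  // GainStereoToStereo below to choose the panning direction.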
   1.370 +
   1.371 +  distanceGain = ComputeDistanceGain();
   1.372 +
   1.373 +  // Actually compute the left and right gain.
   1.374 +  gainL = cos(0.5 * M_PI * normalizedAzimuth);
   1.375 +  gainR = sin(0.5 * M_PI * normalizedAzimuth);
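          +  // cos^2 + sin^2 == 1, so the two gains preserve total power across the
          +  // stereo pair, hence the "equal power" name of this panning model.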
   1.376 +
   1.377 +  // Compute the output.
   1.378 +  if (inputChannels == 1) {
   1.379 +    GainMonoToStereo(aInput, aOutput, gainL, gainR);
   1.380 +  } else {
   1.381 +    GainStereoToStereo(aInput, aOutput, gainL, gainR, azimuth);
   1.382 +  }
   1.383 +
   1.384 +  aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
   1.385 +}
   1.386 +
   1.387 +void
   1.388 +PannerNodeEngine::GainMonoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
   1.389 +                                   float aGainL, float aGainR)
   1.390 +{
   1.391 +  float* outputL = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[0]));
   1.392 +  float* outputR = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[1]));
   1.393 +  const float* input = static_cast<float*>(const_cast<void*>(aInput.mChannelData[0]));
   1.394 +
   1.395 +  AudioBlockPanMonoToStereo(input, aGainL, aGainR, outputL, outputR);
   1.396 +}
   1.397 +
   1.398 +void
   1.399 +PannerNodeEngine::GainStereoToStereo(const AudioChunk& aInput, AudioChunk* aOutput,
   1.400 +                                     float aGainL, float aGainR, double aAzimuth)
   1.401 +{
   1.402 +  float* outputL = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[0]));
   1.403 +  float* outputR = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[1]));
   1.404 +  const float* inputL = static_cast<float*>(const_cast<void*>(aInput.mChannelData[0]));
   1.405 +  const float* inputR = static_cast<float*>(const_cast<void*>(aInput.mChannelData[1]));
   1.406 +
   1.407 +  AudioBlockPanStereoToStereo(inputL, inputR, aGainL, aGainR, aAzimuth <= 0, outputL, outputR);
   1.408 +}
   1.409 +
   1.410 +// This algorithm is specified in the webaudio spec.
   1.411 +void
   1.412 +PannerNodeEngine::ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation)
   1.413 +{
   1.414 +  ThreeDPoint sourceListener = mPosition - mListenerPosition;
   1.415 +
   1.416 +  if (sourceListener.IsZero()) {
   1.417 +    aAzimuth = 0.0;
   1.418 +    aElevation = 0.0;
   1.419 +    return;
   1.420 +  }
   1.421 +
   1.422 +  sourceListener.Normalize();
   1.423 +
   1.424 +  // Project the source-listener vector on the x-z plane.
   1.425 +  const ThreeDPoint& listenerFront = mListenerFrontVector;
   1.426 +  const ThreeDPoint& listenerRight = mListenerRightVector;
   1.427 +  ThreeDPoint up = listenerRight.CrossProduct(listenerFront);
   1.428 +
   1.429 +  double upProjection = sourceListener.DotProduct(up);
   1.430 +  aElevation = 90 - 180 * acos(upProjection) / M_PI;
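          +  // acos(upProjection) is the angle between the source direction and the up
          +  // axis, in radians; 90 minus that angle (in degrees) is the elevation above
          +  // the listener's horizontal plane.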
   1.431 +
   1.432 +  if (aElevation > 90) {
   1.433 +    aElevation = 180 - aElevation;
   1.434 +  } else if (aElevation < -90) {
   1.435 +    aElevation = -180 - aElevation;
   1.436 +  }
   1.437 +
   1.438 +  ThreeDPoint projectedSource = sourceListener - up * upProjection;
   1.439 +  if (projectedSource.IsZero()) {
   1.440 +    // source - listener direction is up or down.
   1.441 +    aAzimuth = 0.0;
   1.442 +    return;
   1.443 +  }
   1.444 +  projectedSource.Normalize();
   1.445 +
   1.446 +  // Actually compute the angle, and convert to degrees
   1.447 +  double projection = projectedSource.DotProduct(listenerRight);
   1.448 +  aAzimuth = 180 * acos(projection) / M_PI;
   1.449 +
   1.450 +  // Compute whether the source is in front or behind the listener.
   1.451 +  double frontBack = projectedSource.DotProduct(listenerFront);
   1.452 +  if (frontBack < 0) {
   1.453 +    aAzimuth = 360 - aAzimuth;
   1.454 +  }
   1.455 +  // Rotate the azimuth so it is relative to the listener front vector instead
   1.456 +  // of the right vector.
   1.457 +  if ((aAzimuth >= 0) && (aAzimuth <= 270)) {
   1.458 +    aAzimuth = 90 - aAzimuth;
   1.459 +  } else {
   1.460 +    aAzimuth = 450 - aAzimuth;
   1.461 +  }
   1.462 +}
   1.463 +
   1.464 +// This algorithm is described in the WebAudio spec.
   1.465 +float
   1.466 +PannerNodeEngine::ComputeConeGain()
   1.467 +{
   1.468 +  // Omnidirectional source
   1.469 +  if (mOrientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) {
   1.470 +    return 1;
   1.471 +  }
   1.472 +
   1.473 +  // Normalized source-listener vector
   1.474 +  ThreeDPoint sourceToListener = mListenerPosition - mPosition;
   1.475 +  sourceToListener.Normalize();
   1.476 +
   1.477 +  // Angle between the source orientation vector and the source-listener vector
   1.478 +  double dotProduct = sourceToListener.DotProduct(mOrientation);
   1.479 +  double angle = 180 * acos(dotProduct) / M_PI;
   1.480 +  double absAngle = fabs(angle);
   1.481 +
   1.482 +  // Divide by 2 here since API is entire angle (not half-angle)
   1.483 +  double absInnerAngle = fabs(mConeInnerAngle) / 2;
   1.484 +  double absOuterAngle = fabs(mConeOuterAngle) / 2;
   1.485 +  double gain = 1;
   1.486 +
   1.487 +  if (absAngle <= absInnerAngle) {
   1.488 +    // No attenuation
   1.489 +    gain = 1;
   1.490 +  } else if (absAngle >= absOuterAngle) {
   1.491 +    // Max attenuation
   1.492 +    gain = mConeOuterGain;
   1.493 +  } else {
   1.494 +    // Between inner and outer cones
   1.495 +    // inner -> outer, x goes from 0 -> 1
   1.496 +    double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle);
   1.497 +    gain = (1 - x) + mConeOuterGain * x;
   1.498 +  }
   1.499 +
   1.500 +  return gain;
   1.501 +}
   1.502 +
   1.503 +float
   1.504 +PannerNodeEngine::ComputeDistanceGain()
   1.505 +{
   1.506 +  ThreeDPoint distanceVec = mPosition - mListenerPosition;
   1.507 +  float distance = sqrt(distanceVec.DotProduct(distanceVec));
   1.508 +  return (this->*mDistanceModelFunction)(distance);
   1.509 +}
   1.510 +
   1.511 +float
   1.512 +PannerNode::ComputeDopplerShift()
   1.513 +{
   1.514 +  double dopplerShift = 1.0; // Initialize to default value
   1.515 +
   1.516 +  AudioListener* listener = Context()->Listener();
   1.517 +
   1.518 +  if (listener->DopplerFactor() > 0) {
   1.519 +    // Don't bother if both source and listener have no velocity.
   1.520 +    if (!mVelocity.IsZero() || !listener->Velocity().IsZero()) {
   1.521 +      // Calculate the source to listener vector.
    1.522 +      ThreeDPoint sourceToListener = mPosition - listener->Position();
   1.523 +
   1.524 +      double sourceListenerMagnitude = sourceToListener.Magnitude();
   1.525 +
   1.526 +      double listenerProjection = sourceToListener.DotProduct(listener->Velocity()) / sourceListenerMagnitude;
   1.527 +      double sourceProjection = sourceToListener.DotProduct(mVelocity) / sourceListenerMagnitude;
   1.528 +
   1.529 +      listenerProjection = -listenerProjection;
   1.530 +      sourceProjection = -sourceProjection;
   1.531 +
    1.532 +      double scaledSpeedOfSound = listener->SpeedOfSound() / listener->DopplerFactor();
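          +      // Clamp the projected speeds so that neither the numerator nor the
          +      // denominator of the doppler shift formula below can become negative.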
   1.533 +      listenerProjection = min(listenerProjection, scaledSpeedOfSound);
   1.534 +      sourceProjection = min(sourceProjection, scaledSpeedOfSound);
   1.535 +
   1.536 +      dopplerShift = ((listener->SpeedOfSound() - listener->DopplerFactor() * listenerProjection) / (listener->SpeedOfSound() - listener->DopplerFactor() * sourceProjection));
   1.537 +
   1.538 +      WebAudioUtils::FixNaN(dopplerShift); // Avoid illegal values
   1.539 +
   1.540 +      // Limit the pitch shifting to 4 octaves up and 3 octaves down.
   1.541 +      dopplerShift = min(dopplerShift, 16.);
   1.542 +      dopplerShift = max(dopplerShift, 0.125);
   1.543 +    }
   1.544 +  }
   1.545 +
   1.546 +  return dopplerShift;
   1.547 +}
   1.548 +
   1.549 +void
   1.550 +PannerNode::FindConnectedSources()
   1.551 +{
   1.552 +  mSources.Clear();
   1.553 +  std::set<AudioNode*> cycleSet;
   1.554 +  FindConnectedSources(this, mSources, cycleSet);
   1.555 +}
   1.556 +
   1.557 +void
   1.558 +PannerNode::FindConnectedSources(AudioNode* aNode,
   1.559 +                                 nsTArray<AudioBufferSourceNode*>& aSources,
   1.560 +                                 std::set<AudioNode*>& aNodesSeen)
   1.561 +{
   1.562 +  if (!aNode) {
   1.563 +    return;
   1.564 +  }
   1.565 +
   1.566 +  const nsTArray<InputNode>& inputNodes = aNode->InputNodes();
   1.567 +
    1.568 +  for (unsigned i = 0; i < inputNodes.Length(); i++) {
   1.569 +    // Return if we find a node that we have seen already.
   1.570 +    if (aNodesSeen.find(inputNodes[i].mInputNode) != aNodesSeen.end()) {
   1.571 +      return;
   1.572 +    }
   1.573 +    aNodesSeen.insert(inputNodes[i].mInputNode);
   1.574 +    // Recurse
   1.575 +    FindConnectedSources(inputNodes[i].mInputNode, aSources, aNodesSeen);
   1.576 +
   1.577 +    // Check if this node is an AudioBufferSourceNode
   1.578 +    AudioBufferSourceNode* node = inputNodes[i].mInputNode->AsAudioBufferSourceNode();
   1.579 +    if (node) {
   1.580 +      aSources.AppendElement(node);
   1.581 +    }
   1.582 +  }
   1.583 +}
   1.584 +
   1.585 +void
   1.586 +PannerNode::SendDopplerToSourcesIfNeeded()
   1.587 +{
    1.588 +  // Don't bother sending the doppler shift if neither the source nor the
    1.589 +  // listener is moving, because the doppler shift is then 1.0.
   1.590 +  if (!(Context()->Listener()->Velocity().IsZero() && mVelocity.IsZero())) {
    1.591 +    for (uint32_t i = 0; i < mSources.Length(); i++) {
   1.592 +      mSources[i]->SendDopplerShiftToStream(ComputeDopplerShift());
   1.593 +    }
   1.594 +  }
   1.595 +}
   1.596 +
   1.597 +
   1.598 +}
   1.599 +}
   1.600 +
