content/media/AudioNodeStream.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/AudioNodeStream.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,591 @@
     1.4 +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
     1.5 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.6 + * License, v. 2.0. If a copy of the MPL was not distributed with this file,
     1.7 + * You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.8 +
     1.9 +#include "AudioNodeStream.h"
    1.10 +
    1.11 +#include "MediaStreamGraphImpl.h"
    1.12 +#include "AudioNodeEngine.h"
    1.13 +#include "ThreeDPoint.h"
    1.14 +#include "AudioChannelFormat.h"
    1.15 +#include "AudioParamTimeline.h"
    1.16 +#include "AudioContext.h"
    1.17 +
    1.18 +using namespace mozilla::dom;
    1.19 +
    1.20 +namespace mozilla {
    1.21 +
    1.22 +/**
    1.23 + * An AudioNodeStream produces a single audio track with ID
    1.24 + * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate
    1.25 + * for regular audio contexts, and the rate requested by the web content
    1.26 + * for offline audio contexts.
    1.27 + * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
    1.28 + * Note: This must be a different value than MEDIA_STREAM_DEST_TRACK_ID
    1.29 + */
    1.30 +
AudioNodeStream::~AudioNodeStream()
{
  // Pairs with MOZ_COUNT_CTOR in the constructor for leak/bloat accounting.
  MOZ_COUNT_DTOR(AudioNodeStream);
}
    1.35 +
    1.36 +size_t
    1.37 +AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
    1.38 +{
    1.39 +  size_t amount = 0;
    1.40 +
    1.41 +  // Not reported:
    1.42 +  // - mEngine
    1.43 +
    1.44 +  amount += ProcessedMediaStream::SizeOfExcludingThis(aMallocSizeOf);
    1.45 +  amount += mLastChunks.SizeOfExcludingThis(aMallocSizeOf);
    1.46 +  for (size_t i = 0; i < mLastChunks.Length(); i++) {
    1.47 +    // NB: This is currently unshared only as there are instances of
    1.48 +    //     double reporting in DMD otherwise.
    1.49 +    amount += mLastChunks[i].SizeOfExcludingThisIfUnshared(aMallocSizeOf);
    1.50 +  }
    1.51 +
    1.52 +  return amount;
    1.53 +}
    1.54 +
    1.55 +size_t
    1.56 +AudioNodeStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
    1.57 +{
    1.58 +  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
    1.59 +}
    1.60 +
    1.61 +void
    1.62 +AudioNodeStream::SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
    1.63 +                                               AudioNodeSizes& aUsage) const
    1.64 +{
    1.65 +  // Explicitly separate out the stream memory.
    1.66 +  aUsage.mStream = SizeOfIncludingThis(aMallocSizeOf);
    1.67 +
    1.68 +  if (mEngine) {
    1.69 +    // This will fill out the rest of |aUsage|.
    1.70 +    mEngine->SizeOfIncludingThis(aMallocSizeOf, aUsage);
    1.71 +  }
    1.72 +}
    1.73 +
    1.74 +void
    1.75 +AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
    1.76 +                                        double aStreamTime)
    1.77 +{
    1.78 +  class Message : public ControlMessage {
    1.79 +  public:
    1.80 +    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
    1.81 +            double aStreamTime)
    1.82 +      : ControlMessage(aStream), mStreamTime(aStreamTime),
    1.83 +        mRelativeToStream(aRelativeToStream), mIndex(aIndex) {}
    1.84 +    virtual void Run()
    1.85 +    {
    1.86 +      static_cast<AudioNodeStream*>(mStream)->
    1.87 +          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    1.88 +    }
    1.89 +    double mStreamTime;
    1.90 +    MediaStream* mRelativeToStream;
    1.91 +    uint32_t mIndex;
    1.92 +  };
    1.93 +
    1.94 +  MOZ_ASSERT(this);
    1.95 +  GraphImpl()->AppendMessage(new Message(this, aIndex,
    1.96 +      aContext->DestinationStream(),
    1.97 +      aContext->DOMTimeToStreamTime(aStreamTime)));
    1.98 +}
    1.99 +
void
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                            double aStreamTime)
{
  // Graph-thread half of SetStreamTimeParameter: convert seconds relative to
  // |aRelativeToStream| into this stream's track ticks, then hand the value
  // to the engine.
  TrackTicks ticks = TicksFromDestinationTime(aRelativeToStream, aStreamTime);
  mEngine->SetStreamTimeParameter(aIndex, ticks);
}
   1.107 +
   1.108 +void
   1.109 +AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
   1.110 +{
   1.111 +  class Message : public ControlMessage {
   1.112 +  public:
   1.113 +    Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
   1.114 +      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
   1.115 +    virtual void Run()
   1.116 +    {
   1.117 +      static_cast<AudioNodeStream*>(mStream)->Engine()->
   1.118 +          SetDoubleParameter(mIndex, mValue);
   1.119 +    }
   1.120 +    double mValue;
   1.121 +    uint32_t mIndex;
   1.122 +  };
   1.123 +
   1.124 +  MOZ_ASSERT(this);
   1.125 +  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
   1.126 +}
   1.127 +
   1.128 +void
   1.129 +AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
   1.130 +{
   1.131 +  class Message : public ControlMessage {
   1.132 +  public:
   1.133 +    Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
   1.134 +      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
   1.135 +    virtual void Run()
   1.136 +    {
   1.137 +      static_cast<AudioNodeStream*>(mStream)->Engine()->
   1.138 +          SetInt32Parameter(mIndex, mValue);
   1.139 +    }
   1.140 +    int32_t mValue;
   1.141 +    uint32_t mIndex;
   1.142 +  };
   1.143 +
   1.144 +  MOZ_ASSERT(this);
   1.145 +  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
   1.146 +}
   1.147 +
   1.148 +void
   1.149 +AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
   1.150 +                                      const AudioParamTimeline& aValue)
   1.151 +{
   1.152 +  class Message : public ControlMessage {
   1.153 +  public:
   1.154 +    Message(AudioNodeStream* aStream, uint32_t aIndex,
   1.155 +            const AudioParamTimeline& aValue)
   1.156 +      : ControlMessage(aStream),
   1.157 +        mValue(aValue),
   1.158 +        mSampleRate(aStream->SampleRate()),
   1.159 +        mIndex(aIndex) {}
   1.160 +    virtual void Run()
   1.161 +    {
   1.162 +      static_cast<AudioNodeStream*>(mStream)->Engine()->
   1.163 +          SetTimelineParameter(mIndex, mValue, mSampleRate);
   1.164 +    }
   1.165 +    AudioParamTimeline mValue;
   1.166 +    TrackRate mSampleRate;
   1.167 +    uint32_t mIndex;
   1.168 +  };
   1.169 +  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
   1.170 +}
   1.171 +
   1.172 +void
   1.173 +AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
   1.174 +{
   1.175 +  class Message : public ControlMessage {
   1.176 +  public:
   1.177 +    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
   1.178 +      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
   1.179 +    virtual void Run()
   1.180 +    {
   1.181 +      static_cast<AudioNodeStream*>(mStream)->Engine()->
   1.182 +          SetThreeDPointParameter(mIndex, mValue);
   1.183 +    }
   1.184 +    ThreeDPoint mValue;
   1.185 +    uint32_t mIndex;
   1.186 +  };
   1.187 +
   1.188 +  MOZ_ASSERT(this);
   1.189 +  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
   1.190 +}
   1.191 +
   1.192 +void
   1.193 +AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer)
   1.194 +{
   1.195 +  class Message : public ControlMessage {
   1.196 +  public:
   1.197 +    Message(AudioNodeStream* aStream,
   1.198 +            already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer)
   1.199 +      : ControlMessage(aStream), mBuffer(aBuffer) {}
   1.200 +    virtual void Run()
   1.201 +    {
   1.202 +      static_cast<AudioNodeStream*>(mStream)->Engine()->
   1.203 +          SetBuffer(mBuffer.forget());
   1.204 +    }
   1.205 +    nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
   1.206 +  };
   1.207 +
   1.208 +  MOZ_ASSERT(this);
   1.209 +  GraphImpl()->AppendMessage(new Message(this, aBuffer));
   1.210 +}
   1.211 +
   1.212 +void
   1.213 +AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
   1.214 +{
   1.215 +  class Message : public ControlMessage {
   1.216 +  public:
   1.217 +    Message(AudioNodeStream* aStream,
   1.218 +            nsTArray<float>& aData)
   1.219 +      : ControlMessage(aStream)
   1.220 +    {
   1.221 +      mData.SwapElements(aData);
   1.222 +    }
   1.223 +    virtual void Run()
   1.224 +    {
   1.225 +      static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData);
   1.226 +    }
   1.227 +    nsTArray<float> mData;
   1.228 +  };
   1.229 +
   1.230 +  MOZ_ASSERT(this);
   1.231 +  GraphImpl()->AppendMessage(new Message(this, aData));
   1.232 +}
   1.233 +
   1.234 +void
   1.235 +AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
   1.236 +                                            ChannelCountMode aChannelCountMode,
   1.237 +                                            ChannelInterpretation aChannelInterpretation)
   1.238 +{
   1.239 +  class Message : public ControlMessage {
   1.240 +  public:
   1.241 +    Message(AudioNodeStream* aStream,
   1.242 +            uint32_t aNumberOfChannels,
   1.243 +            ChannelCountMode aChannelCountMode,
   1.244 +            ChannelInterpretation aChannelInterpretation)
   1.245 +      : ControlMessage(aStream),
   1.246 +        mNumberOfChannels(aNumberOfChannels),
   1.247 +        mChannelCountMode(aChannelCountMode),
   1.248 +        mChannelInterpretation(aChannelInterpretation)
   1.249 +    {}
   1.250 +    virtual void Run()
   1.251 +    {
   1.252 +      static_cast<AudioNodeStream*>(mStream)->
   1.253 +        SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
   1.254 +                                       mChannelInterpretation);
   1.255 +    }
   1.256 +    uint32_t mNumberOfChannels;
   1.257 +    ChannelCountMode mChannelCountMode;
   1.258 +    ChannelInterpretation mChannelInterpretation;
   1.259 +  };
   1.260 +
   1.261 +  MOZ_ASSERT(this);
   1.262 +  GraphImpl()->AppendMessage(new Message(this, aNumberOfChannels,
   1.263 +                                         aChannelCountMode,
   1.264 +                                         aChannelInterpretation));
   1.265 +}
   1.266 +
   1.267 +void
   1.268 +AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
   1.269 +                                                ChannelCountMode aChannelCountMode,
   1.270 +                                                ChannelInterpretation aChannelInterpretation)
   1.271 +{
   1.272 +  // Make sure that we're not clobbering any significant bits by fitting these
   1.273 +  // values in 16 bits.
   1.274 +  MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX);
   1.275 +  MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX);
   1.276 +
   1.277 +  mNumberOfInputChannels = aNumberOfChannels;
   1.278 +  mChannelCountMode = aChannelCountMode;
   1.279 +  mChannelInterpretation = aChannelInterpretation;
   1.280 +}
   1.281 +
   1.282 +uint32_t
   1.283 +AudioNodeStream::ComputedNumberOfChannels(uint32_t aInputChannelCount)
   1.284 +{
   1.285 +  switch (mChannelCountMode) {
   1.286 +  case ChannelCountMode::Explicit:
   1.287 +    // Disregard the channel count we've calculated from inputs, and just use
   1.288 +    // mNumberOfInputChannels.
   1.289 +    return mNumberOfInputChannels;
   1.290 +  case ChannelCountMode::Clamped_max:
   1.291 +    // Clamp the computed output channel count to mNumberOfInputChannels.
   1.292 +    return std::min(aInputChannelCount, mNumberOfInputChannels);
   1.293 +  default:
   1.294 +  case ChannelCountMode::Max:
   1.295 +    // Nothing to do here, just shut up the compiler warning.
   1.296 +    return aInputChannelCount;
   1.297 +  }
   1.298 +}
   1.299 +
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  // Collects every upstream chunk connected to input port |aPortIndex|,
  // decides the output channel count, and mixes them into |aTmpChunk|
  // (a single WEBAUDIO_BLOCK_SIZE block).
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    // It is possible for mLastChunks to be empty here, because `a` might be a
    // AudioNodeStream that has not been scheduled yet, because it is further
    // down the graph _but_ as a connection to this node. Because we enforce the
    // presence of at least one DelayNode, with at least one block of delay, and
    // because the output of a DelayNode when it has been fed less than
    // `delayTime` amount of audio is silence, we can simply continue here,
    // because this input would not influence the output of this node. Next
    // iteration, a->mLastChunks.IsEmpty() will be false, and everything will
    // work as usual.
    if (a->mLastChunks.IsEmpty()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    // Skip silent or channel-less chunks; they contribute nothing to the mix.
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    // Grow the output channel count to cover every contributing chunk.
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  // Apply the node's channelCountMode policy to the computed superset.
  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  // No contributing inputs: produce a silent block.
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  // Fast path: a single input with the right channel count needs no mixing.
  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  // Up/down-mix each input to outputChannelCount and accumulate into the block.
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
   1.370 +
void
AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
                                      AudioChunk* aBlock,
                                      nsTArray<float>* aDownmixBuffer)
{
  // Mixes |aChunk| into |aBlock|, scaled by the chunk's volume. The first
  // input (aInputIndex == 0) initializes the block; later inputs add to it.
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
  // Match the chunk's channel count to the block's before mixing.
  UpMixDownMixChunk(&aChunk, aBlock->mChannelData.Length(), channels, *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* inputData = static_cast<const float*>(channels[c]);
    float* outputData = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c]));
    if (inputData) {
      if (aInputIndex == 0) {
        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
      } else {
        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
      }
    } else {
      // Null channel data means silence: only the first input needs to clear
      // the (uninitialized) output; adding silence afterwards is a no-op.
      if (aInputIndex == 0) {
        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
}
   1.395 +
void
AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const void*>& aOutputChannels,
                                   nsTArray<float>& aDownmixBuffer)
{
  // Adapts |aChunk|'s channels to exactly |aOutputChannelCount| channels,
  // returning (possibly borrowed) channel pointers in |aOutputChannels|.
  // Down-mixed data, when produced, lives in |aDownmixBuffer|, so the output
  // pointers are only valid while that buffer is alive and unmodified.
  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};

  aOutputChannels.AppendElements(aChunk->mChannelData);
  if (aOutputChannels.Length() < aOutputChannelCount) {
    // Too few channels: up-mix.
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, nullptr);
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Discrete interpretation: fill up the remaining aOutputChannels by zeros
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(silenceChannel);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    // Too many channels: down-mix.
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      // Carve the down-mix destination out of aDownmixBuffer, one
      // WEBAUDIO_BLOCK_SIZE slice per output channel.
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      // Point the output array at the down-mixed data.
      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Discrete interpretation: drop the remaining aOutputChannels
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
        aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
   1.439 +
   1.440 +// The MediaStreamGraph guarantees that this is actually one block, for
   1.441 +// AudioNodeStreams.
   1.442 +void
   1.443 +AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
   1.444 +{
   1.445 +  EnsureTrack(AUDIO_TRACK, mSampleRate);
   1.446 +  // No more tracks will be coming
   1.447 +  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);
   1.448 +
   1.449 +  uint16_t outputCount = std::max(uint16_t(1), mEngine->OutputCount());
   1.450 +  mLastChunks.SetLength(outputCount);
   1.451 +
   1.452 +  // Consider this stream blocked if it has already finished output. Normally
   1.453 +  // mBlocked would reflect this, but due to rounding errors our audio track may
   1.454 +  // appear to extend slightly beyond aFrom, so we might not be blocked yet.
   1.455 +  bool blocked = mFinished || mBlocked.GetAt(aFrom);
   1.456 +  // If the stream has finished at this time, it will be blocked.
   1.457 +  if (mMuted || blocked) {
   1.458 +    for (uint16_t i = 0; i < outputCount; ++i) {
   1.459 +      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
   1.460 +    }
   1.461 +  } else {
   1.462 +    // We need to generate at least one input
   1.463 +    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
   1.464 +    OutputChunks inputChunks;
   1.465 +    inputChunks.SetLength(maxInputs);
   1.466 +    for (uint16_t i = 0; i < maxInputs; ++i) {
   1.467 +      ObtainInputBlock(inputChunks[i], i);
   1.468 +    }
   1.469 +    bool finished = false;
   1.470 +    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
   1.471 +      mEngine->ProcessBlock(this, inputChunks[0], &mLastChunks[0], &finished);
   1.472 +    } else {
   1.473 +      mEngine->ProcessBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
   1.474 +    }
   1.475 +    for (uint16_t i = 0; i < outputCount; ++i) {
   1.476 +      NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
   1.477 +                   "Invalid WebAudio chunk size");
   1.478 +    }
   1.479 +    if (finished) {
   1.480 +      mMarkAsFinishedAfterThisBlock = true;
   1.481 +    }
   1.482 +
   1.483 +    if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
   1.484 +      for (uint32_t i = 0; i < outputCount; ++i) {
   1.485 +        mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
   1.486 +      }
   1.487 +    }
   1.488 +  }
   1.489 +
   1.490 +  if (!blocked) {
   1.491 +    // Don't output anything while blocked
   1.492 +    AdvanceOutputSegment();
   1.493 +    if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) {
   1.494 +      // This stream was finished the last time that we looked at it, and all
   1.495 +      // of the depending streams have finished their output as well, so now
   1.496 +      // it's time to mark this stream as finished.
   1.497 +      FinishOutput();
   1.498 +    }
   1.499 +  }
   1.500 +}
   1.501 +
void
AudioNodeStream::AdvanceOutputSegment()
{
  // Appends this iteration's first output chunk to the stream's audio track
  // and notifies listeners of the new data.
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK, mSampleRate);
  AudioSegment* segment = track->Get<AudioSegment>();

  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    // Externally-consumed stream: keep the actual audio data.
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    // Internal stream: only the track's duration matters, so append null
    // data of the same length instead of real samples.
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    // Hand each listener its own single-chunk segment; copyChunk shares the
    // underlying buffer, so this does not duplicate sample data.
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                mSampleRate, segment->GetDuration(), 0,
                                tmpSegment);
  }
}
   1.524 +
   1.525 +TrackTicks
   1.526 +AudioNodeStream::GetCurrentPosition()
   1.527 +{
   1.528 +  return EnsureTrack(AUDIO_TRACK, mSampleRate)->Get<AudioSegment>()->GetDuration();
   1.529 +}
   1.530 +
void
AudioNodeStream::FinishOutput()
{
  // Marks the stream and its audio track as ended, then tells listeners.
  // Idempotent: bail out if the graph thread already finished this stream.
  if (IsFinishedOnGraphThread()) {
    return;
  }

  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK, mSampleRate);
  track->SetEnded();
  FinishOnGraphThread();

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    // TRACK_EVENT_ENDED carries no data; an empty segment is expected.
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                mSampleRate,
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
   1.551 +
double
AudioNodeStream::TimeFromDestinationTime(AudioNodeStream* aDestination,
                                         double aSeconds)
{
  // Converts a time expressed in |aDestination|'s timeline (seconds) into
  // the equivalent time in this stream's timeline (seconds). Both streams
  // must run at the same sample rate.
  MOZ_ASSERT(aDestination->SampleRate() == SampleRate());

  // Negative times are clamped to zero before conversion.
  double destinationSeconds = std::max(0.0, aSeconds);
  StreamTime streamTime = SecondsToMediaTime(destinationSeconds);
  // MediaTime does not have the resolution of double, so carry the
  // sub-MediaTime remainder separately and add it back at the end.
  double offset = destinationSeconds - MediaTimeToSeconds(streamTime);

  // Route through graph time: destination stream time -> graph time ->
  // this stream's time.
  GraphTime graphTime = aDestination->StreamTimeToGraphTime(streamTime);
  StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
  double thisSeconds = MediaTimeToSeconds(thisStreamTime) + offset;
  MOZ_ASSERT(thisSeconds >= 0.0);
  return thisSeconds;
}
   1.569 +
   1.570 +TrackTicks
   1.571 +AudioNodeStream::TicksFromDestinationTime(MediaStream* aDestination,
   1.572 +                                          double aSeconds)
   1.573 +{
   1.574 +  AudioNodeStream* destination = aDestination->AsAudioNodeStream();
   1.575 +  MOZ_ASSERT(destination);
   1.576 +
   1.577 +  double thisSeconds = TimeFromDestinationTime(destination, aSeconds);
   1.578 +  // Round to nearest
   1.579 +  TrackTicks ticks = thisSeconds * SampleRate() + 0.5;
   1.580 +  return ticks;
   1.581 +}
   1.582 +
   1.583 +double
   1.584 +AudioNodeStream::DestinationTimeFromTicks(AudioNodeStream* aDestination,
   1.585 +                                          TrackTicks aPosition)
   1.586 +{
   1.587 +  MOZ_ASSERT(SampleRate() == aDestination->SampleRate());
   1.588 +  StreamTime sourceTime = TicksToTimeRoundDown(SampleRate(), aPosition);
   1.589 +  GraphTime graphTime = StreamTimeToGraphTime(sourceTime);
   1.590 +  StreamTime destinationTime = aDestination->GraphTimeToStreamTimeOptimistic(graphTime);
   1.591 +  return MediaTimeToSeconds(destinationTime);
   1.592 +}
   1.593 +
   1.594 +}

mercurial