content/media/AudioNodeExternalInputStream.cpp

Tue, 06 Jan 2015 21:39:09 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Tue, 06 Jan 2015 21:39:09 +0100
branch
TOR_BUG_9701
changeset 8
97036ab72558
permissions
-rw-r--r--

Conditionally force memory storage according to privacy.thirdparty.isolate;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

michael@0 1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
michael@0 2 /* This Source Code Form is subject to the terms of the Mozilla Public
michael@0 3 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
michael@0 4 * You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 5
michael@0 6 #include "AudioNodeEngine.h"
michael@0 7 #include "AudioNodeExternalInputStream.h"
michael@0 8 #include "AudioChannelFormat.h"
michael@0 9 #include "speex/speex_resampler.h"
michael@0 10
michael@0 11 using namespace mozilla::dom;
michael@0 12
michael@0 13 namespace mozilla {
michael@0 14
// Stream that feeds audio from an external MediaStream's tracks into the
// Web Audio graph (see ProcessInput below, which pulls from mInputs[0]).
// Created as an INTERNAL_STREAM; output position starts at tick 0.
AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
  : AudioNodeStream(aEngine, MediaStreamGraph::INTERNAL_STREAM, aSampleRate)
  , mCurrentOutputPosition(0)
{
  MOZ_COUNT_CTOR(AudioNodeExternalInputStream); // leak-checking instrumentation
}
michael@0 21
AudioNodeExternalInputStream::~AudioNodeExternalInputStream()
{
  MOZ_COUNT_DTOR(AudioNodeExternalInputStream); // pairs with MOZ_COUNT_CTOR for leak checking
}
michael@0 26
AudioNodeExternalInputStream::TrackMapEntry::~TrackMapEntry()
{
  // mResampler is only allocated when the input track's rate differs from
  // the graph's sample rate (see GetTrackMapEntry); this entry owns it.
  if (mResampler) {
    speex_resampler_destroy(mResampler);
  }
}
michael@0 33
michael@0 34 uint32_t
michael@0 35 AudioNodeExternalInputStream::GetTrackMapEntry(const StreamBuffer::Track& aTrack,
michael@0 36 GraphTime aFrom)
michael@0 37 {
michael@0 38 AudioSegment* segment = aTrack.Get<AudioSegment>();
michael@0 39
michael@0 40 // Check the map for an existing entry corresponding to the input track.
michael@0 41 for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
michael@0 42 TrackMapEntry* map = &mTrackMap[i];
michael@0 43 if (map->mTrackID == aTrack.GetID()) {
michael@0 44 return i;
michael@0 45 }
michael@0 46 }
michael@0 47
michael@0 48 // Determine channel count by finding the first entry with non-silent data.
michael@0 49 AudioSegment::ChunkIterator ci(*segment);
michael@0 50 while (!ci.IsEnded() && ci->IsNull()) {
michael@0 51 ci.Next();
michael@0 52 }
michael@0 53 if (ci.IsEnded()) {
michael@0 54 // The track is entirely silence so far, we can ignore it for now.
michael@0 55 return nsTArray<TrackMapEntry>::NoIndex;
michael@0 56 }
michael@0 57
michael@0 58 // Create a speex resampler with the same sample rate and number of channels
michael@0 59 // as the track.
michael@0 60 SpeexResamplerState* resampler = nullptr;
michael@0 61 uint32_t channelCount = std::min((*ci).mChannelData.Length(),
michael@0 62 WebAudioUtils::MaxChannelCount);
michael@0 63 if (aTrack.GetRate() != mSampleRate) {
michael@0 64 resampler = speex_resampler_init(channelCount,
michael@0 65 aTrack.GetRate(), mSampleRate, SPEEX_RESAMPLER_QUALITY_DEFAULT, nullptr);
michael@0 66 speex_resampler_skip_zeros(resampler);
michael@0 67 }
michael@0 68
michael@0 69 TrackMapEntry* map = mTrackMap.AppendElement();
michael@0 70 map->mEndOfConsumedInputTicks = 0;
michael@0 71 map->mEndOfLastInputIntervalInInputStream = -1;
michael@0 72 map->mEndOfLastInputIntervalInOutputStream = -1;
michael@0 73 map->mSamplesPassedToResampler =
michael@0 74 TimeToTicksRoundUp(aTrack.GetRate(), GraphTimeToStreamTime(aFrom));
michael@0 75 map->mResampler = resampler;
michael@0 76 map->mResamplerChannelCount = channelCount;
michael@0 77 map->mTrackID = aTrack.GetID();
michael@0 78 return mTrackMap.Length() - 1;
michael@0 79 }
michael@0 80
// Upper bound on the number of output frames requested from the resampler in
// a single SpeexResamplerProcess call; ResampleChannelBuffer loops in slices
// of at most this many frames.
static const uint32_t SPEEX_RESAMPLER_PROCESS_MAX_OUTPUT = 1000;
michael@0 82
michael@0 83 template <typename T> static void
michael@0 84 ResampleChannelBuffer(SpeexResamplerState* aResampler, uint32_t aChannel,
michael@0 85 const T* aInput, uint32_t aInputDuration,
michael@0 86 nsTArray<float>* aOutput)
michael@0 87 {
michael@0 88 if (!aResampler) {
michael@0 89 float* out = aOutput->AppendElements(aInputDuration);
michael@0 90 for (uint32_t i = 0; i < aInputDuration; ++i) {
michael@0 91 out[i] = AudioSampleToFloat(aInput[i]);
michael@0 92 }
michael@0 93 return;
michael@0 94 }
michael@0 95
michael@0 96 uint32_t processed = 0;
michael@0 97 while (processed < aInputDuration) {
michael@0 98 uint32_t prevLength = aOutput->Length();
michael@0 99 float* output = aOutput->AppendElements(SPEEX_RESAMPLER_PROCESS_MAX_OUTPUT);
michael@0 100 uint32_t in = aInputDuration - processed;
michael@0 101 uint32_t out = aOutput->Length() - prevLength;
michael@0 102 WebAudioUtils::SpeexResamplerProcess(aResampler, aChannel,
michael@0 103 aInput + processed, &in,
michael@0 104 output, &out);
michael@0 105 processed += in;
michael@0 106 aOutput->SetLength(prevLength + out);
michael@0 107 }
michael@0 108 }
michael@0 109
/**
 * Resamples one chunk's worth of planar audio (one pointer per channel in
 * aBuffers, each aInputDuration frames of aFormat samples), applying aVolume
 * first, and appends the result as new frames to mResampledData.
 */
void
AudioNodeExternalInputStream::TrackMapEntry::ResampleChannels(const nsTArray<const void*>& aBuffers,
                                                              uint32_t aInputDuration,
                                                              AudioSampleFormat aFormat,
                                                              float aVolume)
{
  NS_ASSERTION(aBuffers.Length() == mResamplerChannelCount,
               "Channel count must be correct here");

  nsAutoTArray<nsTArray<float>,2> resampledBuffers;
  resampledBuffers.SetLength(aBuffers.Length());
  // Scratch buffer, reused per channel, holding volume-scaled float samples.
  nsTArray<float> samplesAdjustedForVolume;
  nsAutoTArray<const float*,2> bufferPtrs;
  bufferPtrs.SetLength(aBuffers.Length());

  for (uint32_t i = 0; i < aBuffers.Length(); ++i) {
    AudioSampleFormat format = aFormat;
    const void* buffer = aBuffers[i];

    if (aVolume != 1.0f) {
      // Pre-apply the volume: scale the channel into a float buffer so that
      // the resampling step below only ever sees already-scaled data.
      format = AUDIO_FORMAT_FLOAT32;
      samplesAdjustedForVolume.SetLength(aInputDuration);
      switch (aFormat) {
      case AUDIO_FORMAT_FLOAT32:
        ConvertAudioSamplesWithScale(static_cast<const float*>(buffer),
                                     samplesAdjustedForVolume.Elements(),
                                     aInputDuration, aVolume);
        break;
      case AUDIO_FORMAT_S16:
        ConvertAudioSamplesWithScale(static_cast<const int16_t*>(buffer),
                                     samplesAdjustedForVolume.Elements(),
                                     aInputDuration, aVolume);
        break;
      default:
        MOZ_ASSERT(false);
        return;
      }
      buffer = samplesAdjustedForVolume.Elements();
    }

    // Resample (or just float-convert, when no resampler exists) into this
    // channel's output buffer.
    switch (format) {
    case AUDIO_FORMAT_FLOAT32:
      ResampleChannelBuffer(mResampler, i,
                            static_cast<const float*>(buffer),
                            aInputDuration, &resampledBuffers[i]);
      break;
    case AUDIO_FORMAT_S16:
      ResampleChannelBuffer(mResampler, i,
                            static_cast<const int16_t*>(buffer),
                            aInputDuration, &resampledBuffers[i]);
      break;
    default:
      MOZ_ASSERT(false);
      return;
    }
    bufferPtrs[i] = resampledBuffers[i].Elements();
    // All channels go through the same resampler state per-channel API, so
    // they must produce the same number of output frames.
    NS_ASSERTION(i == 0 ||
                 resampledBuffers[i].Length() == resampledBuffers[0].Length(),
                 "Resampler made different decisions for different channels!");
  }

  uint32_t length = resampledBuffers[0].Length();
  // Hand ownership of the resampled channel arrays to a shared buffer that
  // mResampledData's frames reference.
  nsRefPtr<ThreadSharedObject> buf = new SharedChannelArrayBuffer<float>(&resampledBuffers);
  mResampledData.AppendFrames(buf.forget(), bufferPtrs, length);
}
michael@0 175
/**
 * Pushes every chunk of aSegment through ResampleChannels, normalizing each
 * chunk to mResamplerChannelCount channels first: silent chunks become
 * explicit zero buffers, mismatched channel counts are up-mixed (with zero
 * fill) or down-mixed as needed. Overflow-prone size computations bail with
 * NS_ERROR rather than risk OOM/overflow.
 */
void
AudioNodeExternalInputStream::TrackMapEntry::ResampleInputData(AudioSegment* aSegment)
{
  AudioSegment::ChunkIterator ci(*aSegment);
  while (!ci.IsEnded()) {
    const AudioChunk& chunk = *ci;
    nsAutoTArray<const void*,2> channels;
    if (chunk.GetDuration() > UINT32_MAX) {
      // This will cause us to OOM or overflow below. So let's just bail.
      NS_ERROR("Chunk duration out of bounds");
      return;
    }
    uint32_t duration = uint32_t(chunk.GetDuration());

    if (chunk.IsNull()) {
      // Silent chunk: feed explicit zeros so the resampler state stays in
      // sync with the timeline. Every channel aliases the same zero buffer.
      nsAutoTArray<AudioDataValue,1024> silence;
      silence.SetLength(duration);
      PodZero(silence.Elements(), silence.Length());
      channels.SetLength(mResamplerChannelCount);
      for (uint32_t i = 0; i < channels.Length(); ++i) {
        channels[i] = silence.Elements();
      }
      ResampleChannels(channels, duration, AUDIO_OUTPUT_FORMAT, 0.0f);
    } else if (chunk.mChannelData.Length() == mResamplerChannelCount) {
      // Common case, since mResamplerChannelCount is set to the first chunk's
      // number of channels.
      channels.AppendElements(chunk.mChannelData);
      ResampleChannels(channels, duration, chunk.mBufferFormat, chunk.mVolume);
    } else {
      // Uncommon case. Since downmixing requires channels to be floats,
      // convert everything to floats now.
      uint32_t upChannels = GetAudioChannelsSuperset(chunk.mChannelData.Length(), mResamplerChannelCount);
      nsTArray<float> buffer;
      if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
        channels.AppendElements(chunk.mChannelData);
      } else {
        NS_ASSERTION(chunk.mBufferFormat == AUDIO_FORMAT_S16, "Unknown format");
        // Guard the channels*duration multiplication below.
        if (duration > UINT32_MAX/chunk.mChannelData.Length()) {
          NS_ERROR("Chunk duration out of bounds");
          return;
        }
        // Convert S16 -> float, one contiguous sub-buffer per channel.
        buffer.SetLength(chunk.mChannelData.Length()*duration);
        for (uint32_t i = 0; i < chunk.mChannelData.Length(); ++i) {
          const int16_t* samples = static_cast<const int16_t*>(chunk.mChannelData[i]);
          float* converted = &buffer[i*duration];
          for (uint32_t j = 0; j < duration; ++j) {
            converted[j] = AudioSampleToFloat(samples[j]);
          }
          channels.AppendElement(converted);
        }
      }
      nsTArray<float> zeroes;
      if (channels.Length() < upChannels) {
        // Up-mix by letting extra channels point at a shared zero buffer.
        zeroes.SetLength(duration);
        PodZero(zeroes.Elements(), zeroes.Length());
        AudioChannelsUpMix(&channels, upChannels, zeroes.Elements());
      }
      if (channels.Length() == mResamplerChannelCount) {
        ResampleChannels(channels, duration, AUDIO_FORMAT_FLOAT32, chunk.mVolume);
      } else {
        // Still too many channels: down-mix into freshly allocated planar
        // float buffers, then resample those.
        nsTArray<float> output;
        if (duration > UINT32_MAX/mResamplerChannelCount) {
          NS_ERROR("Chunk duration out of bounds");
          return;
        }
        output.SetLength(duration*mResamplerChannelCount);
        nsAutoTArray<float*,2> outputPtrs;
        nsAutoTArray<const void*,2> outputPtrsConst;
        for (uint32_t i = 0; i < mResamplerChannelCount; ++i) {
          outputPtrs.AppendElement(output.Elements() + i*duration);
          outputPtrsConst.AppendElement(outputPtrs[i]);
        }
        AudioChannelsDownMix(channels, outputPtrs.Elements(), outputPtrs.Length(), duration);
        ResampleChannels(outputPtrsConst, duration, AUDIO_FORMAT_FLOAT32, chunk.mVolume);
      }
    }
    ci.Next();
  }
}
michael@0 255
michael@0 256 /**
michael@0 257 * Copies the data in aInput to aOffsetInBlock within aBlock. All samples must
michael@0 258 * be float. Both chunks must have the same number of channels (or else
michael@0 259 * aInput is null). aBlock must have been allocated with AllocateInputBlock.
michael@0 260 */
michael@0 261 static void
michael@0 262 CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock, uint32_t aOffsetInBlock)
michael@0 263 {
michael@0 264 uint32_t d = aInput.GetDuration();
michael@0 265 for (uint32_t i = 0; i < aBlock->mChannelData.Length(); ++i) {
michael@0 266 float* out = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[i])) +
michael@0 267 aOffsetInBlock;
michael@0 268 if (aInput.IsNull()) {
michael@0 269 PodZero(out, d);
michael@0 270 } else {
michael@0 271 const float* in = static_cast<const float*>(aInput.mChannelData[i]);
michael@0 272 ConvertAudioSamplesWithScale(in, out, d, aInput.mVolume);
michael@0 273 }
michael@0 274 }
michael@0 275 }
michael@0 276
michael@0 277 /**
michael@0 278 * Converts the data in aSegment to a single chunk aChunk. Every chunk in
michael@0 279 * aSegment must have the same number of channels (or be null). aSegment must have
michael@0 280 * duration WEBAUDIO_BLOCK_SIZE. Every chunk in aSegment must be in float format.
michael@0 281 */
michael@0 282 static void
michael@0 283 ConvertSegmentToAudioBlock(AudioSegment* aSegment, AudioChunk* aBlock)
michael@0 284 {
michael@0 285 NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration");
michael@0 286
michael@0 287 {
michael@0 288 AudioSegment::ChunkIterator ci(*aSegment);
michael@0 289 NS_ASSERTION(!ci.IsEnded(), "Segment must have at least one chunk");
michael@0 290 AudioChunk& firstChunk = *ci;
michael@0 291 ci.Next();
michael@0 292 if (ci.IsEnded()) {
michael@0 293 *aBlock = firstChunk;
michael@0 294 return;
michael@0 295 }
michael@0 296
michael@0 297 while (ci->IsNull() && !ci.IsEnded()) {
michael@0 298 ci.Next();
michael@0 299 }
michael@0 300 if (ci.IsEnded()) {
michael@0 301 // All null.
michael@0 302 aBlock->SetNull(WEBAUDIO_BLOCK_SIZE);
michael@0 303 return;
michael@0 304 }
michael@0 305
michael@0 306 AllocateAudioBlock(ci->mChannelData.Length(), aBlock);
michael@0 307 }
michael@0 308
michael@0 309 AudioSegment::ChunkIterator ci(*aSegment);
michael@0 310 uint32_t duration = 0;
michael@0 311 while (!ci.IsEnded()) {
michael@0 312 CopyChunkToBlock(*ci, aBlock, duration);
michael@0 313 duration += ci->GetDuration();
michael@0 314 ci.Next();
michael@0 315 }
michael@0 316 }
michael@0 317
/**
 * Produces one WEBAUDIO_BLOCK_SIZE block of output in mLastChunks[0] for the
 * graph interval [aFrom, aTo): pulls audio from every track of the (single)
 * input stream, resamples each track to the graph rate via its TrackMapEntry,
 * slices one block's worth from each track's resampled data, and mixes the
 * tracks together. Tracks that disappeared from the input are dropped from
 * mTrackMap.
 */
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  // Indexed by track-map index; entries left false mark stale map entries.
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    uint32_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }

    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    // Input gathered (at the input track's rate) for this processing block.
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    // Walk [aFrom, aTo) interval by interval, where each interval has a
    // constant blocked/unblocked state for the input.
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      // Number of input-rate ticks this interval must contribute.
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
          inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is blocked.
          trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        // Resume consuming input where the previous interval left off.
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() < mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE - trackMap->mResampledData.GetDuration());
    }
    // Take exactly one block of resampled data for this track, then drop
    // what we've consumed.
    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    inputChannels = GetAudioChannelsSuperset(inputChannels, trackMap->mResamplerChannelCount);
  }

  // Remove map entries for tracks that no longer exist (iterate backwards so
  // removal doesn't shift unvisited indices).
  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }

  // Mix all per-track blocks into mLastChunks[0].
  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          // First non-null contribution allocates the output block.
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    // Nothing contributed: emit silence.
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream graph along with null data.
  AdvanceOutputSegment();
}
michael@0 454
michael@0 455 }

mercurial