Thu, 22 Jan 2015 13:21:57 +0100
Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
michael@0 | 1 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
michael@0 | 3 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "AudioBufferSourceNode.h" |
michael@0 | 8 | #include "mozilla/dom/AudioBufferSourceNodeBinding.h" |
michael@0 | 9 | #include "mozilla/dom/AudioParam.h" |
michael@0 | 10 | #include "nsMathUtils.h" |
michael@0 | 11 | #include "AudioNodeEngine.h" |
michael@0 | 12 | #include "AudioNodeStream.h" |
michael@0 | 13 | #include "AudioDestinationNode.h" |
michael@0 | 14 | #include "AudioParamTimeline.h" |
michael@0 | 15 | #include "speex/speex_resampler.h" |
michael@0 | 16 | #include <limits> |
michael@0 | 17 | |
michael@0 | 18 | namespace mozilla { |
michael@0 | 19 | namespace dom { |
michael@0 | 20 | |
NS_IMPL_CYCLE_COLLECTION_CLASS(AudioBufferSourceNode)

// Unlink: drop this node's strong references (mBuffer, mPlaybackRate) and
// detach from the AudioContext before delegating to AudioNode's unlink.
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioBufferSourceNode)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mBuffer)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPlaybackRate)
  if (tmp->Context()) {
    // AudioNode's Unlink implementation disconnects us from the graph
    // too, but we need to do this right here to make sure that
    // UnregisterAudioBufferSourceNode can properly untangle us from
    // the possibly connected PannerNodes.
    tmp->DisconnectFromGraph();
    tmp->Context()->UnregisterAudioBufferSourceNode(tmp);
  }
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)

// Traverse: report the same strong references to the cycle collector.
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioBufferSourceNode, AudioNode)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mBuffer)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPlaybackRate)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

// QueryInterface / AddRef / Release all forward to AudioNode.
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(AudioBufferSourceNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioBufferSourceNode, AudioNode)
michael@0 | 46 | |
michael@0 | 47 | /** |
michael@0 | 48 | * Media-thread playback engine for AudioBufferSourceNode. |
michael@0 | 49 | * Nothing is played until a non-null buffer has been set (via |
michael@0 | 50 | * AudioNodeStream::SetBuffer) and a non-zero mBufferEnd has been set (via |
michael@0 | 51 | * AudioNodeStream::SetInt32Parameter). |
michael@0 | 52 | */ |
michael@0 | 53 | class AudioBufferSourceNodeEngine : public AudioNodeEngine |
michael@0 | 54 | { |
michael@0 | 55 | public: |
michael@0 | 56 | explicit AudioBufferSourceNodeEngine(AudioNode* aNode, |
michael@0 | 57 | AudioDestinationNode* aDestination) : |
michael@0 | 58 | AudioNodeEngine(aNode), |
michael@0 | 59 | mStart(0.0), mBeginProcessing(0), |
michael@0 | 60 | mStop(TRACK_TICKS_MAX), |
michael@0 | 61 | mResampler(nullptr), mRemainingResamplerTail(0), |
michael@0 | 62 | mBufferEnd(0), |
michael@0 | 63 | mLoopStart(0), mLoopEnd(0), |
michael@0 | 64 | mBufferSampleRate(0), mBufferPosition(0), mChannels(0), |
michael@0 | 65 | mDopplerShift(1.0f), |
michael@0 | 66 | mDestination(static_cast<AudioNodeStream*>(aDestination->Stream())), |
michael@0 | 67 | mPlaybackRateTimeline(1.0f), mLoop(false) |
michael@0 | 68 | {} |
michael@0 | 69 | |
michael@0 | 70 | ~AudioBufferSourceNodeEngine() |
michael@0 | 71 | { |
michael@0 | 72 | if (mResampler) { |
michael@0 | 73 | speex_resampler_destroy(mResampler); |
michael@0 | 74 | } |
michael@0 | 75 | } |
michael@0 | 76 | |
michael@0 | 77 | void SetSourceStream(AudioNodeStream* aSource) |
michael@0 | 78 | { |
michael@0 | 79 | mSource = aSource; |
michael@0 | 80 | } |
michael@0 | 81 | |
michael@0 | 82 | virtual void SetTimelineParameter(uint32_t aIndex, |
michael@0 | 83 | const dom::AudioParamTimeline& aValue, |
michael@0 | 84 | TrackRate aSampleRate) MOZ_OVERRIDE |
michael@0 | 85 | { |
michael@0 | 86 | switch (aIndex) { |
michael@0 | 87 | case AudioBufferSourceNode::PLAYBACKRATE: |
michael@0 | 88 | mPlaybackRateTimeline = aValue; |
michael@0 | 89 | WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, mSource, mDestination); |
michael@0 | 90 | break; |
michael@0 | 91 | default: |
michael@0 | 92 | NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter"); |
michael@0 | 93 | } |
michael@0 | 94 | } |
michael@0 | 95 | virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam) |
michael@0 | 96 | { |
michael@0 | 97 | switch (aIndex) { |
michael@0 | 98 | case AudioBufferSourceNode::STOP: mStop = aParam; break; |
michael@0 | 99 | default: |
michael@0 | 100 | NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter"); |
michael@0 | 101 | } |
michael@0 | 102 | } |
michael@0 | 103 | virtual void SetDoubleParameter(uint32_t aIndex, double aParam) |
michael@0 | 104 | { |
michael@0 | 105 | switch (aIndex) { |
michael@0 | 106 | case AudioBufferSourceNode::START: |
michael@0 | 107 | MOZ_ASSERT(!mStart, "Another START?"); |
michael@0 | 108 | mStart = mSource->TimeFromDestinationTime(mDestination, aParam) * |
michael@0 | 109 | mSource->SampleRate(); |
michael@0 | 110 | // Round to nearest |
michael@0 | 111 | mBeginProcessing = mStart + 0.5; |
michael@0 | 112 | break; |
michael@0 | 113 | case AudioBufferSourceNode::DOPPLERSHIFT: |
michael@0 | 114 | mDopplerShift = aParam > 0 && aParam == aParam ? aParam : 1.0; |
michael@0 | 115 | break; |
michael@0 | 116 | default: |
michael@0 | 117 | NS_ERROR("Bad AudioBufferSourceNodeEngine double parameter."); |
michael@0 | 118 | }; |
michael@0 | 119 | } |
michael@0 | 120 | virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) |
michael@0 | 121 | { |
michael@0 | 122 | switch (aIndex) { |
michael@0 | 123 | case AudioBufferSourceNode::SAMPLE_RATE: mBufferSampleRate = aParam; break; |
michael@0 | 124 | case AudioBufferSourceNode::BUFFERSTART: |
michael@0 | 125 | if (mBufferPosition == 0) { |
michael@0 | 126 | mBufferPosition = aParam; |
michael@0 | 127 | } |
michael@0 | 128 | break; |
michael@0 | 129 | case AudioBufferSourceNode::BUFFEREND: mBufferEnd = aParam; break; |
michael@0 | 130 | case AudioBufferSourceNode::LOOP: mLoop = !!aParam; break; |
michael@0 | 131 | case AudioBufferSourceNode::LOOPSTART: mLoopStart = aParam; break; |
michael@0 | 132 | case AudioBufferSourceNode::LOOPEND: mLoopEnd = aParam; break; |
michael@0 | 133 | default: |
michael@0 | 134 | NS_ERROR("Bad AudioBufferSourceNodeEngine Int32Parameter"); |
michael@0 | 135 | } |
michael@0 | 136 | } |
michael@0 | 137 | virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) |
michael@0 | 138 | { |
michael@0 | 139 | mBuffer = aBuffer; |
michael@0 | 140 | } |
michael@0 | 141 | |
michael@0 | 142 | bool BegunResampling() |
michael@0 | 143 | { |
michael@0 | 144 | return mBeginProcessing == -TRACK_TICKS_MAX; |
michael@0 | 145 | } |
michael@0 | 146 | |
michael@0 | 147 | void UpdateResampler(int32_t aOutRate, uint32_t aChannels) |
michael@0 | 148 | { |
michael@0 | 149 | if (mResampler && |
michael@0 | 150 | (aChannels != mChannels || |
michael@0 | 151 | // If the resampler has begun, then it will have moved |
michael@0 | 152 | // mBufferPosition to after the samples it has read, but it hasn't |
michael@0 | 153 | // output its buffered samples. Keep using the resampler, even if |
michael@0 | 154 | // the rates now match, so that this latent segment is output. |
michael@0 | 155 | (aOutRate == mBufferSampleRate && !BegunResampling()))) { |
michael@0 | 156 | speex_resampler_destroy(mResampler); |
michael@0 | 157 | mResampler = nullptr; |
michael@0 | 158 | mRemainingResamplerTail = 0; |
michael@0 | 159 | mBeginProcessing = mStart + 0.5; |
michael@0 | 160 | } |
michael@0 | 161 | |
michael@0 | 162 | if (aOutRate == mBufferSampleRate && !mResampler) { |
michael@0 | 163 | return; |
michael@0 | 164 | } |
michael@0 | 165 | |
michael@0 | 166 | if (!mResampler) { |
michael@0 | 167 | mChannels = aChannels; |
michael@0 | 168 | mResampler = speex_resampler_init(mChannels, mBufferSampleRate, aOutRate, |
michael@0 | 169 | SPEEX_RESAMPLER_QUALITY_DEFAULT, |
michael@0 | 170 | nullptr); |
michael@0 | 171 | } else { |
michael@0 | 172 | uint32_t currentOutSampleRate, currentInSampleRate; |
michael@0 | 173 | speex_resampler_get_rate(mResampler, ¤tInSampleRate, |
michael@0 | 174 | ¤tOutSampleRate); |
michael@0 | 175 | if (currentOutSampleRate == static_cast<uint32_t>(aOutRate)) { |
michael@0 | 176 | return; |
michael@0 | 177 | } |
michael@0 | 178 | speex_resampler_set_rate(mResampler, currentInSampleRate, aOutRate); |
michael@0 | 179 | } |
michael@0 | 180 | |
michael@0 | 181 | if (!BegunResampling()) { |
michael@0 | 182 | // Low pass filter effects from the resampler mean that samples before |
michael@0 | 183 | // the start time are influenced by resampling the buffer. The input |
michael@0 | 184 | // latency indicates half the filter width. |
michael@0 | 185 | int64_t inputLatency = speex_resampler_get_input_latency(mResampler); |
michael@0 | 186 | uint32_t ratioNum, ratioDen; |
michael@0 | 187 | speex_resampler_get_ratio(mResampler, &ratioNum, &ratioDen); |
michael@0 | 188 | // The output subsample resolution supported in aligning the resampler |
michael@0 | 189 | // is ratioNum. First round the start time to the nearest subsample. |
michael@0 | 190 | int64_t subsample = mStart * ratioNum + 0.5; |
michael@0 | 191 | // Now include the leading effects of the filter, and round *up* to the |
michael@0 | 192 | // next whole tick, because there is no effect on samples outside the |
michael@0 | 193 | // filter width. |
michael@0 | 194 | mBeginProcessing = |
michael@0 | 195 | (subsample - inputLatency * ratioDen + ratioNum - 1) / ratioNum; |
michael@0 | 196 | } |
michael@0 | 197 | } |
michael@0 | 198 | |
michael@0 | 199 | // Borrow a full buffer of size WEBAUDIO_BLOCK_SIZE from the source buffer |
michael@0 | 200 | // at offset aSourceOffset. This avoids copying memory. |
michael@0 | 201 | void BorrowFromInputBuffer(AudioChunk* aOutput, |
michael@0 | 202 | uint32_t aChannels) |
michael@0 | 203 | { |
michael@0 | 204 | aOutput->mDuration = WEBAUDIO_BLOCK_SIZE; |
michael@0 | 205 | aOutput->mBuffer = mBuffer; |
michael@0 | 206 | aOutput->mChannelData.SetLength(aChannels); |
michael@0 | 207 | for (uint32_t i = 0; i < aChannels; ++i) { |
michael@0 | 208 | aOutput->mChannelData[i] = mBuffer->GetData(i) + mBufferPosition; |
michael@0 | 209 | } |
michael@0 | 210 | aOutput->mVolume = 1.0f; |
michael@0 | 211 | aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32; |
michael@0 | 212 | } |
michael@0 | 213 | |
michael@0 | 214 | // Copy aNumberOfFrames frames from the source buffer at offset aSourceOffset |
michael@0 | 215 | // and put it at offset aBufferOffset in the destination buffer. |
michael@0 | 216 | void CopyFromInputBuffer(AudioChunk* aOutput, |
michael@0 | 217 | uint32_t aChannels, |
michael@0 | 218 | uintptr_t aOffsetWithinBlock, |
michael@0 | 219 | uint32_t aNumberOfFrames) { |
michael@0 | 220 | for (uint32_t i = 0; i < aChannels; ++i) { |
michael@0 | 221 | float* baseChannelData = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])); |
michael@0 | 222 | memcpy(baseChannelData + aOffsetWithinBlock, |
michael@0 | 223 | mBuffer->GetData(i) + mBufferPosition, |
michael@0 | 224 | aNumberOfFrames * sizeof(float)); |
michael@0 | 225 | } |
michael@0 | 226 | } |
michael@0 | 227 | |
michael@0 | 228 | // Resamples input data to an output buffer, according to |mBufferSampleRate| and |
michael@0 | 229 | // the playbackRate. |
michael@0 | 230 | // The number of frames consumed/produced depends on the amount of space |
michael@0 | 231 | // remaining in both the input and output buffer, and the playback rate (that |
michael@0 | 232 | // is, the ratio between the output samplerate and the input samplerate). |
michael@0 | 233 | void CopyFromInputBufferWithResampling(AudioNodeStream* aStream, |
michael@0 | 234 | AudioChunk* aOutput, |
michael@0 | 235 | uint32_t aChannels, |
michael@0 | 236 | uint32_t* aOffsetWithinBlock, |
michael@0 | 237 | TrackTicks* aCurrentPosition, |
michael@0 | 238 | int32_t aBufferMax) { |
michael@0 | 239 | // TODO: adjust for mStop (see bug 913854 comment 9). |
michael@0 | 240 | uint32_t availableInOutputBuffer = |
michael@0 | 241 | WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock; |
michael@0 | 242 | SpeexResamplerState* resampler = mResampler; |
michael@0 | 243 | MOZ_ASSERT(aChannels > 0); |
michael@0 | 244 | |
michael@0 | 245 | if (mBufferPosition < aBufferMax) { |
michael@0 | 246 | uint32_t availableInInputBuffer = aBufferMax - mBufferPosition; |
michael@0 | 247 | uint32_t ratioNum, ratioDen; |
michael@0 | 248 | speex_resampler_get_ratio(resampler, &ratioNum, &ratioDen); |
michael@0 | 249 | // Limit the number of input samples copied and possibly |
michael@0 | 250 | // format-converted for resampling by estimating how many will be used. |
michael@0 | 251 | // This may be a little small if still filling the resampler with |
michael@0 | 252 | // initial data, but we'll get called again and it will work out. |
michael@0 | 253 | uint32_t inputLimit = availableInOutputBuffer * ratioNum / ratioDen + 10; |
michael@0 | 254 | if (!BegunResampling()) { |
michael@0 | 255 | // First time the resampler is used. |
michael@0 | 256 | uint32_t inputLatency = speex_resampler_get_input_latency(resampler); |
michael@0 | 257 | inputLimit += inputLatency; |
michael@0 | 258 | // If starting after mStart, then play from the beginning of the |
michael@0 | 259 | // buffer, but correct for input latency. If starting before mStart, |
michael@0 | 260 | // then align the resampler so that the time corresponding to the |
michael@0 | 261 | // first input sample is mStart. |
michael@0 | 262 | uint32_t skipFracNum = inputLatency * ratioDen; |
michael@0 | 263 | double leadTicks = mStart - *aCurrentPosition; |
michael@0 | 264 | if (leadTicks > 0.0) { |
michael@0 | 265 | // Round to nearest output subsample supported by the resampler at |
michael@0 | 266 | // these rates. |
michael@0 | 267 | skipFracNum -= leadTicks * ratioNum + 0.5; |
michael@0 | 268 | MOZ_ASSERT(skipFracNum < INT32_MAX, "mBeginProcessing is wrong?"); |
michael@0 | 269 | } |
michael@0 | 270 | speex_resampler_set_skip_frac_num(resampler, skipFracNum); |
michael@0 | 271 | |
michael@0 | 272 | mBeginProcessing = -TRACK_TICKS_MAX; |
michael@0 | 273 | } |
michael@0 | 274 | inputLimit = std::min(inputLimit, availableInInputBuffer); |
michael@0 | 275 | |
michael@0 | 276 | for (uint32_t i = 0; true; ) { |
michael@0 | 277 | uint32_t inSamples = inputLimit; |
michael@0 | 278 | const float* inputData = mBuffer->GetData(i) + mBufferPosition; |
michael@0 | 279 | |
michael@0 | 280 | uint32_t outSamples = availableInOutputBuffer; |
michael@0 | 281 | float* outputData = |
michael@0 | 282 | static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])) + |
michael@0 | 283 | *aOffsetWithinBlock; |
michael@0 | 284 | |
michael@0 | 285 | WebAudioUtils::SpeexResamplerProcess(resampler, i, |
michael@0 | 286 | inputData, &inSamples, |
michael@0 | 287 | outputData, &outSamples); |
michael@0 | 288 | if (++i == aChannels) { |
michael@0 | 289 | mBufferPosition += inSamples; |
michael@0 | 290 | MOZ_ASSERT(mBufferPosition <= mBufferEnd || mLoop); |
michael@0 | 291 | *aOffsetWithinBlock += outSamples; |
michael@0 | 292 | *aCurrentPosition += outSamples; |
michael@0 | 293 | if (inSamples == availableInInputBuffer && !mLoop) { |
michael@0 | 294 | // We'll feed in enough zeros to empty out the resampler's memory. |
michael@0 | 295 | // This handles the output latency as well as capturing the low |
michael@0 | 296 | // pass effects of the resample filter. |
michael@0 | 297 | mRemainingResamplerTail = |
michael@0 | 298 | 2 * speex_resampler_get_input_latency(resampler) - 1; |
michael@0 | 299 | } |
michael@0 | 300 | return; |
michael@0 | 301 | } |
michael@0 | 302 | } |
michael@0 | 303 | } else { |
michael@0 | 304 | for (uint32_t i = 0; true; ) { |
michael@0 | 305 | uint32_t inSamples = mRemainingResamplerTail; |
michael@0 | 306 | uint32_t outSamples = availableInOutputBuffer; |
michael@0 | 307 | float* outputData = |
michael@0 | 308 | static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])) + |
michael@0 | 309 | *aOffsetWithinBlock; |
michael@0 | 310 | |
michael@0 | 311 | // AudioDataValue* for aIn selects the function that does not try to |
michael@0 | 312 | // copy and format-convert input data. |
michael@0 | 313 | WebAudioUtils::SpeexResamplerProcess(resampler, i, |
michael@0 | 314 | static_cast<AudioDataValue*>(nullptr), &inSamples, |
michael@0 | 315 | outputData, &outSamples); |
michael@0 | 316 | if (++i == aChannels) { |
michael@0 | 317 | mRemainingResamplerTail -= inSamples; |
michael@0 | 318 | MOZ_ASSERT(mRemainingResamplerTail >= 0); |
michael@0 | 319 | *aOffsetWithinBlock += outSamples; |
michael@0 | 320 | *aCurrentPosition += outSamples; |
michael@0 | 321 | break; |
michael@0 | 322 | } |
michael@0 | 323 | } |
michael@0 | 324 | } |
michael@0 | 325 | } |
michael@0 | 326 | |
michael@0 | 327 | /** |
michael@0 | 328 | * Fill aOutput with as many zero frames as we can, and advance |
michael@0 | 329 | * aOffsetWithinBlock and aCurrentPosition based on how many frames we write. |
michael@0 | 330 | * This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or |
michael@0 | 331 | * aCurrentPosition past aMaxPos. This function knows when it needs to |
michael@0 | 332 | * allocate the output buffer, and also optimizes the case where it can avoid |
michael@0 | 333 | * memory allocations. |
michael@0 | 334 | */ |
michael@0 | 335 | void FillWithZeroes(AudioChunk* aOutput, |
michael@0 | 336 | uint32_t aChannels, |
michael@0 | 337 | uint32_t* aOffsetWithinBlock, |
michael@0 | 338 | TrackTicks* aCurrentPosition, |
michael@0 | 339 | TrackTicks aMaxPos) |
michael@0 | 340 | { |
michael@0 | 341 | MOZ_ASSERT(*aCurrentPosition < aMaxPos); |
michael@0 | 342 | uint32_t numFrames = |
michael@0 | 343 | std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock, |
michael@0 | 344 | aMaxPos - *aCurrentPosition); |
michael@0 | 345 | if (numFrames == WEBAUDIO_BLOCK_SIZE) { |
michael@0 | 346 | aOutput->SetNull(numFrames); |
michael@0 | 347 | } else { |
michael@0 | 348 | if (*aOffsetWithinBlock == 0) { |
michael@0 | 349 | AllocateAudioBlock(aChannels, aOutput); |
michael@0 | 350 | } |
michael@0 | 351 | WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames); |
michael@0 | 352 | } |
michael@0 | 353 | *aOffsetWithinBlock += numFrames; |
michael@0 | 354 | *aCurrentPosition += numFrames; |
michael@0 | 355 | } |
michael@0 | 356 | |
michael@0 | 357 | /** |
michael@0 | 358 | * Copy as many frames as possible from the source buffer to aOutput, and |
michael@0 | 359 | * advance aOffsetWithinBlock and aCurrentPosition based on how many frames |
michael@0 | 360 | * we write. This will never advance aOffsetWithinBlock past |
michael@0 | 361 | * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop. It takes data from |
michael@0 | 362 | * the buffer at aBufferOffset, and never takes more data than aBufferMax. |
michael@0 | 363 | * This function knows when it needs to allocate the output buffer, and also |
michael@0 | 364 | * optimizes the case where it can avoid memory allocations. |
michael@0 | 365 | */ |
michael@0 | 366 | void CopyFromBuffer(AudioNodeStream* aStream, |
michael@0 | 367 | AudioChunk* aOutput, |
michael@0 | 368 | uint32_t aChannels, |
michael@0 | 369 | uint32_t* aOffsetWithinBlock, |
michael@0 | 370 | TrackTicks* aCurrentPosition, |
michael@0 | 371 | int32_t aBufferMax) |
michael@0 | 372 | { |
michael@0 | 373 | MOZ_ASSERT(*aCurrentPosition < mStop); |
michael@0 | 374 | uint32_t numFrames = |
michael@0 | 375 | std::min(std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock, |
michael@0 | 376 | aBufferMax - mBufferPosition), |
michael@0 | 377 | mStop - *aCurrentPosition); |
michael@0 | 378 | if (numFrames == WEBAUDIO_BLOCK_SIZE && !mResampler) { |
michael@0 | 379 | MOZ_ASSERT(mBufferPosition < aBufferMax); |
michael@0 | 380 | BorrowFromInputBuffer(aOutput, aChannels); |
michael@0 | 381 | *aOffsetWithinBlock += numFrames; |
michael@0 | 382 | *aCurrentPosition += numFrames; |
michael@0 | 383 | mBufferPosition += numFrames; |
michael@0 | 384 | } else { |
michael@0 | 385 | if (*aOffsetWithinBlock == 0) { |
michael@0 | 386 | AllocateAudioBlock(aChannels, aOutput); |
michael@0 | 387 | } |
michael@0 | 388 | if (!mResampler) { |
michael@0 | 389 | MOZ_ASSERT(mBufferPosition < aBufferMax); |
michael@0 | 390 | CopyFromInputBuffer(aOutput, aChannels, *aOffsetWithinBlock, numFrames); |
michael@0 | 391 | *aOffsetWithinBlock += numFrames; |
michael@0 | 392 | *aCurrentPosition += numFrames; |
michael@0 | 393 | mBufferPosition += numFrames; |
michael@0 | 394 | } else { |
michael@0 | 395 | CopyFromInputBufferWithResampling(aStream, aOutput, aChannels, aOffsetWithinBlock, aCurrentPosition, aBufferMax); |
michael@0 | 396 | } |
michael@0 | 397 | } |
michael@0 | 398 | } |
michael@0 | 399 | |
michael@0 | 400 | int32_t ComputeFinalOutSampleRate(float aPlaybackRate) |
michael@0 | 401 | { |
michael@0 | 402 | // Make sure the playback rate and the doppler shift are something |
michael@0 | 403 | // our resampler can work with. |
michael@0 | 404 | int32_t rate = WebAudioUtils:: |
michael@0 | 405 | TruncateFloatToInt<int32_t>(mSource->SampleRate() / |
michael@0 | 406 | (aPlaybackRate * mDopplerShift)); |
michael@0 | 407 | return rate ? rate : mBufferSampleRate; |
michael@0 | 408 | } |
michael@0 | 409 | |
michael@0 | 410 | void UpdateSampleRateIfNeeded(uint32_t aChannels) |
michael@0 | 411 | { |
michael@0 | 412 | float playbackRate; |
michael@0 | 413 | |
michael@0 | 414 | if (mPlaybackRateTimeline.HasSimpleValue()) { |
michael@0 | 415 | playbackRate = mPlaybackRateTimeline.GetValue(); |
michael@0 | 416 | } else { |
michael@0 | 417 | playbackRate = mPlaybackRateTimeline.GetValueAtTime(mSource->GetCurrentPosition()); |
michael@0 | 418 | } |
michael@0 | 419 | if (playbackRate <= 0 || playbackRate != playbackRate) { |
michael@0 | 420 | playbackRate = 1.0f; |
michael@0 | 421 | } |
michael@0 | 422 | |
michael@0 | 423 | int32_t outRate = ComputeFinalOutSampleRate(playbackRate); |
michael@0 | 424 | UpdateResampler(outRate, aChannels); |
michael@0 | 425 | } |
michael@0 | 426 | |
michael@0 | 427 | virtual void ProcessBlock(AudioNodeStream* aStream, |
michael@0 | 428 | const AudioChunk& aInput, |
michael@0 | 429 | AudioChunk* aOutput, |
michael@0 | 430 | bool* aFinished) |
michael@0 | 431 | { |
michael@0 | 432 | if (!mBuffer || !mBufferEnd) { |
michael@0 | 433 | aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); |
michael@0 | 434 | return; |
michael@0 | 435 | } |
michael@0 | 436 | |
michael@0 | 437 | uint32_t channels = mBuffer->GetChannels(); |
michael@0 | 438 | if (!channels) { |
michael@0 | 439 | aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); |
michael@0 | 440 | return; |
michael@0 | 441 | } |
michael@0 | 442 | |
michael@0 | 443 | // WebKit treats the playbackRate as a k-rate parameter in their code, |
michael@0 | 444 | // despite the spec saying that it should be an a-rate parameter. We treat |
michael@0 | 445 | // it as k-rate. Spec bug: https://www.w3.org/Bugs/Public/show_bug.cgi?id=21592 |
michael@0 | 446 | UpdateSampleRateIfNeeded(channels); |
michael@0 | 447 | |
michael@0 | 448 | uint32_t written = 0; |
michael@0 | 449 | TrackTicks streamPosition = aStream->GetCurrentPosition(); |
michael@0 | 450 | while (written < WEBAUDIO_BLOCK_SIZE) { |
michael@0 | 451 | if (mStop != TRACK_TICKS_MAX && |
michael@0 | 452 | streamPosition >= mStop) { |
michael@0 | 453 | FillWithZeroes(aOutput, channels, &written, &streamPosition, TRACK_TICKS_MAX); |
michael@0 | 454 | continue; |
michael@0 | 455 | } |
michael@0 | 456 | if (streamPosition < mBeginProcessing) { |
michael@0 | 457 | FillWithZeroes(aOutput, channels, &written, &streamPosition, |
michael@0 | 458 | mBeginProcessing); |
michael@0 | 459 | continue; |
michael@0 | 460 | } |
michael@0 | 461 | if (mLoop) { |
michael@0 | 462 | // mLoopEnd can become less than mBufferPosition when a LOOPEND engine |
michael@0 | 463 | // parameter is received after "loopend" is changed on the node or a |
michael@0 | 464 | // new buffer with lower samplerate is set. |
michael@0 | 465 | if (mBufferPosition >= mLoopEnd) { |
michael@0 | 466 | mBufferPosition = mLoopStart; |
michael@0 | 467 | } |
michael@0 | 468 | CopyFromBuffer(aStream, aOutput, channels, &written, &streamPosition, mLoopEnd); |
michael@0 | 469 | } else { |
michael@0 | 470 | if (mBufferPosition < mBufferEnd || mRemainingResamplerTail) { |
michael@0 | 471 | CopyFromBuffer(aStream, aOutput, channels, &written, &streamPosition, mBufferEnd); |
michael@0 | 472 | } else { |
michael@0 | 473 | FillWithZeroes(aOutput, channels, &written, &streamPosition, TRACK_TICKS_MAX); |
michael@0 | 474 | } |
michael@0 | 475 | } |
michael@0 | 476 | } |
michael@0 | 477 | |
michael@0 | 478 | // We've finished if we've gone past mStop, or if we're past mDuration when |
michael@0 | 479 | // looping is disabled. |
michael@0 | 480 | if (streamPosition >= mStop || |
michael@0 | 481 | (!mLoop && mBufferPosition >= mBufferEnd && !mRemainingResamplerTail)) { |
michael@0 | 482 | *aFinished = true; |
michael@0 | 483 | } |
michael@0 | 484 | } |
michael@0 | 485 | |
michael@0 | 486 | virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE |
michael@0 | 487 | { |
michael@0 | 488 | // Not owned: |
michael@0 | 489 | // - mBuffer - shared w/ AudioNode |
michael@0 | 490 | // - mPlaybackRateTimeline - shared w/ AudioNode |
michael@0 | 491 | |
michael@0 | 492 | size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 493 | |
michael@0 | 494 | // NB: We need to modify speex if we want the full memory picture, internal |
michael@0 | 495 | // fields that need measuring noted below. |
michael@0 | 496 | // - mResampler->mem |
michael@0 | 497 | // - mResampler->sinc_table |
michael@0 | 498 | // - mResampler->last_sample |
michael@0 | 499 | // - mResampler->magic_samples |
michael@0 | 500 | // - mResampler->samp_frac_num |
michael@0 | 501 | amount += aMallocSizeOf(mResampler); |
michael@0 | 502 | |
michael@0 | 503 | return amount; |
michael@0 | 504 | } |
michael@0 | 505 | |
michael@0 | 506 | virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE |
michael@0 | 507 | { |
michael@0 | 508 | return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 509 | } |
michael@0 | 510 | |
michael@0 | 511 | double mStart; // including the fractional position between ticks |
michael@0 | 512 | // Low pass filter effects from the resampler mean that samples before the |
michael@0 | 513 | // start time are influenced by resampling the buffer. mBeginProcessing |
michael@0 | 514 | // includes the extent of this filter. The special value of -TRACK_TICKS_MAX |
michael@0 | 515 | // indicates that the resampler has begun processing. |
michael@0 | 516 | TrackTicks mBeginProcessing; |
michael@0 | 517 | TrackTicks mStop; |
michael@0 | 518 | nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer; |
michael@0 | 519 | SpeexResamplerState* mResampler; |
michael@0 | 520 | // mRemainingResamplerTail, like mBufferPosition, and |
michael@0 | 521 | // mBufferEnd, is measured in input buffer samples. |
michael@0 | 522 | int mRemainingResamplerTail; |
michael@0 | 523 | int32_t mBufferEnd; |
michael@0 | 524 | int32_t mLoopStart; |
michael@0 | 525 | int32_t mLoopEnd; |
michael@0 | 526 | int32_t mBufferSampleRate; |
michael@0 | 527 | int32_t mBufferPosition; |
michael@0 | 528 | uint32_t mChannels; |
michael@0 | 529 | float mDopplerShift; |
michael@0 | 530 | AudioNodeStream* mDestination; |
michael@0 | 531 | AudioNodeStream* mSource; |
michael@0 | 532 | AudioParamTimeline mPlaybackRateTimeline; |
michael@0 | 533 | bool mLoop; |
michael@0 | 534 | }; |
michael@0 | 535 | |
// Construct a source node with 2 output channels (Max count mode, Speakers
// interpretation), create its media-graph engine/stream pair, and register
// for main-thread stream notifications.
AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Max,
              ChannelInterpretation::Speakers)
  , mLoopStart(0.0)
  , mLoopEnd(0.0)
  // mOffset and mDuration are initialized in Start().
  , mPlaybackRate(new AudioParam(MOZ_THIS_IN_INITIALIZER_LIST(),
                                 SendPlaybackRateToStream, 1.0f))
  , mLoop(false)
  , mStartCalled(false)
  , mStopped(false)
{
  AudioBufferSourceNodeEngine* engine = new AudioBufferSourceNodeEngine(this, aContext->Destination());
  // The stream takes ownership of the engine; wire the engine back to its
  // own stream before listening for main-thread state changes.
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::SOURCE_STREAM);
  engine->SetSourceStream(static_cast<AudioNodeStream*>(mStream.get()));
  mStream->AddMainThreadListener(this);
}
michael@0 | 555 | |
michael@0 | 556 | AudioBufferSourceNode::~AudioBufferSourceNode() |
michael@0 | 557 | { |
michael@0 | 558 | if (Context()) { |
michael@0 | 559 | Context()->UnregisterAudioBufferSourceNode(this); |
michael@0 | 560 | } |
michael@0 | 561 | } |
michael@0 | 562 | |
michael@0 | 563 | size_t |
michael@0 | 564 | AudioBufferSourceNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
michael@0 | 565 | { |
michael@0 | 566 | size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 567 | if (mBuffer) { |
michael@0 | 568 | amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); |
michael@0 | 569 | } |
michael@0 | 570 | |
michael@0 | 571 | amount += mPlaybackRate->SizeOfIncludingThis(aMallocSizeOf); |
michael@0 | 572 | return amount; |
michael@0 | 573 | } |
michael@0 | 574 | |
michael@0 | 575 | size_t |
michael@0 | 576 | AudioBufferSourceNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const |
michael@0 | 577 | { |
michael@0 | 578 | return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 579 | } |
michael@0 | 580 | |
// Create the JS reflector for this node via the generated WebIDL binding.
JSObject*
AudioBufferSourceNode::WrapObject(JSContext* aCx)
{
  return AudioBufferSourceNodeBinding::Wrap(aCx, this);
}
michael@0 | 586 | |
michael@0 | 587 | void |
michael@0 | 588 | AudioBufferSourceNode::Start(double aWhen, double aOffset, |
michael@0 | 589 | const Optional<double>& aDuration, ErrorResult& aRv) |
michael@0 | 590 | { |
michael@0 | 591 | if (!WebAudioUtils::IsTimeValid(aWhen) || |
michael@0 | 592 | (aDuration.WasPassed() && !WebAudioUtils::IsTimeValid(aDuration.Value()))) { |
michael@0 | 593 | aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
michael@0 | 594 | return; |
michael@0 | 595 | } |
michael@0 | 596 | |
michael@0 | 597 | if (mStartCalled) { |
michael@0 | 598 | aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); |
michael@0 | 599 | return; |
michael@0 | 600 | } |
michael@0 | 601 | mStartCalled = true; |
michael@0 | 602 | |
michael@0 | 603 | AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get()); |
michael@0 | 604 | if (!ns) { |
michael@0 | 605 | // Nothing to play, or we're already dead for some reason |
michael@0 | 606 | return; |
michael@0 | 607 | } |
michael@0 | 608 | |
michael@0 | 609 | // Remember our arguments so that we can use them when we get a new buffer. |
michael@0 | 610 | mOffset = aOffset; |
michael@0 | 611 | mDuration = aDuration.WasPassed() ? aDuration.Value() |
michael@0 | 612 | : std::numeric_limits<double>::min(); |
michael@0 | 613 | // We can't send these parameters without a buffer because we don't know the |
michael@0 | 614 | // buffer's sample rate or length. |
michael@0 | 615 | if (mBuffer) { |
michael@0 | 616 | SendOffsetAndDurationParametersToStream(ns); |
michael@0 | 617 | } |
michael@0 | 618 | |
michael@0 | 619 | // Don't set parameter unnecessarily |
michael@0 | 620 | if (aWhen > 0.0) { |
michael@0 | 621 | ns->SetDoubleParameter(START, mContext->DOMTimeToStreamTime(aWhen)); |
michael@0 | 622 | } |
michael@0 | 623 | } |
michael@0 | 624 | |
michael@0 | 625 | void |
michael@0 | 626 | AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx) |
michael@0 | 627 | { |
michael@0 | 628 | AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get()); |
michael@0 | 629 | MOZ_ASSERT(ns, "Why don't we have a stream here?"); |
michael@0 | 630 | |
michael@0 | 631 | if (mBuffer) { |
michael@0 | 632 | float rate = mBuffer->SampleRate(); |
michael@0 | 633 | nsRefPtr<ThreadSharedFloatArrayBufferList> data = |
michael@0 | 634 | mBuffer->GetThreadSharedChannelsForRate(aCx); |
michael@0 | 635 | ns->SetBuffer(data.forget()); |
michael@0 | 636 | ns->SetInt32Parameter(SAMPLE_RATE, rate); |
michael@0 | 637 | |
michael@0 | 638 | if (mStartCalled) { |
michael@0 | 639 | SendOffsetAndDurationParametersToStream(ns); |
michael@0 | 640 | } |
michael@0 | 641 | } else { |
michael@0 | 642 | ns->SetBuffer(nullptr); |
michael@0 | 643 | |
michael@0 | 644 | MarkInactive(); |
michael@0 | 645 | } |
michael@0 | 646 | } |
michael@0 | 647 | |
michael@0 | 648 | void |
michael@0 | 649 | AudioBufferSourceNode::SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream) |
michael@0 | 650 | { |
michael@0 | 651 | NS_ASSERTION(mBuffer && mStartCalled, |
michael@0 | 652 | "Only call this when we have a buffer and start() has been called"); |
michael@0 | 653 | |
michael@0 | 654 | float rate = mBuffer->SampleRate(); |
michael@0 | 655 | int32_t bufferEnd = mBuffer->Length(); |
michael@0 | 656 | int32_t offsetSamples = std::max(0, NS_lround(mOffset * rate)); |
michael@0 | 657 | |
michael@0 | 658 | // Don't set parameter unnecessarily |
michael@0 | 659 | if (offsetSamples > 0) { |
michael@0 | 660 | aStream->SetInt32Parameter(BUFFERSTART, offsetSamples); |
michael@0 | 661 | } |
michael@0 | 662 | |
michael@0 | 663 | if (mDuration != std::numeric_limits<double>::min()) { |
michael@0 | 664 | bufferEnd = std::min(bufferEnd, |
michael@0 | 665 | offsetSamples + NS_lround(mDuration * rate)); |
michael@0 | 666 | } |
michael@0 | 667 | aStream->SetInt32Parameter(BUFFEREND, bufferEnd); |
michael@0 | 668 | |
michael@0 | 669 | MarkActive(); |
michael@0 | 670 | } |
michael@0 | 671 | |
michael@0 | 672 | void |
michael@0 | 673 | AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv) |
michael@0 | 674 | { |
michael@0 | 675 | if (!WebAudioUtils::IsTimeValid(aWhen)) { |
michael@0 | 676 | aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
michael@0 | 677 | return; |
michael@0 | 678 | } |
michael@0 | 679 | |
michael@0 | 680 | if (!mStartCalled) { |
michael@0 | 681 | aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); |
michael@0 | 682 | return; |
michael@0 | 683 | } |
michael@0 | 684 | |
michael@0 | 685 | AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get()); |
michael@0 | 686 | if (!ns || !Context()) { |
michael@0 | 687 | // We've already stopped and had our stream shut down |
michael@0 | 688 | return; |
michael@0 | 689 | } |
michael@0 | 690 | |
michael@0 | 691 | ns->SetStreamTimeParameter(STOP, Context(), std::max(0.0, aWhen)); |
michael@0 | 692 | } |
michael@0 | 693 | |
michael@0 | 694 | void |
michael@0 | 695 | AudioBufferSourceNode::NotifyMainThreadStateChanged() |
michael@0 | 696 | { |
michael@0 | 697 | if (mStream->IsFinished()) { |
michael@0 | 698 | class EndedEventDispatcher : public nsRunnable |
michael@0 | 699 | { |
michael@0 | 700 | public: |
michael@0 | 701 | explicit EndedEventDispatcher(AudioBufferSourceNode* aNode) |
michael@0 | 702 | : mNode(aNode) {} |
michael@0 | 703 | NS_IMETHODIMP Run() |
michael@0 | 704 | { |
michael@0 | 705 | // If it's not safe to run scripts right now, schedule this to run later |
michael@0 | 706 | if (!nsContentUtils::IsSafeToRunScript()) { |
michael@0 | 707 | nsContentUtils::AddScriptRunner(this); |
michael@0 | 708 | return NS_OK; |
michael@0 | 709 | } |
michael@0 | 710 | |
michael@0 | 711 | mNode->DispatchTrustedEvent(NS_LITERAL_STRING("ended")); |
michael@0 | 712 | return NS_OK; |
michael@0 | 713 | } |
michael@0 | 714 | private: |
michael@0 | 715 | nsRefPtr<AudioBufferSourceNode> mNode; |
michael@0 | 716 | }; |
michael@0 | 717 | if (!mStopped) { |
michael@0 | 718 | // Only dispatch the ended event once |
michael@0 | 719 | NS_DispatchToMainThread(new EndedEventDispatcher(this)); |
michael@0 | 720 | mStopped = true; |
michael@0 | 721 | } |
michael@0 | 722 | |
michael@0 | 723 | // Drop the playing reference |
michael@0 | 724 | // Warning: The below line might delete this. |
michael@0 | 725 | MarkInactive(); |
michael@0 | 726 | } |
michael@0 | 727 | } |
michael@0 | 728 | |
michael@0 | 729 | void |
michael@0 | 730 | AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode) |
michael@0 | 731 | { |
michael@0 | 732 | AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode); |
michael@0 | 733 | SendTimelineParameterToStream(This, PLAYBACKRATE, *This->mPlaybackRate); |
michael@0 | 734 | } |
michael@0 | 735 | |
michael@0 | 736 | void |
michael@0 | 737 | AudioBufferSourceNode::SendDopplerShiftToStream(double aDopplerShift) |
michael@0 | 738 | { |
michael@0 | 739 | SendDoubleParameterToStream(DOPPLERSHIFT, aDopplerShift); |
michael@0 | 740 | } |
michael@0 | 741 | |
michael@0 | 742 | void |
michael@0 | 743 | AudioBufferSourceNode::SendLoopParametersToStream() |
michael@0 | 744 | { |
michael@0 | 745 | // Don't compute and set the loop parameters unnecessarily |
michael@0 | 746 | if (mLoop && mBuffer) { |
michael@0 | 747 | float rate = mBuffer->SampleRate(); |
michael@0 | 748 | double length = (double(mBuffer->Length()) / mBuffer->SampleRate()); |
michael@0 | 749 | double actualLoopStart, actualLoopEnd; |
michael@0 | 750 | if (mLoopStart >= 0.0 && mLoopEnd > 0.0 && |
michael@0 | 751 | mLoopStart < mLoopEnd) { |
michael@0 | 752 | MOZ_ASSERT(mLoopStart != 0.0 || mLoopEnd != 0.0); |
michael@0 | 753 | actualLoopStart = (mLoopStart > length) ? 0.0 : mLoopStart; |
michael@0 | 754 | actualLoopEnd = std::min(mLoopEnd, length); |
michael@0 | 755 | } else { |
michael@0 | 756 | actualLoopStart = 0.0; |
michael@0 | 757 | actualLoopEnd = length; |
michael@0 | 758 | } |
michael@0 | 759 | int32_t loopStartTicks = NS_lround(actualLoopStart * rate); |
michael@0 | 760 | int32_t loopEndTicks = NS_lround(actualLoopEnd * rate); |
michael@0 | 761 | if (loopStartTicks < loopEndTicks) { |
michael@0 | 762 | SendInt32ParameterToStream(LOOPSTART, loopStartTicks); |
michael@0 | 763 | SendInt32ParameterToStream(LOOPEND, loopEndTicks); |
michael@0 | 764 | SendInt32ParameterToStream(LOOP, 1); |
michael@0 | 765 | } else { |
michael@0 | 766 | // Be explicit about looping not happening if the offsets make |
michael@0 | 767 | // looping impossible. |
michael@0 | 768 | SendInt32ParameterToStream(LOOP, 0); |
michael@0 | 769 | } |
michael@0 | 770 | } else if (!mLoop) { |
michael@0 | 771 | SendInt32ParameterToStream(LOOP, 0); |
michael@0 | 772 | } |
michael@0 | 773 | } |
michael@0 | 774 | |
michael@0 | 775 | } |
michael@0 | 776 | } |