Fri, 16 Jan 2015 04:50:19 +0100
Replace accessor implementation with direct member state manipulation, as
requested in https://trac.torproject.org/projects/tor/ticket/9701#comment:32

/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZILLA_AUDIOSEGMENT_H_
#define MOZILLA_AUDIOSEGMENT_H_

#include "MediaSegment.h"
#include "AudioSampleFormat.h"
#include "SharedBuffer.h"
#include "WebAudioUtils.h"
#ifdef MOZILLA_INTERNAL_API
#include "mozilla/TimeStamp.h"
#endif

namespace mozilla {

template<typename T>
class SharedChannelArrayBuffer : public ThreadSharedObject {
public:
  SharedChannelArrayBuffer(nsTArray<nsTArray<T> >* aBuffers)
  {
    mBuffers.SwapElements(*aBuffers);
  }

  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    size_t amount = 0;
    amount += mBuffers.SizeOfExcludingThis(aMallocSizeOf);
    for (size_t i = 0; i < mBuffers.Length(); i++) {
      amount += mBuffers[i].SizeOfExcludingThis(aMallocSizeOf);
    }

    return amount;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  nsTArray<nsTArray<T> > mBuffers;
};

class AudioStream;
class AudioMixer;

/**
 * For auto-arrays etc, guess this as the common number of channels.
 */
const int GUESS_AUDIO_CHANNELS = 2;

// We ensure that the graph advances in steps that are multiples of the Web
// Audio block size.
const uint32_t WEBAUDIO_BLOCK_SIZE_BITS = 7;
const uint32_t WEBAUDIO_BLOCK_SIZE = 1 << WEBAUDIO_BLOCK_SIZE_BITS;
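
// Illustrative arithmetic (not part of the original header): with
// WEBAUDIO_BLOCK_SIZE_BITS == 7 a block is 1 << 7 == 128 frames, so a graph
// that needs to cover, say, 300 frames must advance by a whole number of
// blocks, i.e. by at least 3 blocks (384 frames).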

void InterleaveAndConvertBuffer(const void** aSourceChannels,
                                AudioSampleFormat aSourceFormat,
                                int32_t aLength, float aVolume,
                                int32_t aChannels,
                                AudioDataValue* aOutput);

/**
 * Given an array of input channels (aChannelData), downmix to aOutputChannels,
 * interleave the channel data. A total of aOutputChannels*aDuration
 * interleaved samples will be copied to a channel buffer in aOutput.
 */
void DownmixAndInterleave(const nsTArray<const void*>& aChannelData,
                          AudioSampleFormat aSourceFormat, int32_t aDuration,
                          float aVolume, uint32_t aOutputChannels,
                          AudioDataValue* aOutput);
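
// Illustrative sketch (not part of the original header; `left`, `right` and
// `out` are hypothetical buffers): downmixing a 256-frame stereo float source
// to one interleaved output channel at unit volume might look like
//
//   nsAutoTArray<const void*, 2> channels;
//   channels.AppendElement(left);   // 256 float samples
//   channels.AppendElement(right);  // 256 float samples
//   DownmixAndInterleave(channels, AUDIO_FORMAT_FLOAT32, 256, 1.0f, 1, out);
//
// where `out` must have room for aOutputChannels*aDuration == 1*256 samples.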

/**
 * An AudioChunk represents a multi-channel buffer of audio samples.
 * It references an underlying ThreadSharedObject which manages the lifetime
 * of the buffer. An AudioChunk maintains its own duration and channel data
 * pointers so it can represent a subinterval of a buffer without copying.
 * An AudioChunk can store its individual channels anywhere; it maintains
 * separate pointers to each channel's buffer.
 */
struct AudioChunk {
  typedef mozilla::AudioSampleFormat SampleFormat;

  // Generic methods
  void SliceTo(TrackTicks aStart, TrackTicks aEnd)
  {
    NS_ASSERTION(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
                 "Slice out of bounds");
    if (mBuffer) {
      MOZ_ASSERT(aStart < INT32_MAX, "Can't slice beyond 32-bit sample lengths");
      for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) {
        mChannelData[channel] = AddAudioSampleOffset(mChannelData[channel],
                                                     mBufferFormat, int32_t(aStart));
      }
    }
    mDuration = aEnd - aStart;
  }
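  // Illustrative note (not in the original header): calling SliceTo(128, 256)
  // on a 512-frame chunk keeps the same mBuffer, advances every mChannelData
  // pointer by 128 samples, and sets mDuration to 128; no audio data is
  // copied.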
  TrackTicks GetDuration() const { return mDuration; }
  bool CanCombineWithFollowing(const AudioChunk& aOther) const
  {
    if (aOther.mBuffer != mBuffer) {
      return false;
    }
    if (mBuffer) {
      NS_ASSERTION(aOther.mBufferFormat == mBufferFormat,
                   "Wrong metadata about buffer");
      NS_ASSERTION(aOther.mChannelData.Length() == mChannelData.Length(),
                   "Mismatched channel count");
      if (mDuration > INT32_MAX) {
        return false;
      }
      for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) {
        if (aOther.mChannelData[channel] != AddAudioSampleOffset(mChannelData[channel],
                                                                 mBufferFormat, int32_t(mDuration))) {
          return false;
        }
      }
    }
    return true;
  }
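  // Illustrative note (not in the original header): chunks are combinable only
  // when they reference the same mBuffer and aOther's channel pointers start
  // exactly mDuration samples after this chunk's, i.e. when the two chunks are
  // adjacent slices of one underlying buffer.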
  bool IsNull() const { return mBuffer == nullptr; }
  void SetNull(TrackTicks aDuration)
  {
    mBuffer = nullptr;
    mChannelData.Clear();
    mDuration = aDuration;
    mVolume = 1.0f;
    mBufferFormat = AUDIO_FORMAT_SILENCE;
  }
  int ChannelCount() const { return mChannelData.Length(); }

  size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const
  {
    return SizeOfExcludingThis(aMallocSizeOf, true);
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf, bool aUnshared) const
  {
    size_t amount = 0;

    // Possibly owned:
    // - mBuffer - Can hold data that is also in the decoded audio queue. It is
    //             counted if it is not shared, or if aUnshared is false.
    if (mBuffer && (!aUnshared || !mBuffer->IsShared())) {
      amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
    }

    // Memory in the array is owned by mBuffer.
    amount += mChannelData.SizeOfExcludingThis(aMallocSizeOf);
    return amount;
  }

  TrackTicks mDuration; // in frames within the buffer
  nsRefPtr<ThreadSharedObject> mBuffer; // the buffer object whose lifetime is managed; null means data is all zeroes
  nsTArray<const void*> mChannelData; // one pointer per channel; empty if and only if mBuffer is null
  float mVolume; // volume multiplier to apply (1.0f if mBuffer is nonnull)
  SampleFormat mBufferFormat; // format of frames in mBuffer (only meaningful if mBuffer is nonnull)
#ifdef MOZILLA_INTERNAL_API
  mozilla::TimeStamp mTimeStamp; // time at which this has been fetched from the MediaEngine
#endif
};
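
// Illustrative sketch (not part of the original header): a silent chunk is
// just a duration with no buffer, e.g.
//
//   AudioChunk silence;
//   silence.SetNull(WEBAUDIO_BLOCK_SIZE);  // 128 frames of zeroes, no buffer
//
// Data-carrying chunks are typically built through AudioSegment::AppendFrames()
// or AppendAndConsumeChunk() declared below rather than filled in by hand.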


/**
 * A list of audio samples consisting of a sequence of slices of SharedBuffers.
 * The audio rate is determined by the track, not stored in this class.
 */
class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
public:
  typedef mozilla::AudioSampleFormat SampleFormat;

  AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO) {}

  // Resample the whole segment in place.
  template<typename T>
  void Resample(SpeexResamplerState* aResampler, uint32_t aInRate, uint32_t aOutRate)
  {
    mDuration = 0;

    for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
      nsAutoTArray<nsTArray<T>, GUESS_AUDIO_CHANNELS> output;
      nsAutoTArray<const T*, GUESS_AUDIO_CHANNELS> bufferPtrs;
      AudioChunk& c = *ci;
      // If this chunk is null, don't bother resampling, just alter its duration
      if (c.IsNull()) {
        c.mDuration = (c.mDuration * aOutRate) / aInRate;
        mDuration += c.mDuration;
        continue;
      }
      uint32_t channels = c.mChannelData.Length();
      output.SetLength(channels);
      bufferPtrs.SetLength(channels);
      uint32_t inFrames = c.mDuration;
      // Round up to allocate; the last frame may not be used.
      NS_ASSERTION((UINT32_MAX - aInRate + 1) / c.mDuration >= aOutRate,
                   "Dropping samples");
      uint32_t outSize = (c.mDuration * aOutRate + aInRate - 1) / aInRate;
      for (uint32_t i = 0; i < channels; i++) {
        const T* in = static_cast<const T*>(c.mChannelData[i]);
        T* out = output[i].AppendElements(outSize);
        uint32_t outFrames = outSize;

        dom::WebAudioUtils::SpeexResamplerProcess(aResampler, i,
                                                  in, &inFrames,
                                                  out, &outFrames);
        MOZ_ASSERT(inFrames == c.mDuration);
        bufferPtrs[i] = out;
        output[i].SetLength(outFrames);
      }
      MOZ_ASSERT(channels > 0);
      c.mDuration = output[0].Length();
      c.mBuffer = new mozilla::SharedChannelArrayBuffer<T>(&output);
      for (uint32_t i = 0; i < channels; i++) {
        c.mChannelData[i] = bufferPtrs[i];
      }
      mDuration += c.mDuration;
    }
  }
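  // Illustrative note (not in the original header): the allocation above is a
  // rounded-up rate conversion. Resampling a 441-frame chunk from 44100 Hz to
  // 48000 Hz allocates outSize = (441*48000 + 44100 - 1) / 44100 = 480 frames
  // per channel, and the resampler reports how many frames it actually wrote.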

  void ResampleChunks(SpeexResamplerState* aResampler);

  void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                    const nsTArray<const float*>& aChannelData,
                    int32_t aDuration)
  {
    AudioChunk* chunk = AppendChunk(aDuration);
    chunk->mBuffer = aBuffer;
    for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
      chunk->mChannelData.AppendElement(aChannelData[channel]);
    }
    chunk->mVolume = 1.0f;
    chunk->mBufferFormat = AUDIO_FORMAT_FLOAT32;
#ifdef MOZILLA_INTERNAL_API
    chunk->mTimeStamp = TimeStamp::Now();
#endif
  }
  void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                    const nsTArray<const int16_t*>& aChannelData,
                    int32_t aDuration)
  {
    AudioChunk* chunk = AppendChunk(aDuration);
    chunk->mBuffer = aBuffer;
    for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
      chunk->mChannelData.AppendElement(aChannelData[channel]);
    }
    chunk->mVolume = 1.0f;
    chunk->mBufferFormat = AUDIO_FORMAT_S16;
#ifdef MOZILLA_INTERNAL_API
    chunk->mTimeStamp = TimeStamp::Now();
#endif
  }
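  // Illustrative sketch (not part of the original header; `channels`, `segment`
  // and `frameCount` are hypothetical): a producer holding per-channel float
  // arrays might hand them to a segment like this, transferring buffer
  // ownership in the process.
  //
  //   nsTArray<nsTArray<float> > channels;  // already filled with samples
  //   nsRefPtr<SharedChannelArrayBuffer<float> > buffer =
  //     new SharedChannelArrayBuffer<float>(&channels);  // swaps the data in
  //   nsAutoTArray<const float*, GUESS_AUDIO_CHANNELS> ptrs;
  //   for (uint32_t i = 0; i < buffer->mBuffers.Length(); ++i) {
  //     ptrs.AppendElement(buffer->mBuffers[i].Elements());
  //   }
  //   segment.AppendFrames(buffer.forget(), ptrs, frameCount);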
  // Consumes aChunk, and returns a pointer to the persistent copy of aChunk
  // in the segment.
  AudioChunk* AppendAndConsumeChunk(AudioChunk* aChunk)
  {
    AudioChunk* chunk = AppendChunk(aChunk->mDuration);
    chunk->mBuffer = aChunk->mBuffer.forget();
    chunk->mChannelData.SwapElements(aChunk->mChannelData);
    chunk->mVolume = aChunk->mVolume;
    chunk->mBufferFormat = aChunk->mBufferFormat;
#ifdef MOZILLA_INTERNAL_API
    chunk->mTimeStamp = TimeStamp::Now();
#endif
    return chunk;
  }
  void ApplyVolume(float aVolume);
  void WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer = nullptr);

  int ChannelCount() {
    NS_WARN_IF_FALSE(!mChunks.IsEmpty(),
                     "Cannot query channel count on an AudioSegment with no chunks.");
    // Find the first chunk that has non-zero channels. A chunk that has zero
    // channels is just silence and we can simply skip it.
    for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
      if (ci->ChannelCount()) {
        return ci->ChannelCount();
      }
    }
    return 0;
  }

  static Type StaticType() { return AUDIO; }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }
};

}

#endif /* MOZILLA_AUDIOSEGMENT_H_ */