/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "DelayBuffer.h"

#include "mozilla/PodOperations.h"
#include "AudioChannelFormat.h"
#include "AudioNodeEngine.h"

namespace mozilla {

size_t
DelayBuffer::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = 0;
  amount += mChunks.SizeOfExcludingThis(aMallocSizeOf);
  for (size_t i = 0; i < mChunks.Length(); i++) {
    amount += mChunks[i].SizeOfExcludingThis(aMallocSizeOf, false);
  }

  amount += mUpmixChannels.SizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

void
DelayBuffer::Write(const AudioChunk& aInputChunk)
{
  // We must have a reference to the buffer if there are channels
  MOZ_ASSERT(aInputChunk.IsNull() == !aInputChunk.mChannelData.Length());
#ifdef DEBUG
  MOZ_ASSERT(!mHaveWrittenBlock);
  mHaveWrittenBlock = true;
#endif

  if (!EnsureBuffer()) {
    return;
  }

  if (mCurrentChunk == mLastReadChunk) {
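    // mUpmixChannels may still point into the chunk we are about to
    // overwrite, so the cached pointers must not be reused.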
|
    mLastReadChunk = -1; // invalidate cache
  }
  mChunks[mCurrentChunk] = aInputChunk;
}

void
DelayBuffer::Read(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                  AudioChunk* aOutputChunk,
                  ChannelInterpretation aChannelInterpretation)
{
  int chunkCount = mChunks.Length();
  if (!chunkCount) {
    aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  // Find the maximum number of contributing channels to determine the output
  // channel count that retains all signal information. Buffered blocks will
  // be upmixed if necessary.
  //
  // First find the range of "delay" offsets backwards from the current
  // position. Note that these may be negative for frames that are after the
  // current position (including i).
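  // For example (illustrative, assuming WEBAUDIO_BLOCK_SIZE == 128): a ramp
  // from aPerFrameDelays[0] == 100 down to aPerFrameDelays[127] == 20 gives
  // maxDelay == 100 and minDelay == 20 - 127 == -107, so this block reads
  // from 100 ticks before to 107 ticks after the current position.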
|
  double minDelay = aPerFrameDelays[0];
  double maxDelay = minDelay;
  for (unsigned i = 1; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    minDelay = std::min(minDelay, aPerFrameDelays[i] - i);
    maxDelay = std::max(maxDelay, aPerFrameDelays[i] - i);
  }

  // Now find the chunks touched by this range and check their channel counts.
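  // The "+ 1" covers the extra, older frame that the linear interpolation in
  // ReadChannels() reads for non-integer delays.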
|
  int oldestChunk = ChunkForDelay(int(maxDelay) + 1);
  int youngestChunk = ChunkForDelay(minDelay);

  uint32_t channelCount = 0;
  for (int i = oldestChunk; true; i = (i + 1) % chunkCount) {
    channelCount = GetAudioChannelsSuperset(channelCount,
                                            mChunks[i].ChannelCount());
    if (i == youngestChunk) {
      break;
    }
  }

  if (channelCount) {
    AllocateAudioBlock(channelCount, aOutputChunk);
    ReadChannels(aPerFrameDelays, aOutputChunk,
                 0, channelCount, aChannelInterpretation);
  } else {
    aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Remember currentDelayFrames for the next ProcessBlock call
  mCurrentDelay = aPerFrameDelays[WEBAUDIO_BLOCK_SIZE - 1];
}

void
DelayBuffer::ReadChannel(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                         const AudioChunk* aOutputChunk, uint32_t aChannel,
                         ChannelInterpretation aChannelInterpretation)
{
  if (!mChunks.Length()) {
    float* outputChannel = static_cast<float*>
      (const_cast<void*>(aOutputChunk->mChannelData[aChannel]));
    PodZero(outputChannel, WEBAUDIO_BLOCK_SIZE);
    return;
  }

  ReadChannels(aPerFrameDelays, aOutputChunk,
               aChannel, 1, aChannelInterpretation);
}

void
DelayBuffer::ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                          const AudioChunk* aOutputChunk,
                          uint32_t aFirstChannel, uint32_t aNumChannelsToRead,
                          ChannelInterpretation aChannelInterpretation)
{
  uint32_t totalChannelCount = aOutputChunk->mChannelData.Length();
  uint32_t readChannelsEnd = aFirstChannel + aNumChannelsToRead;
  MOZ_ASSERT(readChannelsEnd <= totalChannelCount);

  if (mUpmixChannels.Length() != totalChannelCount) {
    mLastReadChunk = -1; // invalidate cache
  }

  float* const* outputChannels = reinterpret_cast<float* const*>
    (const_cast<void* const*>(aOutputChunk->mChannelData.Elements()));
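  // The interpolation loop below accumulates (+=) into the output, so the
  // requested channels must start from silence.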
|
  for (uint32_t channel = aFirstChannel;
       channel < readChannelsEnd; ++channel) {
    PodZero(outputChannels[channel], WEBAUDIO_BLOCK_SIZE);
  }

  for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    double currentDelay = aPerFrameDelays[i];
    MOZ_ASSERT(currentDelay >= 0.0);
    MOZ_ASSERT(currentDelay <= (mChunks.Length() - 1) * WEBAUDIO_BLOCK_SIZE);

    // Interpolate two input frames in case the read position does not match
    // an integer index.
    // Use the larger delay, for the older frame, first, as this is more
    // likely to use the cached upmixed channel arrays.
    int floorDelay = int(currentDelay);
    double interpolationFactor = currentDelay - floorDelay;
    int positions[2];
    positions[1] = PositionForDelay(floorDelay) + i;
    positions[0] = positions[1] - 1;
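    // The two ticks below implement linear interpolation:
    //   output[i] += mVolume * (interpolationFactor * older +
    //                           (1.0 - interpolationFactor) * newer)
    // where "older" is the frame at delay floorDelay + 1 (positions[0]) and
    // "newer" is the frame at delay floorDelay (positions[1]).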
|

    for (unsigned tick = 0; tick < ArrayLength(positions); ++tick) {
      int readChunk = ChunkForPosition(positions[tick]);
      // mVolume is not set on default initialized chunks so handle null
      // chunks specially.
      if (!mChunks[readChunk].IsNull()) {
        int readOffset = OffsetForPosition(positions[tick]);
        UpdateUpmixChannels(readChunk, totalChannelCount,
                            aChannelInterpretation);
        double multiplier = interpolationFactor * mChunks[readChunk].mVolume;
        for (uint32_t channel = aFirstChannel;
             channel < readChannelsEnd; ++channel) {
          outputChannels[channel][i] += multiplier *
            static_cast<const float*>(mUpmixChannels[channel])[readOffset];
        }
      }

      interpolationFactor = 1.0 - interpolationFactor;
    }
  }
}

void
DelayBuffer::Read(double aDelayTicks, AudioChunk* aOutputChunk,
                  ChannelInterpretation aChannelInterpretation)
{
  const bool firstTime = mCurrentDelay < 0.0;
  double currentDelay = firstTime ? aDelayTicks : mCurrentDelay;

  double computedDelay[WEBAUDIO_BLOCK_SIZE];

  for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    // If the value has changed, smoothly approach it
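    // (First-order exponential smoothing: each frame moves a fraction
    // mSmoothingRate of the remaining distance toward aDelayTicks.)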
|
    currentDelay += (aDelayTicks - currentDelay) * mSmoothingRate;
    computedDelay[i] = currentDelay;
  }

  Read(computedDelay, aOutputChunk, aChannelInterpretation);
}

bool
DelayBuffer::EnsureBuffer()
{
  if (mChunks.Length() == 0) {
    // The length of the buffer is at least one block greater than the maximum
    // delay so that writing an input block does not overwrite the block that
    // would subsequently be read at maximum delay. Also round up to the next
    // block size, so that no block of writes will need to wrap.
    const int chunkCount = (mMaxDelayTicks + 2 * WEBAUDIO_BLOCK_SIZE - 1) >>
      WEBAUDIO_BLOCK_SIZE_BITS;
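    // chunkCount is ceil((mMaxDelayTicks + WEBAUDIO_BLOCK_SIZE) /
    // WEBAUDIO_BLOCK_SIZE). Illustrative example, assuming
    // WEBAUDIO_BLOCK_SIZE == 128 (so WEBAUDIO_BLOCK_SIZE_BITS == 7):
    // mMaxDelayTicks == 300 gives (300 + 255) >> 7 == 4 chunks, i.e. 512
    // ticks of storage.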
|
    if (!mChunks.SetLength(chunkCount)) {
      return false;
    }

    mLastReadChunk = -1;
  }
  return true;
}

int
DelayBuffer::PositionForDelay(int aDelay) {
  // Adding mChunks.Length() keeps integers positive for defined and
  // appropriate bitshift, remainder, and bitwise operations.
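  // The result is a tick index into a virtual, unwrapped copy of the ring
  // buffer; ChunkForPosition() and OffsetForPosition() map it back into
  // mChunks.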
|
  return ((mCurrentChunk + mChunks.Length()) * WEBAUDIO_BLOCK_SIZE) - aDelay;
}

int
DelayBuffer::ChunkForPosition(int aPosition)
{
  MOZ_ASSERT(aPosition >= 0);
  return (aPosition >> WEBAUDIO_BLOCK_SIZE_BITS) % mChunks.Length();
}

int
DelayBuffer::OffsetForPosition(int aPosition)
{
  MOZ_ASSERT(aPosition >= 0);
  return aPosition & (WEBAUDIO_BLOCK_SIZE - 1);
}

int
DelayBuffer::ChunkForDelay(int aDelay)
{
  return ChunkForPosition(PositionForDelay(aDelay));
}

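// Point mUpmixChannels at the channel data of chunk aNewReadChunk, upmixed or
// padded with silence to aChannelCount channels. The result is cached per
// chunk via mLastReadChunk.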
|
void
DelayBuffer::UpdateUpmixChannels(int aNewReadChunk, uint32_t aChannelCount,
                                 ChannelInterpretation aChannelInterpretation)
{
  if (aNewReadChunk == mLastReadChunk) {
    MOZ_ASSERT(mUpmixChannels.Length() == aChannelCount);
    return;
  }

  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {};

  NS_WARN_IF_FALSE(mHaveWrittenBlock || aNewReadChunk != mCurrentChunk,
                   "Smoothing is making feedback delay too small.");

  mLastReadChunk = aNewReadChunk;
  // Missing assignment operator is bug 976927
  mUpmixChannels.ReplaceElementsAt(0, mUpmixChannels.Length(),
                                   mChunks[aNewReadChunk].mChannelData);
  MOZ_ASSERT(mUpmixChannels.Length() <= aChannelCount);
  if (mUpmixChannels.Length() < aChannelCount) {
    if (aChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix(&mUpmixChannels, aChannelCount, silenceChannel);
      MOZ_ASSERT(mUpmixChannels.Length() == aChannelCount,
                 "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill up the remaining channels with zeros
      for (uint32_t channel = mUpmixChannels.Length();
           channel < aChannelCount; ++channel) {
        mUpmixChannels.AppendElement(silenceChannel);
      }
    }
  }
}

} // mozilla