|
1 <!DOCTYPE HTML> |
|
2 <html> |
|
3 <head> |
|
4 <title>Test the decodeAudioData API and Resampling</title> |
|
5 <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> |
|
6 <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> |
|
7 </head> |
|
8 <body> |
|
9 <pre id="test"> |
|
10 <script src="webaudio.js" type="text/javascript"></script> |
|
11 <script type="text/javascript"> |
|
12 |
|
13 // These routines have been copied verbatim from WebKit, and are used in order |
|
14 // to convert a memory buffer into a wave buffer. |
|
// Copy the character codes of |s| into byte array |a| starting at |offset|.
function writeString(s, a, offset) {
  var i = s.length;
  while (i--) {
    a[offset + i] = s.charCodeAt(i);
  }
}
|
20 |
|
// Store |n| (floored) as a little-endian 16-bit integer in a[offset..offset+1].
function writeInt16(n, a, offset) {
  var value = Math.floor(n);
  a[offset] = value & 0xff;
  a[offset + 1] = (value >> 8) & 0xff;
}
|
30 |
|
// Store |n| (floored) as a little-endian 32-bit integer in a[offset..offset+3].
function writeInt32(n, a, offset) {
  var value = Math.floor(n);
  for (var i = 0; i < 4; ++i) {
    a[offset + i] = (value >> (8 * i)) & 0xff;
  }
}
|
43 |
|
// Interleave the AudioBuffer's float samples into byte array |a| as 16-bit
// little-endian PCM, starting at byte |offset|.
function writeAudioBuffer(audioBuffer, a, offset) {
  var n = audioBuffer.length;
  var channels = audioBuffer.numberOfChannels;

  // Fetch each channel's data array once, instead of once per sample as the
  // previous version did (n * channels getChannelData() calls).
  var channelData = [];
  for (var k = 0; k < channels; ++k) {
    channelData[k] = audioBuffer.getChannelData(k);
  }

  for (var i = 0; i < n; ++i) {
    for (var k = 0; k < channels; ++k) {
      var sample = channelData[k][i] * 32768.0;

      // Clip samples to the limitations of 16-bit.
      // If we don't do this then we'll get nasty wrap-around distortion.
      if (sample < -32768)
        sample = -32768;
      if (sample > 32767)
        sample = 32767;

      writeInt16(sample, a, offset);
      offset += 2;
    }
  }
}
|
65 |
|
// Serialize an AudioBuffer into a complete 16-bit PCM RIFF/WAVE file: the
// 44-byte canonical header followed by interleaved little-endian samples.
function createWaveFileData(audioBuffer) {
  var frameLength = audioBuffer.length;
  var numberOfChannels = audioBuffer.numberOfChannels;
  var sampleRate = audioBuffer.sampleRate;
  var bitsPerSample = 16;
  var byteRate = sampleRate * numberOfChannels * bitsPerSample/8;
  var blockAlign = numberOfChannels * bitsPerSample/8;
  var wavDataByteLength = frameLength * numberOfChannels * 2; // 16-bit audio
  var headerByteLength = 44;
  var totalLength = headerByteLength + wavDataByteLength;

  var waveFileData = new Uint8Array(totalLength);

  var subChunk1Size = 16; // for linear PCM
  var subChunk2Size = wavDataByteLength;
  var chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size);

  writeString("RIFF", waveFileData, 0);
  writeInt32(chunkSize, waveFileData, 4);
  writeString("WAVE", waveFileData, 8);
  writeString("fmt ", waveFileData, 12);

  writeInt32(subChunk1Size, waveFileData, 16);     // SubChunk1Size (4)
  writeInt16(1, waveFileData, 20);                 // AudioFormat (2)
  writeInt16(numberOfChannels, waveFileData, 22);  // NumChannels (2)
  writeInt32(sampleRate, waveFileData, 24);        // SampleRate (4)
  writeInt32(byteRate, waveFileData, 28);          // ByteRate (4)
  writeInt16(blockAlign, waveFileData, 32);        // BlockAlign (2)
  // BitsPerSample is a 2-byte field. The previous code wrote it with
  // writeInt32, which only produced a correct header because the "data"
  // chunk id written next happened to overwrite the two spilled bytes.
  writeInt16(bitsPerSample, waveFileData, 34);     // BitsPerSample (2)

  writeString("data", waveFileData, 36);
  writeInt32(subChunk2Size, waveFileData, 40);     // SubChunk2Size (4)

  // Write actual audio data starting at offset 44.
  writeAudioBuffer(audioBuffer, waveFileData, 44);

  return waveFileData;
}
|
104 |
|
105 </script> |
|
106 <script class="testbody" type="text/javascript"> |
|
107 |
|
// Keep the test harness running until loadNextTest() drains the |files|
// queue and calls SimpleTest.finish().
SimpleTest.waitForExplicitFinish();
|
109 |
|
110 // fuzzTolerance and fuzzToleranceMobile are used to determine fuzziness |
|
// thresholds. They're needed to make sure that we can deal with negligible
|
112 // differences in the binary buffer caused as a result of resampling the |
|
113 // audio. fuzzToleranceMobile is typically larger on mobile platforms since |
|
114 // we do fixed-point resampling as opposed to floating-point resampling on |
|
115 // those platforms. |
|
// Table of decode test cases. Fields:
//   url          - resource to fetch and decode.
//   valid        - whether decodeAudioData is expected to succeed.
//   expectedUrl  - reference wave file the decoded data is compared against
//                  (only present for valid cases).
//   numberOfChannels, frames, sampleRate, duration
//                - expected properties of the decoded AudioBuffer.
//   fuzzTolerance / fuzzToleranceMobile
//                - maximum number of bytes allowed to differ from the
//                  reference wave data (see comment above).
var files = [
  // An ogg file, 44.1khz, mono
  {
    url: "ting-44.1k-1ch.ogg",
    valid: true,
    expectedUrl: "ting-44.1k-1ch.wav",
    numberOfChannels: 1,
    frames: 30592,
    sampleRate: 44100,
    duration: 0.693,
    fuzzTolerance: 5,
    fuzzToleranceMobile: 1284
  },
  // An ogg file, 44.1khz, stereo
  {
    url: "ting-44.1k-2ch.ogg",
    valid: true,
    expectedUrl: "ting-44.1k-2ch.wav",
    numberOfChannels: 2,
    frames: 30592,
    sampleRate: 44100,
    duration: 0.693,
    fuzzTolerance: 6,
    fuzzToleranceMobile: 2544
  },
  // An ogg file, 48khz, mono
  {
    url: "ting-48k-1ch.ogg",
    valid: true,
    expectedUrl: "ting-48k-1ch.wav",
    numberOfChannels: 1,
    frames: 33297,
    sampleRate: 48000,
    duration: 0.693,
    fuzzTolerance: 5,
    fuzzToleranceMobile: 1388
  },
  // An ogg file, 48khz, stereo
  {
    url: "ting-48k-2ch.ogg",
    valid: true,
    expectedUrl: "ting-48k-2ch.wav",
    numberOfChannels: 2,
    frames: 33297,
    sampleRate: 48000,
    duration: 0.693,
    fuzzTolerance: 14,
    fuzzToleranceMobile: 2752
  },
  // Make sure decoding a wave file results in the same buffer (for both the
  // resampling and non-resampling cases)
  {
    url: "ting-44.1k-1ch.wav",
    valid: true,
    expectedUrl: "ting-44.1k-1ch.wav",
    numberOfChannels: 1,
    frames: 30592,
    sampleRate: 44100,
    duration: 0.693,
    fuzzTolerance: 0,
    fuzzToleranceMobile: 0
  },
  {
    url: "ting-48k-1ch.wav",
    valid: true,
    expectedUrl: "ting-48k-1ch.wav",
    numberOfChannels: 1,
    frames: 33297,
    sampleRate: 48000,
    duration: 0.693,
    fuzzTolerance: 0,
    fuzzToleranceMobile: 0
  },
  // // A wave file
  // //{ url: "24bit-44khz.wav", valid: true, expectedUrl: "24bit-44khz-expected.wav" },
  // A non-audio file
  { url: "invalid.txt", valid: false, sampleRate: 44100 },
  // A webm file with no audio
  { url: "noaudio.webm", valid: false, sampleRate: 48000 },
  // A video ogg file with audio
  {
    url: "audio.ogv",
    valid: true,
    expectedUrl: "audio-expected.wav",
    numberOfChannels: 2,
    sampleRate: 44100,
    frames: 47680,
    duration: 1.0807,
    fuzzTolerance: 106,
    fuzzToleranceMobile: 3482
  }
];
|
208 |
|
209 // Returns true if the memory buffers are less different that |fuzz| bytes |
|
// Returns true if the memory buffers differ in at most |fuzz| bytes.
// Also reports a length mismatch and an excessive difference through the
// SimpleTest is()/ok() assertions.
function fuzzyMemcmp(buf1, buf2, fuzz) {
  var difference = 0;
  is(buf1.length, buf2.length, "same length");
  for (var i = 0; i < buf1.length; ++i) {
    // The previous version tested Math.abs(buf1[i] - buf2[i]) for
    // truthiness; a plain inequality states the intent directly.
    if (buf1[i] != buf2[i]) {
      ++difference;
    }
  }
  if (difference > fuzz) {
    ok(false, "Expected at most " + fuzz + " bytes difference, found " + difference + " bytes");
  }
  return difference <= fuzz;
}
|
224 |
|
// Pick the byte-difference tolerance for |test|: mobile platforms use
// fixed-point resampling and therefore get the larger tolerance.
function getFuzzTolerance(test) {
  var ua = navigator.userAgent;
  var onMobile = ua.includes("Mobile") ||   // b2g
                 ua.includes("Android");    // android
  return onMobile ? test.fuzzToleranceMobile : test.fuzzTolerance;
}
|
231 |
|
// Returns true if every sample is exactly zero.
// NOTE: only channel 0 is inspected, which is sufficient for this test's
// mono/stereo "did we render anything at all" check.
function bufferIsSilent(buffer) {
  // Fetch the channel data once instead of on every loop iteration.
  var data = buffer.getChannelData(0);
  for (var i = 0; i < buffer.length; ++i) {
    if (data[i] != 0) {
      return false;
    }
  }
  return true;
}
|
240 |
|
// Verify that a decoded AudioBuffer matches the expectations recorded in
// |test|, ending with a fuzzy byte-wise comparison of its wave-file
// serialization against the reference wave data.
function checkAudioBuffer(buffer, test) {
  if (buffer.numberOfChannels != test.numberOfChannels) {
    // Report the mismatch and bail: the wave comparison below would be
    // meaningless with a different channel count.
    is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels");
    return;
  }
  var durationDelta = Math.abs(buffer.duration - test.duration);
  ok(durationDelta < 1e-3, "Correct duration");
  if (durationDelta >= 1e-3) {
    ok(false, "got: " + buffer.duration + ", expected: " + test.duration);
  }
  is(buffer.sampleRate, test.sampleRate, "Correct sample rate");
  is(buffer.length, test.frames, "Correct length");

  var waveData = createWaveFileData(buffer);
  var tolerance = getFuzzTolerance(test);
  ok(fuzzyMemcmp(waveData, test.expectedWaveData, tolerance), "Received expected decoded data");
}
|
256 |
|
// Verify a buffer decoded at a different context rate: check its metadata,
// then render it back at the original rate in the original (native) context
// and compare against the unresampled reference decode
// (test.expectedBuffer). Calls |callback| when the comparison completes.
function checkResampledBuffer(buffer, test, callback) {
  if (buffer.numberOfChannels != test.numberOfChannels) {
    is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels");
    return;
  }
  ok(Math.abs(buffer.duration - test.duration) < 1e-3, "Correct duration");
  if (Math.abs(buffer.duration - test.duration) >= 1e-3) {
    ok(false, "got: " + buffer.duration + ", expected: " + test.duration);
  }
  // Take into account the resampling when checking the size
  var expectedLength = test.frames * buffer.sampleRate / test.sampleRate;
  ok(Math.abs(buffer.length - expectedLength) < 1.0, "Correct length", "got " + buffer.length + ", expected about " + expectedLength);

  // Playback the buffer in the original context, to resample back to the
  // original rate and compare with the decoded buffer without resampling.
  // NOTE: |cx| was previously assigned without |var|, leaking an implicit
  // global; keep it local.
  var cx = test.nativeContext;
  var expected = cx.createBufferSource();
  expected.buffer = test.expectedBuffer;
  expected.start();
  // Mix the negated reference signal with the resampled one, so the
  // destination receives their difference.
  var inverse = cx.createGain();
  inverse.gain.value = -1;
  expected.connect(inverse);
  inverse.connect(cx.destination);
  var resampled = cx.createBufferSource();
  resampled.buffer = buffer;
  resampled.start();
  // This stop should do nothing, but it tests for bug 937475
  resampled.stop(test.frames / cx.sampleRate);
  resampled.connect(cx.destination);
  cx.oncomplete = function(e) {
    ok(!bufferIsSilent(e.renderedBuffer), "Expect buffer not silent");
    // Resampling will lose the highest frequency components, so we should
    // pass the difference through a low pass filter. However, either the
    // input files don't have significant high frequency components or the
    // tolerance in compareBuffers() is too high to detect them.
    compareBuffers(e.renderedBuffer,
                   cx.createBuffer(test.numberOfChannels,
                                   test.frames, test.sampleRate));
    callback();
  };
  cx.startRendering();
}
|
299 |
|
// Decode |response| at the "other" standard rate (44.1kHz <-> 48kHz) so
// that decodeAudioData has to resample, then validate the result.
function runResampling(test, response, callback) {
  var targetRate = test.sampleRate == 44100 ? 48000 : 44100;
  var cx = new OfflineAudioContext(1, 1, targetRate);
  cx.decodeAudioData(
    response,
    function onSuccess(asyncResult) {
      is(asyncResult.sampleRate, targetRate, "Correct sample rate");

      checkResampledBuffer(asyncResult, test, callback);
    },
    function onFailure() {
      ok(false, "Expected successful decode with resample");
      callback();
    });
}
|
312 |
|
// Decode |response| without resampling, validate it, then chain into the
// resampling variant of the test via runResampling().
function runTest(test, response, callback) {
  // decodeAudioData neuters (detaches) the incoming ArrayBuffer, so keep a
  // copy around for the resampling pass.
  var compressedAudio = response.slice(0);
  var expectCallback = false;
  var cx = new OfflineAudioContext(
      test.numberOfChannels || 1, test.frames || 1, test.sampleRate);
  cx.decodeAudioData(
    response,
    function onSuccess(asyncResult) {
      ok(expectCallback, "Success callback should fire asynchronously");
      ok(test.valid, "Did expect success for test " + test.url);

      checkAudioBuffer(asyncResult, test);

      // Stash the unresampled decode so checkResampledBuffer() can compare
      // against it later.
      test.expectedBuffer = asyncResult;
      test.nativeContext = cx;
      runResampling(test, compressedAudio, callback);
    },
    function onFailure() {
      ok(expectCallback, "Failure callback should fire asynchronously");
      ok(!test.valid, "Did expect failure for test " + test.url);
      callback();
    });
  // Set only after decodeAudioData() returns, so the callbacks above can
  // verify that they ran asynchronously.
  expectCallback = true;
}
|
336 |
|
// Fetch the encoded test file and its reference wave data, then run the
// decode test. |callback| fires once the whole test (including the
// resampling pass) completes.
function loadTest(test, callback) {
  var req = new XMLHttpRequest();
  req.open("GET", test.url, true);
  req.responseType = "arraybuffer";
  req.onload = function() {
    var expectedReq = new XMLHttpRequest();
    expectedReq.open("GET", test.expectedUrl, true);
    expectedReq.responseType = "arraybuffer";
    expectedReq.onload = function() {
      test.expectedWaveData = new Uint8Array(expectedReq.response);
      runTest(test, req.response, callback);
    };
    expectedReq.send();
  };
  req.send();
}
|
353 |
|
// Run the next queued test case, or finish the harness when the |files|
// queue is empty. Each test chains back into this function.
function loadNextTest() {
  if (!files.length) {
    SimpleTest.finish();
    return;
  }
  loadTest(files.shift(), loadNextTest);
}
|
361 |
|
362 // Run some simple tests first |
|
// Shared success/failure callback for the argument-validation checks below:
// decodeAudioData must reject bad arguments synchronously (TypeError), so
// neither callback should ever be invoked.
function callbackShouldNeverRun() {
  ok(false, "callback should not fire");
}
|
// decodeAudioData must throw a TypeError synchronously for every argument
// that is not an ArrayBuffer (note: a Uint8Array view is not acceptable).
(function() {
  var cx = new AudioContext();
  var badInputs = [null, undefined, 123, "buffer", new Uint8Array(100)];
  badInputs.forEach(function(input) {
    expectTypeError(function() {
      cx.decodeAudioData(input, callbackShouldNeverRun, callbackShouldNeverRun);
    });
  });
})();
|
384 |
|
// Now, let's get real! Kick off the first data-driven test; each test
// chains to the next via loadNextTest() until |files| is exhausted.
loadNextTest();
|
387 |
|
388 </script> |
|
389 </pre> |
|
390 </body> |
|
391 </html> |