var gSpeechRegistry = SpecialPowers.Cc["@mozilla.org/synth-voice-registry;1"]
  .getService(SpecialPowers.Ci.nsISynthVoiceRegistry);

var gAddedVoices = [];
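
// SpeechTaskCallback bundles the pause/resume/cancel callbacks handed to the
// mock speech services below. The QueryInterface/getInterfaces/
// getHelperForLanguage stubs are just enough for it to stand in for an
// nsISpeechTaskCallback when passed to task.setup().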
function SpeechTaskCallback(onpause, onresume, oncancel) {
  this.onpause = onpause;
  this.onresume = onresume;
  this.oncancel = oncancel;
}

SpeechTaskCallback.prototype = {
  QueryInterface: function(iid) {
    return this;
  },

  getInterfaces: function(c) {},

  getHelperForLanguage: function() {},

  onPause: function onPause() {
    if (this.onpause)
      this.onpause();
  },

  onResume: function onResume() {
    if (this.onresume)
      this.onresume();
  },

  onCancel: function onCancel() {
    if (this.oncancel)
      this.oncancel();
  }
};
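
// A mock direct-audio speech service (SERVICETYPE_DIRECT_AUDIO): it pushes
// roughly 0.025 seconds of silent 16 kHz mono audio per character of the
// utterance, then an empty buffer to signal the end of the stream.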
var TestSpeechServiceWithAudio = SpecialPowers.wrapCallbackObject({
  CHANNELS: 1,
  SAMPLE_RATE: 16000,

  serviceType: SpecialPowers.Ci.nsISpeechService.SERVICETYPE_DIRECT_AUDIO,

  speak: function speak(aText, aUri, aRate, aPitch, aTask) {
    var task = SpecialPowers.wrap(aTask);

    window.setTimeout(
      function () {
        task.setup(SpecialPowers.wrapCallbackObject(new SpeechTaskCallback()),
                   this.CHANNELS, this.SAMPLE_RATE);
        // 0.025 seconds per character.
        task.sendAudio(new Int16Array((this.SAMPLE_RATE / 40) * aText.length), []);
        task.sendAudio(new Int16Array(0), []);
      }.bind(this), 0);
  },

  QueryInterface: function(iid) {
    return this;
  },

  getInterfaces: function(c) {},

  getHelperForLanguage: function() {}
});
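
// A mock indirect-audio speech service (SERVICETYPE_INDIRECT_AUDIO): instead
// of sending audio it checks each speak() request against the queue of
// expected [utterance, expected args] pairs and dispatches start/end events
// asynchronously.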
var TestSpeechServiceNoAudio = SpecialPowers.wrapCallbackObject({
  serviceType: SpecialPowers.Ci.nsISpeechService.SERVICETYPE_INDIRECT_AUDIO,

  speak: function speak(aText, aUri, aRate, aPitch, aTask) {
    var pair = this.expectedSpeaks.shift();
    if (pair) {
      // XXX: These tests do not happen in OOP
      var utterance = pair[0];
      var expected = pair[1];

      is(aText, utterance.text, "Speak text matches utterance text");

      var args = {uri: aUri, rate: aRate, pitch: aPitch};

      for (var attr in args) {
        if (expected[attr] != undefined)
          is(args[attr], expected[attr], "expected service arg " + attr);
      }
    }

    var task = SpecialPowers.wrap(aTask);
    task.setup(SpecialPowers.wrapCallbackObject(new SpeechTaskCallback()));
    setTimeout(function () {
      task.dispatchStart();
      setTimeout(function () {
        task.dispatchEnd(aText.length / 2.0, aText.length);
      }, 0);
    }, 0);
  },

  QueryInterface: function(iid) {
    return this;
  },

  getInterfaces: function(c) {},

  getHelperForLanguage: function() {},

  expectedSpeaks: []
});
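
// Registers a test voice with the voice registry. In the parent process this
// talks to the registry directly and verifies that the voice shows up in
// speechSynthesis.getVoices(); in a child process it forwards the request to
// the parent over a sync message.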
function synthAddVoice(aServiceName, aName, aLang, aIsLocal) {
  if (SpecialPowers.isMainProcess()) {
    var voicesBefore = speechSynthesis.getVoices().length;
    var uri = "urn:moz-tts:mylittleservice:" + encodeURI(aName + '?' + aLang);
    gSpeechRegistry.addVoice(window[aServiceName], uri, aName, aLang, aIsLocal);

    gAddedVoices.push([window[aServiceName], uri]);
    var voicesAfter = speechSynthesis.getVoices().length;

    is(voicesBefore + 1, voicesAfter, "Voice added");
    var voice = speechSynthesis.getVoices()[voicesAfter - 1];
    is(voice.voiceURI, uri, "voice URI matches");
    is(voice.name, aName, "voice name matches");
    is(voice.lang, aLang, "voice lang matches");
    is(voice.localService, aIsLocal, "voice localService matches");

    return uri;
  } else {
    // XXX: It would be nice to check here that the child gets the voice
    // added update, but alas, it is asynchronous.
    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
      .getService(SpecialPowers.Ci.nsISyncMessageSender);

    return mm.sendSyncMessage(
      'test:SpeechSynthesis:ipcSynthAddVoice',
      [aServiceName, aName, aLang, aIsLocal])[0];
  }
}
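
// Sets (or unsets) the voice with the given URI as the default and, in the
// parent process, checks that the matching voice is flagged as default in
// speechSynthesis.getVoices().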
function synthSetDefault(aUri, aIsDefault) {
  if (SpecialPowers.isMainProcess()) {
    gSpeechRegistry.setDefaultVoice(aUri, aIsDefault);
    var voices = speechSynthesis.getVoices();
    for (var i in voices) {
      if (voices[i].voiceURI == aUri)
        ok(voices[i]['default'], "Voice set to default");
    }
  } else {
    // XXX: It would be nice to check here that the child gets the
    // default-voice update, but alas, it is asynchronous.
    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
      .getService(SpecialPowers.Ci.nsISyncMessageSender);

    return mm.sendSyncMessage(
      'test:SpeechSynthesis:ipcSynthSetDefault', [aUri, aIsDefault])[0];
  }
}
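
// Removes every voice added through synthAddVoice and checks that the voice
// count drops accordingly. Child processes forward the request to the parent.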
function synthCleanup() {
  if (SpecialPowers.isMainProcess()) {
    var voicesBefore = speechSynthesis.getVoices().length;
    var toRemove = gAddedVoices.length;
    var removeArgs;
    while ((removeArgs = gAddedVoices.shift()))
      gSpeechRegistry.removeVoice.apply(gSpeechRegistry, removeArgs);

    var voicesAfter = speechSynthesis.getVoices().length;
    is(voicesAfter, voicesBefore - toRemove, "Successfully removed test voices");
  } else {
    // XXX: It would be nice to check here that the child gets the voice
    // removed update, but alas, it is asynchronous.
    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
      .getService(SpecialPowers.Ci.nsISyncMessageSender);
    mm.sendSyncMessage('test:SpeechSynthesis:ipcSynthCleanup');
  }
}
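
// Queues one utterance per entry in aTestArgs (each entry is a pair of
// [utterance properties, expected speak() arguments]), verifies that end
// events arrive in order and that the speechSynthesis queue drains, flags any
// error event as a test failure, and calls aEndFunc once the last utterance
// finishes.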
function synthTestQueue(aTestArgs, aEndFunc) {
  var utterances = [];
  for (var i in aTestArgs) {
    var uargs = aTestArgs[i][0];
    var u = new SpeechSynthesisUtterance(uargs.text);

    delete uargs.text;

    for (var attr in uargs)
      u[attr] = uargs[attr];

    function onend_handler(e) {
      is(e.target, utterances.shift(), "Target matches utterances");
      ok(!speechSynthesis.speaking, "speechSynthesis is not speaking.");

      isnot(e.type, 'error', "Error in utterance");

      if (utterances.length) {
        ok(speechSynthesis.pending, "other utterances queued");
      } else {
        ok(!speechSynthesis.pending, "queue is empty, nothing pending.");
        if (aEndFunc)
          aEndFunc();
      }
    }

    u.addEventListener('end', onend_handler);
    u.addEventListener('error', onend_handler);

    u.addEventListener(
      'error', function onerror_handler(e) {
        ok(false, "Error in speech utterance '" + e.target.text + "'");
      });

    utterances.push(u);
    TestSpeechServiceNoAudio.expectedSpeaks.push([u, aTestArgs[i][1]]);
    speechSynthesis.speak(u);
  }

  ok(!speechSynthesis.speaking, "speechSynthesis is not speaking yet.");
  ok(speechSynthesis.pending, "speechSynthesis has an utterance queued.");
}
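
// Example usage (an illustrative sketch; the voice name, language and test
// arguments below are made up, and real tests live in the accompanying
// mochitest HTML files):
//
//   var uri = synthAddVoice('TestSpeechServiceNoAudio', 'Test Voice', 'en-GB', true);
//   synthSetDefault(uri, true);
//   synthTestQueue(
//     [[{text: "Hello, world."}, {rate: 1, pitch: 1}]],
//     function () {
//       synthCleanup();
//       SimpleTest.finish();
//     });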