|
"use strict";

// Audio fixture used when a test does not specify its own sample.
const DEFAULT_AUDIO_SAMPLE_FILE = "hello.ogg";

// Observer-service topics understood by the SpeechRecognition test backend.
const SPEECH_RECOGNITION_TEST_REQUEST_EVENT_TOPIC = "SpeechRecognitionTest:RequestEvent";
const SPEECH_RECOGNITION_TEST_END_TOPIC = "SpeechRecognitionTest:End";

// SpeechRecognitionError error codes, as defined by the Web Speech API.
var errorCodes = {
  NO_SPEECH: "no-speech",
  ABORTED: "aborted",
  AUDIO_CAPTURE: "audio-capture",
  NETWORK: "network",
  NOT_ALLOWED: "not-allowed",
  SERVICE_NOT_ALLOWED: "service-not-allowed",
  BAD_GRAMMAR: "bad-grammar",
  LANGUAGE_NOT_SUPPORTED: "language-not-supported"
};
|
17 |
|
// Import Services.jsm through SpecialPowers so this (unprivileged) mochitest
// can reach privileged APIs; Services.obs is used below to notify the
// SpeechRecognition test observers.
var Services = SpecialPowers.Cu.import("resource://gre/modules/Services.jsm").Services;
|
19 |
|
/**
 * Tracks expected and unexpected events on a SpeechRecognition instance.
 *
 * Every event starts with a default handler that fails the test if it fires;
 * call expect() to replace a handler with one that records the event instead.
 * When all expected events have arrived, self.doneFunc (set by the caller)
 * is invoked once.
 *
 * @param {SpeechRecognition} sr - the recognition object under test.
 */
function EventManager(sr) {
  var self = this;
  var nEventsExpected = 0;
  self.eventsReceived = [];

  // Every event a SpeechRecognition object can fire.
  var allEvents = [
    "audiostart",
    "soundstart",
    "speechstart",
    "speechend",
    "soundend",
    "audioend",
    "result",
    "nomatch",
    "error",
    "start",
    "end"
  ];

  // "end" events that must be preceded by their matching "start" event.
  // NOTE: the "soundend" key was previously misspelled "soundent", so the
  // soundend-after-soundstart ordering check silently never ran.
  var eventDependencies = {
    "speechend": "speechstart",
    "soundend": "soundstart",
    "audioend": "audiostart"
  };

  // Guards doneFunc so it only ever runs once.
  var isDone = false;

  // AUDIO_DATA events are asynchronous,
  // so we queue events requested while they are being
  // issued to make them seem synchronous
  var isSendingAudioData = false;
  var queuedEventRequests = [];

  // Register default handlers: any event that has not been expect()ed
  // fails the test and terminates it.
  for (var i = 0; i < allEvents.length; i++) {
    (function (eventName) {
      sr["on" + eventName] = function (evt) {
        var message = "unexpected event: " + eventName;
        if (eventName == "error") {
          message += " -- " + evt.message;
        }

        ok(false, message);
        if (self.doneFunc && !isDone) {
          isDone = true;
          self.doneFunc();
        }
      };
    })(allEvents[i]);
  }

  /**
   * Declare that eventName is expected exactly once; cb (optional) is
   * invoked with (event, sr) when it arrives. Ordering against the
   * corresponding "start" event is asserted via eventDependencies.
   */
  self.expect = function EventManager_expect(eventName, cb) {
    nEventsExpected++;

    sr["on" + eventName] = function(evt) {
      self.eventsReceived.push(eventName);
      ok(true, "received event " + eventName);

      var dep = eventDependencies[eventName];
      if (dep) {
        ok(self.eventsReceived.indexOf(dep) >= 0,
           eventName + " must come after " + dep);
      }

      cb && cb(evt, sr);
      if (self.doneFunc && !isDone &&
          nEventsExpected === self.eventsReceived.length) {
        isDone = true;
        self.doneFunc();
      }
    };
  };

  /**
   * Ask the test backend (via the observer service) to drive the
   * recognition state machine with eventName. EVENT_AUDIO_DATA plays the
   * configured sample file and defers any events requested meanwhile
   * until playback ends.
   */
  self.requestFSMEvent = function EventManager_requestFSMEvent(eventName) {
    if (isSendingAudioData) {
      info("Queuing event " + eventName + " until we're done sending audio data");
      queuedEventRequests.push(eventName);
      return;
    }

    var subject = null;

    if (eventName === "EVENT_AUDIO_DATA") {
      isSendingAudioData = true;
      var audioTag = document.createElement("audio");
      audioTag.src = self.audioSampleFile;

      subject = audioTag.mozCaptureStreamUntilEnded();
      audioTag.addEventListener("ended", function() {
        info("Sample stream ended, requesting queued events");
        isSendingAudioData = false;
        while (queuedEventRequests.length) {
          self.requestFSMEvent(queuedEventRequests.shift());
        }
      });

      audioTag.play();
    }

    info("requesting " + eventName);
    Services.obs.notifyObservers(subject,
                                 SPEECH_RECOGNITION_TEST_REQUEST_EVENT_TOPIC,
                                 eventName);
  };

  /** Tell the test backend that this test run is finished. */
  self.requestTestEnd = function EventManager_requestTestEnd() {
    Services.obs.notifyObservers(null, SPEECH_RECOGNITION_TEST_END_TOPIC, null);
  };
}
|
129 |
|
/**
 * Build an onresult callback asserting that the top alternative of the
 * first result matches the given transcript.
 */
function buildResultCallback(transcript) {
  var onResult = function(evt) {
    var actual = evt.results[0][0].transcript;
    is(actual, transcript, "expect correct transcript");
  };
  return onResult;
}
|
135 |
|
/**
 * Build an onerror callback asserting that the received error code
 * equals errcode (one of the errorCodes values).
 */
function buildErrorCallback(errcode) {
  var onError = function(err) {
    is(err.error, errcode, "expect correct error code");
  };
  return onError;
}
|
141 |
|
/**
 * Run one SpeechRecognition test.
 *
 * @param {Object}   options
 * @param {Array}    options.prefs           - extra [name, value] pref pairs.
 * @param {Object}   options.expectedEvents  - eventName -> callback (or null).
 * @param {Array}    options.eventsToRequest - FSM event names to drive.
 * @param {string}   [options.audioSampleFile] - overrides the default sample.
 * @param {Function} [options.doneFunc]      - called when all events arrived.
 */
function performTest(options) {
  // Build a fresh pref list instead of unshift()ing into options.prefs,
  // which mutated the caller's array. Order is identical to the original:
  // the two feature-enabling prefs first, then the caller's prefs.
  var prefs = [
    ["media.webspeech.recognition.enable", true],
    ["media.webspeech.test.enable", true]
  ].concat(options.prefs);

  SpecialPowers.pushPrefEnv({set: prefs}, function() {
    var sr = new SpeechRecognition();
    var em = new EventManager(sr);

    for (var eventName in options.expectedEvents) {
      var cb = options.expectedEvents[eventName];
      em.expect(eventName, cb);
    }

    // End the backend test session, then hand control back to the caller.
    em.doneFunc = function() {
      em.requestTestEnd();
      if (options.doneFunc) {
        options.doneFunc();
      }
    };

    em.audioSampleFile = options.audioSampleFile || DEFAULT_AUDIO_SAMPLE_FILE;

    for (var i = 0; i < options.eventsToRequest.length; i++) {
      em.requestFSMEvent(options.eventsToRequest[i]);
    }
  });
}