"use strict";

const DEFAULT_AUDIO_SAMPLE_FILE = "hello.ogg";
const SPEECH_RECOGNITION_TEST_REQUEST_EVENT_TOPIC = "SpeechRecognitionTest:RequestEvent";
const SPEECH_RECOGNITION_TEST_END_TOPIC = "SpeechRecognitionTest:End";

// Error codes defined by the Web Speech API's SpeechRecognitionError,
// used by tests via buildErrorCallback().
var errorCodes = {
  NO_SPEECH : "no-speech",
  ABORTED : "aborted",
  AUDIO_CAPTURE : "audio-capture",
  NETWORK : "network",
  NOT_ALLOWED : "not-allowed",
  SERVICE_NOT_ALLOWED : "service-not-allowed",
  BAD_GRAMMAR : "bad-grammar",
  LANGUAGE_NOT_SUPPORTED : "language-not-supported"
};

var Services = SpecialPowers.Cu.import("resource://gre/modules/Services.jsm").Services;

/**
 * Test harness around a SpeechRecognition object.
 *
 * Registers a failing default handler for every recognition event, lets a
 * test declare which events it expects (expect()), drives the recognition
 * state machine via observer notifications (requestFSMEvent()), and calls
 * self.doneFunc once all expected events have arrived.
 *
 * @param {SpeechRecognition} sr - the recognition instance under test.
 */
function EventManager(sr) {
  var self = this;
  var nEventsExpected = 0;
  self.eventsReceived = [];

  // Every event a SpeechRecognition object can fire.
  var allEvents = [
    "audiostart",
    "soundstart",
    "speechstart",
    "speechend",
    "soundend",
    "audioend",
    "result",
    "nomatch",
    "error",
    "start",
    "end"
  ];

  // Ordering constraints: each "*end" event must be preceded by its
  // matching "*start" event.
  // FIX: the "soundend" key was previously misspelled "soundent", so the
  // soundend-after-soundstart ordering check was silently never performed.
  var eventDependencies = {
    "speechend": "speechstart",
    "soundend": "soundstart",
    "audioend": "audiostart"
  };

  var isDone = false;

  // AUDIO_DATA events are asynchronous,
  // so we queue events requested while they are being
  // issued to make them seem synchronous
  var isSendingAudioData = false;
  var queuedEventRequests = [];

  // Register default handlers: any event the test did not explicitly
  // expect() is a failure.
  for (var i = 0; i < allEvents.length; i++) {
    (function (eventName) {
      sr["on" + eventName] = function (evt) {
        var message = "unexpected event: " + eventName;
        if (eventName == "error") {
          message += " -- " + evt.message;
        }

        ok(false, message);
        if (self.doneFunc && !isDone) {
          // Bail out early so the test fails fast instead of timing out.
          isDone = true;
          self.doneFunc();
        }
      };
    })(allEvents[i]);
  }

  /**
   * Declare that eventName is expected exactly once; cb (optional) is
   * invoked with (event, sr) when it arrives. Once every expected event
   * has been received, self.doneFunc is called.
   */
  self.expect = function EventManager_expect(eventName, cb) {
    nEventsExpected++;

    sr["on" + eventName] = function(evt) {
      self.eventsReceived.push(eventName);
      ok(true, "received event " + eventName);

      // Enforce start-before-end ordering where applicable.
      var dep = eventDependencies[eventName];
      if (dep) {
        ok(self.eventsReceived.indexOf(dep) >= 0,
           eventName + " must come after " + dep);
      }

      cb && cb(evt, sr);
      if (self.doneFunc && !isDone &&
          nEventsExpected === self.eventsReceived.length) {
        isDone = true;
        self.doneFunc();
      }
    };
  };

  /**
   * Ask the (test-mode) recognition service to feed eventName into its
   * state machine. EVENT_AUDIO_DATA additionally captures a media stream
   * from the sample file and passes it as the notification subject;
   * further requests are queued until that stream has ended.
   */
  self.requestFSMEvent = function EventManager_requestFSMEvent(eventName) {
    if (isSendingAudioData) {
      info("Queuing event " + eventName + " until we're done sending audio data");
      queuedEventRequests.push(eventName);
      return;
    }

    var subject = null;

    if (eventName === "EVENT_AUDIO_DATA") {
      isSendingAudioData = true;
      var audioTag = document.createElement("audio");
      audioTag.src = self.audioSampleFile;

      subject = audioTag.mozCaptureStreamUntilEnded();
      audioTag.addEventListener("ended", function() {
        info("Sample stream ended, requesting queued events");
        isSendingAudioData = false;
        // Drain requests that arrived while audio was streaming, in order.
        while (queuedEventRequests.length) {
          self.requestFSMEvent(queuedEventRequests.shift());
        }
      });

      audioTag.play();
    }

    info("requesting " + eventName);
    Services.obs.notifyObservers(subject,
                                 SPEECH_RECOGNITION_TEST_REQUEST_EVENT_TOPIC,
                                 eventName);
  };

  // Tell the fake recognition service that the test is over.
  self.requestTestEnd = function EventManager_requestTestEnd() {
    Services.obs.notifyObservers(null, SPEECH_RECOGNITION_TEST_END_TOPIC, null);
  };
}

/**
 * Build an onresult callback asserting the first alternative of the first
 * result matches `transcript`.
 */
function buildResultCallback(transcript) {
  return (function(evt) {
    is(evt.results[0][0].transcript, transcript, "expect correct transcript");
  });
}

/**
 * Build an onerror callback asserting the error code equals `errcode`
 * (one of the errorCodes values above).
 */
function buildErrorCallback(errcode) {
  return (function(err) {
    is(err.error, errcode, "expect correct error code");
  });
}

/**
 * Run one SpeechRecognition test.
 *
 * options:
 *   prefs           - extra prefs to push (recognition/test prefs are added).
 *   expectedEvents  - map of event name -> callback (or null) to expect().
 *   eventsToRequest - FSM event names to feed to the service, in order.
 *   audioSampleFile - optional sample override (default hello.ogg).
 *   doneFunc        - called after all expected events have been received.
 */
function performTest(options) {
  var prefs = options.prefs;

  prefs.unshift(
    ["media.webspeech.recognition.enable", true],
    ["media.webspeech.test.enable", true]
  );

  SpecialPowers.pushPrefEnv({set: prefs}, function() {
    var sr = new SpeechRecognition();
    var em = new EventManager(sr);

    for (var eventName in options.expectedEvents) {
      var cb = options.expectedEvents[eventName];
      em.expect(eventName, cb);
    }

    em.doneFunc = function() {
      em.requestTestEnd();
      if (options.doneFunc) {
        options.doneFunc();
      }
    };

    em.audioSampleFile = DEFAULT_AUDIO_SAMPLE_FILE;
    if (options.audioSampleFile) {
      em.audioSampleFile = options.audioSampleFile;
    }

    for (var i = 0; i < options.eventsToRequest.length; i++) {
      em.requestFSMEvent(options.eventsToRequest[i]);
    }
  });
}