content/media/webspeech/synth/test/common.js

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/webspeech/synth/test/common.js	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,214 @@
// Chrome-only handle to the platform synth voice registry; lets the test
// register and remove fake voices directly.
var gSpeechRegistry = SpecialPowers.Cc["@mozilla.org/synth-voice-registry;1"]
  .getService(SpecialPowers.Ci.nsISynthVoiceRegistry);

// [service, voiceURI] pairs recorded by synthAddVoice(), consumed by
// synthCleanup() so every test voice is removed again.
var gAddedVoices = [];
     1.8 +
     1.9 +function SpeechTaskCallback(onpause, onresume, oncancel) {
    1.10 +  this.onpause = onpause;
    1.11 +  this.onresume = onresume;
    1.12 +  this.oncancel = oncancel;
    1.13 +}
    1.14 +
    1.15 +SpeechTaskCallback.prototype = {
    1.16 +  QueryInterface: function(iid) {
    1.17 +    return this;
    1.18 +  },
    1.19 +
    1.20 +  getInterfaces: function(c) {},
    1.21 +
    1.22 +  getHelperForLanguage: function() {},
    1.23 +
    1.24 +  onPause: function onPause() {
    1.25 +    if (this.onpause)
    1.26 +      this.onpause();
    1.27 +  },
    1.28 +
    1.29 +  onResume: function onResume() {
    1.30 +    if (this.onresume)
    1.31 +      this.onresume();
    1.32 +  },
    1.33 +
    1.34 +  onCancel: function onCancel() {
    1.35 +    if (this.oncancel)
    1.36 +      this.oncancel();
    1.37 +  }
    1.38 +};
    1.39 +
    1.40 +var TestSpeechServiceWithAudio = SpecialPowers.wrapCallbackObject({
    1.41 +  CHANNELS: 1,
    1.42 +  SAMPLE_RATE: 16000,
    1.43 +
    1.44 +  serviceType: SpecialPowers.Ci.nsISpeechService.SERVICETYPE_DIRECT_AUDIO,
    1.45 +
    1.46 +  speak: function speak(aText, aUri, aRate, aPitch, aTask) {
    1.47 +    var task = SpecialPowers.wrap(aTask);
    1.48 +
    1.49 +    window.setTimeout(
    1.50 +      function () {
    1.51 +        task.setup(SpecialPowers.wrapCallbackObject(new SpeechTaskCallback()), this.CHANNELS, this.SAMPLE_RATE);
    1.52 +        // 0.025 seconds per character.
    1.53 +        task.sendAudio(new Int16Array((this.SAMPLE_RATE/40)*aText.length), []);
    1.54 +        task.sendAudio(new Int16Array(0), []);
    1.55 +      }.bind(this), 0);
    1.56 +  },
    1.57 +
    1.58 +  QueryInterface: function(iid) {
    1.59 +    return this;
    1.60 +  },
    1.61 +
    1.62 +  getInterfaces: function(c) {},
    1.63 +
    1.64 +  getHelperForLanguage: function() {}
    1.65 +});
    1.66 +
    1.67 +var TestSpeechServiceNoAudio = SpecialPowers.wrapCallbackObject({
    1.68 +  serviceType: SpecialPowers.Ci.nsISpeechService.SERVICETYPE_INDIRECT_AUDIO,
    1.69 +
    1.70 +  speak: function speak(aText, aUri, aRate, aPitch, aTask) {
    1.71 +    var pair = this.expectedSpeaks.shift();
    1.72 +    if (pair) {
    1.73 +      // XXX: These tests do not happen in OOP
    1.74 +      var utterance = pair[0];
    1.75 +      var expected = pair[1];
    1.76 +
    1.77 +      is(aText, utterance.text, "Speak text matches utterance text");
    1.78 +
    1.79 +      var args = {uri: aUri, rate: aRate, pitch: aPitch};
    1.80 +
    1.81 +      for (var attr in args) {
    1.82 +        if (expected[attr] != undefined)
    1.83 +          is(args[attr], expected[attr], "expected service arg " + attr);
    1.84 +      }
    1.85 +    }
    1.86 +
    1.87 +    var task = SpecialPowers.wrap(aTask);
    1.88 +    task.setup(SpecialPowers.wrapCallbackObject(new SpeechTaskCallback()));
    1.89 +    setTimeout(function () {
    1.90 +                 task.dispatchStart();
    1.91 +                 setTimeout(function () {
    1.92 +                              task.dispatchEnd(aText.length / 2.0, aText.length);
    1.93 +                            }, 0);
    1.94 +
    1.95 +               }, 0);
    1.96 +  },
    1.97 +
    1.98 +  QueryInterface: function(iid) {
    1.99 +    return this;
   1.100 +  },
   1.101 +
   1.102 +  getInterfaces: function(c) {},
   1.103 +
   1.104 +  getHelperForLanguage: function() {},
   1.105 +
   1.106 +  expectedSpeaks: []
   1.107 +});
   1.108 +
   1.109 +function synthAddVoice(aServiceName, aName, aLang, aIsLocal) {
   1.110 +  if (SpecialPowers.isMainProcess()) {
   1.111 +    var voicesBefore = speechSynthesis.getVoices().length;
   1.112 +    var uri = "urn:moz-tts:mylittleservice:" + encodeURI(aName + '?' + aLang);
   1.113 +    gSpeechRegistry.addVoice(window[aServiceName], uri, aName, aLang, aIsLocal);
   1.114 +
   1.115 +    gAddedVoices.push([window[aServiceName], uri]);
   1.116 +    var voicesAfter = speechSynthesis.getVoices().length;
   1.117 +
   1.118 +    is(voicesBefore + 1, voicesAfter, "Voice added");
   1.119 +    var voice = speechSynthesis.getVoices()[voicesAfter - 1];
   1.120 +    is(voice.voiceURI, uri, "voice URI matches");
   1.121 +    is(voice.name, aName, "voice name matches");
   1.122 +    is(voice.lang, aLang, "voice lang matches");
   1.123 +    is(voice.localService, aIsLocal, "voice localService matches");
   1.124 +
   1.125 +    return uri;
   1.126 +  } else {
   1.127 +    // XXX: It would be nice to check here that the child gets the voice
   1.128 +    // added update, but alas, it is aynchronous.
   1.129 +    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
   1.130 +      .getService(SpecialPowers.Ci.nsISyncMessageSender);
   1.131 +
   1.132 +    return mm.sendSyncMessage(
   1.133 +      'test:SpeechSynthesis:ipcSynthAddVoice',
   1.134 +      [aServiceName, aName, aLang, aIsLocal])[0];
   1.135 +  }
   1.136 +}
   1.137 +
   1.138 +function synthSetDefault(aUri, aIsDefault) {
   1.139 +  if (SpecialPowers.isMainProcess()) {
   1.140 +    gSpeechRegistry.setDefaultVoice(aUri, aIsDefault);
   1.141 +    var voices = speechSynthesis.getVoices();
   1.142 +    for (var i in voices) {
   1.143 +      if (voices[i].voiceURI == aUri)
   1.144 +        ok(voices[i]['default'], "Voice set to default");
   1.145 +    }
   1.146 +  } else {
   1.147 +    // XXX: It would be nice to check here that the child gets the voice
   1.148 +    // added update, but alas, it is aynchronous.
   1.149 +    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
   1.150 +      .getService(SpecialPowers.Ci.nsISyncMessageSender);
   1.151 +
   1.152 +    return mm.sendSyncMessage(
   1.153 +      'test:SpeechSynthesis:ipcSynthSetDefault', [aUri, aIsDefault])[0];
   1.154 +  }
   1.155 +}
   1.156 +
   1.157 +function synthCleanup() {
   1.158 +  if (SpecialPowers.isMainProcess()) {
   1.159 +    var voicesBefore = speechSynthesis.getVoices().length;
   1.160 +    var toRemove = gAddedVoices.length;
   1.161 +    var removeArgs;
   1.162 +    while ((removeArgs = gAddedVoices.shift()))
   1.163 +      gSpeechRegistry.removeVoice.apply(gSpeechRegistry.removeVoice, removeArgs);
   1.164 +
   1.165 +    var voicesAfter = speechSynthesis.getVoices().length;
   1.166 +    is(voicesAfter, voicesBefore - toRemove, "Successfully removed test voices");
   1.167 +  } else {
   1.168 +    // XXX: It would be nice to check here that the child gets the voice
   1.169 +    // removed update, but alas, it is aynchronous.
   1.170 +    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
   1.171 +      .getService(SpecialPowers.Ci.nsISyncMessageSender);
   1.172 +    mm.sendSyncMessage('test:SpeechSynthesis:ipcSynthCleanup');
   1.173 +  }
   1.174 +}
   1.175 +
   1.176 +function synthTestQueue(aTestArgs, aEndFunc) {
   1.177 +  var utterances = [];
   1.178 +  for (var i in aTestArgs) {
   1.179 +    var uargs = aTestArgs[i][0];
   1.180 +    var u = new SpeechSynthesisUtterance(uargs.text);
   1.181 +
   1.182 +    delete uargs.text;
   1.183 +
   1.184 +    for (var attr in uargs)
   1.185 +      u[attr] = uargs[attr];
   1.186 +
   1.187 +    function onend_handler(e) {
   1.188 +      is(e.target, utterances.shift(), "Target matches utterances");
   1.189 +      ok(!speechSynthesis.speaking, "speechSynthesis is not speaking.");
   1.190 +
   1.191 +      isnot(e.eventType, 'error', "Error in utterance");
   1.192 +
   1.193 +      if (utterances.length) {
   1.194 +        ok(speechSynthesis.pending, "other utterances queued");
   1.195 +      } else {
   1.196 +        ok(!speechSynthesis.pending, "queue is empty, nothing pending.");
   1.197 +        if (aEndFunc)
   1.198 +          aEndFunc();
   1.199 +      }
   1.200 +    }
   1.201 +
   1.202 +    u.addEventListener('end', onend_handler);
   1.203 +    u.addEventListener('error', onend_handler);
   1.204 +
   1.205 +    u.addEventListener(
   1.206 +      'error', function onerror_handler(e) {
   1.207 +        ok(false, "Error in speech utterance '" + e.target.text + "'");
   1.208 +      });
   1.209 +
   1.210 +    utterances.push(u);
   1.211 +    TestSpeechServiceNoAudio.expectedSpeaks.push([u, aTestArgs[i][1]]);
   1.212 +    speechSynthesis.speak(u);
   1.213 +  }
   1.214 +
   1.215 +  ok(!speechSynthesis.speaking, "speechSynthesis is not speaking yet.");
   1.216 +  ok(speechSynthesis.pending, "speechSynthesis has an utterance queued.");
   1.217 +}

mercurial