content/media/webspeech/synth/test/common.js

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable the double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in GetCookieStringCommon
and FindCookie, where it counts. Some reservations remain about how to convince
FindCookie callers to test the condition and pass a nullptr when the double-key
logic is disabled.
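
For orientation only, the gating condition the message describes could be sketched in
chrome JavaScript roughly as follows. The helper name is hypothetical, the change itself
is to the C++ GetCookieStringCommon and FindCookie code paths, and the preference is
treated as a boolean here purely for the sake of the sketch:

Components.utils.import("resource://gre/modules/Services.jsm");
Components.utils.import("resource://gre/modules/PrivateBrowsingUtils.jsm");

// Hypothetical helper: double-key cookie lookups when the window is in
// private browsing, or when the privacy.thirdparty.isolate preference
// requests third-party isolation.
function useDoubleKeying(aWindow) {
  if (PrivateBrowsingUtils.isWindowPrivate(aWindow))
    return true;
  try {
    return Services.prefs.getBoolPref("privacy.thirdparty.isolate");
  } catch (e) {
    return false;  // preference not set
  }
}

Per the note above, callers for which this returns false would then pass a nullptr key
to disable the double-key path.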

// Shared helpers for the Web Speech API speech synthesis tests.
var gSpeechRegistry = SpecialPowers.Cc["@mozilla.org/synth-voice-registry;1"]
  .getService(SpecialPowers.Ci.nsISynthVoiceRegistry);

var gAddedVoices = [];

// Callback object handed to nsISpeechTask.setup(); it forwards pause,
// resume and cancel notifications from the service back to the test.
function SpeechTaskCallback(onpause, onresume, oncancel) {
  this.onpause = onpause;
  this.onresume = onresume;
  this.oncancel = oncancel;
}

SpeechTaskCallback.prototype = {
  QueryInterface: function(iid) {
    return this;
  },

  getInterfaces: function(c) {},

  getHelperForLanguage: function() {},

  onPause: function onPause() {
    if (this.onpause)
      this.onpause();
  },

  onResume: function onResume() {
    if (this.onresume)
      this.onresume();
  },

  onCancel: function onCancel() {
    if (this.oncancel)
      this.oncancel();
  }
};

// A fake speech service that produces audio directly
// (SERVICETYPE_DIRECT_AUDIO): it hands the task a buffer of silent
// samples sized proportionally to the utterance length.
var TestSpeechServiceWithAudio = SpecialPowers.wrapCallbackObject({
  CHANNELS: 1,
  SAMPLE_RATE: 16000,

  serviceType: SpecialPowers.Ci.nsISpeechService.SERVICETYPE_DIRECT_AUDIO,

  speak: function speak(aText, aUri, aRate, aPitch, aTask) {
    var task = SpecialPowers.wrap(aTask);

    window.setTimeout(
      function () {
        task.setup(SpecialPowers.wrapCallbackObject(new SpeechTaskCallback()), this.CHANNELS, this.SAMPLE_RATE);
        // 0.025 seconds per character.
        task.sendAudio(new Int16Array((this.SAMPLE_RATE/40)*aText.length), []);
        task.sendAudio(new Int16Array(0), []);
      }.bind(this), 0);
  },

  QueryInterface: function(iid) {
    return this;
  },

  getInterfaces: function(c) {},

  getHelperForLanguage: function() {}
});

// A fake speech service that produces no audio of its own
// (SERVICETYPE_INDIRECT_AUDIO): it compares each speak() call against
// the queued expectations and dispatches the start/end events itself.
var TestSpeechServiceNoAudio = SpecialPowers.wrapCallbackObject({
  serviceType: SpecialPowers.Ci.nsISpeechService.SERVICETYPE_INDIRECT_AUDIO,

  speak: function speak(aText, aUri, aRate, aPitch, aTask) {
    var pair = this.expectedSpeaks.shift();
    if (pair) {
      // XXX: These tests do not happen in OOP
      var utterance = pair[0];
      var expected = pair[1];

      is(aText, utterance.text, "Speak text matches utterance text");

      var args = {uri: aUri, rate: aRate, pitch: aPitch};

      for (var attr in args) {
        if (expected[attr] != undefined)
          is(args[attr], expected[attr], "expected service arg " + attr);
      }
    }

    var task = SpecialPowers.wrap(aTask);
    task.setup(SpecialPowers.wrapCallbackObject(new SpeechTaskCallback()));
    setTimeout(function () {
                 task.dispatchStart();
                 setTimeout(function () {
                              task.dispatchEnd(aText.length / 2.0, aText.length);
                            }, 0);

               }, 0);
  },

  QueryInterface: function(iid) {
    return this;
  },

  getInterfaces: function(c) {},

  getHelperForLanguage: function() {},

  expectedSpeaks: []
});

// Register a test voice with the voice registry and return its URI.
// In the parent process this talks to the registry directly; in a child
// process it asks the parent over a sync message.
function synthAddVoice(aServiceName, aName, aLang, aIsLocal) {
  if (SpecialPowers.isMainProcess()) {
    var voicesBefore = speechSynthesis.getVoices().length;
    var uri = "urn:moz-tts:mylittleservice:" + encodeURI(aName + '?' + aLang);
    gSpeechRegistry.addVoice(window[aServiceName], uri, aName, aLang, aIsLocal);

    gAddedVoices.push([window[aServiceName], uri]);
    var voicesAfter = speechSynthesis.getVoices().length;

    is(voicesBefore + 1, voicesAfter, "Voice added");
    var voice = speechSynthesis.getVoices()[voicesAfter - 1];
    is(voice.voiceURI, uri, "voice URI matches");
    is(voice.name, aName, "voice name matches");
    is(voice.lang, aLang, "voice lang matches");
    is(voice.localService, aIsLocal, "voice localService matches");

    return uri;
  } else {
    // XXX: It would be nice to check here that the child gets the voice
    // added update, but alas, it is asynchronous.
    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
      .getService(SpecialPowers.Ci.nsISyncMessageSender);

    return mm.sendSyncMessage(
      'test:SpeechSynthesis:ipcSynthAddVoice',
      [aServiceName, aName, aLang, aIsLocal])[0];
  }
}

// Mark the voice at aUri as the default voice (or not).
function synthSetDefault(aUri, aIsDefault) {
  if (SpecialPowers.isMainProcess()) {
    gSpeechRegistry.setDefaultVoice(aUri, aIsDefault);
    var voices = speechSynthesis.getVoices();
    for (var i in voices) {
      if (voices[i].voiceURI == aUri)
        ok(voices[i]['default'], "Voice set to default");
    }
  } else {
    // XXX: It would be nice to check here that the child gets the default
    // voice update, but alas, it is asynchronous.
    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
      .getService(SpecialPowers.Ci.nsISyncMessageSender);

    return mm.sendSyncMessage(
      'test:SpeechSynthesis:ipcSynthSetDefault', [aUri, aIsDefault])[0];
  }
}

// Remove every voice that was added through synthAddVoice().
function synthCleanup() {
  if (SpecialPowers.isMainProcess()) {
    var voicesBefore = speechSynthesis.getVoices().length;
    var toRemove = gAddedVoices.length;
    var removeArgs;
    while ((removeArgs = gAddedVoices.shift()))
      gSpeechRegistry.removeVoice.apply(gSpeechRegistry, removeArgs);

    var voicesAfter = speechSynthesis.getVoices().length;
    is(voicesAfter, voicesBefore - toRemove, "Successfully removed test voices");
  } else {
    // XXX: It would be nice to check here that the child gets the voice
    // removed update, but alas, it is asynchronous.
    var mm = SpecialPowers.Cc["@mozilla.org/childprocessmessagemanager;1"]
      .getService(SpecialPowers.Ci.nsISyncMessageSender);
    mm.sendSyncMessage('test:SpeechSynthesis:ipcSynthCleanup');
  }
}

// Queue the utterances described by aTestArgs, where each entry is an
// [utteranceAttributes, expectedServiceArgs] pair, verify the events as
// they fire, and call aEndFunc once the queue has drained.
function synthTestQueue(aTestArgs, aEndFunc) {
  var utterances = [];
  for (var i in aTestArgs) {
    var uargs = aTestArgs[i][0];
    var u = new SpeechSynthesisUtterance(uargs.text);

    delete uargs.text;

    for (var attr in uargs)
      u[attr] = uargs[attr];

    function onend_handler(e) {
      is(e.target, utterances.shift(), "Target matches utterances");
      ok(!speechSynthesis.speaking, "speechSynthesis is not speaking.");

      isnot(e.type, 'error', "Error in utterance");

      if (utterances.length) {
        ok(speechSynthesis.pending, "other utterances queued");
      } else {
        ok(!speechSynthesis.pending, "queue is empty, nothing pending.");
        if (aEndFunc)
          aEndFunc();
      }
    }

    u.addEventListener('end', onend_handler);
    u.addEventListener('error', onend_handler);

    u.addEventListener(
      'error', function onerror_handler(e) {
        ok(false, "Error in speech utterance '" + e.target.text + "'");
      });

    utterances.push(u);
    TestSpeechServiceNoAudio.expectedSpeaks.push([u, aTestArgs[i][1]]);
    speechSynthesis.speak(u);
  }

  ok(!speechSynthesis.speaking, "speechSynthesis is not speaking yet.");
  ok(speechSynthesis.pending, "speechSynthesis has an utterance queued.");
}
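
A minimal usage sketch, not part of common.js itself: a mochitest that includes this
file might drive the helpers roughly as follows. The voice name, language and utterance
text are made up for illustration.

SimpleTest.waitForExplicitFinish();

// Register a fake voice backed by the no-audio test service and make it
// the default so the utterance below is routed to it.
var voiceUri = synthAddVoice('TestSpeechServiceNoAudio', 'Bob Marley', 'en-JM', true);
synthSetDefault(voiceUri, true);

// Queue one utterance, expect the service to be called with our voice URI,
// and clean up once the queue has drained.
synthTestQueue(
  [[{text: "Hello, world."}, {uri: voiceUri}]],
  function () {
    synthCleanup();
    SimpleTest.finish();
  });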
