content/media/webspeech/synth/nsISpeechService.idl

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset 7  129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic according to private browsing mode or
the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it counts...
With some reservations about how to convince FindCookie users to test the
condition and pass a nullptr when double-key logic is disabled.
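
A minimal sketch (not part of the changeset itself) of the gating condition the
message describes. The helper name is invented, and the patch may well read the
privacy.thirdparty.isolate preference as an integer mode or through a cached
observer rather than the direct boolean lookup shown here:

    // Hypothetical helper: decide whether cookie lookups should be
    // double-keyed (isolated per first party).
    #include "mozilla/Preferences.h"

    static bool
    ShouldDoubleKeyCookies(bool aIsPrivateBrowsing)
    {
      // Assumption: privacy.thirdparty.isolate is treated as a boolean pref;
      // the real patch may cache it or interpret it as an integer mode.
      return aIsPrivateBrowsing ||
             mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
    }

    // Callers that do not want double-key logic would then pass nullptr for
    // the first-party key, as the message suggests for FindCookie users.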

/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsISupports.idl"

typedef unsigned short SpeechServiceType;

/**
 * A callback is implemented by the service. For direct audio services, it is
 * required to implement these, although it could be helpful to use the
 * cancel method for shutting down the speech resources.
 */
[scriptable, uuid(408251b0-1d7b-4876-888f-718859ce8c9d)]
interface nsISpeechTaskCallback : nsISupports
{
  /**
   * The user or application has paused the speech.
   */
  void onPause();

  /**
   * The user or application has resumed the speech.
   */
  void onResume();

  /**
   * The user or application has canceled the speech.
   */
  void onCancel();
};


/**
 * A task is associated with a single utterance. It is provided by the browser
 * to the service in the speak() method.
 */
[scriptable, builtinclass, uuid(ad59949c-2437-4b35-8eeb-d760caab75c5)]
interface nsISpeechTask : nsISupports
{
  /**
   * Prepare browser for speech.
   *
   * @param aCallback callback object for mid-speech operations.
   * @param aChannels number of audio channels. Only required
   *                    in direct audio services.
   * @param aRate     audio rate. Only required in direct audio services.
   */
  [optional_argc] void setup(in nsISpeechTaskCallback aCallback,
                               [optional] in uint32_t aChannels,
                               [optional] in uint32_t aRate);

  /**
   * Send audio data to browser.
   *
   * @param aData      an Int16Array with PCM-16 audio data.
   * @param aLandmarks an array of sample offset and landmark pairs.
   *                     Used for emitting boundary and mark events.
   */
  [implicit_jscontext]
  void sendAudio(in jsval aData, in jsval aLandmarks);

  [noscript]
  void sendAudioNative([array, size_is(aDataLen)] in short aData, in unsigned long aDataLen);

  /**
   * Dispatch start event.
   */
  void dispatchStart();

  /**
   * Dispatch end event.
   *
   * @param aElapsedTime time in seconds since speech has started.
   * @param aCharIndex   offset of spoken characters.
   */
  void dispatchEnd(in float aElapsedTime, in unsigned long aCharIndex);

  /**
   * Dispatch pause event. Should not be called directly by service.
   *
   * @param aElapsedTime time in seconds since speech has started.
   * @param aCharIndex   offset of spoken characters.
   */
  void dispatchPause(in float aElapsedTime, in unsigned long aCharIndex);

  /**
   * Dispatch resume event. Should not be called directly by service.
   *
   * @param aElapsedTime time in seconds since speech has started.
   * @param aCharIndex   offset of spoken characters.
   */
  void dispatchResume(in float aElapsedTime, in unsigned long aCharIndex);

  /**
   * Dispatch error event.
   *
   * @param aElapsedTime time in seconds since speech has started.
   * @param aCharIndex   offset of spoken characters.
   */
  void dispatchError(in float aElapsedTime, in unsigned long aCharIndex);

  /**
   * Dispatch boundary event.
   *
   * @param aName        name of boundary, 'word' or 'sentence'.
   * @param aElapsedTime time in seconds since speech has started.
   * @param aCharIndex   offset of spoken characters.
   */
  void dispatchBoundary(in DOMString aName, in float aElapsedTime,
                        in unsigned long aCharIndex);

  /**
   * Dispatch mark event.
   *
   * @param aName        mark identifier.
   * @param aElapsedTime time in seconds since speech has started.
   * @param aCharIndex   offset of spoken characters.
   */
  void dispatchMark(in DOMString aName, in float aElapsedTime, in unsigned long aCharIndex);
};


/**
 * The main interface of a speech synthesis service.
 *
 * A service's speak method could be implemented in two ways:
 *  1. Indirect audio - the service is responsible for outputting audio.
 *    The service calls the nsISpeechTask.dispatch* methods directly, starting
 *    with dispatchStart() and ending with dispatchEnd() or dispatchError().
 *
 *  2. Direct audio - the service provides us with PCM-16 data, and we output it.
 *    The service does not call the dispatch task methods directly. Instead,
 *    audio information is provided at setup(), and audio data is sent with
 *    sendAudio(). The utterance is terminated with an empty sendAudio().
 */
[scriptable, uuid(3952d388-050c-47ba-a70f-5fc1cadf1db0)]
interface nsISpeechService : nsISupports
{
  /**
   * Speak the given text using the voice identified by the given uri. See
   * the W3C Speech API spec for information about pitch and rate.
   * https://dvcs.w3.org/hg/speech-api/raw-file/tip/speechapi.html#utterance-attributes
   *
   * @param aText  text to utter.
   * @param aUri   unique voice identifier.
   * @param aRate  rate to speak voice in.
   * @param aPitch pitch to speak voice in.
   * @param aTask  task instance for utterance, used for sending events or audio
   *                 data back to browser.
   */
  void speak(in DOMString aText, in DOMString aUri,
             in float aRate, in float aPitch,
             in nsISpeechTask aTask);

  const SpeechServiceType SERVICETYPE_DIRECT_AUDIO = 1;
  const SpeechServiceType SERVICETYPE_INDIRECT_AUDIO = 2;

  readonly attribute SpeechServiceType serviceType;
};
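
For illustration, a minimal C++ sketch of an indirect-audio service built
against this interface. The class and file names are invented, and the
generated-header macros (NS_DECL_NSISPEECHSERVICE and friends) are assumed to
match what xpidl produces for this IDL:

    // Hypothetical indirect-audio service: it outputs audio on its own and
    // only reports progress through the nsISpeechTask dispatch* methods.
    #include "nsISpeechService.h"  // header generated from nsISpeechService.idl
    #include "nsString.h"

    class ExampleIndirectService final : public nsISpeechService
    {
    public:
      NS_DECL_ISUPPORTS
      NS_DECL_NSISPEECHSERVICE

    private:
      ~ExampleIndirectService() {}
    };

    NS_IMPL_ISUPPORTS(ExampleIndirectService, nsISpeechService)

    NS_IMETHODIMP
    ExampleIndirectService::Speak(const nsAString& aText, const nsAString& aUri,
                                  float aRate, float aPitch, nsISpeechTask* aTask)
    {
      // Indirect audio: announce the start, hand the text to a platform
      // engine, then report the end. Done synchronously here only to keep
      // the sketch short; a real service would dispatch the end event when
      // the engine finishes.
      aTask->DispatchStart();
      // ... platform synthesis of aText at aRate/aPitch would happen here ...
      aTask->DispatchEnd(0.0f /* elapsed seconds */, aText.Length());
      return NS_OK;
    }

    NS_IMETHODIMP
    ExampleIndirectService::GetServiceType(SpeechServiceType* aServiceType)
    {
      *aServiceType = nsISpeechService::SERVICETYPE_INDIRECT_AUDIO;
      return NS_OK;
    }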
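
A companion sketch of the direct-audio flow, again with invented names: the
service also implements nsISpeechTaskCallback, announces channels and rate
through setup(), pushes PCM-16 samples with sendAudioNative(), and terminates
the utterance with an empty buffer. It is assumed here that [optional_argc]
adds a trailing argument-count parameter to the native Setup() signature:

    // Hypothetical direct-audio service: the browser outputs the PCM-16
    // samples the service sends, so no dispatch* calls are made here.
    #include "nsISpeechService.h"  // generated from nsISpeechService.idl

    class ExampleDirectService final : public nsISpeechService,
                                       public nsISpeechTaskCallback
    {
    public:
      NS_DECL_ISUPPORTS
      NS_DECL_NSISPEECHSERVICE
      NS_DECL_NSISPEECHTASKCALLBACK

    private:
      ~ExampleDirectService() {}
    };

    NS_IMPL_ISUPPORTS(ExampleDirectService, nsISpeechService, nsISpeechTaskCallback)

    NS_IMETHODIMP
    ExampleDirectService::Speak(const nsAString& aText, const nsAString& aUri,
                                float aRate, float aPitch, nsISpeechTask* aTask)
    {
      // Announce the audio format first; 1 channel at 16000 Hz is an
      // arbitrary choice for the sketch. The trailing 2 is the assumed
      // [optional_argc] argument count for the two optional parameters.
      aTask->Setup(this, 1, 16000, 2);

      // Synthesize aText into 16-bit PCM (elided) and hand it to the browser.
      int16_t samples[1600] = {0};  // 100 ms of silence as a placeholder
      aTask->SendAudioNative(samples, 1600);

      // An empty buffer tells the browser the utterance is finished.
      aTask->SendAudioNative(nullptr, 0);
      return NS_OK;
    }

    NS_IMETHODIMP
    ExampleDirectService::GetServiceType(SpeechServiceType* aServiceType)
    {
      *aServiceType = nsISpeechService::SERVICETYPE_DIRECT_AUDIO;
      return NS_OK;
    }

    // nsISpeechTaskCallback is required for direct audio; a real service
    // would pause, resume, or tear down its synthesis engine here.
    NS_IMETHODIMP ExampleDirectService::OnPause()  { return NS_OK; }
    NS_IMETHODIMP ExampleDirectService::OnResume() { return NS_OK; }
    NS_IMETHODIMP ExampleDirectService::OnCancel() { return NS_OK; }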
