content/media/webspeech/synth/nsISpeechService.idl

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/webspeech/synth/nsISpeechService.idl	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,160 @@
     1.4 +/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
     1.5 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.6 + * License, v. 2.0. If a copy of the MPL was not distributed with this file,
     1.7 + * You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.8 +
     1.9 +#include "nsISupports.idl"
    1.10 +
    1.11 +typedef unsigned short SpeechServiceType;
    1.12 +
    1.13 +/**
     1.14 + * A callback interface implemented by the service. Implementing it is only
     1.15 + * required for direct audio services, although the cancel method can be
     1.16 + * helpful to any service for shutting down its speech resources.
    1.17 + */
    1.18 +[scriptable, uuid(408251b0-1d7b-4876-888f-718859ce8c9d)]
    1.19 +interface nsISpeechTaskCallback : nsISupports
    1.20 +{
    1.21 +  /**
    1.22 +   * The user or application has paused the speech.
    1.23 +   */
    1.24 +  void onPause();
    1.25 +
    1.26 +  /**
    1.27 +   * The user or application has resumed the speech.
    1.28 +   */
    1.29 +  void onResume();
    1.30 +
    1.31 +  /**
    1.32 +   * The user or application has canceled the speech.
    1.33 +   */
    1.34 +  void onCancel();
    1.35 +};
    1.36 +
    1.37 +
    1.38 +/**
    1.39 + * A task is associated with a single utterance. It is provided by the browser
    1.40 + * to the service in the speak() method.
    1.41 + */
    1.42 +[scriptable, builtinclass, uuid(ad59949c-2437-4b35-8eeb-d760caab75c5)]
    1.43 +interface nsISpeechTask : nsISupports
    1.44 +{
    1.45 +  /**
    1.46 +   * Prepare browser for speech.
    1.47 +   *
    1.48 +   * @param aCallback callback object for mid-speech operations.
     1.49 +   * @param aChannels number of audio channels. Only required
     1.50 +   *                    for direct audio services.
     1.51 +   * @param aRate     audio sample rate. Only required for direct audio services.
    1.52 +   */
    1.53 +  [optional_argc] void setup(in nsISpeechTaskCallback aCallback,
    1.54 +                               [optional] in uint32_t aChannels,
    1.55 +                               [optional] in uint32_t aRate);
    1.56 +
    1.57 +  /**
    1.58 +   * Send audio data to browser.
    1.59 +   *
    1.60 +   * @param aData     an Int16Array with PCM-16 audio data.
    1.61 +   * @param aLandmarks an array of sample offset and landmark pairs.
     1.62 +   *                     Used for emitting boundary and mark events.
    1.63 +   */
    1.64 +  [implicit_jscontext]
    1.65 +  void sendAudio(in jsval aData, in jsval aLandmarks);
    1.66 +
    1.67 +  [noscript]
    1.68 +  void sendAudioNative([array, size_is(aDataLen)] in short aData, in unsigned long aDataLen);
    1.69 +
    1.70 +  /**
    1.71 +   * Dispatch start event.
    1.72 +   */
    1.73 +  void dispatchStart();
    1.74 +
    1.75 +  /**
    1.76 +   * Dispatch end event.
    1.77 +   *
    1.78 +   * @param aElapsedTime time in seconds since speech has started.
    1.79 +   * @param aCharIndex   offset of spoken characters.
    1.80 +   */
    1.81 +  void dispatchEnd(in float aElapsedTime, in unsigned long aCharIndex);
    1.82 +
    1.83 +  /**
     1.84 +   * Dispatch pause event. Should not be called directly by the service.
    1.85 +   *
    1.86 +   * @param aElapsedTime time in seconds since speech has started.
    1.87 +   * @param aCharIndex   offset of spoken characters.
    1.88 +   */
    1.89 +  void dispatchPause(in float aElapsedTime, in unsigned long aCharIndex);
    1.90 +
    1.91 +  /**
     1.92 +   * Dispatch resume event. Should not be called directly by the service.
    1.93 +   *
    1.94 +   * @param aElapsedTime time in seconds since speech has started.
    1.95 +   * @param aCharIndex   offset of spoken characters.
    1.96 +   */
    1.97 +  void dispatchResume(in float aElapsedTime, in unsigned long aCharIndex);
    1.98 +
    1.99 +  /**
   1.100 +   * Dispatch error event.
   1.101 +   *
   1.102 +   * @param aElapsedTime time in seconds since speech has started.
   1.103 +   * @param aCharIndex   offset of spoken characters.
   1.104 +   */
   1.105 +  void dispatchError(in float aElapsedTime, in unsigned long aCharIndex);
   1.106 +
   1.107 +  /**
   1.108 +   * Dispatch boundary event.
   1.109 +   *
   1.110 +   * @param aName        name of boundary, 'word' or 'sentence'
   1.111 +   * @param aElapsedTime time in seconds since speech has started.
   1.112 +   * @param aCharIndex   offset of spoken characters.
   1.113 +   */
   1.114 +  void dispatchBoundary(in DOMString aName, in float aElapsedTime,
   1.115 +                        in unsigned long aCharIndex);
   1.116 +
   1.117 +  /**
   1.118 +   * Dispatch mark event.
   1.119 +   *
   1.120 +   * @param aName        mark identifier.
   1.121 +   * @param aElapsedTime time in seconds since speech has started.
   1.122 +   * @param aCharIndex   offset of spoken characters.
   1.123 +   */
   1.124 +  void dispatchMark(in DOMString aName, in float aElapsedTime, in unsigned long aCharIndex);
   1.125 +};
   1.126 +
   1.127 +/**
   1.128 + * The main interface of a speech synthesis service.
   1.129 + *
   1.130 + * A service's speak method could be implemented in two ways:
   1.131 + *  1. Indirect audio - the service is responsible for outputting audio.
    1.132 + *    The service calls the nsISpeechTask.dispatch* methods directly, starting
    1.133 + *    with dispatchStart() and ending with dispatchEnd() or dispatchError().
   1.134 + *
   1.135 + *  2. Direct audio - the service provides us with PCM-16 data, and we output it.
   1.136 + *    The service does not call the dispatch task methods directly. Instead,
   1.137 + *    audio information is provided at setup(), and audio data is sent with
   1.138 + *    sendAudio(). The utterance is terminated with an empty sendAudio().
   1.139 + */
   1.140 +[scriptable, uuid(3952d388-050c-47ba-a70f-5fc1cadf1db0)]
   1.141 +interface nsISpeechService : nsISupports
   1.142 +{
   1.143 +  /**
    1.144 +   * Speak the given text using the voice identified by the given uri. See
   1.145 +   * W3C Speech API spec for information about pitch and rate.
   1.146 +   * https://dvcs.w3.org/hg/speech-api/raw-file/tip/speechapi.html#utterance-attributes
   1.147 +   *
   1.148 +   * @param aText  text to utter.
   1.149 +   * @param aUri   unique voice identifier.
    1.150 +   * @param aRate  rate at which to speak the text.
    1.151 +   * @param aPitch pitch at which to speak the text.
    1.152 +   * @param aTask  task instance for the utterance, used for sending events or
    1.153 +   *                 audio data back to the browser.
   1.154 +   */
   1.155 +  void speak(in DOMString aText, in DOMString aUri,
   1.156 +             in float aRate, in float aPitch,
   1.157 +             in nsISpeechTask aTask);
   1.158 +
   1.159 +  const SpeechServiceType SERVICETYPE_DIRECT_AUDIO = 1;
   1.160 +  const SpeechServiceType SERVICETYPE_INDIRECT_AUDIO = 2;
   1.161 +
   1.162 +  readonly attribute SpeechServiceType serviceType;
   1.163 +};
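
For illustration, a minimal sketch of how a C++ service might implement the
indirect audio model described above. Everything named "Hypothetical*", the
surrounding XPCOM boilerplate, and the exact generated C++ signatures (for
example, the trailing argument count that [optional_argc] adds to Setup()) are
assumptions to be checked against the generated headers; only the method names
and their semantics come from the IDL in this changeset.

#include "nsISpeechService.h"
#include "nsCOMPtr.h"
#include "nsString.h"

// Hypothetical indirect-audio service: a platform engine plays the audio
// itself, so the service only reports progress through the task's dispatch*
// methods.
class HypotheticalIndirectService final : public nsISpeechService,
                                          public nsISpeechTaskCallback
{
public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSISPEECHSERVICE
  NS_DECL_NSISPEECHTASKCALLBACK

private:
  ~HypotheticalIndirectService() {}

  // Task for the utterance currently being spoken.
  nsCOMPtr<nsISpeechTask> mTask;
};

NS_IMPL_ISUPPORTS(HypotheticalIndirectService,
                  nsISpeechService, nsISpeechTaskCallback)

NS_IMETHODIMP
HypotheticalIndirectService::Speak(const nsAString& aText, const nsAString& aUri,
                                   float aRate, float aPitch, nsISpeechTask* aTask)
{
  mTask = aTask;

  // Indirect audio: no channels/rate are needed, so no optional arguments.
  aTask->Setup(this, 0, 0, /* optional argc */ 0);
  aTask->DispatchStart();

  // ... hand aText/aRate/aPitch to the platform engine here. When the engine
  // reports completion, call mTask->DispatchEnd(elapsedSeconds, charIndex);
  // engine boundary/mark callbacks map to DispatchBoundary()/DispatchMark().
  return NS_OK;
}

NS_IMETHODIMP
HypotheticalIndirectService::GetServiceType(SpeechServiceType* aServiceType)
{
  *aServiceType = nsISpeechService::SERVICETYPE_INDIRECT_AUDIO;
  return NS_OK;
}

// nsISpeechTaskCallback: the browser forwards pause/resume/cancel requests.
NS_IMETHODIMP
HypotheticalIndirectService::OnPause()  { /* pause the engine */ return NS_OK; }

NS_IMETHODIMP
HypotheticalIndirectService::OnResume() { /* resume the engine */ return NS_OK; }

NS_IMETHODIMP
HypotheticalIndirectService::OnCancel() { /* stop the engine, free resources */ return NS_OK; }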

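A corresponding sketch of the direct audio model, again with hypothetical
names; only the call sequence on nsISpeechTask (setup, sendAudioNative, and
the empty send that terminates the utterance) is taken from the IDL above.

// Hypothetical direct-audio Speak(): the service hands PCM-16 data to the
// browser instead of playing it; the browser is then responsible for the
// start/end events. The surrounding class would mirror the indirect example
// but report SERVICETYPE_DIRECT_AUDIO.
NS_IMETHODIMP
HypotheticalDirectService::Speak(const nsAString& aText, const nsAString& aUri,
                                 float aRate, float aPitch, nsISpeechTask* aTask)
{
  // Declare mono PCM-16 at an assumed 16000 Hz; both optional arguments are
  // supplied, so the optional argument count is 2.
  aTask->Setup(this, 1, 16000, /* optional argc */ 2);

  int16_t samples[1600];  // e.g. 100 ms of audio at 16 kHz (illustrative size)
  // ... have the synthesizer fill `samples` from aText, honoring aRate/aPitch ...
  aTask->SendAudioNative(samples, 1600);

  // An empty send marks the end of the utterance, per the interface comment.
  aTask->SendAudioNative(nullptr, 0);
  return NS_OK;
}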