content/media/webspeech/synth/nsISpeechService.idl

Tue, 06 Jan 2015 21:39:09 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Tue, 06 Jan 2015 21:39:09 +0100
branch
TOR_BUG_9701
changeset 8
97036ab72558
permissions
-rw-r--r--

Conditionally force memory storage according to privacy.thirdparty.isolate;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

michael@0 1 /* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
michael@0 2 /* This Source Code Form is subject to the terms of the Mozilla Public
michael@0 3 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
michael@0 4 * You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 5
michael@0 6 #include "nsISupports.idl"
michael@0 7
michael@0 8 typedef unsigned short SpeechServiceType; // Numeric service-type tag; valid values are the nsISpeechService.SERVICETYPE_* constants.
michael@0 9
michael@0 10 /**
michael@0 11 * A callback implemented by the service. Direct audio services are
michael@0 12 * required to implement these methods; the onCancel method is also a
michael@0 13 * useful place to shut down the service's speech resources.
michael@0 14 */
michael@0 15 [scriptable, uuid(408251b0-1d7b-4876-888f-718859ce8c9d)]
michael@0 16 interface nsISpeechTaskCallback : nsISupports
michael@0 17 {
michael@0 18 /**
michael@0 19 * The user or application has paused the speech.
michael@0 20 */
michael@0 21 void onPause();
michael@0 22
michael@0 23 /**
michael@0 24 * The user or application has resumed the speech.
michael@0 25 */
michael@0 26 void onResume();
michael@0 27
michael@0 28 /**
michael@0 29 * The user or application has canceled the speech.
michael@0 30 */
michael@0 31 void onCancel();
michael@0 32 };
michael@0 33
michael@0 34
michael@0 35 /**
michael@0 36 * A task is associated with a single utterance. It is provided by the browser
michael@0 37 * to the service in the speak() method.
michael@0 38 */
michael@0 39 [scriptable, builtinclass, uuid(ad59949c-2437-4b35-8eeb-d760caab75c5)]
michael@0 40 interface nsISpeechTask : nsISupports
michael@0 41 {
michael@0 42 /**
michael@0 43 * Prepare browser for speech.
michael@0 44 *
michael@0 45 * @param aCallback callback object for mid-speech operations.
michael@0 46 * @param aChannels number of audio channels. Only required
michael@0 47 * in direct audio services
michael@0 48 * @param aRate audio rate. Only required in direct audio services
michael@0 49 */
michael@0 50 [optional_argc] void setup(in nsISpeechTaskCallback aCallback,
michael@0 51 [optional] in uint32_t aChannels,
michael@0 52 [optional] in uint32_t aRate);
michael@0 53
michael@0 54 /**
michael@0 55 * Send audio data to browser.
michael@0 56 *
michael@0 57 * @param aData an Int16Array with PCM-16 audio data.
michael@0 58 * @param aLandmarks an array of sample offset and landmark pairs.
michael@0 59 * Used for emitting boundary and mark events.
michael@0 60 */
michael@0 61 [implicit_jscontext]
michael@0 62 void sendAudio(in jsval aData, in jsval aLandmarks);
michael@0 63
michael@0 64 [noscript]
michael@0 65 void sendAudioNative([array, size_is(aDataLen)] in short aData, in unsigned long aDataLen); // Native (C++) counterpart of sendAudio, taking raw PCM-16 samples.
michael@0 66
michael@0 67 /**
michael@0 68 * Dispatch start event.
michael@0 69 */
michael@0 70 void dispatchStart();
michael@0 71
michael@0 72 /**
michael@0 73 * Dispatch end event.
michael@0 74 *
michael@0 75 * @param aElapsedTime time in seconds since speech has started.
michael@0 76 * @param aCharIndex offset of spoken characters.
michael@0 77 */
michael@0 78 void dispatchEnd(in float aElapsedTime, in unsigned long aCharIndex);
michael@0 79
michael@0 80 /**
michael@0 81 * Dispatch pause event. Should not be called directly by service.
michael@0 82 *
michael@0 83 * @param aElapsedTime time in seconds since speech has started.
michael@0 84 * @param aCharIndex offset of spoken characters.
michael@0 85 */
michael@0 86 void dispatchPause(in float aElapsedTime, in unsigned long aCharIndex);
michael@0 87
michael@0 88 /**
michael@0 89 * Dispatch resume event. Should not be called directly by service.
michael@0 90 *
michael@0 91 * @param aElapsedTime time in seconds since speech has started.
michael@0 92 * @param aCharIndex offset of spoken characters.
michael@0 93 */
michael@0 94 void dispatchResume(in float aElapsedTime, in unsigned long aCharIndex);
michael@0 95
michael@0 96 /**
michael@0 97 * Dispatch error event.
michael@0 98 *
michael@0 99 * @param aElapsedTime time in seconds since speech has started.
michael@0 100 * @param aCharIndex offset of spoken characters.
michael@0 101 */
michael@0 102 void dispatchError(in float aElapsedTime, in unsigned long aCharIndex);
michael@0 103
michael@0 104 /**
michael@0 105 * Dispatch boundary event.
michael@0 106 *
michael@0 107 * @param aName name of boundary, 'word' or 'sentence'
michael@0 108 * @param aElapsedTime time in seconds since speech has started.
michael@0 109 * @param aCharIndex offset of spoken characters.
michael@0 110 */
michael@0 111 void dispatchBoundary(in DOMString aName, in float aElapsedTime,
michael@0 112 in unsigned long aCharIndex);
michael@0 113
michael@0 114 /**
michael@0 115 * Dispatch mark event.
michael@0 116 *
michael@0 117 * @param aName mark identifier.
michael@0 118 * @param aElapsedTime time in seconds since speech has started.
michael@0 119 * @param aCharIndex offset of spoken characters.
michael@0 120 */
michael@0 121 void dispatchMark(in DOMString aName, in float aElapsedTime, in unsigned long aCharIndex);
michael@0 122 };
michael@0 123
michael@0 124 /**
michael@0 125 * The main interface of a speech synthesis service.
michael@0 126 *
michael@0 127 * A service's speak method could be implemented in two ways:
michael@0 128 * 1. Indirect audio - the service is responsible for outputting audio.
michael@0 129 * The service calls the nsISpeechTask.dispatch* methods directly, starting
michael@0 130 * with dispatchStart() and ending with dispatchEnd() or dispatchError().
michael@0 131 *
michael@0 132 * 2. Direct audio - the service provides us with PCM-16 data, and we output it.
michael@0 133 * The service does not call the dispatch task methods directly. Instead,
michael@0 134 * audio information is provided at setup(), and audio data is sent with
michael@0 135 * sendAudio(). The utterance is terminated with an empty sendAudio().
michael@0 136 */
michael@0 137 [scriptable, uuid(3952d388-050c-47ba-a70f-5fc1cadf1db0)]
michael@0 138 interface nsISpeechService : nsISupports
michael@0 139 {
michael@0 140 /**
michael@0 141 * Speak the given text using the voice identified by the given uri. See
michael@0 142 * W3C Speech API spec for information about pitch and rate.
michael@0 143 * https://dvcs.w3.org/hg/speech-api/raw-file/tip/speechapi.html#utterance-attributes
michael@0 144 *
michael@0 145 * @param aText text to utter.
michael@0 146 * @param aUri unique voice identifier.
michael@0 147 * @param aRate rate to speak voice in.
michael@0 148 * @param aPitch pitch to speak voice in.
michael@0 149 * @param aTask task instance for utterance, used for sending events or audio
michael@0 150 * data back to browser.
michael@0 151 */
michael@0 152 void speak(in DOMString aText, in DOMString aUri,
michael@0 153 in float aRate, in float aPitch,
michael@0 154 in nsISpeechTask aTask);
michael@0 155
michael@0 156 const SpeechServiceType SERVICETYPE_DIRECT_AUDIO = 1; // Service supplies PCM-16 data via sendAudio(); browser outputs it.
michael@0 157 const SpeechServiceType SERVICETYPE_INDIRECT_AUDIO = 2; // Service outputs audio itself and calls the dispatch* methods.
michael@0 158
michael@0 159 readonly attribute SpeechServiceType serviceType; // One of the SERVICETYPE_* constants above.
michael@0 160 };

mercurial