Fri, 16 Jan 2015 04:50:19 +0100
Replace accessor implementation with direct member state manipulation, as
requested in https://trac.torproject.org/projects/tor/ticket/9701#comment:32
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "MediaEngineWebRTC.h"
#include <stdio.h>
#include <algorithm>
#include "mozilla/Assertions.h"
#include "MediaTrackConstraints.h"

// scoped_ptr.h uses FF
#ifdef FF
#undef FF
#endif
#include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"

#define CHANNELS 1
#define ENCODING "L16"
#define DEFAULT_PORT 5555

#define SAMPLE_RATE 256000
#define SAMPLE_FREQUENCY 16000
#define SAMPLE_LENGTH ((SAMPLE_FREQUENCY*10)/1000)
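// At SAMPLE_FREQUENCY 16000 Hz this works out to (16000*10)/1000 = 160
// samples, i.e. one 10ms packet of audio.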

// These are restrictions from the webrtc.org code
#define MAX_CHANNELS 2
#define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100

#define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10
static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH");
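// Each slot in the far-end FIFO below holds one 10ms chunk, so the depth has
// to divide evenly into 10ms units; the static_assert enforces that.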

namespace mozilla {

#ifdef LOG
#undef LOG
#endif

#ifdef PR_LOGGING
extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
#else
#define LOG(msg)
#endif

/**
 * Webrtc audio source.
 */
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)

// XXX temp until MSG supports registration
StaticAutoPtr<AudioOutputObserver> gFarendObserver;

AudioOutputObserver::AudioOutputObserver()
  : mPlayoutFreq(0)
  , mPlayoutChannels(0)
  , mChunkSize(0)
  , mSamplesSaved(0)
{
  // Buffers of 10ms chunks
  mPlayoutFifo = new webrtc::SingleRwFifo(MAX_AEC_FIFO_DEPTH/10);
}

AudioOutputObserver::~AudioOutputObserver()
{
}

void
AudioOutputObserver::Clear()
{
  while (mPlayoutFifo->size() > 0) {
    (void) mPlayoutFifo->Pop();
  }
}

FarEndAudioChunk *
AudioOutputObserver::Pop()
{
  return (FarEndAudioChunk *) mPlayoutFifo->Pop();
}

uint32_t
AudioOutputObserver::Size()
{
  return mPlayoutFifo->size();
}

void
AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aSamples, bool aOverran,
                                  int aFreq, int aChannels, AudioSampleFormat aFormat)
{
  if (mPlayoutChannels != 0) {
    if (mPlayoutChannels != static_cast<uint32_t>(aChannels)) {
      MOZ_CRASH();
    }
  } else {
    MOZ_ASSERT(aChannels <= MAX_CHANNELS);
    mPlayoutChannels = static_cast<uint32_t>(aChannels);
  }
  if (mPlayoutFreq != 0) {
    if (mPlayoutFreq != static_cast<uint32_t>(aFreq)) {
      MOZ_CRASH();
    }
  } else {
    MOZ_ASSERT(aFreq <= MAX_SAMPLING_FREQ);
    MOZ_ASSERT(!(aFreq % 100), "Sampling rate for far end data should be multiple of 100.");
    mPlayoutFreq = aFreq;
    mChunkSize = aFreq/100; // 10ms
  }

#ifdef LOG_FAREND_INSERTION
  static FILE *fp = fopen("insertfarend.pcm","wb");
#endif

  if (mSaved) {
    // flag overrun as soon as possible, and only once
    mSaved->mOverrun = aOverran;
    aOverran = false;
  }
  // Rechunk to 10ms.
  // The AnalyzeReverseStream() and WebRtcAec_BufferFarend() functions insist on 10ms
  // samples per call. Annoying...
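  // Example: at 48000 Hz mChunkSize is 480 frames, so a 1024-frame callback
  // fills two full 10ms chunks and leaves 64 frames pending in mSaved for the
  // next call.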
  while (aSamples) {
    if (!mSaved) {
      mSaved = (FarEndAudioChunk *) moz_xmalloc(sizeof(FarEndAudioChunk) +
                                                (mChunkSize * aChannels - 1)*sizeof(int16_t));
      mSaved->mSamples = mChunkSize;
      mSaved->mOverrun = aOverran;
      aOverran = false;
    }
    uint32_t to_copy = mChunkSize - mSamplesSaved;
    if (to_copy > aSamples) {
      to_copy = aSamples;
    }

    int16_t *dest = &(mSaved->mData[mSamplesSaved * aChannels]);
    ConvertAudioSamples(aBuffer, dest, to_copy * aChannels);

#ifdef LOG_FAREND_INSERTION
    if (fp) {
      fwrite(&(mSaved->mData[mSamplesSaved * aChannels]), to_copy * aChannels, sizeof(int16_t), fp);
    }
#endif
    aSamples -= to_copy;
    mSamplesSaved += to_copy;
    aBuffer += to_copy * aChannels;

    if (mSamplesSaved >= mChunkSize) {
      int free_slots = mPlayoutFifo->capacity() - mPlayoutFifo->size();
      if (free_slots <= 0) {
        // XXX We should flag an overrun for the reader. We can't drop data from it due to
        // thread safety issues.
        break;
      } else {
        mPlayoutFifo->Push((int8_t *) mSaved.forget()); // takes ownership
        mSamplesSaved = 0;
      }
    }
  }
}

void
MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
{
  if (mInitDone) {
    aName.Assign(mDeviceName);
  }

  return;
}

void
MediaEngineWebRTCAudioSource::GetUUID(nsAString& aUUID)
{
  if (mInitDone) {
    aUUID.Assign(mDeviceUUID);
  }

  return;
}

nsresult
MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
                                     bool aAgcOn, uint32_t aAGC,
                                     bool aNoiseOn, uint32_t aNoise,
                                     int32_t aPlayoutDelay)
{
  LOG(("Audio config: aec: %d, agc: %d, noise: %d",
       aEchoOn ? aEcho : -1,
       aAgcOn ? aAGC : -1,
       aNoiseOn ? aNoise : -1));

  bool update_echo = (mEchoOn != aEchoOn);
  bool update_agc = (mAgcOn != aAgcOn);
  bool update_noise = (mNoiseOn != aNoiseOn);
  mEchoOn = aEchoOn;
  mAgcOn = aAgcOn;
  mNoiseOn = aNoiseOn;

  if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged) {
    if (mEchoCancel != (webrtc::EcModes) aEcho) {
      update_echo = true;
      mEchoCancel = (webrtc::EcModes) aEcho;
    }
  }
  if ((webrtc::AgcModes) aAGC != webrtc::kAgcUnchanged) {
    if (mAGC != (webrtc::AgcModes) aAGC) {
      update_agc = true;
      mAGC = (webrtc::AgcModes) aAGC;
    }
  }
  if ((webrtc::NsModes) aNoise != webrtc::kNsUnchanged) {
    if (mNoiseSuppress != (webrtc::NsModes) aNoise) {
      update_noise = true;
      mNoiseSuppress = (webrtc::NsModes) aNoise;
    }
  }
  mPlayoutDelay = aPlayoutDelay;

  if (mInitDone) {
    int error;

    if (update_echo &&
        0 != (error = mVoEProcessing->SetEcStatus(mEchoOn, (webrtc::EcModes) aEcho))) {
      LOG(("%s Error setting Echo Status: %d ", __FUNCTION__, error));
      // Overhead of capturing all the time is very low (<0.1% of an audio only call)
      if (mEchoOn) {
        if (0 != (error = mVoEProcessing->SetEcMetricsStatus(true))) {
          LOG(("%s Error setting Echo Metrics: %d ", __FUNCTION__, error));
        }
      }
    }
    if (update_agc &&
        0 != (error = mVoEProcessing->SetAgcStatus(mAgcOn, (webrtc::AgcModes) aAGC))) {
      LOG(("%s Error setting AGC Status: %d ", __FUNCTION__, error));
    }
    if (update_noise &&
        0 != (error = mVoEProcessing->SetNsStatus(mNoiseOn, (webrtc::NsModes) aNoise))) {
      LOG(("%s Error setting NoiseSuppression Status: %d ", __FUNCTION__, error));
    }
  }
  return NS_OK;
}
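
// Note: passing kEcUnchanged/kAgcUnchanged/kNsUnchanged to Config() leaves the
// stored modes untouched while still applying the on/off flags; Start() below
// relies on this to re-push the current settings into webrtc.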

nsresult
MediaEngineWebRTCAudioSource::Allocate(const AudioTrackConstraintsN &aConstraints,
                                       const MediaEnginePrefs &aPrefs)
{
  if (mState == kReleased) {
    if (mInitDone) {
      ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
      if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
        return NS_ERROR_FAILURE;
      }
      mState = kAllocated;
      LOG(("Audio device %d allocated", mCapIndex));
    } else {
      LOG(("Audio device is not initialized"));
      return NS_ERROR_FAILURE;
    }
  } else if (mSources.IsEmpty()) {
    LOG(("Audio device %d reallocated", mCapIndex));
  } else {
    LOG(("Audio device %d allocated shared", mCapIndex));
  }
  return NS_OK;
}

nsresult
MediaEngineWebRTCAudioSource::Deallocate()
{
  if (mSources.IsEmpty()) {
    if (mState != kStopped && mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }

    mState = kReleased;
    LOG(("Audio device %d deallocated", mCapIndex));
  } else {
    LOG(("Audio device %d deallocated but still in use", mCapIndex));
  }
  return NS_OK;
}

nsresult
MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  if (!mInitDone || !aStream) {
    return NS_ERROR_FAILURE;
  }

  {
    MonitorAutoLock lock(mMonitor);
    mSources.AppendElement(aStream);
  }

  AudioSegment* segment = new AudioSegment();
  aStream->AddTrack(aID, SAMPLE_FREQUENCY, 0, segment);
  aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  // XXX Make this based on the pref.
  aStream->RegisterForAudioMixing();
  LOG(("Start audio for stream %p", aStream));

  if (mState == kStarted) {
    MOZ_ASSERT(aID == mTrackID);
    return NS_OK;
  }
  mState = kStarted;
  mTrackID = aID;

  // Make sure logger starts before capture
  AsyncLatencyLogger::Get(true);

  // Register output observer
  // XXX
  MOZ_ASSERT(gFarendObserver);
  gFarendObserver->Clear();

  // Configure audio processing in webrtc code
  Config(mEchoOn, webrtc::kEcUnchanged,
         mAgcOn, webrtc::kAgcUnchanged,
         mNoiseOn, webrtc::kNsUnchanged,
         mPlayoutDelay);

  if (mVoEBase->StartReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->StartSend(mChannel)) {
    return NS_ERROR_FAILURE;
  }

  // Attach external media processor, so this::Process will be called.
  mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);

  return NS_OK;
}
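
// Rough lifecycle implied by the state checks: Allocate() -> Start() (possibly
// for several SourceMediaStreams sharing the device) -> Stop() for each ->
// Deallocate(); Shutdown() below walks the same path for anything still active.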

nsresult
MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
  {
    MonitorAutoLock lock(mMonitor);

    if (!mSources.RemoveElement(aSource)) {
      // Already stopped - this is allowed
      return NS_OK;
    }
    if (!mSources.IsEmpty()) {
      return NS_OK;
    }
    if (mState != kStarted) {
      return NS_ERROR_FAILURE;
    }
    if (!mVoEBase) {
      return NS_ERROR_FAILURE;
    }

    mState = kStopped;
    aSource->EndTrack(aID);
  }

  mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);

  if (mVoEBase->StopSend(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->StopReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}

void
MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream *aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks &aLastEndTime)
{
  // Ignore - we push audio data
#ifdef DEBUG
  TrackTicks target = TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  LOG(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld", (int64_t) aDesiredTime, (int64_t) target, (int64_t) delta));
  aLastEndTime = target;
#endif
}

nsresult
MediaEngineWebRTCAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
  return NS_ERROR_NOT_IMPLEMENTED;
}

void
MediaEngineWebRTCAudioSource::Init()
{
  mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);

  mVoEBase->Init();

  mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
  if (!mVoERender) {
    return;
  }
  mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
  if (!mVoENetwork) {
    return;
  }

  mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
  if (!mVoEProcessing) {
    return;
  }

  mVoECallReport = webrtc::VoECallReport::GetInterface(mVoiceEngine);
  if (!mVoECallReport) {
    return;
  }

  mChannel = mVoEBase->CreateChannel();
  if (mChannel < 0) {
    return;
  }
  mNullTransport = new NullTransport();
  if (mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
    return;
  }

  // Check for availability.
  ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
  if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
    return;
  }

#ifndef MOZ_B2G
  // Because of the permission mechanism of B2G, we need to skip the status
  // check here.
  bool avail = false;
  ptrVoEHw->GetRecordingDeviceStatus(avail);
  if (!avail) {
    return;
  }
#endif // MOZ_B2G

  // Set "codec" to PCM, 16kHz on 1 channel
  ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
  if (!ptrVoECodec) {
    return;
  }

  webrtc::CodecInst codec;
  strcpy(codec.plname, ENCODING);
  codec.channels = CHANNELS;
  codec.rate = SAMPLE_RATE;
  codec.plfreq = SAMPLE_FREQUENCY;
  codec.pacsize = SAMPLE_LENGTH;
  codec.pltype = 0; // Default payload type

  if (!ptrVoECodec->SetSendCodec(mChannel, codec)) { // 0 == success
    mInitDone = true;
  }
}

void
MediaEngineWebRTCAudioSource::Shutdown()
{
  if (!mInitDone) {
    // duplicate these here in case we failed during Init()
    if (mChannel != -1) {
      mVoENetwork->DeRegisterExternalTransport(mChannel);
    }

    delete mNullTransport;
    return;
  }

  if (mState == kStarted) {
    while (!mSources.IsEmpty()) {
      Stop(mSources[0], kAudioTrack); // XXX change to support multiple tracks
    }
    MOZ_ASSERT(mState == kStopped);
  }

  if (mState == kAllocated || mState == kStopped) {
    Deallocate();
  }

  mVoEBase->Terminate();
  if (mChannel != -1) {
    mVoENetwork->DeRegisterExternalTransport(mChannel);
  }

  delete mNullTransport;

  mVoEProcessing = nullptr;
  mVoENetwork = nullptr;
  mVoERender = nullptr;
  mVoEBase = nullptr;

  mState = kReleased;
  mInitDone = false;
}

typedef int16_t sample;

void
MediaEngineWebRTCAudioSource::Process(int channel,
                                      webrtc::ProcessingTypes type, sample* audio10ms,
                                      int length, int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent
  // sample, since it's already irrelevant and we want to avoid confusing the
  // AEC far-end input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (gFarendObserver->Size() > 1) {
      FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
      free(buffer);
    }
  }

  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      int length = buffer->mSamples;
      if (mVoERender->ExternalPlayoutData(buffer->mData,
                                          gFarendObserver->PlayoutFrequency(),
                                          gFarendObserver->PlayoutChannels(),
                                          mPlayoutDelay,
                                          length) == -1) {
        return;
      }
    }
    free(buffer);
  }

#ifdef PR_LOGGING
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;

      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}
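
// Note: Process() is driven by webrtc via the kRecordingPerChannel external
// media processing hook registered in Start(); the MonitorAutoLock above keeps
// mSources consistent with concurrent Start()/Stop() calls.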

}