content/media/webrtc/MediaEngineWebRTCAudio.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,590 @@
     1.4 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.5 + * License, v. 2.0. If a copy of the MPL was not distributed with this file,
     1.6 + * You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.7 +
     1.8 +#include "MediaEngineWebRTC.h"
     1.9 +#include <stdio.h>
    1.10 +#include <algorithm>
    1.11 +#include "mozilla/Assertions.h"
    1.12 +#include "MediaTrackConstraints.h"
    1.13 +
    1.14 +// scoped_ptr.h uses FF
    1.15 +#ifdef FF
    1.16 +#undef FF
    1.17 +#endif
    1.18 +#include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"
    1.19 +
    1.20 +#define CHANNELS 1
    1.21 +#define ENCODING "L16"
    1.22 +#define DEFAULT_PORT 5555
    1.23 +
    1.24 +#define SAMPLE_RATE 256000
    1.25 +#define SAMPLE_FREQUENCY 16000
    1.26 +#define SAMPLE_LENGTH ((SAMPLE_FREQUENCY*10)/1000)
    1.27 +
    1.28 +// These are restrictions from the webrtc.org code
    1.29 +#define MAX_CHANNELS 2
    1.30 +#define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100
    1.31 +
    1.32 +#define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10
    1.33 +static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH");
    1.34 +
    1.35 +namespace mozilla {
    1.36 +
    1.37 +#ifdef LOG
    1.38 +#undef LOG
    1.39 +#endif
    1.40 +
    1.41 +#ifdef PR_LOGGING
    1.42 +extern PRLogModuleInfo* GetMediaManagerLog();
    1.43 +#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
    1.44 +#else
    1.45 +#define LOG(msg)
    1.46 +#endif
    1.47 +
/**
 * Webrtc audio source.
 */
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)

// XXX temp until MSG supports registration
// Global far-end (speaker) audio observer. Start() asserts it exists and
// Process() dereferences it unconditionally — presumably created during
// engine setup elsewhere; TODO confirm the instantiation site.
StaticAutoPtr<AudioOutputObserver> gFarendObserver;
    1.55 +
    1.56 +AudioOutputObserver::AudioOutputObserver()
    1.57 +  : mPlayoutFreq(0)
    1.58 +  , mPlayoutChannels(0)
    1.59 +  , mChunkSize(0)
    1.60 +  , mSamplesSaved(0)
    1.61 +{
    1.62 +  // Buffers of 10ms chunks
    1.63 +  mPlayoutFifo = new webrtc::SingleRwFifo(MAX_AEC_FIFO_DEPTH/10);
    1.64 +}
    1.65 +
    1.66 +AudioOutputObserver::~AudioOutputObserver()
    1.67 +{
    1.68 +}
    1.69 +
    1.70 +void
    1.71 +AudioOutputObserver::Clear()
    1.72 +{
    1.73 +  while (mPlayoutFifo->size() > 0) {
    1.74 +    (void) mPlayoutFifo->Pop();
    1.75 +  }
    1.76 +}
    1.77 +
    1.78 +FarEndAudioChunk *
    1.79 +AudioOutputObserver::Pop()
    1.80 +{
    1.81 +  return (FarEndAudioChunk *) mPlayoutFifo->Pop();
    1.82 +}
    1.83 +
    1.84 +uint32_t
    1.85 +AudioOutputObserver::Size()
    1.86 +{
    1.87 +  return mPlayoutFifo->size();
    1.88 +}
    1.89 +
// Buffers far-end (speaker/playout) audio and rechunks it into 10ms frames
// for the echo canceller. NOTE(review): the original "// static" marker was
// wrong — this is a non-static member function; it mutates mSaved,
// mSamplesSaved, mPlayoutFreq, mPlayoutChannels and mPlayoutFifo.
void
AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aSamples, bool aOverran,
                                  int aFreq, int aChannels, AudioSampleFormat aFormat)
{
  // The playout channel count is latched on first use and must never change
  // afterwards; a mismatch is treated as fatal.
  if (mPlayoutChannels != 0) {
    if (mPlayoutChannels != static_cast<uint32_t>(aChannels)) {
      MOZ_CRASH();
    }
  } else {
    MOZ_ASSERT(aChannels <= MAX_CHANNELS);
    mPlayoutChannels = static_cast<uint32_t>(aChannels);
  }
  // Same latching scheme for the sample rate; it also fixes the 10ms chunk
  // size (samples per chunk = rate / 100).
  if (mPlayoutFreq != 0) {
    if (mPlayoutFreq != static_cast<uint32_t>(aFreq)) {
      MOZ_CRASH();
    }
  } else {
    MOZ_ASSERT(aFreq <= MAX_SAMPLING_FREQ);
    MOZ_ASSERT(!(aFreq % 100), "Sampling rate for far end data should be multiple of 100.");
    mPlayoutFreq = aFreq;
    mChunkSize = aFreq/100; // 10ms
  }

#ifdef LOG_FAREND_INSERTION
  // Debug-only raw PCM dump; the static FILE* is intentionally never closed.
  static FILE *fp = fopen("insertfarend.pcm","wb");
#endif

  if (mSaved) {
    // flag overrun as soon as possible, and only once
    mSaved->mOverrun = aOverran;
    aOverran = false;
  }
  // Rechunk to 10ms.
  // The AnalyzeReverseStream() and WebRtcAec_BufferFarend() functions insist on 10ms
  // samples per call.  Annoying...
  while (aSamples) {
    if (!mSaved) {
      // The "- 1" presumably accounts for one sample of storage already
      // inside FarEndAudioChunk (flexible-array idiom) — TODO confirm
      // against the struct definition.
      mSaved = (FarEndAudioChunk *) moz_xmalloc(sizeof(FarEndAudioChunk) +
                                                (mChunkSize * aChannels - 1)*sizeof(int16_t));
      mSaved->mSamples = mChunkSize;
      mSaved->mOverrun = aOverran;
      aOverran = false;
    }
    // Copy as much of the input as fits into the current partial chunk.
    uint32_t to_copy = mChunkSize - mSamplesSaved;
    if (to_copy > aSamples) {
      to_copy = aSamples;
    }

    int16_t *dest = &(mSaved->mData[mSamplesSaved * aChannels]);
    ConvertAudioSamples(aBuffer, dest, to_copy * aChannels);

#ifdef LOG_FAREND_INSERTION
    if (fp) {
      fwrite(&(mSaved->mData[mSamplesSaved * aChannels]), to_copy * aChannels, sizeof(int16_t), fp);
    }
#endif
    aSamples -= to_copy;
    mSamplesSaved += to_copy;
    aBuffer += to_copy * aChannels;

    // Once a full 10ms chunk has accumulated, hand ownership to the FIFO.
    if (mSamplesSaved >= mChunkSize) {
      int free_slots = mPlayoutFifo->capacity() - mPlayoutFifo->size();
      if (free_slots <= 0) {
        // XXX We should flag an overrun for the reader.  We can't drop data from it due to
        // thread safety issues.
        break;
      } else {
        mPlayoutFifo->Push((int8_t *) mSaved.forget()); // takes ownership
        mSamplesSaved = 0;
      }
    }
  }
}
   1.164 +
   1.165 +void
   1.166 +MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
   1.167 +{
   1.168 +  if (mInitDone) {
   1.169 +    aName.Assign(mDeviceName);
   1.170 +  }
   1.171 +
   1.172 +  return;
   1.173 +}
   1.174 +
   1.175 +void
   1.176 +MediaEngineWebRTCAudioSource::GetUUID(nsAString& aUUID)
   1.177 +{
   1.178 +  if (mInitDone) {
   1.179 +    aUUID.Assign(mDeviceUUID);
   1.180 +  }
   1.181 +
   1.182 +  return;
   1.183 +}
   1.184 +
   1.185 +nsresult
   1.186 +MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
   1.187 +                                     bool aAgcOn, uint32_t aAGC,
   1.188 +                                     bool aNoiseOn, uint32_t aNoise,
   1.189 +                                     int32_t aPlayoutDelay)
   1.190 +{
   1.191 +  LOG(("Audio config: aec: %d, agc: %d, noise: %d",
   1.192 +       aEchoOn ? aEcho : -1,
   1.193 +       aAgcOn ? aAGC : -1,
   1.194 +       aNoiseOn ? aNoise : -1));
   1.195 +
   1.196 +  bool update_echo = (mEchoOn != aEchoOn);
   1.197 +  bool update_agc = (mAgcOn != aAgcOn);
   1.198 +  bool update_noise = (mNoiseOn != aNoiseOn);
   1.199 +  mEchoOn = aEchoOn;
   1.200 +  mAgcOn = aAgcOn;
   1.201 +  mNoiseOn = aNoiseOn;
   1.202 +
   1.203 +  if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged) {
   1.204 +    if (mEchoCancel != (webrtc::EcModes) aEcho) {
   1.205 +      update_echo = true;
   1.206 +      mEchoCancel = (webrtc::EcModes) aEcho;
   1.207 +    }
   1.208 +  }
   1.209 +  if ((webrtc::AgcModes) aAGC != webrtc::kAgcUnchanged) {
   1.210 +    if (mAGC != (webrtc::AgcModes) aAGC) {
   1.211 +      update_agc = true;
   1.212 +      mAGC = (webrtc::AgcModes) aAGC;
   1.213 +    }
   1.214 +  }
   1.215 +  if ((webrtc::NsModes) aNoise != webrtc::kNsUnchanged) {
   1.216 +    if (mNoiseSuppress != (webrtc::NsModes) aNoise) {
   1.217 +      update_noise = true;
   1.218 +      mNoiseSuppress = (webrtc::NsModes) aNoise;
   1.219 +    }
   1.220 +  }
   1.221 +  mPlayoutDelay = aPlayoutDelay;
   1.222 +
   1.223 +  if (mInitDone) {
   1.224 +    int error;
   1.225 +
   1.226 +    if (update_echo &&
   1.227 +      0 != (error = mVoEProcessing->SetEcStatus(mEchoOn, (webrtc::EcModes) aEcho))) {
   1.228 +      LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error));
   1.229 +      // Overhead of capturing all the time is very low (<0.1% of an audio only call)
   1.230 +      if (mEchoOn) {
   1.231 +        if (0 != (error = mVoEProcessing->SetEcMetricsStatus(true))) {
   1.232 +          LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error));
   1.233 +        }
   1.234 +      }
   1.235 +    }
   1.236 +    if (update_agc &&
   1.237 +      0 != (error = mVoEProcessing->SetAgcStatus(mAgcOn, (webrtc::AgcModes) aAGC))) {
   1.238 +      LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
   1.239 +    }
   1.240 +    if (update_noise &&
   1.241 +      0 != (error = mVoEProcessing->SetNsStatus(mNoiseOn, (webrtc::NsModes) aNoise))) {
   1.242 +      LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error));
   1.243 +    }
   1.244 +  }
   1.245 +  return NS_OK;
   1.246 +}
   1.247 +
   1.248 +nsresult
   1.249 +MediaEngineWebRTCAudioSource::Allocate(const AudioTrackConstraintsN &aConstraints,
   1.250 +                                       const MediaEnginePrefs &aPrefs)
   1.251 +{
   1.252 +  if (mState == kReleased) {
   1.253 +    if (mInitDone) {
   1.254 +      ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
   1.255 +      if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
   1.256 +        return NS_ERROR_FAILURE;
   1.257 +      }
   1.258 +      mState = kAllocated;
   1.259 +      LOG(("Audio device %d allocated", mCapIndex));
   1.260 +    } else {
   1.261 +      LOG(("Audio device is not initalized"));
   1.262 +      return NS_ERROR_FAILURE;
   1.263 +    }
   1.264 +  } else if (mSources.IsEmpty()) {
   1.265 +    LOG(("Audio device %d reallocated", mCapIndex));
   1.266 +  } else {
   1.267 +    LOG(("Audio device %d allocated shared", mCapIndex));
   1.268 +  }
   1.269 +  return NS_OK;
   1.270 +}
   1.271 +
   1.272 +nsresult
   1.273 +MediaEngineWebRTCAudioSource::Deallocate()
   1.274 +{
   1.275 +  if (mSources.IsEmpty()) {
   1.276 +    if (mState != kStopped && mState != kAllocated) {
   1.277 +      return NS_ERROR_FAILURE;
   1.278 +    }
   1.279 +
   1.280 +    mState = kReleased;
   1.281 +    LOG(("Audio device %d deallocated", mCapIndex));
   1.282 +  } else {
   1.283 +    LOG(("Audio device %d deallocated but still in use", mCapIndex));
   1.284 +  }
   1.285 +  return NS_OK;
   1.286 +}
   1.287 +
// Attaches aStream as a consumer and, on the first consumer, starts voice
// engine capture. Later callers share the already-running capture and only
// get the track wiring.
nsresult
MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  if (!mInitDone || !aStream) {
    return NS_ERROR_FAILURE;
  }

  {
    // mSources is also iterated on the audio callback thread in Process(),
    // so it is only mutated under mMonitor.
    MonitorAutoLock lock(mMonitor);
    mSources.AppendElement(aStream);
  }

  AudioSegment* segment = new AudioSegment();
  aStream->AddTrack(aID, SAMPLE_FREQUENCY, 0, segment);
  aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  // XXX Make this based on the pref.
  aStream->RegisterForAudioMixing();
  LOG(("Start audio for stream %p", aStream));

  // Shared start: capture is already running, nothing more to do.
  if (mState == kStarted) {
    MOZ_ASSERT(aID == mTrackID);
    return NS_OK;
  }
  mState = kStarted;
  mTrackID = aID;

  // Make sure logger starts before capture
  AsyncLatencyLogger::Get(true);

  // Register output observer
  // XXX
  MOZ_ASSERT(gFarendObserver);
  gFarendObserver->Clear();

  // Configure audio processing in webrtc code
  Config(mEchoOn, webrtc::kEcUnchanged,
         mAgcOn, webrtc::kAgcUnchanged,
         mNoiseOn, webrtc::kNsUnchanged,
         mPlayoutDelay);

  if (mVoEBase->StartReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->StartSend(mChannel)) {
    return NS_ERROR_FAILURE;
  }

  // Attach external media processor, so this::Process will be called.
  mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);

  return NS_OK;
}
   1.340 +
// Detaches aSource; when the last consumer goes away, stops voice engine
// capture. Stopping an already-detached source is a successful no-op.
nsresult
MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
  {
    MonitorAutoLock lock(mMonitor);

    if (!mSources.RemoveElement(aSource)) {
      // Already stopped - this is allowed
      return NS_OK;
    }
    // Other streams still consume this device; keep capture running.
    if (!mSources.IsEmpty()) {
      return NS_OK;
    }
    if (mState != kStarted) {
      return NS_ERROR_FAILURE;
    }
    if (!mVoEBase) {
      return NS_ERROR_FAILURE;
    }

    mState = kStopped;
    aSource->EndTrack(aID);
  }

  // Engine teardown happens outside the monitor — presumably to avoid
  // lock interaction with Process(), which takes mMonitor; verify.
  // NOTE(review): mVoERender is not null-checked here although mVoEBase is
  // above — confirm it cannot be null while mState == kStarted.
  mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);

  if (mVoEBase->StopSend(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->StopReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
   1.375 +
   1.376 +void
   1.377 +MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
   1.378 +                                         SourceMediaStream *aSource,
   1.379 +                                         TrackID aID,
   1.380 +                                         StreamTime aDesiredTime,
   1.381 +                                         TrackTicks &aLastEndTime)
   1.382 +{
   1.383 +  // Ignore - we push audio data
   1.384 +#ifdef DEBUG
   1.385 +  TrackTicks target = TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime);
   1.386 +  TrackTicks delta = target - aLastEndTime;
   1.387 +  LOG(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",(int64_t) aDesiredTime, (int64_t) target, (int64_t) delta));
   1.388 +  aLastEndTime = target;
   1.389 +#endif
   1.390 +}
   1.391 +
   1.392 +nsresult
   1.393 +MediaEngineWebRTCAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
   1.394 +{
   1.395 +   return NS_ERROR_NOT_IMPLEMENTED;
   1.396 +}
   1.397 +
// Acquires the voice-engine sub-APIs, creates a capture channel and
// configures a PCM send codec. mInitDone is only set at the very end, so
// any early return leaves the source unusable (Shutdown() has a dedicated
// path for that partially-initialized case).
void
MediaEngineWebRTCAudioSource::Init()
{
  mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);

  // NOTE(review): neither mVoEBase nor the Init() return code is checked,
  // unlike every interface below — confirm GetInterface cannot fail for
  // VoEBase.
  mVoEBase->Init();

  mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
  if (!mVoERender) {
    return;
  }
  mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
  if (!mVoENetwork) {
    return;
  }

  mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
  if (!mVoEProcessing) {
    return;
  }

  mVoECallReport = webrtc::VoECallReport::GetInterface(mVoiceEngine);
  if (!mVoECallReport) {
    return;
  }

  mChannel = mVoEBase->CreateChannel();
  if (mChannel < 0) {
    return;
  }
  // The channel needs a transport even though we never send over the
  // network; NullTransport satisfies the interface.
  mNullTransport = new NullTransport();
  if (mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
    return;
  }

  // Check for availability.
  ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
  if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
    return;
  }

#ifndef MOZ_B2G
  // Because of the permission mechanism of B2G, we need to skip the status
  // check here.
  bool avail = false;
  ptrVoEHw->GetRecordingDeviceStatus(avail);
  if (!avail) {
    return;
  }
#endif // MOZ_B2G

  // Set "codec" to PCM, 32kHz on 1 channel
  ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
  if (!ptrVoECodec) {
    return;
  }

  webrtc::CodecInst codec;
  strcpy(codec.plname, ENCODING);
  codec.channels = CHANNELS;
  codec.rate = SAMPLE_RATE;
  codec.plfreq = SAMPLE_FREQUENCY;
  codec.pacsize = SAMPLE_LENGTH;
  codec.pltype = 0; // Default payload type

  // SetSendCodec returns 0 on success; only then is the source usable.
  if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
    mInitDone = true;
  }
}
   1.467 +
   1.468 +void
   1.469 +MediaEngineWebRTCAudioSource::Shutdown()
   1.470 +{
   1.471 +  if (!mInitDone) {
   1.472 +    // duplicate these here in case we failed during Init()
   1.473 +    if (mChannel != -1) {
   1.474 +      mVoENetwork->DeRegisterExternalTransport(mChannel);
   1.475 +    }
   1.476 +
   1.477 +    delete mNullTransport;
   1.478 +    return;
   1.479 +  }
   1.480 +
   1.481 +  if (mState == kStarted) {
   1.482 +    while (!mSources.IsEmpty()) {
   1.483 +      Stop(mSources[0], kAudioTrack); // XXX change to support multiple tracks
   1.484 +    }
   1.485 +    MOZ_ASSERT(mState == kStopped);
   1.486 +  }
   1.487 +
   1.488 +  if (mState == kAllocated || mState == kStopped) {
   1.489 +    Deallocate();
   1.490 +  }
   1.491 +
   1.492 +  mVoEBase->Terminate();
   1.493 +  if (mChannel != -1) {
   1.494 +    mVoENetwork->DeRegisterExternalTransport(mChannel);
   1.495 +  }
   1.496 +
   1.497 +  delete mNullTransport;
   1.498 +
   1.499 +  mVoEProcessing = nullptr;
   1.500 +  mVoENetwork = nullptr;
   1.501 +  mVoERender = nullptr;
   1.502 +  mVoEBase = nullptr;
   1.503 +
   1.504 +  mState = kReleased;
   1.505 +  mInitDone = false;
   1.506 +}
   1.507 +
// Capture samples are 16-bit PCM throughout this path.
typedef int16_t sample;

// Voice-engine callback (registered via RegisterExternalMediaProcessing in
// Start()) delivering each 10ms block of captured near-end audio. Runs on
// the engine's audio thread; shared state is guarded by mMonitor below.
void
MediaEngineWebRTCAudioSource::Process(int channel,
  webrtc::ProcessingTypes type, sample* audio10ms,
  int length, int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent sample
  // since it's already irrelevant and we want to keep avoid confusing the AEC far-end
  // input code with "old" audio.
  if (!mStarted) {
    mStarted  = true;
    while (gFarendObserver->Size() > 1) {
      FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
      free(buffer);
    }
  }

  // Drain queued far-end (playout) chunks into the engine as the AEC
  // reference signal.
  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      // This `length` shadows the parameter of the same name — here it is
      // the far-end chunk's sample count, not the near-end block size.
      int length = buffer->mSamples;
      if (mVoERender->ExternalPlayoutData(buffer->mData,
                                          gFarendObserver->PlayoutFrequency(),
                                          gFarendObserver->PlayoutChannels(),
                                          mPlayoutDelay,
                                          length) == -1) {
        // NOTE(review): this early return leaks `buffer` and skips appending
        // the current near-end block — confirm intended.
        return;
      }
    }
    free(buffer);
  }

#ifdef PR_LOGGING
  // Dump AEC metrics roughly once per second of processed audio.
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;

      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  // Fan the captured block out to every consuming stream; each gets its own
  // copy of the samples.
  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}

}
   1.592 +
   1.593 +}

mercurial