|
1 /* This Source Code Form is subject to the terms of the Mozilla Public |
|
2 * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
|
3 * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
4 |
|
5 #include "MediaEngineWebRTC.h" |
|
6 #include <stdio.h> |
|
7 #include <algorithm> |
|
8 #include "mozilla/Assertions.h" |
|
9 #include "MediaTrackConstraints.h" |
|
10 |
|
11 // scoped_ptr.h uses FF |
|
12 #ifdef FF |
|
13 #undef FF |
|
14 #endif |
|
15 #include "webrtc/modules/audio_device/opensl/single_rw_fifo.h" |
|
16 |
|
17 #define CHANNELS 1 |
|
18 #define ENCODING "L16" |
|
19 #define DEFAULT_PORT 5555 |
|
20 |
|
21 #define SAMPLE_RATE 256000 |
|
22 #define SAMPLE_FREQUENCY 16000 |
|
23 #define SAMPLE_LENGTH ((SAMPLE_FREQUENCY*10)/1000) |
|
24 |
|
25 // These are restrictions from the webrtc.org code |
|
26 #define MAX_CHANNELS 2 |
|
27 #define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100 |
|
28 |
|
29 #define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10 |
|
30 static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH"); |
|
31 |
|
32 namespace mozilla { |
|
33 |
|
34 #ifdef LOG |
|
35 #undef LOG |
|
36 #endif |
|
37 |
|
38 #ifdef PR_LOGGING |
|
39 extern PRLogModuleInfo* GetMediaManagerLog(); |
|
40 #define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg) |
|
41 #else |
|
42 #define LOG(msg) |
|
43 #endif |
|
44 |
|
/**
 * Webrtc audio source.
 */
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)

// XXX temp until MSG supports registration
// Shared far-end (speaker output) observer. InsertFarEnd() queues playout
// audio here and Process() drains it into the engine for echo cancellation.
StaticAutoPtr<AudioOutputObserver> gFarendObserver;
|
52 |
|
/**
 * Constructs an empty observer. Playout rate and channel count stay 0
 * until the first InsertFarEnd() call latches them from real data.
 */
AudioOutputObserver::AudioOutputObserver()
  : mPlayoutFreq(0)
  , mPlayoutChannels(0)
  , mChunkSize(0)
  , mSamplesSaved(0)
{
  // Buffers of 10ms chunks; depth is MAX_AEC_FIFO_DEPTH ms worth of them.
  mPlayoutFifo = new webrtc::SingleRwFifo(MAX_AEC_FIFO_DEPTH/10);
}
|
62 |
|
63 AudioOutputObserver::~AudioOutputObserver() |
|
64 { |
|
65 } |
|
66 |
|
67 void |
|
68 AudioOutputObserver::Clear() |
|
69 { |
|
70 while (mPlayoutFifo->size() > 0) { |
|
71 (void) mPlayoutFifo->Pop(); |
|
72 } |
|
73 } |
|
74 |
|
75 FarEndAudioChunk * |
|
76 AudioOutputObserver::Pop() |
|
77 { |
|
78 return (FarEndAudioChunk *) mPlayoutFifo->Pop(); |
|
79 } |
|
80 |
|
/**
 * Returns the number of 10ms far-end chunks currently queued.
 */
uint32_t
AudioOutputObserver::Size()
{
  return mPlayoutFifo->size();
}
|
86 |
|
/**
 * Buffers far-end (playout) audio for the AEC, re-chunked into 10ms
 * FarEndAudioChunk blocks that Process() later pops and feeds to the
 * voice engine.
 *
 * The first call latches the playout frequency and channel count; any
 * later call with different values deliberately crashes (MOZ_CRASH).
 *
 * NOTE(review): despite the old "// static" comment this is an instance
 * method — it reads and writes mPlayoutFreq/mPlayoutChannels/mSaved etc.
 *
 * @param aBuffer   interleaved playout samples
 * @param aSamples  number of frames (per channel) in aBuffer
 * @param aOverran  true if the output stream overran; recorded on the next
 *                  chunk exactly once
 * @param aFreq     playout sampling rate (Hz, multiple of 100)
 * @param aChannels playout channel count (<= MAX_CHANNELS)
 * @param aFormat   source sample format (converted via ConvertAudioSamples)
 */
void
AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aSamples, bool aOverran,
                                  int aFreq, int aChannels, AudioSampleFormat aFormat)
{
  if (mPlayoutChannels != 0) {
    if (mPlayoutChannels != static_cast<uint32_t>(aChannels)) {
      MOZ_CRASH();
    }
  } else {
    MOZ_ASSERT(aChannels <= MAX_CHANNELS);
    mPlayoutChannels = static_cast<uint32_t>(aChannels);
  }
  if (mPlayoutFreq != 0) {
    if (mPlayoutFreq != static_cast<uint32_t>(aFreq)) {
      MOZ_CRASH();
    }
  } else {
    MOZ_ASSERT(aFreq <= MAX_SAMPLING_FREQ);
    MOZ_ASSERT(!(aFreq % 100), "Sampling rate for far end data should be multiple of 100.");
    mPlayoutFreq = aFreq;
    mChunkSize = aFreq/100; // 10ms
  }

#ifdef LOG_FAREND_INSERTION
  // Debug-only dump of everything inserted; the function-local static FILE*
  // is intentionally never closed (lives for the process lifetime).
  static FILE *fp = fopen("insertfarend.pcm","wb");
#endif

  if (mSaved) {
    // flag overrun as soon as possible, and only once
    mSaved->mOverrun = aOverran;
    aOverran = false;
  }
  // Rechunk to 10ms.
  // The AnalyzeReverseStream() and WebRtcAec_BufferFarend() functions insist on 10ms
  // samples per call. Annoying...
  while (aSamples) {
    if (!mSaved) {
      // "- 1" because FarEndAudioChunk presumably embeds a one-element
      // mData[] as a flexible-array idiom — TODO confirm in the header.
      mSaved = (FarEndAudioChunk *) moz_xmalloc(sizeof(FarEndAudioChunk) +
                                                (mChunkSize * aChannels - 1)*sizeof(int16_t));
      mSaved->mSamples = mChunkSize;
      mSaved->mOverrun = aOverran;
      aOverran = false;
    }
    // Fill the partial chunk carried over from the previous call first.
    uint32_t to_copy = mChunkSize - mSamplesSaved;
    if (to_copy > aSamples) {
      to_copy = aSamples;
    }

    int16_t *dest = &(mSaved->mData[mSamplesSaved * aChannels]);
    ConvertAudioSamples(aBuffer, dest, to_copy * aChannels);

#ifdef LOG_FAREND_INSERTION
    if (fp) {
      fwrite(&(mSaved->mData[mSamplesSaved * aChannels]), to_copy * aChannels, sizeof(int16_t), fp);
    }
#endif
    aSamples -= to_copy;
    mSamplesSaved += to_copy;
    aBuffer += to_copy * aChannels;

    if (mSamplesSaved >= mChunkSize) {
      int free_slots = mPlayoutFifo->capacity() - mPlayoutFifo->size();
      if (free_slots <= 0) {
        // XXX We should flag an overrun for the reader. We can't drop data from it due to
        // thread safety issues.
        break;
      } else {
        mPlayoutFifo->Push((int8_t *) mSaved.forget()); // takes ownership
        mSamplesSaved = 0;
      }
    }
  }
}
|
161 |
|
162 void |
|
163 MediaEngineWebRTCAudioSource::GetName(nsAString& aName) |
|
164 { |
|
165 if (mInitDone) { |
|
166 aName.Assign(mDeviceName); |
|
167 } |
|
168 |
|
169 return; |
|
170 } |
|
171 |
|
172 void |
|
173 MediaEngineWebRTCAudioSource::GetUUID(nsAString& aUUID) |
|
174 { |
|
175 if (mInitDone) { |
|
176 aUUID.Assign(mDeviceUUID); |
|
177 } |
|
178 |
|
179 return; |
|
180 } |
|
181 |
|
182 nsresult |
|
183 MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho, |
|
184 bool aAgcOn, uint32_t aAGC, |
|
185 bool aNoiseOn, uint32_t aNoise, |
|
186 int32_t aPlayoutDelay) |
|
187 { |
|
188 LOG(("Audio config: aec: %d, agc: %d, noise: %d", |
|
189 aEchoOn ? aEcho : -1, |
|
190 aAgcOn ? aAGC : -1, |
|
191 aNoiseOn ? aNoise : -1)); |
|
192 |
|
193 bool update_echo = (mEchoOn != aEchoOn); |
|
194 bool update_agc = (mAgcOn != aAgcOn); |
|
195 bool update_noise = (mNoiseOn != aNoiseOn); |
|
196 mEchoOn = aEchoOn; |
|
197 mAgcOn = aAgcOn; |
|
198 mNoiseOn = aNoiseOn; |
|
199 |
|
200 if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged) { |
|
201 if (mEchoCancel != (webrtc::EcModes) aEcho) { |
|
202 update_echo = true; |
|
203 mEchoCancel = (webrtc::EcModes) aEcho; |
|
204 } |
|
205 } |
|
206 if ((webrtc::AgcModes) aAGC != webrtc::kAgcUnchanged) { |
|
207 if (mAGC != (webrtc::AgcModes) aAGC) { |
|
208 update_agc = true; |
|
209 mAGC = (webrtc::AgcModes) aAGC; |
|
210 } |
|
211 } |
|
212 if ((webrtc::NsModes) aNoise != webrtc::kNsUnchanged) { |
|
213 if (mNoiseSuppress != (webrtc::NsModes) aNoise) { |
|
214 update_noise = true; |
|
215 mNoiseSuppress = (webrtc::NsModes) aNoise; |
|
216 } |
|
217 } |
|
218 mPlayoutDelay = aPlayoutDelay; |
|
219 |
|
220 if (mInitDone) { |
|
221 int error; |
|
222 |
|
223 if (update_echo && |
|
224 0 != (error = mVoEProcessing->SetEcStatus(mEchoOn, (webrtc::EcModes) aEcho))) { |
|
225 LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error)); |
|
226 // Overhead of capturing all the time is very low (<0.1% of an audio only call) |
|
227 if (mEchoOn) { |
|
228 if (0 != (error = mVoEProcessing->SetEcMetricsStatus(true))) { |
|
229 LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error)); |
|
230 } |
|
231 } |
|
232 } |
|
233 if (update_agc && |
|
234 0 != (error = mVoEProcessing->SetAgcStatus(mAgcOn, (webrtc::AgcModes) aAGC))) { |
|
235 LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error)); |
|
236 } |
|
237 if (update_noise && |
|
238 0 != (error = mVoEProcessing->SetNsStatus(mNoiseOn, (webrtc::NsModes) aNoise))) { |
|
239 LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error)); |
|
240 } |
|
241 } |
|
242 return NS_OK; |
|
243 } |
|
244 |
|
245 nsresult |
|
246 MediaEngineWebRTCAudioSource::Allocate(const AudioTrackConstraintsN &aConstraints, |
|
247 const MediaEnginePrefs &aPrefs) |
|
248 { |
|
249 if (mState == kReleased) { |
|
250 if (mInitDone) { |
|
251 ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine)); |
|
252 if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) { |
|
253 return NS_ERROR_FAILURE; |
|
254 } |
|
255 mState = kAllocated; |
|
256 LOG(("Audio device %d allocated", mCapIndex)); |
|
257 } else { |
|
258 LOG(("Audio device is not initalized")); |
|
259 return NS_ERROR_FAILURE; |
|
260 } |
|
261 } else if (mSources.IsEmpty()) { |
|
262 LOG(("Audio device %d reallocated", mCapIndex)); |
|
263 } else { |
|
264 LOG(("Audio device %d allocated shared", mCapIndex)); |
|
265 } |
|
266 return NS_OK; |
|
267 } |
|
268 |
|
269 nsresult |
|
270 MediaEngineWebRTCAudioSource::Deallocate() |
|
271 { |
|
272 if (mSources.IsEmpty()) { |
|
273 if (mState != kStopped && mState != kAllocated) { |
|
274 return NS_ERROR_FAILURE; |
|
275 } |
|
276 |
|
277 mState = kReleased; |
|
278 LOG(("Audio device %d deallocated", mCapIndex)); |
|
279 } else { |
|
280 LOG(("Audio device %d deallocated but still in use", mCapIndex)); |
|
281 } |
|
282 return NS_OK; |
|
283 } |
|
284 |
|
/**
 * Starts capture into aStream/aID. Registers the stream, creates its audio
 * track, and — on the first start only — configures audio processing and
 * starts the voice-engine channel.
 *
 * @return NS_ERROR_FAILURE if not initialized, aStream is null, or the
 *         engine fails to start; NS_OK otherwise.
 */
nsresult
MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  if (!mInitDone || !aStream) {
    return NS_ERROR_FAILURE;
  }

  {
    // mSources is also iterated by Process() under the same monitor.
    MonitorAutoLock lock(mMonitor);
    mSources.AppendElement(aStream);
  }

  // Ownership of the segment passes to the track.
  AudioSegment* segment = new AudioSegment();
  aStream->AddTrack(aID, SAMPLE_FREQUENCY, 0, segment);
  aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  // XXX Make this based on the pref.
  aStream->RegisterForAudioMixing();
  LOG(("Start audio for stream %p", aStream));

  if (mState == kStarted) {
    // Capture already running; every consumer must use the same track id.
    MOZ_ASSERT(aID == mTrackID);
    return NS_OK;
  }
  mState = kStarted;
  mTrackID = aID;

  // Make sure logger starts before capture
  AsyncLatencyLogger::Get(true);

  // Register output observer
  // XXX
  MOZ_ASSERT(gFarendObserver);
  gFarendObserver->Clear();

  // Configure audio processing in webrtc code (keep current modes, only
  // push the on/off flags and playout delay).
  Config(mEchoOn, webrtc::kEcUnchanged,
         mAgcOn, webrtc::kAgcUnchanged,
         mNoiseOn, webrtc::kNsUnchanged,
         mPlayoutDelay);

  if (mVoEBase->StartReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->StartSend(mChannel)) {
    return NS_ERROR_FAILURE;
  }

  // Attach external media processor, so this::Process will be called.
  mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);

  return NS_OK;
}
|
337 |
|
/**
 * Stops capture for aSource/aID. The voice-engine channel is only torn
 * down when the last registered source goes away; earlier calls just
 * deregister the stream.
 *
 * @return NS_OK if the source was already stopped or others remain;
 *         NS_ERROR_FAILURE on inconsistent state or engine errors.
 */
nsresult
MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
  {
    MonitorAutoLock lock(mMonitor);

    if (!mSources.RemoveElement(aSource)) {
      // Already stopped - this is allowed
      return NS_OK;
    }
    if (!mSources.IsEmpty()) {
      // Other streams still consume this capture; keep it running.
      return NS_OK;
    }
    if (mState != kStarted) {
      return NS_ERROR_FAILURE;
    }
    if (!mVoEBase) {
      return NS_ERROR_FAILURE;
    }

    mState = kStopped;
    aSource->EndTrack(aID);
  }

  // NOTE(review): engine teardown happens outside the monitor — presumably
  // to avoid holding it across voice-engine calls; confirm before changing.
  mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);

  if (mVoEBase->StopSend(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->StopReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
|
372 |
|
/**
 * MediaStreamGraph pull callback. Audio data is pushed from Process() on
 * the capture thread instead, so this only does debug-build latency
 * bookkeeping.
 *
 * NOTE(review): aLastEndTime is only advanced in DEBUG builds; release
 * builds leave it untouched — confirm no caller relies on it here.
 */
void
MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream *aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks &aLastEndTime)
{
  // Ignore - we push audio data
#ifdef DEBUG
  TrackTicks target = TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  LOG(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",(int64_t) aDesiredTime, (int64_t) target, (int64_t) delta));
  aLastEndTime = target;
#endif
}
|
388 |
|
/**
 * Snapshot capture is not supported for audio sources.
 */
nsresult
MediaEngineWebRTCAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
  return NS_ERROR_NOT_IMPLEMENTED;
}
|
394 |
|
/**
 * One-time engine setup: acquires the voice-engine sub-interfaces, creates
 * a channel with a null transport, selects the recording device and
 * configures the send codec. mInitDone becomes true only if every step
 * succeeds; any early return leaves it false (Shutdown handles partial
 * initialization).
 */
void
MediaEngineWebRTCAudioSource::Init()
{
  mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);

  mVoEBase->Init();

  mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
  if (!mVoERender) {
    return;
  }
  mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
  if (!mVoENetwork) {
    return;
  }

  mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
  if (!mVoEProcessing) {
    return;
  }

  mVoECallReport = webrtc::VoECallReport::GetInterface(mVoiceEngine);
  if (!mVoECallReport) {
    return;
  }

  mChannel = mVoEBase->CreateChannel();
  if (mChannel < 0) {
    return;
  }
  // We never send over the network; a null transport satisfies the engine.
  mNullTransport = new NullTransport();
  if (mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
    return;
  }

  // Check for availability.
  ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
  if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
    return;
  }

#ifndef MOZ_B2G
  // Because of the permission mechanism of B2G, we need to skip the status
  // check here.
  bool avail = false;
  ptrVoEHw->GetRecordingDeviceStatus(avail);
  if (!avail) {
    return;
  }
#endif // MOZ_B2G

  // Set "codec" to PCM (L16), 16kHz (SAMPLE_FREQUENCY) on 1 channel.
  // (An older comment said 32kHz, which contradicted the macros.)
  ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
  if (!ptrVoECodec) {
    return;
  }

  webrtc::CodecInst codec;
  strcpy(codec.plname, ENCODING);
  codec.channels = CHANNELS;
  codec.rate = SAMPLE_RATE;      // 256000 = 16000 samples/s * 16 bits — presumably bits/s; confirm
  codec.plfreq = SAMPLE_FREQUENCY;
  codec.pacsize = SAMPLE_LENGTH; // samples per 10ms packet
  codec.pltype = 0; // Default payload type

  // SetSendCodec returns 0 on success.
  if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
    mInitDone = true;
  }
}
|
464 |
|
/**
 * Tears down the capture pipeline: stops any running streams, deallocates
 * the device, terminates the voice engine and drops all sub-interfaces.
 * Handles the partially-initialized case (mInitDone false) separately.
 */
void
MediaEngineWebRTCAudioSource::Shutdown()
{
  if (!mInitDone) {
    // duplicate these here in case we failed during Init()
    // (mChannel != -1 implies mVoENetwork was acquired — see Init order)
    if (mChannel != -1) {
      mVoENetwork->DeRegisterExternalTransport(mChannel);
    }

    delete mNullTransport;
    return;
  }

  if (mState == kStarted) {
    while (!mSources.IsEmpty()) {
      Stop(mSources[0], kAudioTrack); // XXX change to support multiple tracks
    }
    MOZ_ASSERT(mState == kStopped);
  }

  if (mState == kAllocated || mState == kStopped) {
    Deallocate();
  }

  mVoEBase->Terminate();
  if (mChannel != -1) {
    mVoENetwork->DeRegisterExternalTransport(mChannel);
  }

  delete mNullTransport;

  // Drop sub-interface references (ScopedCustomReleasePtr releases them).
  mVoEProcessing = nullptr;
  mVoENetwork = nullptr;
  mVoERender = nullptr;
  mVoEBase = nullptr;

  mState = kReleased;
  mInitDone = false;
}
|
504 |
|
505 typedef int16_t sample; |
|
506 |
|
507 void |
|
508 MediaEngineWebRTCAudioSource::Process(int channel, |
|
509 webrtc::ProcessingTypes type, sample* audio10ms, |
|
510 int length, int samplingFreq, bool isStereo) |
|
511 { |
|
512 // On initial capture, throw away all far-end data except the most recent sample |
|
513 // since it's already irrelevant and we want to keep avoid confusing the AEC far-end |
|
514 // input code with "old" audio. |
|
515 if (!mStarted) { |
|
516 mStarted = true; |
|
517 while (gFarendObserver->Size() > 1) { |
|
518 FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0 |
|
519 free(buffer); |
|
520 } |
|
521 } |
|
522 |
|
523 while (gFarendObserver->Size() > 0) { |
|
524 FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0 |
|
525 if (buffer) { |
|
526 int length = buffer->mSamples; |
|
527 if (mVoERender->ExternalPlayoutData(buffer->mData, |
|
528 gFarendObserver->PlayoutFrequency(), |
|
529 gFarendObserver->PlayoutChannels(), |
|
530 mPlayoutDelay, |
|
531 length) == -1) { |
|
532 return; |
|
533 } |
|
534 } |
|
535 free(buffer); |
|
536 } |
|
537 |
|
538 #ifdef PR_LOGGING |
|
539 mSamples += length; |
|
540 if (mSamples > samplingFreq) { |
|
541 mSamples %= samplingFreq; // just in case mSamples >> samplingFreq |
|
542 if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) { |
|
543 webrtc::EchoStatistics echo; |
|
544 |
|
545 mVoECallReport->GetEchoMetricSummary(echo); |
|
546 #define DUMP_STATVAL(x) (x).min, (x).max, (x).average |
|
547 LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d", |
|
548 DUMP_STATVAL(echo.erl), |
|
549 DUMP_STATVAL(echo.erle), |
|
550 DUMP_STATVAL(echo.rerl), |
|
551 DUMP_STATVAL(echo.a_nlp))); |
|
552 } |
|
553 } |
|
554 #endif |
|
555 |
|
556 MonitorAutoLock lock(mMonitor); |
|
557 if (mState != kStarted) |
|
558 return; |
|
559 |
|
560 uint32_t len = mSources.Length(); |
|
561 for (uint32_t i = 0; i < len; i++) { |
|
562 nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample)); |
|
563 |
|
564 sample* dest = static_cast<sample*>(buffer->Data()); |
|
565 memcpy(dest, audio10ms, length * sizeof(sample)); |
|
566 |
|
567 AudioSegment segment; |
|
568 nsAutoTArray<const sample*,1> channels; |
|
569 channels.AppendElement(dest); |
|
570 segment.AppendFrames(buffer.forget(), channels, length); |
|
571 TimeStamp insertTime; |
|
572 segment.GetStartTime(insertTime); |
|
573 |
|
574 SourceMediaStream *source = mSources[i]; |
|
575 if (source) { |
|
576 // This is safe from any thread, and is safe if the track is Finished |
|
577 // or Destroyed. |
|
578 // Make sure we include the stream and the track. |
|
579 // The 0:1 is a flag to note when we've done the final insert for a given input block. |
|
580 LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID), |
|
581 (i+1 < len) ? 0 : 1, insertTime); |
|
582 |
|
583 source->AppendToTrack(mTrackID, &segment); |
|
584 } |
|
585 } |
|
586 |
|
587 return; |
|
588 } |
|
589 |
|
590 } |