/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "CSFLog.h"
#include "nspr.h"

#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#elif defined XP_WIN
#include <winsock2.h>
#endif

#include "AudioConduit.h"
#include "nsCOMPtr.h"
#include "mozilla/Services.h"
#include "nsServiceManagerUtils.h"
#include "nsIPrefService.h"
#include "nsIPrefBranch.h"
#include "nsThreadUtils.h"
#ifdef MOZILLA_INTERNAL_API
#include "Latency.h"
#include "mozilla/Telemetry.h"
#endif

#include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/system_wrappers/interface/clock.h"

#ifdef MOZ_WIDGET_ANDROID
#include "AndroidJNIWrapper.h"
#endif

namespace mozilla {

static const char* logTag = "WebrtcAudioSessionConduit";

// 32 bytes is what WebRTC CodecInst expects
const unsigned int WebrtcAudioConduit::CODEC_PLNAME_SIZE = 32;

/**
 * Factory Method for AudioConduit
 */
mozilla::RefPtr<AudioSessionConduit> AudioSessionConduit::Create(AudioSessionConduit *aOther)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
#ifdef MOZILLA_INTERNAL_API
  // unit tests create their own "main thread"
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
#endif

  WebrtcAudioConduit* obj = new WebrtcAudioConduit();
  if(obj->Init(static_cast<WebrtcAudioConduit*>(aOther)) != kMediaConduitNoError)
  {
    CSFLogError(logTag, "%s AudioConduit Init Failed ", __FUNCTION__);
    delete obj;
    return nullptr;
  }
  CSFLogDebug(logTag, "%s Successfully created AudioConduit ", __FUNCTION__);
  return obj;
}
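
// Usage sketch (illustrative, not taken from this file): the first conduit of
// a pair is created standalone, and the conduit for the opposite direction is
// created against it so that Init() below can share one VoiceEngine/channel:
//
//   mozilla::RefPtr<AudioSessionConduit> recv = AudioSessionConduit::Create(nullptr);
//   mozilla::RefPtr<AudioSessionConduit> send = AudioSessionConduit::Create(recv);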

/**
 * Destruction defines for our super-classes
 */
WebrtcAudioConduit::~WebrtcAudioConduit()
{
#ifdef MOZILLA_INTERNAL_API
  // unit tests create their own "main thread"
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
#endif

  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  for(std::vector<AudioCodecConfig*>::size_type i = 0; i < mRecvCodecList.size(); i++)
  {
    delete mRecvCodecList[i];
  }
  delete mCurSendCodecConfig;

  // The first one of a pair to be deleted shuts down media for both
  if(mPtrVoEXmedia)
  {
    if (!mShutDown) {
      mPtrVoEXmedia->SetExternalRecordingStatus(false);
      mPtrVoEXmedia->SetExternalPlayoutStatus(false);
    }
  }

  // Deal with the transport
  if(mPtrVoENetwork)
  {
    if (!mShutDown) {
      mPtrVoENetwork->DeRegisterExternalTransport(mChannel);
    }
  }

  if(mPtrVoEBase)
  {
    if (!mShutDown) {
      mPtrVoEBase->StopPlayout(mChannel);
      mPtrVoEBase->StopSend(mChannel);
      mPtrVoEBase->StopReceive(mChannel);
      mPtrVoEBase->DeleteChannel(mChannel);
      mPtrVoEBase->Terminate();
    }
  }

  if (mOtherDirection)
  {
    // mOtherDirection owns these now!
    mOtherDirection->mOtherDirection = nullptr;
    // let the other side know we terminated the channel
    mOtherDirection->mShutDown = true;
    mVoiceEngine = nullptr;
  } else {
    // We shouldn't delete the VoiceEngine until all these are released!
    // And we can't use a Scoped ptr, since the order is arbitrary
    mPtrVoENetwork = nullptr;
    mPtrVoEBase = nullptr;
    mPtrVoECodec = nullptr;
    mPtrVoEXmedia = nullptr;
    mPtrVoEProcessing = nullptr;
    mPtrVoEVideoSync = nullptr;
    mPtrVoERTP_RTCP = nullptr;
    mPtrRTP = nullptr;

    // only one opener can call Delete. Have it be the last to close.
    if(mVoiceEngine)
    {
      webrtc::VoiceEngine::Delete(mVoiceEngine);
    }
  }
}

bool WebrtcAudioConduit::GetLocalSSRC(unsigned int* ssrc) {
  return !mPtrRTP->GetLocalSSRC(mChannel, *ssrc);
}

bool WebrtcAudioConduit::GetRemoteSSRC(unsigned int* ssrc) {
  return !mPtrRTP->GetRemoteSSRC(mChannel, *ssrc);
}

bool WebrtcAudioConduit::GetAVStats(int32_t* jitterBufferDelayMs,
                                    int32_t* playoutBufferDelayMs,
                                    int32_t* avSyncOffsetMs) {
  return !mPtrVoEVideoSync->GetDelayEstimate(mChannel,
                                             jitterBufferDelayMs,
                                             playoutBufferDelayMs,
                                             avSyncOffsetMs);
}

bool WebrtcAudioConduit::GetRTPStats(unsigned int* jitterMs,
                                     unsigned int* cumulativeLost) {
  unsigned int maxJitterMs = 0;
  unsigned int discardedPackets;
  *jitterMs = 0;
  *cumulativeLost = 0;
  return !mPtrRTP->GetRTPStatistics(mChannel, *jitterMs, maxJitterMs,
                                    discardedPackets, *cumulativeLost);
}

DOMHighResTimeStamp
NTPtoDOMHighResTimeStamp(uint32_t ntpHigh, uint32_t ntpLow) {
  return (uint32_t(ntpHigh - webrtc::kNtpJan1970) +
          double(ntpLow) / webrtc::kMagicNtpFractionalUnit) * 1000;
}
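
// Worked example (illustrative): kNtpJan1970 is the 1900-to-1970 epoch offset
// in seconds, and kMagicNtpFractionalUnit is 2^32. So for
// ntpHigh = kNtpJan1970 + 1 and ntpLow = 0x80000000 (half a second in 2^-32
// units), the result is (1 + 0.5) * 1000 = 1500 ms since Jan 1 1970.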

bool WebrtcAudioConduit::GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
                                               uint32_t* jitterMs,
                                               uint32_t* packetsReceived,
                                               uint64_t* bytesReceived,
                                               uint32_t* cumulativeLost,
                                               int32_t* rttMs) {
  uint32_t ntpHigh, ntpLow;
  uint16_t fractionLost;
  bool result = !mPtrRTP->GetRemoteRTCPReceiverInfo(mChannel, ntpHigh, ntpLow,
                                                    *packetsReceived,
                                                    *bytesReceived,
                                                    *jitterMs,
                                                    fractionLost,
                                                    *cumulativeLost,
                                                    *rttMs);
  if (result) {
    *timestamp = NTPtoDOMHighResTimeStamp(ntpHigh, ntpLow);
  }
  return result;
}

bool WebrtcAudioConduit::GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
                                             unsigned int* packetsSent,
                                             uint64_t* bytesSent) {
  struct webrtc::SenderInfo senderInfo;
  bool result = !mPtrRTP->GetRemoteRTCPSenderInfo(mChannel, &senderInfo);
  if (result) {
    *timestamp = NTPtoDOMHighResTimeStamp(senderInfo.NTP_timestamp_high,
                                          senderInfo.NTP_timestamp_low);
    *packetsSent = senderInfo.sender_packet_count;
    *bytesSent = senderInfo.sender_octet_count;
  }
  return result;
}

/*
 * WebrtcAudioConduit Implementation
 */
MediaConduitErrorCode WebrtcAudioConduit::Init(WebrtcAudioConduit *other)
{
  CSFLogDebug(logTag, "%s this=%p other=%p", __FUNCTION__, this, other);

  if (other) {
    MOZ_ASSERT(!other->mOtherDirection);
    other->mOtherDirection = this;
    mOtherDirection = other;

    // only one can call ::Create()/GetVoiceEngine()
    MOZ_ASSERT(other->mVoiceEngine);
    mVoiceEngine = other->mVoiceEngine;
  } else {
#ifdef MOZ_WIDGET_ANDROID
    jobject context = jsjni_GetGlobalContextRef();

    // get the JVM
    JavaVM *jvm = jsjni_GetVM();
    JNIEnv* jenv = jsjni_GetJNIForThread();

    if (webrtc::VoiceEngine::SetAndroidObjects(jvm, jenv, (void*)context) != 0) {
      CSFLogError(logTag, "%s Unable to set Android objects", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }
#endif

    // Per the WebRTC API, the factory call below returns nullptr on failure
    if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
    {
      CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }

    PRLogModuleInfo *logs = GetWebRTCLogInfo();
    if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
      // no need for a critical section or lock here
      gWebrtcTraceLoggingOn = 1;

      const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
      if (!file) {
        file = "WebRTC.log";
      }
      CSFLogDebug(logTag, "%s Logging webrtc to %s level %d", __FUNCTION__,
                  file, logs->level);
      mVoiceEngine->SetTraceFilter(logs->level);
      mVoiceEngine->SetTraceFile(file);
    }
  }

  if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEBase", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoENetwork = VoENetwork::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoENetwork", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoECodec = VoECodec::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoECodec", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEProcessing = VoEAudioProcessing::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEProcessing", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoERTP_RTCP = VoERTP_RTCP::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoERTP_RTCP", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEVideoSync = VoEVideoSync::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEVideoSync", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if (!(mPtrRTP = webrtc::VoERTP_RTCP::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to get audio RTP/RTCP interface ",
                __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if (other) {
    mChannel = other->mChannel;
  } else {
    // init the engine with our audio device layer
    if(mPtrVoEBase->Init() == -1)
    {
      CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }

    if((mChannel = mPtrVoEBase->CreateChannel()) == -1)
    {
      CSFLogError(logTag, "%s VoiceEngine Channel creation failed", __FUNCTION__);
      return kMediaConduitChannelError;
    }

    CSFLogDebug(logTag, "%s Channel Created %d ", __FUNCTION__, mChannel);

    if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
    {
      CSFLogError(logTag, "%s VoiceEngine, External Transport Failed", __FUNCTION__);
      return kMediaConduitTransportRegistrationFail;
    }

    if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
    {
      CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d", __FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitExternalRecordingError;
    }

    if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
    {
      CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ", __FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitExternalPlayoutError;
    }
    CSFLogDebug(logTag, "%s AudioSessionConduit Initialization Done (%p)", __FUNCTION__, this);
  }
  return kMediaConduitNoError;
}

// AudioSessionConduit Implementation
MediaConduitErrorCode
WebrtcAudioConduit::AttachTransport(mozilla::RefPtr<TransportInterface> aTransport)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  if(!aTransport)
  {
    CSFLogError(logTag, "%s NULL Transport", __FUNCTION__);
    return kMediaConduitInvalidTransport;
  }
  // set the transport
  mTransport = aTransport;
  return kMediaConduitNoError;
}

MediaConduitErrorCode
WebrtcAudioConduit::ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; // webrtc engine errors
  webrtc::CodecInst cinst;

  // validate codec param
  if((condError = ValidateCodecConfig(codecConfig, true)) != kMediaConduitNoError)
  {
    return condError;
  }

  // if we are transmitting already, stop and then apply the new send codec
  if(mEngineTransmitting)
  {
    CSFLogDebug(logTag, "%s Engine Already Sending. Attempting to Stop ", __FUNCTION__);
    if(mPtrVoEBase->StopSend(mChannel) == -1)
    {
      CSFLogError(logTag, "%s StopSend() Failed %d ", __FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitUnknownError;
    }
  }

  mEngineTransmitting = false;

  if(!CodecConfigToWebRTCCodec(codecConfig, cinst))
  {
    CSFLogError(logTag, "%s CodecConfig to WebRTC Codec Failed ", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  if(mPtrVoECodec->SetSendCodec(mChannel, cinst) == -1)
  {
    error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s SetSendCodec - Invalid Codec %d ", __FUNCTION__,
                error);

    if(error == VE_CANNOT_SET_SEND_CODEC || error == VE_CODEC_ERROR)
    {
      CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__);
      return kMediaConduitInvalidSendCodec;
    }
    CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__,
                mPtrVoEBase->LastError());
    return kMediaConduitUnknownError;
  }

#ifdef MOZILLA_INTERNAL_API
  // TEMPORARY - see bug 694814 comment 2
  nsresult rv;
  nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
  if (NS_SUCCEEDED(rv)) {
    nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);

    if (branch) {
      branch->GetIntPref("media.peerconnection.capture_delay", &mCaptureDelay);
    }
  }
#endif

  // Start the transmit state machine on the engine
  if(mPtrVoEBase->StartSend(mChannel) == -1)
  {
    error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s StartSend failed %d", __FUNCTION__, error);
    return kMediaConduitUnknownError;
  }

  // Copy the applied config for future reference.
  delete mCurSendCodecConfig;

  mCurSendCodecConfig = new AudioCodecConfig(codecConfig->mType,
                                             codecConfig->mName,
                                             codecConfig->mFreq,
                                             codecConfig->mPacSize,
                                             codecConfig->mChannels,
                                             codecConfig->mRate,
                                             codecConfig->mLoadManager);

  mEngineTransmitting = true;
  return kMediaConduitNoError;
}

MediaConduitErrorCode
WebrtcAudioConduit::ConfigureRecvMediaCodecs(
                    const std::vector<AudioCodecConfig*>& codecConfigList)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; // webrtc engine errors
  bool success = false;

  // Are we receiving already? If so, stop receiving and playout
  // since we can't apply a new recv codec when the engine is playing.
  if(mEngineReceiving)
  {
    CSFLogDebug(logTag, "%s Engine Already Receiving. Attempting to Stop ", __FUNCTION__);
    // AudioEngine doesn't fail fatally on stopping reception. Ref: voe_errors.h.
    // Hence we need not be strict in failing here on errors
    mPtrVoEBase->StopReceive(mChannel);
    CSFLogDebug(logTag, "%s Attempting to Stop playout ", __FUNCTION__);
    if(mPtrVoEBase->StopPlayout(mChannel) == -1)
    {
      if(mPtrVoEBase->LastError() == VE_CANNOT_STOP_PLAYOUT)
      {
        CSFLogDebug(logTag, "%s Stop-Playout Failed %d", __FUNCTION__, mPtrVoEBase->LastError());
        return kMediaConduitPlayoutError;
      }
    }
  }

  mEngineReceiving = false;

  if(codecConfigList.empty())
  {
    CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  // Try applying the codecs in the list.
  // We succeed if at least one codec was applied and reception was
  // started successfully.
  for(std::vector<AudioCodecConfig*>::size_type i = 0; i < codecConfigList.size(); i++)
  {
    // if the codec param is invalid or a duplicate, return error
    if((condError = ValidateCodecConfig(codecConfigList[i], false)) != kMediaConduitNoError)
    {
      return condError;
    }

    webrtc::CodecInst cinst;
    if(!CodecConfigToWebRTCCodec(codecConfigList[i], cinst))
    {
      CSFLogError(logTag, "%s CodecConfig to WebRTC Codec Failed ", __FUNCTION__);
      continue;
    }

    if(mPtrVoECodec->SetRecPayloadType(mChannel, cinst) == -1)
    {
      error = mPtrVoEBase->LastError();
      CSFLogError(logTag, "%s SetRecvCodec Failed %d ", __FUNCTION__, error);
      continue;
    } else {
      CSFLogDebug(logTag, "%s Successfully Set RecvCodec %s", __FUNCTION__,
                  codecConfigList[i]->mName.c_str());
      // copy this to the local database
      if(CopyCodecToDB(codecConfigList[i]))
      {
        success = true;
      } else {
        CSFLogError(logTag, "%s Unable to update Codec Database", __FUNCTION__);
        return kMediaConduitUnknownError;
      }
    }
  } // end for

  if(!success)
  {
    CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__);
    return kMediaConduitInvalidReceiveCodec;
  }

  // If we are here, at least one codec should have been set
  if(mPtrVoEBase->StartReceive(mChannel) == -1)
  {
    error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s StartReceive Failed %d ", __FUNCTION__, error);
    if(error == VE_RECV_SOCKET_ERROR)
    {
      return kMediaConduitSocketError;
    }
    return kMediaConduitUnknownError;
  }

  if(mPtrVoEBase->StartPlayout(mChannel) == -1)
  {
    CSFLogError(logTag, "%s Starting playout Failed", __FUNCTION__);
    return kMediaConduitPlayoutError;
  }
  // we should be good here for setting this.
  mEngineReceiving = true;
  DumpCodecDB();
  return kMediaConduitNoError;
}

MediaConduitErrorCode
WebrtcAudioConduit::EnableAudioLevelExtension(bool enabled, uint8_t id)
{
  CSFLogDebug(logTag, "%s %d %d ", __FUNCTION__, enabled, id);

  if (mPtrVoERTP_RTCP->SetRTPAudioLevelIndicationStatus(mChannel, enabled, id) == -1)
  {
    CSFLogError(logTag, "%s SetRTPAudioLevelIndicationStatus Failed", __FUNCTION__);
    return kMediaConduitUnknownError;
  }

  return kMediaConduitNoError;
}

MediaConduitErrorCode
WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[],
                                   int32_t lengthSamples,
                                   int32_t samplingFreqHz,
                                   int32_t capture_delay)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  // The following checks are performed:
  // 1. Non-null audio buffer pointer
  // 2. Valid sampling frequency - not negative and not unsupported
  // 3. Appropriate sample length: the frame must be a whole number of the
  //    10 ms blocks the VoiceEngine feeds into the encoder.
  //    Ex: for a 16000 Hz sampling rate the valid block length is 160;
  //    for 32000 Hz it is 320.
  //    We verify this by checking that the modulo below is zero.

  if(!audio_data || (lengthSamples <= 0) ||
     (IsSamplingFreqSupported(samplingFreqHz) == false) ||
     ((lengthSamples % (samplingFreqHz / 100) != 0)) )
  {
    CSFLogError(logTag, "%s Invalid Parameters ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // validate capture time
  if(capture_delay < 0)
  {
    CSFLogError(logTag, "%s Invalid Capture Delay ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // if transmission is not started, the conduit cannot insert frames
  if(!mEngineTransmitting)
  {
    CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

#ifdef MOZILLA_INTERNAL_API
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
    struct Processing insert = { TimeStamp::Now(), 0 };
    mProcessing.AppendElement(insert);
  }
#endif

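  // The caller-supplied capture_delay is overridden by mCaptureDelay, which
  // comes from the media.peerconnection.capture_delay pref read in
  // ConfigureSendMediaCodec() (see bug 694814 comment 2 above).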
  capture_delay = mCaptureDelay;
  // Insert the samples
  if(mPtrVoEXmedia->ExternalRecordingInsertData(audio_data,
                                                lengthSamples,
                                                samplingFreqHz,
                                                capture_delay) == -1)
  {
    int error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s Inserting audio data Failed %d", __FUNCTION__, error);
    if(error == VE_RUNTIME_REC_ERROR)
    {
      return kMediaConduitRecordingError;
    }
    return kMediaConduitUnknownError;
  }
  // we should be good here
  return kMediaConduitNoError;
}
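
// Example usage (illustrative sketch, not from this file): callers feed
// frames whose sample count is a multiple of samplingFreqHz / 100, e.g. one
// 10 ms frame of 16-bit mono audio captured at 16 kHz:
//
//   int16_t frame[160] = {0};                       // one 10 ms frame
//   conduit->SendAudioFrame(frame, 160, 16000, 0);  // delay overridden by pref
//
// Frame lengths that fail the modulo check above are rejected as malformed.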

MediaConduitErrorCode
WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                  int32_t samplingFreqHz,
                                  int32_t capture_delay,
                                  int& lengthSamples)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  unsigned int numSamples = 0;

  // validate params
  if(!speechData)
  {
    CSFLogError(logTag, "%s Null Audio Buffer Pointer", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Validate sample length
  if((numSamples = GetNum10msSamplesForFrequency(samplingFreqHz)) == 0)
  {
    CSFLogError(logTag, "%s Invalid Sampling Frequency ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // validate capture time
  if(capture_delay < 0)
  {
    CSFLogError(logTag, "%s Invalid Capture Delay ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // The conduit should have reception enabled before we ask for decoded
  // samples
  if(!mEngineReceiving)
  {
    CSFLogError(logTag, "%s Engine not Receiving ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  lengthSamples = 0; // output parameter

  if(mPtrVoEXmedia->ExternalPlayoutGetData(speechData,
                                           samplingFreqHz,
                                           capture_delay,
                                           lengthSamples) == -1)
  {
    int error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s Getting audio data Failed %d", __FUNCTION__, error);
    if(error == VE_RUNTIME_PLAY_ERROR)
    {
      return kMediaConduitPlayoutError;
    }
    return kMediaConduitUnknownError;
  }

  // Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc
  mSamples += lengthSamples;
  if (mSamples >= mLastSyncLog + samplingFreqHz) {
    int jitter_buffer_delay_ms;
    int playout_buffer_delay_ms;
    int avsync_offset_ms;
    if (GetAVStats(&jitter_buffer_delay_ms,
                   &playout_buffer_delay_ms,
                   &avsync_offset_ms)) {
#ifdef MOZILLA_INTERNAL_API
      if (avsync_offset_ms < 0) {
        Telemetry::Accumulate(Telemetry::WEBRTC_AVSYNC_WHEN_VIDEO_LAGS_AUDIO_MS,
                              -avsync_offset_ms);
      } else {
        Telemetry::Accumulate(Telemetry::WEBRTC_AVSYNC_WHEN_AUDIO_LAGS_VIDEO_MS,
                              avsync_offset_ms);
      }
#endif
      CSFLogError(logTag,
                  "A/V sync: sync delta: %dms, audio jitter delay %dms, playout delay %dms",
                  avsync_offset_ms, jitter_buffer_delay_ms, playout_buffer_delay_ms);
    } else {
      CSFLogError(logTag, "A/V sync: GetAVStats failed");
    }
    mLastSyncLog = mSamples;
  }

#ifdef MOZILLA_INTERNAL_API
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
    if (mProcessing.Length() > 0) {
      unsigned int now;
      mPtrVoEVideoSync->GetPlayoutTimestamp(mChannel, now);
      if (static_cast<uint32_t>(now) != mLastTimestamp) {
        mLastTimestamp = static_cast<uint32_t>(now);
        // Find the block that includes this timestamp in the network input
        while (mProcessing.Length() > 0) {
          // FIX! assumes 20ms @ 48000Hz
          // FIX handle wrap-around
          if (mProcessing[0].mRTPTimeStamp + 20*(48000/1000) >= now) {
            TimeDuration t = TimeStamp::Now() - mProcessing[0].mTimeStamp;
            // Wrap-around?
            int64_t delta = t.ToMilliseconds() + (now - mProcessing[0].mRTPTimeStamp)/(48000/1000);
            LogTime(AsyncLatencyLogger::AudioRecvRTP, ((uint64_t) this), delta);
            break;
          }
          mProcessing.RemoveElementAt(0);
        }
      }
    }
  }
#endif
  CSFLogDebug(logTag, "%s GetAudioFrame:Got samples: length %d ", __FUNCTION__,
              lengthSamples);
  return kMediaConduitNoError;
}

// Transport Layer Callbacks
MediaConduitErrorCode
WebrtcAudioConduit::ReceivedRTPPacket(const void *data, int len)
{
  CSFLogDebug(logTag, "%s : channel %d", __FUNCTION__, mChannel);

  if(mEngineReceiving)
  {
#ifdef MOZILLA_INTERNAL_API
    if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
      // The RTP timestamp is the second 32-bit word of the header
      // (bytes 4-7), hence index [1]
      struct Processing insert = { TimeStamp::Now(),
                                   ntohl(static_cast<const uint32_t *>(data)[1]) };
      mProcessing.AppendElement(insert);
    }
#endif

    if(mPtrVoENetwork->ReceivedRTPPacket(mChannel, data, len) == -1)
    {
      int error = mPtrVoEBase->LastError();
      CSFLogError(logTag, "%s RTP Processing Error %d", __FUNCTION__, error);
      if(error == VE_RTP_RTCP_MODULE_ERROR)
      {
        return kMediaConduitRTPRTCPModuleError;
      }
      return kMediaConduitUnknownError;
    }
  } else {
    CSFLogError(logTag, "Error: %s when not receiving", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  return kMediaConduitNoError;
}

MediaConduitErrorCode
WebrtcAudioConduit::ReceivedRTCPPacket(const void *data, int len)
{
  CSFLogDebug(logTag, "%s : channel %d", __FUNCTION__, mChannel);

  if(mEngineTransmitting)
  {
    if(mPtrVoENetwork->ReceivedRTCPPacket(mChannel, data, len) == -1)
    {
      int error = mPtrVoEBase->LastError();
      CSFLogError(logTag, "%s RTCP Processing Error %d", __FUNCTION__, error);
      if(error == VE_RTP_RTCP_MODULE_ERROR)
      {
        return kMediaConduitRTPRTCPModuleError;
      }
      return kMediaConduitUnknownError;
    }
  } else {
    CSFLogError(logTag, "Error: %s when not transmitting", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }
  return kMediaConduitNoError;
}

// WebRTC::RTP Callback Implementation
int WebrtcAudioConduit::SendPacket(int channel, const void* data, int len)
{
  CSFLogDebug(logTag, "%s : channel %d %s", __FUNCTION__, channel,
              (mEngineReceiving && mOtherDirection) ? "(using mOtherDirection)" : "");

  if (mEngineReceiving)
  {
    if (mOtherDirection)
    {
      return mOtherDirection->SendPacket(channel, data, len);
    }
    CSFLogDebug(logTag, "%s : Asked to send RTP without an RTP sender on channel %d",
                __FUNCTION__, channel);
    return -1;
  } else {
#ifdef MOZILLA_INTERNAL_API
    if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
      if (mProcessing.Length() > 0) {
        TimeStamp started = mProcessing[0].mTimeStamp;
        mProcessing.RemoveElementAt(0);
        mProcessing.RemoveElementAt(0); // 20ms packetization! Could automate this by watching sizes
        TimeDuration t = TimeStamp::Now() - started;
        int64_t delta = t.ToMilliseconds();
        LogTime(AsyncLatencyLogger::AudioSendRTP, ((uint64_t) this), delta);
      }
    }
#endif
    if(mTransport && (mTransport->SendRtpPacket(data, len) == NS_OK))
    {
      CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__);
      return len;
    } else {
      CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
      return -1;
    }
  }
}

int WebrtcAudioConduit::SendRTCPPacket(int channel, const void* data, int len)
{
  CSFLogDebug(logTag, "%s : channel %d", __FUNCTION__, channel);

  if (mEngineTransmitting)
  {
    if (mOtherDirection)
    {
      return mOtherDirection->SendRTCPPacket(channel, data, len);
    }
  }

  // We come here if we have only one pipeline/conduit setup,
  // such as for unidirectional streams.
  // We also end up here if we are receiving
  if(mTransport && mTransport->SendRtcpPacket(data, len) == NS_OK)
  {
    CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__);
    return len;
  } else {
    CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__);
    return -1;
  }
}

/**
 * Converts a CodecConfig to a WebRTC CodecInst structure.
 */

bool
WebrtcAudioConduit::CodecConfigToWebRTCCodec(const AudioCodecConfig* codecInfo,
                                             webrtc::CodecInst& cinst)
{
  const unsigned int plNameLength = codecInfo->mName.length() + 1;
  memset(&cinst, 0, sizeof(webrtc::CodecInst));
  if(sizeof(cinst.plname) < plNameLength)
  {
    CSFLogError(logTag, "%s Payload name buffer capacity mismatch ",
                __FUNCTION__);
    return false;
  }
  memcpy(cinst.plname, codecInfo->mName.c_str(), codecInfo->mName.length());
  cinst.plname[plNameLength - 1] = '\0';
  cinst.pltype = codecInfo->mType;
  cinst.rate = codecInfo->mRate;
  cinst.pacsize = codecInfo->mPacSize;
  cinst.plfreq = codecInfo->mFreq;
  cinst.channels = codecInfo->mChannels;
  return true;
}
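
// Example mapping (illustrative; the payload type and bitrate are assumed
// values, not ones defined in this file): an AudioCodecConfig such as
//   { mType = 109, mName = "opus", mFreq = 48000, mPacSize = 960,
//     mChannels = 2, mRate = 32000 }
// yields a CodecInst with pltype 109, plname "opus", plfreq 48000,
// pacsize 960 (a 20 ms frame at 48 kHz), channels 2 and rate 32000.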

/**
 * Supported Sampling Frequencies.
 */
bool
WebrtcAudioConduit::IsSamplingFreqSupported(int freq) const
{
  if(GetNum10msSamplesForFrequency(freq))
  {
    return true;
  } else {
    return false;
  }
}

/* Return block-length of 10 ms audio frame in number of samples */
unsigned int
WebrtcAudioConduit::GetNum10msSamplesForFrequency(int samplingFreqHz) const
{
  switch(samplingFreqHz)
  {
    case 16000: return 160; // 160 samples
    case 32000: return 320; // 320 samples
    case 44100: return 441; // 441 samples
    case 48000: return 480; // 480 samples
    default:    return 0;   // invalid or unsupported
  }
}
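
// Each entry above is samplingFreqHz / 100, i.e. the number of samples in a
// 10 ms frame; SendAudioFrame's modulo check relies on the same relationship.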

// Copy the codec passed into the Conduit's database
bool
WebrtcAudioConduit::CopyCodecToDB(const AudioCodecConfig* codecInfo)
{
  AudioCodecConfig* cdcConfig = new AudioCodecConfig(codecInfo->mType,
                                                     codecInfo->mName,
                                                     codecInfo->mFreq,
                                                     codecInfo->mPacSize,
                                                     codecInfo->mChannels,
                                                     codecInfo->mRate,
                                                     codecInfo->mLoadManager);
  mRecvCodecList.push_back(cdcConfig);
  return true;
}

/**
 * Checks if two codec structs are the same
 */
bool
WebrtcAudioConduit::CheckCodecsForMatch(const AudioCodecConfig* curCodecConfig,
                                        const AudioCodecConfig* codecInfo) const
{
  if(!curCodecConfig)
  {
    return false;
  }

  if(curCodecConfig->mType == codecInfo->mType &&
     (curCodecConfig->mName.compare(codecInfo->mName) == 0) &&
     curCodecConfig->mFreq == codecInfo->mFreq &&
     curCodecConfig->mPacSize == codecInfo->mPacSize &&
     curCodecConfig->mChannels == codecInfo->mChannels &&
     curCodecConfig->mRate == codecInfo->mRate)
  {
    return true;
  }

  return false;
}

/**
 * Checks if the codec is already in the Conduit's database
 */
bool
WebrtcAudioConduit::CheckCodecForMatch(const AudioCodecConfig* codecInfo) const
{
  // the db should have at least one codec
  for(std::vector<AudioCodecConfig*>::size_type i = 0; i < mRecvCodecList.size(); i++)
  {
    if(CheckCodecsForMatch(mRecvCodecList[i], codecInfo))
    {
      // match
      return true;
    }
  }
  // no match or empty local db
  return false;
}

/**
 * Perform validation on the codecConfig to be applied.
 * Verifies if the codec is already applied.
 */
MediaConduitErrorCode
WebrtcAudioConduit::ValidateCodecConfig(const AudioCodecConfig* codecInfo,
                                        bool send) const
{
  bool codecAppliedAlready = false;

  if(!codecInfo)
  {
    CSFLogError(logTag, "%s Null CodecConfig ", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  if((codecInfo->mName.empty()) ||
     (codecInfo->mName.length() >= CODEC_PLNAME_SIZE))
  {
    CSFLogError(logTag, "%s Invalid Payload Name Length ", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  // Only mono or stereo channels supported
  if((codecInfo->mChannels != 1) && (codecInfo->mChannels != 2))
  {
    CSFLogError(logTag, "%s Channel Unsupported ", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  // check if we have the same codec already applied
  if(send)
  {
    codecAppliedAlready = CheckCodecsForMatch(mCurSendCodecConfig, codecInfo);
  } else {
    codecAppliedAlready = CheckCodecForMatch(codecInfo);
  }

  if(codecAppliedAlready)
  {
    CSFLogDebug(logTag, "%s Codec %s Already Applied ", __FUNCTION__, codecInfo->mName.c_str());
    return kMediaConduitCodecInUse;
  }
  return kMediaConduitNoError;
}

void
WebrtcAudioConduit::DumpCodecDB() const
{
  for(std::vector<AudioCodecConfig*>::size_type i = 0; i < mRecvCodecList.size(); i++)
  {
    CSFLogDebug(logTag, "Payload Name: %s", mRecvCodecList[i]->mName.c_str());
    CSFLogDebug(logTag, "Payload Type: %d", mRecvCodecList[i]->mType);
    CSFLogDebug(logTag, "Payload Frequency: %d", mRecvCodecList[i]->mFreq);
    CSFLogDebug(logTag, "Payload PacketSize: %d", mRecvCodecList[i]->mPacSize);
    CSFLogDebug(logTag, "Payload Channels: %d", mRecvCodecList[i]->mChannels);
    CSFLogDebug(logTag, "Payload Bitrate: %d", mRecvCodecList[i]->mRate);
  }
}
} // end namespace