Thu, 15 Jan 2015 15:59:08 +0100
Implement a real Private Browsing Mode condition by changing the API/ABI.
This solves Tor bug #9701, complying with the disk avoidance requirements documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

// Original author: ekr@rtfm.com

#include "logging.h"
#include "MediaPipeline.h"

#ifndef USE_FAKE_MEDIA_STREAMS
#include "MediaStreamGraphImpl.h"
#endif

#include <math.h>

#include "nspr.h"
#include "srtp.h"

#ifdef MOZILLA_INTERNAL_API
#include "VideoSegment.h"
#include "Layers.h"
#include "ImageTypes.h"
#include "ImageContainer.h"
#include "VideoUtils.h"
#ifdef WEBRTC_GONK
#include "GrallocImages.h"
#include "mozilla/layers/GrallocTextureClient.h"
#endif
#endif

#include "nsError.h"
#include "AudioSegment.h"
#include "MediaSegment.h"
#include "databuffer.h"
#include "transportflow.h"
#include "transportlayer.h"
#include "transportlayerdtls.h"
#include "transportlayerice.h"
#include "runnable_utils.h"
#include "libyuv/convert.h"
#include "mozilla/gfx/Point.h"
#include "mozilla/gfx/Types.h"

#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"

using namespace mozilla;
using namespace mozilla::gfx;

// Logging context
MOZ_MTLOG_MODULE("mediapipeline")

namespace mozilla {
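
// Label fed to the DTLS keying material exporter (RFC 5764, Section 4.2) to
// derive the SRTP master keys and salts for this association.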
static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";

MediaPipeline::~MediaPipeline() {
  ASSERT_ON_THREAD(main_thread_);
  MOZ_ASSERT(!stream_);  // Check that we have shut down already.
  MOZ_MTLOG(ML_INFO, "Destroying MediaPipeline: " << description_);
}

nsresult MediaPipeline::Init() {
  ASSERT_ON_THREAD(main_thread_);
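
  // The transport flows live on the STS (socket transport service) thread, so
  // the real hookup work is dispatched there; see Init_s() below.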
  RUN_ON_THREAD(sts_thread_,
                WrapRunnable(
                    nsRefPtr<MediaPipeline>(this),
                    &MediaPipeline::Init_s),
                NS_DISPATCH_NORMAL);

  return NS_OK;
}

nsresult MediaPipeline::Init_s() {
  ASSERT_ON_THREAD(sts_thread_);
  conduit_->AttachTransport(transport_);

  nsresult res;
  MOZ_ASSERT(rtp_.transport_);
  MOZ_ASSERT(rtcp_.transport_);
  res = ConnectTransport_s(rtp_);
  if (NS_FAILED(res)) {
    return res;
  }

  if (rtcp_.transport_ != rtp_.transport_) {
    res = ConnectTransport_s(rtcp_);
    if (NS_FAILED(res)) {
      return res;
    }
  }

  if (possible_bundle_rtp_) {
    MOZ_ASSERT(possible_bundle_rtcp_);
    MOZ_ASSERT(possible_bundle_rtp_->transport_);
    MOZ_ASSERT(possible_bundle_rtcp_->transport_);

    res = ConnectTransport_s(*possible_bundle_rtp_);
    if (NS_FAILED(res)) {
      return res;
    }

    if (possible_bundle_rtcp_->transport_ != possible_bundle_rtp_->transport_) {
      res = ConnectTransport_s(*possible_bundle_rtcp_);
      if (NS_FAILED(res)) {
        return res;
      }
    }
  }

  return NS_OK;
}


// Disconnect us from the transport so that we can cleanly destruct the
// pipeline on the main thread. ShutdownMedia_m() must have already been
// called
void MediaPipeline::ShutdownTransport_s() {
  ASSERT_ON_THREAD(sts_thread_);
  MOZ_ASSERT(!stream_);  // verifies that ShutdownMedia_m() has run

  disconnect_all();
  transport_->Detach();
  rtp_.transport_ = nullptr;
  rtcp_.transport_ = nullptr;
  possible_bundle_rtp_ = nullptr;
  possible_bundle_rtcp_ = nullptr;
}

void MediaPipeline::StateChange(TransportFlow *flow, TransportLayer::State state) {
  TransportInfo* info = GetTransportInfo_s(flow);
  MOZ_ASSERT(info);

  if (state == TransportLayer::TS_OPEN) {
    MOZ_MTLOG(ML_INFO, "Flow is ready");
    TransportReady_s(*info);
  } else if (state == TransportLayer::TS_CLOSED ||
             state == TransportLayer::TS_ERROR) {
    TransportFailed_s(*info);
  }
}

static bool MakeRtpTypeToStringArray(const char** array) {
  static const char* RTP_str = "RTP";
  static const char* RTCP_str = "RTCP";
  static const char* MUX_str = "RTP/RTCP mux";
  array[MediaPipeline::RTP] = RTP_str;
  array[MediaPipeline::RTCP] = RTCP_str;
  array[MediaPipeline::MUX] = MUX_str;
  return true;
}

static const char* ToString(MediaPipeline::RtpType type) {
  static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = {nullptr};
  // Dummy variable to cause init to happen only on first call
  static bool dummy = MakeRtpTypeToStringArray(array);
  (void)dummy;
  return array[type];
}

nsresult MediaPipeline::TransportReady_s(TransportInfo &info) {
  MOZ_ASSERT(!description_.empty());

  // TODO(ekr@rtfm.com): implement some kind of notification on
  // failure. bug 852665.
  if (info.state_ != MP_CONNECTING) {
    MOZ_MTLOG(ML_ERROR, "Transport ready for flow in wrong state: " <<
              description_ << ": " << ToString(info.type_));
    return NS_ERROR_FAILURE;
  }

  MOZ_MTLOG(ML_INFO, "Transport ready for pipeline " <<
            static_cast<void *>(this) << " flow " << description_ << ": " <<
            ToString(info.type_));

  // TODO(bcampen@mozilla.com): Should we disconnect from the flow on failure?
  nsresult res;

  // Now instantiate the SRTP objects
  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
      info.transport_->GetLayer(TransportLayerDtls::ID()));
  MOZ_ASSERT(dtls);  // DTLS is mandatory

  uint16_t cipher_suite;
  res = dtls->GetSrtpCipher(&cipher_suite);
  if (NS_FAILED(res)) {
    MOZ_MTLOG(ML_ERROR, "Failed to negotiate DTLS-SRTP. This is an error");
    info.state_ = MP_CLOSED;
    UpdateRtcpMuxState(info);
    return res;
  }

  // SRTP Key Exporter as per RFC 5764 S 4.2
  unsigned char srtp_block[SRTP_TOTAL_KEY_LENGTH * 2];
  res = dtls->ExportKeyingMaterial(kDTLSExporterLabel, false, "",
                                   srtp_block, sizeof(srtp_block));
  if (NS_FAILED(res)) {
    MOZ_MTLOG(ML_ERROR, "Failed to compute DTLS-SRTP keys. This is an error");
    info.state_ = MP_CLOSED;
    UpdateRtcpMuxState(info);
    MOZ_CRASH();  // TODO: Remove once we have enough field experience to
                  // know it doesn't happen. bug 798797. Note that the
                  // code after this never executes.
    return res;
  }
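
  // Per RFC 5764 S 4.2 the exported block is laid out as:
  //   client_write_SRTP_master_key | server_write_SRTP_master_key |
  //   client_write_SRTP_master_salt | server_write_SRTP_master_salt
  // Below, each side's key and salt are concatenated back into a single
  // SRTP_TOTAL_KEY_LENGTH buffer, which is the form SrtpFlow::Create() takes.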
  // Slice and dice as per RFC 5764 S 4.2
  unsigned char client_write_key[SRTP_TOTAL_KEY_LENGTH];
  unsigned char server_write_key[SRTP_TOTAL_KEY_LENGTH];
  int offset = 0;
  memcpy(client_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
  offset += SRTP_MASTER_KEY_LENGTH;
  memcpy(server_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
  offset += SRTP_MASTER_KEY_LENGTH;
  memcpy(client_write_key + SRTP_MASTER_KEY_LENGTH,
         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
  offset += SRTP_MASTER_SALT_LENGTH;
  memcpy(server_write_key + SRTP_MASTER_KEY_LENGTH,
         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
  offset += SRTP_MASTER_SALT_LENGTH;
  MOZ_ASSERT(offset == sizeof(srtp_block));

  unsigned char *write_key;
  unsigned char *read_key;

  if (dtls->role() == TransportLayerDtls::CLIENT) {
    write_key = client_write_key;
    read_key = server_write_key;
  } else {
    write_key = server_write_key;
    read_key = client_write_key;
  }

  MOZ_ASSERT(!info.send_srtp_ && !info.recv_srtp_);
  info.send_srtp_ = SrtpFlow::Create(cipher_suite, false, write_key,
                                     SRTP_TOTAL_KEY_LENGTH);
  info.recv_srtp_ = SrtpFlow::Create(cipher_suite, true, read_key,
                                     SRTP_TOTAL_KEY_LENGTH);
  if (!info.send_srtp_ || !info.recv_srtp_) {
    MOZ_MTLOG(ML_ERROR, "Couldn't create SRTP flow for "
              << ToString(info.type_));
    info.state_ = MP_CLOSED;
    UpdateRtcpMuxState(info);
    return NS_ERROR_FAILURE;
  }

  MOZ_MTLOG(ML_INFO, "Listening for " << ToString(info.type_)
            << " packets received on " <<
            static_cast<void *>(dtls->downward()));

  switch (info.type_) {
    case RTP:
      dtls->downward()->SignalPacketReceived.connect(
          this,
          &MediaPipeline::RtpPacketReceived);
      break;
    case RTCP:
      dtls->downward()->SignalPacketReceived.connect(
          this,
          &MediaPipeline::RtcpPacketReceived);
      break;
    case MUX:
      dtls->downward()->SignalPacketReceived.connect(
          this,
          &MediaPipeline::PacketReceived);
      break;
    default:
      MOZ_CRASH();
  }

  info.state_ = MP_OPEN;
  UpdateRtcpMuxState(info);
  return NS_OK;
}

nsresult MediaPipeline::TransportFailed_s(TransportInfo &info) {
  ASSERT_ON_THREAD(sts_thread_);

  info.state_ = MP_CLOSED;
  UpdateRtcpMuxState(info);

  MOZ_MTLOG(ML_INFO, "Transport closed for flow " << ToString(info.type_));

  NS_WARNING(
      "MediaPipeline Transport failed. This is not properly cleaned up yet");

  // TODO(ekr@rtfm.com): SECURITY: Figure out how to clean up if the
  // connection was good and now it is bad.
  // TODO(ekr@rtfm.com): Report up so that the PC knows we
  // have experienced an error.

  return NS_OK;
}
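
// When RTP and RTCP are muxed on a single transport, the separate rtcp_ (or
// possible_bundle_rtcp_) TransportInfo rides on the same flow, so its state
// and SRTP contexts are mirrored from the muxed flow here.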
void MediaPipeline::UpdateRtcpMuxState(TransportInfo &info) {
  if (info.type_ == MUX) {
    if (info.transport_ == rtcp_.transport_) {
      rtcp_.state_ = info.state_;
      if (!rtcp_.send_srtp_) {
        rtcp_.send_srtp_ = info.send_srtp_;
        rtcp_.recv_srtp_ = info.recv_srtp_;
      }
    } else if (possible_bundle_rtcp_ &&
               info.transport_ == possible_bundle_rtcp_->transport_) {
      possible_bundle_rtcp_->state_ = info.state_;
      if (!possible_bundle_rtcp_->send_srtp_) {
        possible_bundle_rtcp_->send_srtp_ = info.send_srtp_;
        possible_bundle_rtcp_->recv_srtp_ = info.recv_srtp_;
      }
    }
  }
}

nsresult MediaPipeline::SendPacket(TransportFlow *flow, const void *data,
                                   int len) {
  ASSERT_ON_THREAD(sts_thread_);

  // Note that we bypass the DTLS layer here
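  // (the payload handed to us is already SRTP/SRTCP-protected, and per the
  // RFC 5764 multiplexing scheme SRTP shares the transport with DTLS rather
  // than being carried inside it, so we write straight to the layer below
  // DTLS).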
  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
      flow->GetLayer(TransportLayerDtls::ID()));
  MOZ_ASSERT(dtls);

  TransportResult res = dtls->downward()->
      SendPacket(static_cast<const unsigned char *>(data), len);

  if (res != len) {
    // Ignore blocking indications
    if (res == TE_WOULDBLOCK)
      return NS_OK;

    MOZ_MTLOG(ML_ERROR, "Failed write on stream");
    return NS_BASE_STREAM_CLOSED;
  }

  return NS_OK;
}

void MediaPipeline::increment_rtp_packets_sent(int32_t bytes) {
  ++rtp_packets_sent_;
  rtp_bytes_sent_ += bytes;

  if (!(rtp_packets_sent_ % 100)) {
    MOZ_MTLOG(ML_INFO, "RTP sent packet count for " << description_
              << " Pipeline " << static_cast<void *>(this)
              << " Flow : " << static_cast<void *>(rtp_.transport_)
              << ": " << rtp_packets_sent_
              << " (" << rtp_bytes_sent_ << " bytes)");
  }
}

void MediaPipeline::increment_rtcp_packets_sent() {
  ++rtcp_packets_sent_;
  if (!(rtcp_packets_sent_ % 100)) {
    MOZ_MTLOG(ML_INFO, "RTCP sent packet count for " << description_
              << " Pipeline " << static_cast<void *>(this)
              << " Flow : " << static_cast<void *>(rtcp_.transport_)
              << ": " << rtcp_packets_sent_);
  }
}

void MediaPipeline::increment_rtp_packets_received(int32_t bytes) {
  ++rtp_packets_received_;
  rtp_bytes_received_ += bytes;
  if (!(rtp_packets_received_ % 100)) {
    MOZ_MTLOG(ML_INFO, "RTP received packet count for " << description_
              << " Pipeline " << static_cast<void *>(this)
              << " Flow : " << static_cast<void *>(rtp_.transport_)
              << ": " << rtp_packets_received_
              << " (" << rtp_bytes_received_ << " bytes)");
  }
}

void MediaPipeline::increment_rtcp_packets_received() {
  ++rtcp_packets_received_;
  if (!(rtcp_packets_received_ % 100)) {
    MOZ_MTLOG(ML_INFO, "RTCP received packet count for " << description_
              << " Pipeline " << static_cast<void *>(this)
              << " Flow : " << static_cast<void *>(rtcp_.transport_)
              << ": " << rtcp_packets_received_);
  }
}

void MediaPipeline::RtpPacketReceived(TransportLayer *layer,
                                      const unsigned char *data,
                                      size_t len) {
  if (!transport_->pipeline()) {
    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport disconnected");
    return;
  }

  if (!conduit_) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; media disconnected");
    return;
  }

  TransportInfo* info = &rtp_;

  if (possible_bundle_rtp_ &&
      possible_bundle_rtp_->transport_->Contains(layer)) {
    // Received this on our possible bundle transport. Override info.
    info = possible_bundle_rtp_;
  }

  // TODO(bcampen@mozilla.com): Can either of these actually happen? If not,
  // the info variable can be removed, and this function gets simpler.
  if (info->state_ != MP_OPEN) {
    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; pipeline not open");
    return;
  }

  if (info->transport_->state() != TransportLayer::TS_OPEN) {
    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport not open");
    return;
  }

  // This should never happen.
  MOZ_ASSERT(info->recv_srtp_);

  if (direction_ == TRANSMIT) {
    return;
  }

  if (possible_bundle_rtp_ && (info == &rtp_)) {
    // We were not sure we would be using rtp_ or possible_bundle_rtp_, but we
    // have just received traffic that clears this up.
    // Don't let our filter prevent us from noticing this, if the filter is
    // incomplete (ie; no SSRCs in remote SDP, or no remote SDP at all).
    SetUsingBundle_s(false);
    MOZ_MTLOG(ML_INFO, "Ruled out the possibility that we're receiving bundle "
                       "for " << description_);
    // TODO(bcampen@mozilla.com): Might be nice to detect when every
    // MediaPipeline but the master has determined that it isn't doing bundle,
    // since that means the master isn't doing bundle either. We could maybe
    // do this by putting some refcounted dummy variable in the filters, and
    // checking the value of the refcount. It is not clear whether this is
    // going to be useful in practice.
  }

  if (!len) {
    return;
  }

  // Filter out everything but RTP/RTCP
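  // (The first byte of an RTP or RTCP packet carries version 2 in its two
  // high bits, so it always falls in [128, 191]; anything outside that range
  // is assumed to be stray STUN/DTLS/etc. traffic sharing the transport.)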
  if (data[0] < 128 || data[0] > 191) {
    return;
  }

  if (filter_) {
    webrtc::RTPHeader header;
    if (!rtp_parser_->Parse(data, len, &header) ||
        !filter_->Filter(header)) {
      return;
    }
  }

  if (possible_bundle_rtp_) {
    // Just got traffic that passed our filter on the potential bundle
    // transport. Must be doing bundle.
    SetUsingBundle_s(true);
    MOZ_MTLOG(ML_INFO, "Confirmed the possibility that we're receiving bundle");
  }

  // Everything is decided now; just use rtp_
  MOZ_ASSERT(!possible_bundle_rtp_);
  MOZ_ASSERT(!possible_bundle_rtcp_);

  // Make a copy rather than cast away constness
  ScopedDeletePtr<unsigned char> inner_data(
      new unsigned char[len]);
  memcpy(inner_data, data, len);
  int out_len = 0;
  nsresult res = rtp_.recv_srtp_->UnprotectRtp(inner_data,
                                               len, len, &out_len);
  if (!NS_SUCCEEDED(res)) {
    char tmp[16];

    PR_snprintf(tmp, sizeof(tmp), "%.2x %.2x %.2x %.2x",
                inner_data[0],
                inner_data[1],
                inner_data[2],
                inner_data[3]);

    MOZ_MTLOG(ML_NOTICE, "Error unprotecting RTP in " << description_
              << " len= " << len << " [" << tmp << "...]");
    return;
  }
  increment_rtp_packets_received(out_len);

  (void)conduit_->ReceivedRTPPacket(inner_data, out_len);  // Ignore error codes
}

void MediaPipeline::RtcpPacketReceived(TransportLayer *layer,
                                       const unsigned char *data,
                                       size_t len) {
  if (!transport_->pipeline()) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; transport disconnected");
    return;
  }

  if (!conduit_) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; media disconnected");
    return;
  }

  TransportInfo* info = &rtcp_;
  if (possible_bundle_rtcp_ &&
      possible_bundle_rtcp_->transport_->Contains(layer)) {
    info = possible_bundle_rtcp_;
  }

  if (info->state_ != MP_OPEN) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; pipeline not open");
    return;
  }

  if (info->transport_->state() != TransportLayer::TS_OPEN) {
    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport not open");
    return;
  }

  if (possible_bundle_rtp_ && (info == &rtcp_)) {
    // We have offered bundle, and received our first packet on a non-bundle
    // address. We are definitely not using the bundle address.
    SetUsingBundle_s(false);
  }

  if (!len) {
    return;
  }

  // Filter out everything but RTP/RTCP
  if (data[0] < 128 || data[0] > 191) {
    return;
  }

  MediaPipelineFilter::Result filter_result = MediaPipelineFilter::PASS;
  if (filter_) {
    filter_result = filter_->FilterRTCP(data, len);
    if (filter_result == MediaPipelineFilter::FAIL) {
      return;
    }
  }

  if (filter_result == MediaPipelineFilter::PASS && possible_bundle_rtp_) {
    // Just got traffic that passed our filter on the potential bundle
    // transport. Must be doing bundle.
    SetUsingBundle_s(true);
  }

  // We continue using info here, since it is possible that the filter did not
  // support the payload type (ie; returned MediaPipelineFilter::UNSUPPORTED).
  // In this case, we just let it pass, and hope the webrtc.org code does
  // something sane.
  increment_rtcp_packets_received();

  MOZ_ASSERT(info->recv_srtp_);  // This should never happen

  // Make a copy rather than cast away constness
  ScopedDeletePtr<unsigned char> inner_data(
      new unsigned char[len]);
  memcpy(inner_data, data, len);
  int out_len;

  nsresult res = info->recv_srtp_->UnprotectRtcp(inner_data,
                                                 len,
                                                 len,
                                                 &out_len);

  if (!NS_SUCCEEDED(res))
    return;

  (void)conduit_->ReceivedRTCPPacket(inner_data, out_len);  // Ignore error codes
}

bool MediaPipeline::IsRtp(const unsigned char *data, size_t len) {
  if (len < 2)
    return false;

  // Check if this is an RTCP packet. Logic based on the types listed in
  // media/webrtc/trunk/src/modules/rtp_rtcp/source/rtp_utility.cc
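  // (The second byte of an RTCP packet is its packet type. The RTCP types in
  // use all fall in 192..207, which is what the checks below enumerate; an RTP
  // packet with the marker bit set and a payload type in 64..79 would have a
  // second byte in the same range, hence the ambiguity noted for 193.)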

  // Anything outside this range is RTP.
  if ((data[1] < 192) || (data[1] > 207))
    return true;

  if (data[1] == 192)  // FIR
    return false;

  if (data[1] == 193)  // NACK, but could also be RTP. This makes us sad
    return true;       // but it's how webrtc.org behaves.

  if (data[1] == 194)
    return true;

  if (data[1] == 195)  // IJ.
    return false;

  if ((data[1] > 195) && (data[1] < 200))  // the > 195 is redundant
    return true;

  if ((data[1] >= 200) && (data[1] <= 207))  // SR, RR, SDES, BYE,
    return false;                            // APP, RTPFB, PSFB, XR

  MOZ_ASSERT(false);  // Not reached, belt and suspenders.
  return true;
}

void MediaPipeline::PacketReceived(TransportLayer *layer,
                                   const unsigned char *data,
                                   size_t len) {
  if (!transport_->pipeline()) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; transport disconnected");
    return;
  }

  if (IsRtp(data, len)) {
    RtpPacketReceived(layer, data, len);
  } else {
    RtcpPacketReceived(layer, data, len);
  }
}

nsresult MediaPipelineTransmit::Init() {
  char track_id_string[11];
  ASSERT_ON_THREAD(main_thread_);

  // We can replace this when we are allowed to do streams or std::to_string
  PR_snprintf(track_id_string, sizeof(track_id_string), "%d", track_id_);

  description_ = pc_ + "| ";
  description_ += conduit_->type() == MediaSessionConduit::AUDIO ?
      "Transmit audio[" : "Transmit video[";
  description_ += track_id_string;
  description_ += "]";

  // TODO(ekr@rtfm.com): Check for errors
  MOZ_MTLOG(ML_DEBUG, "Attaching pipeline to stream "
            << static_cast<void *>(stream_) << " conduit type=" <<
            (conduit_->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));

  stream_->AddListener(listener_);

  // Is this a gUM mediastream? If so, also register the Listener directly with
  // the SourceMediaStream that's attached to the TrackUnion so we can get direct
  // unqueued (and not resampled) data
  if (domstream_->AddDirectListener(listener_)) {
    listener_->direct_connect_ = true;
  }

  return MediaPipeline::Init();
}

nsresult MediaPipelineTransmit::TransportReady_s(TransportInfo &info) {
  ASSERT_ON_THREAD(sts_thread_);
  // Call base ready function.
  MediaPipeline::TransportReady_s(info);

  // Should not be set for a transmitter
  MOZ_ASSERT(!possible_bundle_rtp_);
  if (&info == &rtp_) {
    // TODO(ekr@rtfm.com): Move onto MSG thread.
    listener_->SetActive(true);
  }

  return NS_OK;
}

void MediaPipeline::DisconnectTransport_s(TransportInfo &info) {
  MOZ_ASSERT(info.transport_);
  ASSERT_ON_THREAD(sts_thread_);

  info.transport_->SignalStateChange.disconnect(this);
  // We do this even if we're a transmitter, since we are still possibly
  // registered to receive RTCP.
  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
      info.transport_->GetLayer(TransportLayerDtls::ID()));
  MOZ_ASSERT(dtls);  // DTLS is mandatory
  MOZ_ASSERT(dtls->downward());
  dtls->downward()->SignalPacketReceived.disconnect(this);
}

nsresult MediaPipeline::ConnectTransport_s(TransportInfo &info) {
  MOZ_ASSERT(info.transport_);
  ASSERT_ON_THREAD(sts_thread_);

  // Look to see if the transport is ready
  if (info.transport_->state() == TransportLayer::TS_OPEN) {
    nsresult res = TransportReady_s(info);
    if (NS_FAILED(res)) {
      MOZ_MTLOG(ML_ERROR, "Error calling TransportReady(); res="
                << static_cast<uint32_t>(res) << " in " << __FUNCTION__);
      return res;
    }
  } else if (info.transport_->state() == TransportLayer::TS_ERROR) {
    MOZ_MTLOG(ML_ERROR, ToString(info.type_)
              << " transport is already in error state");
    TransportFailed_s(info);
    return NS_ERROR_FAILURE;
  }

  info.transport_->SignalStateChange.connect(this,
                                             &MediaPipeline::StateChange);

  return NS_OK;
}

MediaPipeline::TransportInfo* MediaPipeline::GetTransportInfo_s(
    TransportFlow *flow) {
  ASSERT_ON_THREAD(sts_thread_);
  if (flow == rtp_.transport_) {
    return &rtp_;
  }

  if (flow == rtcp_.transport_) {
    return &rtcp_;
  }

  if (possible_bundle_rtp_) {
    if (flow == possible_bundle_rtp_->transport_) {
      return possible_bundle_rtp_;
    }

    if (flow == possible_bundle_rtcp_->transport_) {
      return possible_bundle_rtcp_;
    }
  }

  return nullptr;
}

void MediaPipeline::SetUsingBundle_s(bool decision) {
  ASSERT_ON_THREAD(sts_thread_);
  // Note: This can be called either because of events on the STS thread, or
  // by events on the main thread (ie; receiving a remote description). It is
  // best to be careful of races here, so don't assume that transports are open.
  if (!possible_bundle_rtp_) {
    if (!decision) {
      // This can happen on the master pipeline.
      filter_ = nullptr;
    }
    return;
  }

  if (direction_ == RECEIVE) {
    if (decision) {
      MOZ_MTLOG(ML_INFO, "Non-master pipeline confirmed bundle for "
                << description_);
      // We're doing bundle. Release the unused flows, and copy the ones we
      // are using into the less wishy-washy members.
      DisconnectTransport_s(rtp_);
      DisconnectTransport_s(rtcp_);
      rtp_ = *possible_bundle_rtp_;
      rtcp_ = *possible_bundle_rtcp_;
    } else {
      MOZ_MTLOG(ML_INFO, "Non-master pipeline confirmed no bundle for "
                << description_);
      // We are not doing bundle
      DisconnectTransport_s(*possible_bundle_rtp_);
      DisconnectTransport_s(*possible_bundle_rtcp_);
      filter_ = nullptr;
    }

    // We are no longer in an ambiguous state.
    possible_bundle_rtp_ = nullptr;
    possible_bundle_rtcp_ = nullptr;
  }
}

MediaPipelineFilter* MediaPipeline::UpdateFilterFromRemoteDescription_s(
    nsAutoPtr<MediaPipelineFilter> filter) {
  ASSERT_ON_THREAD(sts_thread_);
  // This is only supposed to relax the filter. Relaxing a missing filter is
  // not possible.
  MOZ_ASSERT(filter_);

  if (!filter) {
    filter_ = nullptr;
  } else {
    filter_->IncorporateRemoteDescription(*filter);
  }

  return filter_.get();
}
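
// SendRtpPacket() may be called from outside the STS thread (it is the
// conduit's transport hook), so the payload is copied into a DataBuffer and
// dispatched to the STS thread, where the SRTP context and the transport flow
// live; see SendRtpPacket_s() below.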
nsresult MediaPipeline::PipelineTransport::SendRtpPacket(
    const void *data, int len) {

  nsAutoPtr<DataBuffer> buf(new DataBuffer(static_cast<const uint8_t *>(data),
                                           len));

  RUN_ON_THREAD(sts_thread_,
                WrapRunnable(
                    RefPtr<MediaPipeline::PipelineTransport>(this),
                    &MediaPipeline::PipelineTransport::SendRtpPacket_s,
                    buf),
                NS_DISPATCH_NORMAL);

  return NS_OK;
}

nsresult MediaPipeline::PipelineTransport::SendRtpPacket_s(
    nsAutoPtr<DataBuffer> data) {
  ASSERT_ON_THREAD(sts_thread_);
  if (!pipeline_)
    return NS_OK;  // Detached

  if (!pipeline_->rtp_.send_srtp_) {
    MOZ_MTLOG(ML_DEBUG, "Couldn't write RTP packet; SRTP not set up yet");
    return NS_OK;
  }

  MOZ_ASSERT(pipeline_->rtp_.transport_);
  NS_ENSURE_TRUE(pipeline_->rtp_.transport_, NS_ERROR_NULL_POINTER);

  // libsrtp enciphers in place, so we need a new, big enough
  // buffer.
  // XXX. allocates and deletes one buffer per packet sent.
  // Bug 822129
  int max_len = data->len() + SRTP_MAX_EXPANSION;
  ScopedDeletePtr<unsigned char> inner_data(
      new unsigned char[max_len]);
  memcpy(inner_data, data->data(), data->len());

  int out_len;
  nsresult res = pipeline_->rtp_.send_srtp_->ProtectRtp(inner_data,
                                                        data->len(),
                                                        max_len,
                                                        &out_len);
  if (!NS_SUCCEEDED(res))
    return res;

  pipeline_->increment_rtp_packets_sent(out_len);
  return pipeline_->SendPacket(pipeline_->rtp_.transport_, inner_data,
                               out_len);
}

nsresult MediaPipeline::PipelineTransport::SendRtcpPacket(
    const void *data, int len) {

  nsAutoPtr<DataBuffer> buf(new DataBuffer(static_cast<const uint8_t *>(data),
                                           len));

  RUN_ON_THREAD(sts_thread_,
                WrapRunnable(
                    RefPtr<MediaPipeline::PipelineTransport>(this),
                    &MediaPipeline::PipelineTransport::SendRtcpPacket_s,
                    buf),
                NS_DISPATCH_NORMAL);

  return NS_OK;
}

nsresult MediaPipeline::PipelineTransport::SendRtcpPacket_s(
    nsAutoPtr<DataBuffer> data) {
  ASSERT_ON_THREAD(sts_thread_);
  if (!pipeline_)
    return NS_OK;  // Detached

  if (!pipeline_->rtcp_.send_srtp_) {
    MOZ_MTLOG(ML_DEBUG, "Couldn't write RTCP packet; SRTCP not set up yet");
    return NS_OK;
  }

  MOZ_ASSERT(pipeline_->rtcp_.transport_);
  NS_ENSURE_TRUE(pipeline_->rtcp_.transport_, NS_ERROR_NULL_POINTER);

  // libsrtp enciphers in place, so we need a new, big enough
  // buffer.
  // XXX. allocates and deletes one buffer per packet sent.
  // Bug 822129.
  int max_len = data->len() + SRTP_MAX_EXPANSION;
  ScopedDeletePtr<unsigned char> inner_data(
      new unsigned char[max_len]);
  memcpy(inner_data, data->data(), data->len());

  int out_len;
  nsresult res = pipeline_->rtcp_.send_srtp_->ProtectRtcp(inner_data,
                                                          data->len(),
                                                          max_len,
                                                          &out_len);

  if (!NS_SUCCEEDED(res))
    return res;

  pipeline_->increment_rtcp_packets_sent();
  return pipeline_->SendPacket(pipeline_->rtcp_.transport_, inner_data,
                               out_len);
}

// Called if we're attached with AddDirectListener()
void MediaPipelineTransmit::PipelineListener::
NotifyRealtimeData(MediaStreamGraph* graph, TrackID tid,
                   TrackRate rate,
                   TrackTicks offset,
                   uint32_t events,
                   const MediaSegment& media) {
  MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyRealtimeData()");

  NewData(graph, tid, rate, offset, events, media);
}

void MediaPipelineTransmit::PipelineListener::
NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
                         TrackRate rate,
                         TrackTicks offset,
                         uint32_t events,
                         const MediaSegment& queued_media) {
  MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyQueuedTrackChanges()");

  // ignore non-direct data if we're also getting direct data
  if (!direct_connect_) {
    NewData(graph, tid, rate, offset, events, queued_media);
  }
}

void MediaPipelineTransmit::PipelineListener::
NewData(MediaStreamGraph* graph, TrackID tid,
        TrackRate rate,
        TrackTicks offset,
        uint32_t events,
        const MediaSegment& media) {
  if (!active_) {
    MOZ_MTLOG(ML_DEBUG, "Discarding packets because transport not ready");
    return;
  }

  // TODO(ekr@rtfm.com): For now assume that we have only one
  // track type and it's destined for us
  // See bug 784517
  if (media.GetType() == MediaSegment::AUDIO) {
    if (conduit_->type() != MediaSessionConduit::AUDIO) {
      // Ignore data in case we have a muxed stream
      return;
    }
    AudioSegment* audio = const_cast<AudioSegment *>(
        static_cast<const AudioSegment *>(&media));

    AudioSegment::ChunkIterator iter(*audio);
    while (!iter.IsEnded()) {
      ProcessAudioChunk(static_cast<AudioSessionConduit*>(conduit_.get()),
                        rate, *iter);
      iter.Next();
    }
  } else if (media.GetType() == MediaSegment::VIDEO) {
#ifdef MOZILLA_INTERNAL_API
    if (conduit_->type() != MediaSessionConduit::VIDEO) {
      // Ignore data in case we have a muxed stream
      return;
    }
    VideoSegment* video = const_cast<VideoSegment *>(
        static_cast<const VideoSegment *>(&media));

    VideoSegment::ChunkIterator iter(*video);
    while (!iter.IsEnded()) {
      ProcessVideoChunk(static_cast<VideoSessionConduit*>(conduit_.get()),
                        rate, *iter);
      iter.Next();
    }
#endif
  } else {
    // Ignore
  }
}

void MediaPipelineTransmit::PipelineListener::ProcessAudioChunk(
    AudioSessionConduit *conduit,
    TrackRate rate,
    AudioChunk& chunk) {
  // TODO(ekr@rtfm.com): Do more than one channel
  nsAutoArrayPtr<int16_t> samples(new int16_t[chunk.mDuration]);

  if (chunk.mBuffer) {
    switch (chunk.mBufferFormat) {
      case AUDIO_FORMAT_FLOAT32:
        {
          const float* buf = static_cast<const float *>(chunk.mChannelData[0]);
          ConvertAudioSamplesWithScale(buf, static_cast<int16_t*>(samples),
                                       chunk.mDuration, chunk.mVolume);
        }
        break;
      case AUDIO_FORMAT_S16:
        {
          const short* buf = static_cast<const short *>(chunk.mChannelData[0]);
          ConvertAudioSamplesWithScale(buf, samples, chunk.mDuration, chunk.mVolume);
        }
        break;
      case AUDIO_FORMAT_SILENCE:
        memset(samples, 0, chunk.mDuration * sizeof(samples[0]));
        break;
      default:
        MOZ_ASSERT(PR_FALSE);
        return;
        break;
    }
  } else {
    // This means silence.
    memset(samples, 0, chunk.mDuration * sizeof(samples[0]));
  }

  MOZ_ASSERT(!(rate % 100));  // rate should be a multiple of 100

  // Check if the rate has changed since the last time we came through
  // I realize it may be overkill to check if the rate has changed, but
  // I believe it is possible (e.g. if we change sources) and it costs us
  // very little to handle this case

  if (samplenum_10ms_ != rate/100) {
    // Determine number of samples in 10 ms from the rate:
    samplenum_10ms_ = rate/100;
    // If we switch sample rates (e.g. if we switch codecs),
    // we throw away what was in the sample_10ms_buffer at the old rate
    samples_10ms_buffer_ = new int16_t[samplenum_10ms_];
    buffer_current_ = 0;
  }
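
  // The conduit consumes audio in exact 10 ms frames (samplenum_10ms_
  // samples), so whatever does not fill a whole frame is carried over in
  // samples_10ms_buffer_ and completed on a later call.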

  // Vars to handle the non-sunny-day case (where the audio chunks
  // we got are not multiples of 10ms OR there were samples left over
  // from the last run)
  int64_t chunk_remaining;
  int64_t tocpy;
  int16_t *samples_tmp = samples.get();

  chunk_remaining = chunk.mDuration;

  MOZ_ASSERT(chunk_remaining >= 0);

  if (buffer_current_) {
    tocpy = std::min(chunk_remaining, samplenum_10ms_ - buffer_current_);
    memcpy(&samples_10ms_buffer_[buffer_current_], samples_tmp, tocpy * sizeof(int16_t));
    buffer_current_ += tocpy;
    samples_tmp += tocpy;
    chunk_remaining -= tocpy;

    if (buffer_current_ == samplenum_10ms_) {
      // Send out the audio buffer we just finished filling
      conduit->SendAudioFrame(samples_10ms_buffer_, samplenum_10ms_, rate, 0);
      buffer_current_ = 0;
    } else {
      // We still don't have enough data to send a buffer
      return;
    }
  }

  // Now send (more) frames if there is more than 10ms of input left
  tocpy = (chunk_remaining / samplenum_10ms_) * samplenum_10ms_;
  if (tocpy > 0) {
    conduit->SendAudioFrame(samples_tmp, tocpy, rate, 0);
    samples_tmp += tocpy;
    chunk_remaining -= tocpy;
  }
  // Copy what remains for the next run

  MOZ_ASSERT(chunk_remaining < samplenum_10ms_);

  if (chunk_remaining) {
    memcpy(samples_10ms_buffer_, samples_tmp, chunk_remaining * sizeof(int16_t));
    buffer_current_ = chunk_remaining;
  }

}

#ifdef MOZILLA_INTERNAL_API
void MediaPipelineTransmit::PipelineListener::ProcessVideoChunk(
    VideoSessionConduit* conduit,
    TrackRate rate,
    VideoChunk& chunk) {
  layers::Image *img = chunk.mFrame.GetImage();

  // We now need to send the video frame to the other side
  if (!img) {
    // segment.AppendFrame() allows null images, which show up here as null
    return;
  }

  gfx::IntSize size = img->GetSize();
  if ((size.width & 1) != 0 || (size.height & 1) != 0) {
    MOZ_ASSERT(false, "Can't handle odd-sized images");
    return;
  }

  if (chunk.mFrame.GetForceBlack()) {
    uint32_t yPlaneLen = size.width*size.height;
    uint32_t cbcrPlaneLen = yPlaneLen/2;
    uint32_t length = yPlaneLen + cbcrPlaneLen;

    // Send a black image.
    nsAutoArrayPtr<uint8_t> pixelData;
    static const fallible_t fallible = fallible_t();
    pixelData = new (fallible) uint8_t[length];
    if (pixelData) {
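      // I420 black: luma plane filled with 0x10 (16) and both chroma planes
      // with the neutral value 0x80 (128).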
      memset(pixelData, 0x10, yPlaneLen);
      // Fill Cb/Cr planes
      memset(pixelData + yPlaneLen, 0x80, cbcrPlaneLen);

      MOZ_MTLOG(ML_DEBUG, "Sending a black video frame");
      conduit->SendVideoFrame(pixelData, length, size.width, size.height,
                              mozilla::kVideoI420, 0);
    }
    return;
  }

  // We get passed duplicate frames every ~10ms even if there's no frame change!
  int32_t serial = img->GetSerial();
  if (serial == last_img_) {
    return;
  }
  last_img_ = serial;

  ImageFormat format = img->GetFormat();
#ifdef WEBRTC_GONK
  if (format == ImageFormat::GRALLOC_PLANAR_YCBCR) {
    layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(img);
    android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
    void *basePtr;
    graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &basePtr);
    conduit->SendVideoFrame(static_cast<unsigned char*>(basePtr),
                            (graphicBuffer->getWidth() * graphicBuffer->getHeight() * 3) / 2,
                            graphicBuffer->getWidth(),
                            graphicBuffer->getHeight(),
                            mozilla::kVideoNV21, 0);
    graphicBuffer->unlock();
  } else
#endif
  if (format == ImageFormat::PLANAR_YCBCR) {
    // Cast away constness b/c some of the accessors are non-const
    layers::PlanarYCbCrImage* yuv =
        const_cast<layers::PlanarYCbCrImage *>(
            static_cast<const layers::PlanarYCbCrImage *>(img));
    // Big-time assumption here that this is all contiguous data coming
    // from getUserMedia or other sources.
    const layers::PlanarYCbCrData *data = yuv->GetData();

    uint8_t *y = data->mYChannel;
#ifdef DEBUG
    uint8_t *cb = data->mCbChannel;
    uint8_t *cr = data->mCrChannel;
#endif
    uint32_t width = yuv->GetSize().width;
    uint32_t height = yuv->GetSize().height;
    uint32_t length = yuv->GetDataSize();

    // SendVideoFrame only supports contiguous YCrCb 4:2:0 buffers
    // Verify it's contiguous and in the right order
    MOZ_ASSERT(cb == (y + width*height) &&
               cr == (cb + width*height/4));
    // XXX Consider making this a non-debug-only check if we ever implement
    // any subclasses of PlanarYCbCrImage that allow disjoint buffers such
    // that y+3(width*height)/2 might go outside the allocation.
    // GrallocImage can have wider strides, and so in some cases
    // would encode as garbage. If we need to encode it we'll either want to
    // modify SendVideoFrame or copy/move the data in the buffer.

    // OK, pass it on to the conduit
    MOZ_MTLOG(ML_DEBUG, "Sending a video frame");
    // Not much for us to do with an error
    conduit->SendVideoFrame(y, length, width, height, mozilla::kVideoI420, 0);
  } else if (format == ImageFormat::CAIRO_SURFACE) {
    layers::CairoImage* rgb =
        const_cast<layers::CairoImage *>(
            static_cast<const layers::CairoImage *>(img));

    gfx::IntSize size = rgb->GetSize();
    int half_width = (size.width + 1) >> 1;
    int half_height = (size.height + 1) >> 1;
    int c_size = half_width * half_height;
    int buffer_size = size.width * size.height + 2 * c_size;
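    // (buffer_size is the I420 footprint: one full-resolution Y plane plus
    // two chroma planes at half resolution in each dimension.)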
    uint8* yuv = (uint8*) malloc(buffer_size);
    if (!yuv)
      return;

    int cb_offset = size.width * size.height;
    int cr_offset = cb_offset + c_size;
    RefPtr<gfx::SourceSurface> tempSurf = rgb->GetAsSourceSurface();
    RefPtr<gfx::DataSourceSurface> surf = tempSurf->GetDataSurface();

    switch (surf->GetFormat()) {
      case gfx::SurfaceFormat::B8G8R8A8:
      case gfx::SurfaceFormat::B8G8R8X8:
        libyuv::ARGBToI420(static_cast<uint8*>(surf->GetData()), surf->Stride(),
                           yuv, size.width,
                           yuv + cb_offset, half_width,
                           yuv + cr_offset, half_width,
                           size.width, size.height);
        break;
      case gfx::SurfaceFormat::R5G6B5:
        libyuv::RGB565ToI420(static_cast<uint8*>(surf->GetData()), surf->Stride(),
                             yuv, size.width,
                             yuv + cb_offset, half_width,
                             yuv + cr_offset, half_width,
                             size.width, size.height);
        break;
      default:
        MOZ_MTLOG(ML_ERROR, "Unsupported RGB video format");
        MOZ_ASSERT(PR_FALSE);
    }
    conduit->SendVideoFrame(yuv, buffer_size, size.width, size.height, mozilla::kVideoI420, 0);
  } else {
    MOZ_MTLOG(ML_ERROR, "Unsupported video format");
    MOZ_ASSERT(PR_FALSE);
    return;
  }
}
#endif

nsresult MediaPipelineReceiveAudio::Init() {
  char track_id_string[11];
  ASSERT_ON_THREAD(main_thread_);
  MOZ_MTLOG(ML_DEBUG, __FUNCTION__);

  // We can replace this when we are allowed to do streams or std::to_string
  PR_snprintf(track_id_string, sizeof(track_id_string), "%d", track_id_);

  description_ = pc_ + "| Receive audio[";
  description_ += track_id_string;
  description_ += "]";

  listener_->AddSelf(new AudioSegment());

  return MediaPipelineReceive::Init();
}


// Add a track and listener on the MSG thread using the MSG command queue
static void AddTrackAndListener(MediaStream* source,
                                TrackID track_id, TrackRate track_rate,
                                MediaStreamListener* listener, MediaSegment* segment,
                                const RefPtr<TrackAddedCallback>& completed) {
  // This both adds the listener and the track
#ifdef MOZILLA_INTERNAL_API
  class Message : public ControlMessage {
   public:
    Message(MediaStream* stream, TrackID track, TrackRate rate,
            MediaSegment* segment, MediaStreamListener* listener,
            const RefPtr<TrackAddedCallback>& completed)
      : ControlMessage(stream),
        track_id_(track),
        track_rate_(rate),
        segment_(segment),
        listener_(listener),
        completed_(completed) {}

    virtual void Run() MOZ_OVERRIDE {
      StreamTime current_end = mStream->GetBufferEnd();
      TrackTicks current_ticks = TimeToTicksRoundUp(track_rate_, current_end);

      mStream->AddListenerImpl(listener_.forget());

      // Add a track 'now' to avoid possible underrun, especially if we add
      // a track "later".

      if (current_end != 0L) {
        MOZ_MTLOG(ML_DEBUG, "added track @ " << current_end <<
                  " -> " << MediaTimeToSeconds(current_end));
      }

      // To avoid assertions, we need to insert a dummy segment that covers up
      // to the "start" time for the track
      segment_->AppendNullData(current_ticks);
      mStream->AsSourceStream()->AddTrack(track_id_, track_rate_,
                                          current_ticks, segment_);
      // AdvanceKnownTracksTicksTime(HEAT_DEATH_OF_UNIVERSE) means that in
      // theory per the API, we can't add more tracks before that
      // time. However, the impl actually allows it, and it avoids a whole
      // bunch of locking that would be required (and potential blocking)
      // if we used smaller values and updated them on each NotifyPull.
      mStream->AsSourceStream()->AdvanceKnownTracksTime(STREAM_TIME_MAX);

      // We need to know how much has been "inserted" because we're given absolute
      // times in NotifyPull.
      completed_->TrackAdded(current_ticks);
    }
   private:
    TrackID track_id_;
    TrackRate track_rate_;
    MediaSegment* segment_;
    nsRefPtr<MediaStreamListener> listener_;
    const RefPtr<TrackAddedCallback> completed_;
  };

  MOZ_ASSERT(listener);

  source->GraphImpl()->AppendMessage(new Message(source, track_id, track_rate, segment, listener, completed));
#else
  source->AddListener(listener);
  source->AsSourceStream()->AddTrack(track_id, track_rate, 0, segment);
#endif
}

void GenericReceiveListener::AddSelf(MediaSegment* segment) {
  RefPtr<TrackAddedCallback> callback = new GenericReceiveCallback(this);
  AddTrackAndListener(source_, track_id_, track_rate_, this, segment, callback);
}

MediaPipelineReceiveAudio::PipelineListener::PipelineListener(
    SourceMediaStream * source, TrackID track_id,
    const RefPtr<MediaSessionConduit>& conduit)
  : GenericReceiveListener(source, track_id, 16000),  // XXX rate assumption
    conduit_(conduit)
{
  MOZ_ASSERT(track_rate_%100 == 0);
}

void MediaPipelineReceiveAudio::PipelineListener::
NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) {
  MOZ_ASSERT(source_);
  if (!source_) {
    MOZ_MTLOG(ML_ERROR, "NotifyPull() called from a non-SourceMediaStream");
    return;
  }

  // This comparison is done in total time to avoid accumulated roundoff errors.
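  // Each pass through the loop pulls one 10 ms frame from the conduit and
  // appends it to the track, so we keep going until played_ticks_ has caught
  // up with desired_time.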
  while (TicksToTimeRoundDown(track_rate_, played_ticks_) < desired_time) {
    // TODO(ekr@rtfm.com): Is there a way to avoid mallocating here? Or reduce the size?
    // Max size given mono is 480*2*1 = 960 (48KHz)
#define AUDIO_SAMPLE_BUFFER_MAX 1000
    MOZ_ASSERT((track_rate_/100)*sizeof(uint16_t) <= AUDIO_SAMPLE_BUFFER_MAX);

    nsRefPtr<SharedBuffer> samples = SharedBuffer::Create(AUDIO_SAMPLE_BUFFER_MAX);
    int16_t *samples_data = static_cast<int16_t *>(samples->Data());
    int samples_length;

    // This fetches 10ms of data
    MediaConduitErrorCode err =
        static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
            samples_data,
            track_rate_,
            0,  // TODO(ekr@rtfm.com): better estimate of "capture" (really playout) delay
            samples_length);
    MOZ_ASSERT(samples_length < AUDIO_SAMPLE_BUFFER_MAX);

    if (err != kMediaConduitNoError) {
      // Insert silence on conduit/GIPS failure (extremely unlikely)
      MOZ_MTLOG(ML_ERROR, "Audio conduit failed (" << err
                << ") to return data @ " << played_ticks_
                << " (desired " << desired_time << " -> "
                << MediaTimeToSeconds(desired_time) << ")");
      MOZ_ASSERT(err == kMediaConduitNoError);
      samples_length = (track_rate_/100)*sizeof(uint16_t);  // if this is not enough we'll loop and provide more
      memset(samples_data, '\0', samples_length);
    }

    MOZ_MTLOG(ML_DEBUG, "Audio conduit returned buffer of length "
              << samples_length);

    AudioSegment segment;
    nsAutoTArray<const int16_t*,1> channels;
    channels.AppendElement(samples_data);
    segment.AppendFrames(samples.forget(), channels, samples_length);

    // Handle track not actually added yet or removed/finished
    if (source_->AppendToTrack(track_id_, &segment)) {
      played_ticks_ += track_rate_/100;  // 10ms in TrackTicks
    } else {
      MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
      // we can't un-read the data, but that's ok since we don't want to
      // buffer - but don't i-loop!
      return;
    }
  }
}

nsresult MediaPipelineReceiveVideo::Init() {
  char track_id_string[11];
  ASSERT_ON_THREAD(main_thread_);
  MOZ_MTLOG(ML_DEBUG, __FUNCTION__);

  // We can replace this when we are allowed to do streams or std::to_string
  PR_snprintf(track_id_string, sizeof(track_id_string), "%d", track_id_);

  description_ = pc_ + "| Receive video[";
  description_ += track_id_string;
  description_ += "]";

#ifdef MOZILLA_INTERNAL_API
  listener_->AddSelf(new VideoSegment());
#endif

  // Always happens before we can DetachMediaStream()
  static_cast<VideoSessionConduit *>(conduit_.get())->
      AttachRenderer(renderer_);

  return MediaPipelineReceive::Init();
}

MediaPipelineReceiveVideo::PipelineListener::PipelineListener(
    SourceMediaStream* source, TrackID track_id)
  : GenericReceiveListener(source, track_id, USECS_PER_S),
    width_(640),
    height_(480),
#ifdef MOZILLA_INTERNAL_API
    image_container_(),
    image_(),
#endif
    monitor_("Video PipelineListener") {
#ifdef MOZILLA_INTERNAL_API
  image_container_ = layers::LayerManager::CreateImageContainer();
#endif
}

void MediaPipelineReceiveVideo::PipelineListener::RenderVideoFrame(
    const unsigned char* buffer,
    unsigned int buffer_size,
    uint32_t time_stamp,
    int64_t render_time,
    const RefPtr<layers::Image>& video_image) {
#ifdef MOZILLA_INTERNAL_API
  ReentrantMonitorAutoEnter enter(monitor_);

  if (buffer) {
    // Create a video frame using |buffer|.
#ifdef MOZ_WIDGET_GONK
    ImageFormat format = ImageFormat::GRALLOC_PLANAR_YCBCR;
#else
    ImageFormat format = ImageFormat::PLANAR_YCBCR;
#endif
    nsRefPtr<layers::Image> image = image_container_->CreateImage(format);
    layers::PlanarYCbCrImage* yuvImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
    uint8_t* frame = const_cast<uint8_t*>(static_cast<const uint8_t*> (buffer));
    const uint8_t lumaBpp = 8;
    const uint8_t chromaBpp = 4;
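
    // The incoming buffer is assumed to be contiguous I420: a width_ x height_
    // luma plane followed by Cb and Cr planes at half resolution in each
    // dimension; the strides and channel offsets below encode that layout.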
    layers::PlanarYCbCrData yuvData;
    yuvData.mYChannel = frame;
    yuvData.mYSize = IntSize(width_, height_);
    yuvData.mYStride = width_ * lumaBpp / 8;
    yuvData.mCbCrStride = width_ * chromaBpp / 8;
    yuvData.mCbChannel = frame + height_ * yuvData.mYStride;
    yuvData.mCrChannel = yuvData.mCbChannel + height_ * yuvData.mCbCrStride / 2;
    yuvData.mCbCrSize = IntSize(width_ / 2, height_ / 2);
    yuvData.mPicX = 0;
    yuvData.mPicY = 0;
    yuvData.mPicSize = IntSize(width_, height_);
    yuvData.mStereoMode = StereoMode::MONO;

    yuvImage->SetData(yuvData);

    image_ = image.forget();
  }
#ifdef WEBRTC_GONK
  else {
    // Decoder produced video frame that can be appended to the track directly.
    MOZ_ASSERT(video_image);
    image_ = video_image;
  }
#endif  // WEBRTC_GONK
#endif  // MOZILLA_INTERNAL_API
}

void MediaPipelineReceiveVideo::PipelineListener::
NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) {
  ReentrantMonitorAutoEnter enter(monitor_);

#ifdef MOZILLA_INTERNAL_API
  nsRefPtr<layers::Image> image = image_;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, desired_time);
  TrackTicks delta = target - played_ticks_;

  // Don't append if we've already provided a frame that supposedly
  // goes past the current aDesiredTime. Doing so means a negative
  // delta and thus messes up handling of the graph.
  if (delta > 0) {
    VideoSegment segment;
    segment.AppendFrame(image.forget(), delta, IntSize(width_, height_));
    // Handle track not actually added yet or removed/finished
    if (source_->AppendToTrack(track_id_, &segment)) {
      played_ticks_ = target;
    } else {
      MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
      return;
    }
  }
#endif
}


}  // end namespace