media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp

changeset 6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1479 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
     1.5 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.6 + * License, v. 2.0. If a copy of the MPL was not distributed with this file,
     1.7 + * You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.8 +
     1.9 +// Original author: ekr@rtfm.com
    1.10 +
    1.11 +#include "logging.h"
    1.12 +#include "MediaPipeline.h"
    1.13 +
    1.14 +#ifndef USE_FAKE_MEDIA_STREAMS
    1.15 +#include "MediaStreamGraphImpl.h"
    1.16 +#endif
    1.17 +
    1.18 +#include <math.h>
    1.19 +
    1.20 +#include "nspr.h"
    1.21 +#include "srtp.h"
    1.22 +
    1.23 +#ifdef MOZILLA_INTERNAL_API
    1.24 +#include "VideoSegment.h"
    1.25 +#include "Layers.h"
    1.26 +#include "ImageTypes.h"
    1.27 +#include "ImageContainer.h"
    1.28 +#include "VideoUtils.h"
    1.29 +#ifdef WEBRTC_GONK
    1.30 +#include "GrallocImages.h"
    1.31 +#include "mozilla/layers/GrallocTextureClient.h"
    1.32 +#endif
    1.33 +#endif
    1.34 +
    1.35 +#include "nsError.h"
    1.36 +#include "AudioSegment.h"
    1.37 +#include "MediaSegment.h"
    1.38 +#include "databuffer.h"
    1.39 +#include "transportflow.h"
    1.40 +#include "transportlayer.h"
    1.41 +#include "transportlayerdtls.h"
    1.42 +#include "transportlayerice.h"
    1.43 +#include "runnable_utils.h"
    1.44 +#include "libyuv/convert.h"
    1.45 +#include "mozilla/gfx/Point.h"
    1.46 +#include "mozilla/gfx/Types.h"
    1.47 +
    1.48 +#include "webrtc/modules/interface/module_common_types.h"
    1.49 +#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
    1.50 +
    1.51 +using namespace mozilla;
    1.52 +using namespace mozilla::gfx;
    1.53 +
    1.54 +// Logging context
    1.55 +MOZ_MTLOG_MODULE("mediapipeline")
    1.56 +
    1.57 +namespace mozilla {
    1.58 +
    1.59 +static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
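          +// kDTLSExporterLabel is the keying-material exporter label defined for
          +// DTLS-SRTP in RFC 5764 S 4.2; both endpoints must use the same label for
          +// the derived SRTP keys to match.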
    1.60 +
    1.61 +MediaPipeline::~MediaPipeline() {
    1.62 +  ASSERT_ON_THREAD(main_thread_);
    1.63 +  MOZ_ASSERT(!stream_);  // Check that we have shut down already.
    1.64 +  MOZ_MTLOG(ML_INFO, "Destroying MediaPipeline: " << description_);
    1.65 +}
    1.66 +
    1.67 +nsresult MediaPipeline::Init() {
    1.68 +  ASSERT_ON_THREAD(main_thread_);
    1.69 +
    1.70 +  RUN_ON_THREAD(sts_thread_,
    1.71 +                WrapRunnable(
    1.72 +                    nsRefPtr<MediaPipeline>(this),
    1.73 +                    &MediaPipeline::Init_s),
    1.74 +                NS_DISPATCH_NORMAL);
    1.75 +
    1.76 +  return NS_OK;
    1.77 +}
    1.78 +
    1.79 +nsresult MediaPipeline::Init_s() {
    1.80 +  ASSERT_ON_THREAD(sts_thread_);
    1.81 +  conduit_->AttachTransport(transport_);
    1.82 +
    1.83 +  nsresult res;
    1.84 +  MOZ_ASSERT(rtp_.transport_);
    1.85 +  MOZ_ASSERT(rtcp_.transport_);
    1.86 +  res = ConnectTransport_s(rtp_);
    1.87 +  if (NS_FAILED(res)) {
    1.88 +    return res;
    1.89 +  }
    1.90 +
    1.91 +  if (rtcp_.transport_ != rtp_.transport_) {
    1.92 +    res = ConnectTransport_s(rtcp_);
    1.93 +    if (NS_FAILED(res)) {
    1.94 +      return res;
    1.95 +    }
    1.96 +  }
    1.97 +
    1.98 +  if (possible_bundle_rtp_) {
    1.99 +    MOZ_ASSERT(possible_bundle_rtcp_);
   1.100 +    MOZ_ASSERT(possible_bundle_rtp_->transport_);
   1.101 +    MOZ_ASSERT(possible_bundle_rtcp_->transport_);
   1.102 +
   1.103 +    res = ConnectTransport_s(*possible_bundle_rtp_);
   1.104 +    if (NS_FAILED(res)) {
   1.105 +      return res;
   1.106 +    }
   1.107 +
   1.108 +    if (possible_bundle_rtcp_->transport_ != possible_bundle_rtp_->transport_) {
   1.109 +      res = ConnectTransport_s(*possible_bundle_rtcp_);
   1.110 +      if (NS_FAILED(res)) {
   1.111 +        return res;
   1.112 +      }
   1.113 +    }
   1.114 +  }
   1.115 +
   1.116 +  return NS_OK;
   1.117 +}
   1.118 +
   1.119 +
   1.120 +// Disconnect us from the transport so that we can cleanly destruct the
   1.121 +// pipeline on the main thread.  ShutdownMedia_m() must have already been
    1.122 +// called.
   1.123 +void MediaPipeline::ShutdownTransport_s() {
   1.124 +  ASSERT_ON_THREAD(sts_thread_);
   1.125 +  MOZ_ASSERT(!stream_); // verifies that ShutdownMedia_m() has run
   1.126 +
   1.127 +  disconnect_all();
   1.128 +  transport_->Detach();
   1.129 +  rtp_.transport_ = nullptr;
   1.130 +  rtcp_.transport_ = nullptr;
   1.131 +  possible_bundle_rtp_ = nullptr;
   1.132 +  possible_bundle_rtcp_ = nullptr;
   1.133 +}
   1.134 +
   1.135 +void MediaPipeline::StateChange(TransportFlow *flow, TransportLayer::State state) {
   1.136 +  TransportInfo* info = GetTransportInfo_s(flow);
   1.137 +  MOZ_ASSERT(info);
   1.138 +
   1.139 +  if (state == TransportLayer::TS_OPEN) {
   1.140 +    MOZ_MTLOG(ML_INFO, "Flow is ready");
   1.141 +    TransportReady_s(*info);
   1.142 +  } else if (state == TransportLayer::TS_CLOSED ||
   1.143 +             state == TransportLayer::TS_ERROR) {
   1.144 +    TransportFailed_s(*info);
   1.145 +  }
   1.146 +}
   1.147 +
   1.148 +static bool MakeRtpTypeToStringArray(const char** array) {
   1.149 +  static const char* RTP_str = "RTP";
   1.150 +  static const char* RTCP_str = "RTCP";
   1.151 +  static const char* MUX_str = "RTP/RTCP mux";
   1.152 +  array[MediaPipeline::RTP] = RTP_str;
   1.153 +  array[MediaPipeline::RTCP] = RTCP_str;
   1.154 +  array[MediaPipeline::MUX] = MUX_str;
   1.155 +  return true;
   1.156 +}
   1.157 +
   1.158 +static const char* ToString(MediaPipeline::RtpType type) {
   1.159 +  static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = {nullptr};
   1.160 +  // Dummy variable to cause init to happen only on first call
   1.161 +  static bool dummy = MakeRtpTypeToStringArray(array);
   1.162 +  (void)dummy;
   1.163 +  return array[type];
   1.164 +}
   1.165 +
   1.166 +nsresult MediaPipeline::TransportReady_s(TransportInfo &info) {
   1.167 +  MOZ_ASSERT(!description_.empty());
   1.168 +
   1.169 +  // TODO(ekr@rtfm.com): implement some kind of notification on
   1.170 +  // failure. bug 852665.
   1.171 +  if (info.state_ != MP_CONNECTING) {
    1.172 +    MOZ_MTLOG(ML_ERROR, "Transport ready for flow in wrong state: " <<
    1.173 +              description_ << ": " << ToString(info.type_));
   1.174 +    return NS_ERROR_FAILURE;
   1.175 +  }
   1.176 +
   1.177 +  MOZ_MTLOG(ML_INFO, "Transport ready for pipeline " <<
   1.178 +            static_cast<void *>(this) << " flow " << description_ << ": " <<
   1.179 +            ToString(info.type_));
   1.180 +
   1.181 +  // TODO(bcampen@mozilla.com): Should we disconnect from the flow on failure?
   1.182 +  nsresult res;
   1.183 +
   1.184 +  // Now instantiate the SRTP objects
   1.185 +  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
   1.186 +      info.transport_->GetLayer(TransportLayerDtls::ID()));
   1.187 +  MOZ_ASSERT(dtls);  // DTLS is mandatory
   1.188 +
   1.189 +  uint16_t cipher_suite;
   1.190 +  res = dtls->GetSrtpCipher(&cipher_suite);
   1.191 +  if (NS_FAILED(res)) {
   1.192 +    MOZ_MTLOG(ML_ERROR, "Failed to negotiate DTLS-SRTP. This is an error");
   1.193 +    info.state_ = MP_CLOSED;
   1.194 +    UpdateRtcpMuxState(info);
   1.195 +    return res;
   1.196 +  }
   1.197 +
   1.198 +  // SRTP Key Exporter as per RFC 5764 S 4.2
   1.199 +  unsigned char srtp_block[SRTP_TOTAL_KEY_LENGTH * 2];
   1.200 +  res = dtls->ExportKeyingMaterial(kDTLSExporterLabel, false, "",
   1.201 +                                   srtp_block, sizeof(srtp_block));
   1.202 +  if (NS_FAILED(res)) {
   1.203 +    MOZ_MTLOG(ML_ERROR, "Failed to compute DTLS-SRTP keys. This is an error");
   1.204 +    info.state_ = MP_CLOSED;
   1.205 +    UpdateRtcpMuxState(info);
   1.206 +    MOZ_CRASH();  // TODO: Remove once we have enough field experience to
   1.207 +                  // know it doesn't happen. bug 798797. Note that the
   1.208 +                  // code after this never executes.
   1.209 +    return res;
   1.210 +  }
   1.211 +
   1.212 +  // Slice and dice as per RFC 5764 S 4.2
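          +  // The exported block is laid out as
          +  //   client_write_key | server_write_key | client_write_salt | server_write_salt,
          +  // so each complete SRTP key below is a master key with its salt appended.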
   1.213 +  unsigned char client_write_key[SRTP_TOTAL_KEY_LENGTH];
   1.214 +  unsigned char server_write_key[SRTP_TOTAL_KEY_LENGTH];
   1.215 +  int offset = 0;
   1.216 +  memcpy(client_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
   1.217 +  offset += SRTP_MASTER_KEY_LENGTH;
   1.218 +  memcpy(server_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
   1.219 +  offset += SRTP_MASTER_KEY_LENGTH;
   1.220 +  memcpy(client_write_key + SRTP_MASTER_KEY_LENGTH,
   1.221 +         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
   1.222 +  offset += SRTP_MASTER_SALT_LENGTH;
   1.223 +  memcpy(server_write_key + SRTP_MASTER_KEY_LENGTH,
   1.224 +         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
   1.225 +  offset += SRTP_MASTER_SALT_LENGTH;
   1.226 +  MOZ_ASSERT(offset == sizeof(srtp_block));
   1.227 +
   1.228 +  unsigned char *write_key;
   1.229 +  unsigned char *read_key;
   1.230 +
   1.231 +  if (dtls->role() == TransportLayerDtls::CLIENT) {
   1.232 +    write_key = client_write_key;
   1.233 +    read_key = server_write_key;
   1.234 +  } else {
   1.235 +    write_key = server_write_key;
   1.236 +    read_key = client_write_key;
   1.237 +  }
   1.238 +
   1.239 +  MOZ_ASSERT(!info.send_srtp_ && !info.recv_srtp_);
   1.240 +  info.send_srtp_ = SrtpFlow::Create(cipher_suite, false, write_key,
   1.241 +                                     SRTP_TOTAL_KEY_LENGTH);
   1.242 +  info.recv_srtp_ = SrtpFlow::Create(cipher_suite, true, read_key,
   1.243 +                                     SRTP_TOTAL_KEY_LENGTH);
   1.244 +  if (!info.send_srtp_ || !info.recv_srtp_) {
   1.245 +    MOZ_MTLOG(ML_ERROR, "Couldn't create SRTP flow for "
   1.246 +              << ToString(info.type_));
   1.247 +    info.state_ = MP_CLOSED;
   1.248 +    UpdateRtcpMuxState(info);
   1.249 +    return NS_ERROR_FAILURE;
   1.250 +  }
   1.251 +
    1.252 +  MOZ_MTLOG(ML_INFO, "Listening for " << ToString(info.type_)
    1.253 +            << " packets received on " <<
    1.254 +            static_cast<void *>(dtls->downward()));
   1.255 +
   1.256 +  switch (info.type_) {
   1.257 +    case RTP:
   1.258 +      dtls->downward()->SignalPacketReceived.connect(
   1.259 +          this,
   1.260 +          &MediaPipeline::RtpPacketReceived);
   1.261 +      break;
   1.262 +    case RTCP:
   1.263 +      dtls->downward()->SignalPacketReceived.connect(
   1.264 +          this,
   1.265 +          &MediaPipeline::RtcpPacketReceived);
   1.266 +      break;
   1.267 +    case MUX:
   1.268 +      dtls->downward()->SignalPacketReceived.connect(
   1.269 +          this,
   1.270 +          &MediaPipeline::PacketReceived);
   1.271 +      break;
   1.272 +    default:
   1.273 +      MOZ_CRASH();
   1.274 +  }
   1.275 +
   1.276 +  info.state_ = MP_OPEN;
   1.277 +  UpdateRtcpMuxState(info);
   1.278 +  return NS_OK;
   1.279 +}
   1.280 +
   1.281 +nsresult MediaPipeline::TransportFailed_s(TransportInfo &info) {
   1.282 +  ASSERT_ON_THREAD(sts_thread_);
   1.283 +
   1.284 +  info.state_ = MP_CLOSED;
   1.285 +  UpdateRtcpMuxState(info);
   1.286 +
   1.287 +  MOZ_MTLOG(ML_INFO, "Transport closed for flow " << ToString(info.type_));
   1.288 +
   1.289 +  NS_WARNING(
   1.290 +      "MediaPipeline Transport failed. This is not properly cleaned up yet");
   1.291 +
   1.292 +  // TODO(ekr@rtfm.com): SECURITY: Figure out how to clean up if the
   1.293 +  // connection was good and now it is bad.
   1.294 +  // TODO(ekr@rtfm.com): Report up so that the PC knows we
   1.295 +  // have experienced an error.
   1.296 +
   1.297 +  return NS_OK;
   1.298 +}
   1.299 +
   1.300 +void MediaPipeline::UpdateRtcpMuxState(TransportInfo &info) {
   1.301 +  if (info.type_ == MUX) {
   1.302 +    if (info.transport_ == rtcp_.transport_) {
   1.303 +      rtcp_.state_ = info.state_;
   1.304 +      if (!rtcp_.send_srtp_) {
   1.305 +        rtcp_.send_srtp_ = info.send_srtp_;
   1.306 +        rtcp_.recv_srtp_ = info.recv_srtp_;
   1.307 +      }
   1.308 +    } else if (possible_bundle_rtcp_ &&
   1.309 +               info.transport_ == possible_bundle_rtcp_->transport_) {
   1.310 +      possible_bundle_rtcp_->state_ = info.state_;
   1.311 +      if (!possible_bundle_rtcp_->send_srtp_) {
   1.312 +        possible_bundle_rtcp_->send_srtp_ = info.send_srtp_;
   1.313 +        possible_bundle_rtcp_->recv_srtp_ = info.recv_srtp_;
   1.314 +      }
   1.315 +    }
   1.316 +  }
   1.317 +}
   1.318 +
   1.319 +nsresult MediaPipeline::SendPacket(TransportFlow *flow, const void *data,
   1.320 +                                   int len) {
   1.321 +  ASSERT_ON_THREAD(sts_thread_);
   1.322 +
   1.323 +  // Note that we bypass the DTLS layer here
   1.324 +  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
   1.325 +      flow->GetLayer(TransportLayerDtls::ID()));
   1.326 +  MOZ_ASSERT(dtls);
   1.327 +
   1.328 +  TransportResult res = dtls->downward()->
   1.329 +      SendPacket(static_cast<const unsigned char *>(data), len);
   1.330 +
   1.331 +  if (res != len) {
   1.332 +    // Ignore blocking indications
   1.333 +    if (res == TE_WOULDBLOCK)
   1.334 +      return NS_OK;
   1.335 +
   1.336 +    MOZ_MTLOG(ML_ERROR, "Failed write on stream");
   1.337 +    return NS_BASE_STREAM_CLOSED;
   1.338 +  }
   1.339 +
   1.340 +  return NS_OK;
   1.341 +}
   1.342 +
   1.343 +void MediaPipeline::increment_rtp_packets_sent(int32_t bytes) {
   1.344 +  ++rtp_packets_sent_;
   1.345 +  rtp_bytes_sent_ += bytes;
   1.346 +
   1.347 +  if (!(rtp_packets_sent_ % 100)) {
   1.348 +    MOZ_MTLOG(ML_INFO, "RTP sent packet count for " << description_
   1.349 +              << " Pipeline " << static_cast<void *>(this)
   1.350 +              << " Flow : " << static_cast<void *>(rtp_.transport_)
   1.351 +              << ": " << rtp_packets_sent_
   1.352 +              << " (" << rtp_bytes_sent_ << " bytes)");
   1.353 +  }
   1.354 +}
   1.355 +
   1.356 +void MediaPipeline::increment_rtcp_packets_sent() {
   1.357 +  ++rtcp_packets_sent_;
   1.358 +  if (!(rtcp_packets_sent_ % 100)) {
   1.359 +    MOZ_MTLOG(ML_INFO, "RTCP sent packet count for " << description_
   1.360 +              << " Pipeline " << static_cast<void *>(this)
   1.361 +              << " Flow : " << static_cast<void *>(rtcp_.transport_)
   1.362 +              << ": " << rtcp_packets_sent_);
   1.363 +  }
   1.364 +}
   1.365 +
   1.366 +void MediaPipeline::increment_rtp_packets_received(int32_t bytes) {
   1.367 +  ++rtp_packets_received_;
   1.368 +  rtp_bytes_received_ += bytes;
   1.369 +  if (!(rtp_packets_received_ % 100)) {
   1.370 +    MOZ_MTLOG(ML_INFO, "RTP received packet count for " << description_
   1.371 +              << " Pipeline " << static_cast<void *>(this)
   1.372 +              << " Flow : " << static_cast<void *>(rtp_.transport_)
   1.373 +              << ": " << rtp_packets_received_
   1.374 +              << " (" << rtp_bytes_received_ << " bytes)");
   1.375 +  }
   1.376 +}
   1.377 +
   1.378 +void MediaPipeline::increment_rtcp_packets_received() {
   1.379 +  ++rtcp_packets_received_;
   1.380 +  if (!(rtcp_packets_received_ % 100)) {
   1.381 +    MOZ_MTLOG(ML_INFO, "RTCP received packet count for " << description_
   1.382 +              << " Pipeline " << static_cast<void *>(this)
   1.383 +              << " Flow : " << static_cast<void *>(rtcp_.transport_)
   1.384 +              << ": " << rtcp_packets_received_);
   1.385 +  }
   1.386 +}
   1.387 +
   1.388 +void MediaPipeline::RtpPacketReceived(TransportLayer *layer,
   1.389 +                                      const unsigned char *data,
   1.390 +                                      size_t len) {
   1.391 +  if (!transport_->pipeline()) {
   1.392 +    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport disconnected");
   1.393 +    return;
   1.394 +  }
   1.395 +
   1.396 +  if (!conduit_) {
   1.397 +    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; media disconnected");
   1.398 +    return;
   1.399 +  }
   1.400 +
   1.401 +  TransportInfo* info = &rtp_;
   1.402 +
   1.403 +  if (possible_bundle_rtp_ &&
   1.404 +      possible_bundle_rtp_->transport_->Contains(layer)) {
   1.405 +    // Received this on our possible bundle transport. Override info.
   1.406 +    info = possible_bundle_rtp_;
   1.407 +  }
   1.408 +
   1.409 +  // TODO(bcampen@mozilla.com): Can either of these actually happen? If not,
   1.410 +  // the info variable can be removed, and this function gets simpler.
   1.411 +  if (info->state_ != MP_OPEN) {
   1.412 +    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; pipeline not open");
   1.413 +    return;
   1.414 +  }
   1.415 +
   1.416 +  if (info->transport_->state() != TransportLayer::TS_OPEN) {
   1.417 +    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport not open");
   1.418 +    return;
   1.419 +  }
   1.420 +
   1.421 +  // This should never happen.
   1.422 +  MOZ_ASSERT(info->recv_srtp_);
   1.423 +
   1.424 +  if (direction_ == TRANSMIT) {
   1.425 +    return;
   1.426 +  }
   1.427 +
   1.428 +  if (possible_bundle_rtp_ && (info == &rtp_))  {
   1.429 +    // We were not sure we would be using rtp_ or possible_bundle_rtp_, but we
   1.430 +    // have just received traffic that clears this up.
   1.431 +    // Don't let our filter prevent us from noticing this, if the filter is
    1.432 +    // incomplete (i.e., no SSRCs in remote SDP, or no remote SDP at all).
   1.433 +    SetUsingBundle_s(false);
   1.434 +    MOZ_MTLOG(ML_INFO, "Ruled out the possibility that we're receiving bundle "
   1.435 +                       "for " << description_);
   1.436 +    // TODO(bcampen@mozilla.com): Might be nice to detect when every
   1.437 +    // MediaPipeline but the master has determined that it isn't doing bundle,
   1.438 +    // since that means the master isn't doing bundle either. We could maybe
   1.439 +    // do this by putting some refcounted dummy variable in the filters, and
   1.440 +    // checking the value of the refcount. It is not clear whether this is
   1.441 +    // going to be useful in practice.
   1.442 +  }
   1.443 +
   1.444 +  if (!len) {
   1.445 +    return;
   1.446 +  }
   1.447 +
   1.448 +  // Filter out everything but RTP/RTCP
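          +  // (RTP and RTCP both carry version 2 in the top two bits of the first
          +  // byte, so valid first bytes fall in the range 128-191.)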
   1.449 +  if (data[0] < 128 || data[0] > 191) {
   1.450 +    return;
   1.451 +  }
   1.452 +
   1.453 +  if (filter_) {
   1.454 +    webrtc::RTPHeader header;
   1.455 +    if (!rtp_parser_->Parse(data, len, &header) ||
   1.456 +        !filter_->Filter(header)) {
   1.457 +      return;
   1.458 +    }
   1.459 +  }
   1.460 +
   1.461 +  if (possible_bundle_rtp_) {
   1.462 +    // Just got traffic that passed our filter on the potential bundle
   1.463 +    // transport. Must be doing bundle.
   1.464 +    SetUsingBundle_s(true);
   1.465 +    MOZ_MTLOG(ML_INFO, "Confirmed the possibility that we're receiving bundle");
   1.466 +  }
   1.467 +
   1.468 +  // Everything is decided now; just use rtp_
   1.469 +  MOZ_ASSERT(!possible_bundle_rtp_);
   1.470 +  MOZ_ASSERT(!possible_bundle_rtcp_);
   1.471 +
   1.472 +  // Make a copy rather than cast away constness
   1.473 +  ScopedDeletePtr<unsigned char> inner_data(
   1.474 +      new unsigned char[len]);
   1.475 +  memcpy(inner_data, data, len);
   1.476 +  int out_len = 0;
   1.477 +  nsresult res = rtp_.recv_srtp_->UnprotectRtp(inner_data,
   1.478 +                                               len, len, &out_len);
   1.479 +  if (!NS_SUCCEEDED(res)) {
   1.480 +    char tmp[16];
   1.481 +
   1.482 +    PR_snprintf(tmp, sizeof(tmp), "%.2x %.2x %.2x %.2x",
   1.483 +                inner_data[0],
   1.484 +                inner_data[1],
   1.485 +                inner_data[2],
   1.486 +                inner_data[3]);
   1.487 +
    1.488 +    MOZ_MTLOG(ML_NOTICE, "Error unprotecting RTP in " << description_
    1.489 +              << " len=" << len << " [" << tmp << "...]");
   1.490 +
   1.491 +    return;
   1.492 +  }
   1.493 +  increment_rtp_packets_received(out_len);
   1.494 +
   1.495 +  (void)conduit_->ReceivedRTPPacket(inner_data, out_len);  // Ignore error codes
   1.496 +}
   1.497 +
   1.498 +void MediaPipeline::RtcpPacketReceived(TransportLayer *layer,
   1.499 +                                       const unsigned char *data,
   1.500 +                                       size_t len) {
   1.501 +  if (!transport_->pipeline()) {
   1.502 +    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; transport disconnected");
   1.503 +    return;
   1.504 +  }
   1.505 +
   1.506 +  if (!conduit_) {
   1.507 +    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; media disconnected");
   1.508 +    return;
   1.509 +  }
   1.510 +
   1.511 +  TransportInfo* info = &rtcp_;
   1.512 +  if (possible_bundle_rtcp_ &&
   1.513 +      possible_bundle_rtcp_->transport_->Contains(layer)) {
   1.514 +    info = possible_bundle_rtcp_;
   1.515 +  }
   1.516 +
   1.517 +  if (info->state_ != MP_OPEN) {
   1.518 +    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; pipeline not open");
   1.519 +    return;
   1.520 +  }
   1.521 +
   1.522 +  if (info->transport_->state() != TransportLayer::TS_OPEN) {
   1.523 +    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport not open");
   1.524 +    return;
   1.525 +  }
   1.526 +
   1.527 +  if (possible_bundle_rtp_ && (info == &rtcp_)) {
   1.528 +    // We have offered bundle, and received our first packet on a non-bundle
   1.529 +    // address. We are definitely not using the bundle address.
   1.530 +    SetUsingBundle_s(false);
   1.531 +  }
   1.532 +
   1.533 +  if (!len) {
   1.534 +    return;
   1.535 +  }
   1.536 +
   1.537 +  // Filter out everything but RTP/RTCP
   1.538 +  if (data[0] < 128 || data[0] > 191) {
   1.539 +    return;
   1.540 +  }
   1.541 +
   1.542 +  MediaPipelineFilter::Result filter_result = MediaPipelineFilter::PASS;
   1.543 +  if (filter_) {
   1.544 +    filter_result = filter_->FilterRTCP(data, len);
   1.545 +    if (filter_result == MediaPipelineFilter::FAIL) {
   1.546 +      return;
   1.547 +    }
   1.548 +  }
   1.549 +
   1.550 +  if (filter_result == MediaPipelineFilter::PASS && possible_bundle_rtp_) {
   1.551 +    // Just got traffic that passed our filter on the potential bundle
   1.552 +    // transport. Must be doing bundle.
   1.553 +    SetUsingBundle_s(true);
   1.554 +  }
   1.555 +
   1.556 +  // We continue using info here, since it is possible that the filter did not
    1.557 +  // support the payload type (i.e., returned MediaPipelineFilter::UNSUPPORTED).
   1.558 +  // In this case, we just let it pass, and hope the webrtc.org code does
   1.559 +  // something sane.
   1.560 +  increment_rtcp_packets_received();
   1.561 +
   1.562 +  MOZ_ASSERT(info->recv_srtp_);  // This should never happen
   1.563 +
   1.564 +  // Make a copy rather than cast away constness
   1.565 +  ScopedDeletePtr<unsigned char> inner_data(
   1.566 +      new unsigned char[len]);
   1.567 +  memcpy(inner_data, data, len);
   1.568 +  int out_len;
   1.569 +
   1.570 +  nsresult res = info->recv_srtp_->UnprotectRtcp(inner_data,
   1.571 +                                                 len,
   1.572 +                                                 len,
   1.573 +                                                 &out_len);
   1.574 +
   1.575 +  if (!NS_SUCCEEDED(res))
   1.576 +    return;
   1.577 +
   1.578 +  (void)conduit_->ReceivedRTCPPacket(inner_data, out_len);  // Ignore error codes
   1.579 +}
   1.580 +
   1.581 +bool MediaPipeline::IsRtp(const unsigned char *data, size_t len) {
   1.582 +  if (len < 2)
   1.583 +    return false;
   1.584 +
   1.585 +  // Check if this is a RTCP packet. Logic based on the types listed in
   1.586 +  // media/webrtc/trunk/src/modules/rtp_rtcp/source/rtp_utility.cc
   1.587 +
   1.588 +  // Anything outside this range is RTP.
   1.589 +  if ((data[1] < 192) || (data[1] > 207))
   1.590 +    return true;
   1.591 +
   1.592 +  if (data[1] == 192)  // FIR
   1.593 +    return false;
   1.594 +
   1.595 +  if (data[1] == 193)  // NACK, but could also be RTP. This makes us sad
   1.596 +    return true;       // but it's how webrtc.org behaves.
   1.597 +
   1.598 +  if (data[1] == 194)
   1.599 +    return true;
   1.600 +
   1.601 +  if (data[1] == 195)  // IJ.
   1.602 +    return false;
   1.603 +
   1.604 +  if ((data[1] > 195) && (data[1] < 200))  // the > 195 is redundant
   1.605 +    return true;
   1.606 +
   1.607 +  if ((data[1] >= 200) && (data[1] <= 207))  // SR, RR, SDES, BYE,
   1.608 +    return false;                            // APP, RTPFB, PSFB, XR
   1.609 +
   1.610 +  MOZ_ASSERT(false);  // Not reached, belt and suspenders.
   1.611 +  return true;
   1.612 +}
   1.613 +
   1.614 +void MediaPipeline::PacketReceived(TransportLayer *layer,
   1.615 +                                   const unsigned char *data,
   1.616 +                                   size_t len) {
   1.617 +  if (!transport_->pipeline()) {
   1.618 +    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; transport disconnected");
   1.619 +    return;
   1.620 +  }
   1.621 +
   1.622 +  if (IsRtp(data, len)) {
   1.623 +    RtpPacketReceived(layer, data, len);
   1.624 +  } else {
   1.625 +    RtcpPacketReceived(layer, data, len);
   1.626 +  }
   1.627 +}
   1.628 +
   1.629 +nsresult MediaPipelineTransmit::Init() {
   1.630 +  char track_id_string[11];
   1.631 +  ASSERT_ON_THREAD(main_thread_);
   1.632 +
   1.633 +  // We can replace this when we are allowed to do streams or std::to_string
   1.634 +  PR_snprintf(track_id_string, sizeof(track_id_string), "%d", track_id_);
   1.635 +
   1.636 +  description_ = pc_ + "| ";
   1.637 +  description_ += conduit_->type() == MediaSessionConduit::AUDIO ?
   1.638 +      "Transmit audio[" : "Transmit video[";
   1.639 +  description_ += track_id_string;
   1.640 +  description_ += "]";
   1.641 +
   1.642 +  // TODO(ekr@rtfm.com): Check for errors
   1.643 +  MOZ_MTLOG(ML_DEBUG, "Attaching pipeline to stream "
   1.644 +            << static_cast<void *>(stream_) << " conduit type=" <<
    1.645 +            (conduit_->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
   1.646 +
   1.647 +  stream_->AddListener(listener_);
   1.648 +
   1.649 +  // Is this a gUM mediastream?  If so, also register the Listener directly with
   1.650 +  // the SourceMediaStream that's attached to the TrackUnion so we can get direct
   1.651 +  // unqueued (and not resampled) data
   1.652 +  if (domstream_->AddDirectListener(listener_)) {
   1.653 +    listener_->direct_connect_ = true;
   1.654 +  }
   1.655 +
   1.656 +  return MediaPipeline::Init();
   1.657 +}
   1.658 +
   1.659 +nsresult MediaPipelineTransmit::TransportReady_s(TransportInfo &info) {
   1.660 +  ASSERT_ON_THREAD(sts_thread_);
   1.661 +  // Call base ready function.
   1.662 +  MediaPipeline::TransportReady_s(info);
   1.663 +
   1.664 +  // Should not be set for a transmitter
   1.665 +  MOZ_ASSERT(!possible_bundle_rtp_);
   1.666 +  if (&info == &rtp_) {
   1.667 +    // TODO(ekr@rtfm.com): Move onto MSG thread.
   1.668 +    listener_->SetActive(true);
   1.669 +  }
   1.670 +
   1.671 +  return NS_OK;
   1.672 +}
   1.673 +
   1.674 +void MediaPipeline::DisconnectTransport_s(TransportInfo &info) {
   1.675 +  MOZ_ASSERT(info.transport_);
   1.676 +  ASSERT_ON_THREAD(sts_thread_);
   1.677 +
   1.678 +  info.transport_->SignalStateChange.disconnect(this);
   1.679 +  // We do this even if we're a transmitter, since we are still possibly
   1.680 +  // registered to receive RTCP.
   1.681 +  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
   1.682 +      info.transport_->GetLayer(TransportLayerDtls::ID()));
   1.683 +  MOZ_ASSERT(dtls);  // DTLS is mandatory
   1.684 +  MOZ_ASSERT(dtls->downward());
   1.685 +  dtls->downward()->SignalPacketReceived.disconnect(this);
   1.686 +}
   1.687 +
   1.688 +nsresult MediaPipeline::ConnectTransport_s(TransportInfo &info) {
   1.689 +  MOZ_ASSERT(info.transport_);
   1.690 +  ASSERT_ON_THREAD(sts_thread_);
   1.691 +
   1.692 +  // Look to see if the transport is ready
   1.693 +  if (info.transport_->state() == TransportLayer::TS_OPEN) {
   1.694 +    nsresult res = TransportReady_s(info);
   1.695 +    if (NS_FAILED(res)) {
   1.696 +      MOZ_MTLOG(ML_ERROR, "Error calling TransportReady(); res="
   1.697 +                << static_cast<uint32_t>(res) << " in " << __FUNCTION__);
   1.698 +      return res;
   1.699 +    }
   1.700 +  } else if (info.transport_->state() == TransportLayer::TS_ERROR) {
    1.701 +    MOZ_MTLOG(ML_ERROR, ToString(info.type_)
    1.702 +                        << " transport is already in error state");
   1.703 +    TransportFailed_s(info);
   1.704 +    return NS_ERROR_FAILURE;
   1.705 +  }
   1.706 +
   1.707 +  info.transport_->SignalStateChange.connect(this,
   1.708 +                                             &MediaPipeline::StateChange);
   1.709 +
   1.710 +  return NS_OK;
   1.711 +}
   1.712 +
   1.713 +MediaPipeline::TransportInfo* MediaPipeline::GetTransportInfo_s(
   1.714 +    TransportFlow *flow) {
   1.715 +  ASSERT_ON_THREAD(sts_thread_);
   1.716 +  if (flow == rtp_.transport_) {
   1.717 +    return &rtp_;
   1.718 +  }
   1.719 +
   1.720 +  if (flow == rtcp_.transport_) {
   1.721 +    return &rtcp_;
   1.722 +  }
   1.723 +
   1.724 +  if (possible_bundle_rtp_) {
   1.725 +    if (flow == possible_bundle_rtp_->transport_) {
   1.726 +      return possible_bundle_rtp_;
   1.727 +    }
   1.728 +
   1.729 +    if (flow == possible_bundle_rtcp_->transport_) {
   1.730 +      return possible_bundle_rtcp_;
   1.731 +    }
   1.732 +  }
   1.733 +
   1.734 +  return nullptr;
   1.735 +}
   1.736 +
   1.737 +void MediaPipeline::SetUsingBundle_s(bool decision) {
   1.738 +  ASSERT_ON_THREAD(sts_thread_);
   1.739 +  // Note: This can be called either because of events on the STS thread, or
   1.740 +  // by events on the main thread (ie; receiving a remote description). It is
   1.741 +  // best to be careful of races here, so don't assume that transports are open.
   1.742 +  if (!possible_bundle_rtp_) {
   1.743 +    if (!decision) {
   1.744 +      // This can happen on the master pipeline.
   1.745 +      filter_ = nullptr;
   1.746 +    }
   1.747 +    return;
   1.748 +  }
   1.749 +
   1.750 +  if (direction_ == RECEIVE) {
   1.751 +    if (decision) {
   1.752 +      MOZ_MTLOG(ML_INFO, "Non-master pipeline confirmed bundle for "
   1.753 +                         << description_);
   1.754 +      // We're doing bundle. Release the unused flows, and copy the ones we
   1.755 +      // are using into the less wishy-washy members.
   1.756 +      DisconnectTransport_s(rtp_);
   1.757 +      DisconnectTransport_s(rtcp_);
   1.758 +      rtp_ = *possible_bundle_rtp_;
   1.759 +      rtcp_ = *possible_bundle_rtcp_;
   1.760 +    } else {
   1.761 +      MOZ_MTLOG(ML_INFO, "Non-master pipeline confirmed no bundle for "
   1.762 +                         << description_);
   1.763 +      // We are not doing bundle
   1.764 +      DisconnectTransport_s(*possible_bundle_rtp_);
   1.765 +      DisconnectTransport_s(*possible_bundle_rtcp_);
   1.766 +      filter_ = nullptr;
   1.767 +    }
   1.768 +
   1.769 +    // We are no longer in an ambiguous state.
   1.770 +    possible_bundle_rtp_ = nullptr;
   1.771 +    possible_bundle_rtcp_ = nullptr;
   1.772 +  }
   1.773 +}
   1.774 +
   1.775 +MediaPipelineFilter* MediaPipeline::UpdateFilterFromRemoteDescription_s(
   1.776 +    nsAutoPtr<MediaPipelineFilter> filter) {
   1.777 +  ASSERT_ON_THREAD(sts_thread_);
   1.778 +  // This is only supposed to relax the filter. Relaxing a missing filter is
   1.779 +  // not possible.
   1.780 +  MOZ_ASSERT(filter_);
   1.781 +
   1.782 +  if (!filter) {
   1.783 +    filter_ = nullptr;
   1.784 +  } else {
   1.785 +    filter_->IncorporateRemoteDescription(*filter);
   1.786 +  }
   1.787 +
   1.788 +  return filter_.get();
   1.789 +}
   1.790 +
   1.791 +nsresult MediaPipeline::PipelineTransport::SendRtpPacket(
   1.792 +    const void *data, int len) {
   1.793 +
   1.794 +    nsAutoPtr<DataBuffer> buf(new DataBuffer(static_cast<const uint8_t *>(data),
   1.795 +                                             len));
   1.796 +
   1.797 +    RUN_ON_THREAD(sts_thread_,
   1.798 +                  WrapRunnable(
   1.799 +                      RefPtr<MediaPipeline::PipelineTransport>(this),
   1.800 +                      &MediaPipeline::PipelineTransport::SendRtpPacket_s,
   1.801 +                      buf),
   1.802 +                  NS_DISPATCH_NORMAL);
   1.803 +
   1.804 +    return NS_OK;
   1.805 +}
   1.806 +
   1.807 +nsresult MediaPipeline::PipelineTransport::SendRtpPacket_s(
   1.808 +    nsAutoPtr<DataBuffer> data) {
   1.809 +  ASSERT_ON_THREAD(sts_thread_);
   1.810 +  if (!pipeline_)
   1.811 +    return NS_OK;  // Detached
   1.812 +
   1.813 +  if (!pipeline_->rtp_.send_srtp_) {
   1.814 +    MOZ_MTLOG(ML_DEBUG, "Couldn't write RTP packet; SRTP not set up yet");
   1.815 +    return NS_OK;
   1.816 +  }
   1.817 +
   1.818 +  MOZ_ASSERT(pipeline_->rtp_.transport_);
   1.819 +  NS_ENSURE_TRUE(pipeline_->rtp_.transport_, NS_ERROR_NULL_POINTER);
   1.820 +
   1.821 +  // libsrtp enciphers in place, so we need a new, big enough
   1.822 +  // buffer.
   1.823 +  // XXX. allocates and deletes one buffer per packet sent.
   1.824 +  // Bug 822129
   1.825 +  int max_len = data->len() + SRTP_MAX_EXPANSION;
   1.826 +  ScopedDeletePtr<unsigned char> inner_data(
   1.827 +      new unsigned char[max_len]);
   1.828 +  memcpy(inner_data, data->data(), data->len());
   1.829 +
   1.830 +  int out_len;
   1.831 +  nsresult res = pipeline_->rtp_.send_srtp_->ProtectRtp(inner_data,
   1.832 +                                                        data->len(),
   1.833 +                                                        max_len,
   1.834 +                                                        &out_len);
   1.835 +  if (!NS_SUCCEEDED(res))
   1.836 +    return res;
   1.837 +
   1.838 +  pipeline_->increment_rtp_packets_sent(out_len);
   1.839 +  return pipeline_->SendPacket(pipeline_->rtp_.transport_, inner_data,
   1.840 +                               out_len);
   1.841 +}
   1.842 +
   1.843 +nsresult MediaPipeline::PipelineTransport::SendRtcpPacket(
   1.844 +    const void *data, int len) {
   1.845 +
   1.846 +    nsAutoPtr<DataBuffer> buf(new DataBuffer(static_cast<const uint8_t *>(data),
   1.847 +                                             len));
   1.848 +
   1.849 +    RUN_ON_THREAD(sts_thread_,
   1.850 +                  WrapRunnable(
   1.851 +                      RefPtr<MediaPipeline::PipelineTransport>(this),
   1.852 +                      &MediaPipeline::PipelineTransport::SendRtcpPacket_s,
   1.853 +                      buf),
   1.854 +                  NS_DISPATCH_NORMAL);
   1.855 +
   1.856 +    return NS_OK;
   1.857 +}
   1.858 +
   1.859 +nsresult MediaPipeline::PipelineTransport::SendRtcpPacket_s(
   1.860 +    nsAutoPtr<DataBuffer> data) {
   1.861 +  ASSERT_ON_THREAD(sts_thread_);
   1.862 +  if (!pipeline_)
   1.863 +    return NS_OK;  // Detached
   1.864 +
   1.865 +  if (!pipeline_->rtcp_.send_srtp_) {
   1.866 +    MOZ_MTLOG(ML_DEBUG, "Couldn't write RTCP packet; SRTCP not set up yet");
   1.867 +    return NS_OK;
   1.868 +  }
   1.869 +
   1.870 +  MOZ_ASSERT(pipeline_->rtcp_.transport_);
   1.871 +  NS_ENSURE_TRUE(pipeline_->rtcp_.transport_, NS_ERROR_NULL_POINTER);
   1.872 +
   1.873 +  // libsrtp enciphers in place, so we need a new, big enough
   1.874 +  // buffer.
   1.875 +  // XXX. allocates and deletes one buffer per packet sent.
   1.876 +  // Bug 822129.
   1.877 +  int max_len = data->len() + SRTP_MAX_EXPANSION;
   1.878 +  ScopedDeletePtr<unsigned char> inner_data(
   1.879 +      new unsigned char[max_len]);
   1.880 +  memcpy(inner_data, data->data(), data->len());
   1.881 +
   1.882 +  int out_len;
   1.883 +  nsresult res = pipeline_->rtcp_.send_srtp_->ProtectRtcp(inner_data,
   1.884 +                                                          data->len(),
   1.885 +                                                          max_len,
   1.886 +                                                          &out_len);
   1.887 +
   1.888 +  if (!NS_SUCCEEDED(res))
   1.889 +    return res;
   1.890 +
   1.891 +  pipeline_->increment_rtcp_packets_sent();
   1.892 +  return pipeline_->SendPacket(pipeline_->rtcp_.transport_, inner_data,
   1.893 +                               out_len);
   1.894 +}
   1.895 +
   1.896 +// Called if we're attached with AddDirectListener()
   1.897 +void MediaPipelineTransmit::PipelineListener::
   1.898 +NotifyRealtimeData(MediaStreamGraph* graph, TrackID tid,
   1.899 +                   TrackRate rate,
   1.900 +                   TrackTicks offset,
   1.901 +                   uint32_t events,
   1.902 +                   const MediaSegment& media) {
   1.903 +  MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyRealtimeData()");
   1.904 +
   1.905 +  NewData(graph, tid, rate, offset, events, media);
   1.906 +}
   1.907 +
   1.908 +void MediaPipelineTransmit::PipelineListener::
   1.909 +NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
   1.910 +                         TrackRate rate,
   1.911 +                         TrackTicks offset,
   1.912 +                         uint32_t events,
   1.913 +                         const MediaSegment& queued_media) {
   1.914 +  MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyQueuedTrackChanges()");
   1.915 +
   1.916 +  // ignore non-direct data if we're also getting direct data
   1.917 +  if (!direct_connect_) {
   1.918 +    NewData(graph, tid, rate, offset, events, queued_media);
   1.919 +  }
   1.920 +}
   1.921 +
   1.922 +void MediaPipelineTransmit::PipelineListener::
   1.923 +NewData(MediaStreamGraph* graph, TrackID tid,
   1.924 +        TrackRate rate,
   1.925 +        TrackTicks offset,
   1.926 +        uint32_t events,
   1.927 +        const MediaSegment& media) {
   1.928 +  if (!active_) {
   1.929 +    MOZ_MTLOG(ML_DEBUG, "Discarding packets because transport not ready");
   1.930 +    return;
   1.931 +  }
   1.932 +
   1.933 +  // TODO(ekr@rtfm.com): For now assume that we have only one
   1.934 +  // track type and it's destined for us
   1.935 +  // See bug 784517
   1.936 +  if (media.GetType() == MediaSegment::AUDIO) {
   1.937 +    if (conduit_->type() != MediaSessionConduit::AUDIO) {
   1.938 +      // Ignore data in case we have a muxed stream
   1.939 +      return;
   1.940 +    }
   1.941 +    AudioSegment* audio = const_cast<AudioSegment *>(
   1.942 +        static_cast<const AudioSegment *>(&media));
   1.943 +
   1.944 +    AudioSegment::ChunkIterator iter(*audio);
    1.945 +    while (!iter.IsEnded()) {
   1.946 +      ProcessAudioChunk(static_cast<AudioSessionConduit*>(conduit_.get()),
   1.947 +                        rate, *iter);
   1.948 +      iter.Next();
   1.949 +    }
   1.950 +  } else if (media.GetType() == MediaSegment::VIDEO) {
   1.951 +#ifdef MOZILLA_INTERNAL_API
   1.952 +    if (conduit_->type() != MediaSessionConduit::VIDEO) {
   1.953 +      // Ignore data in case we have a muxed stream
   1.954 +      return;
   1.955 +    }
   1.956 +    VideoSegment* video = const_cast<VideoSegment *>(
   1.957 +        static_cast<const VideoSegment *>(&media));
   1.958 +
   1.959 +    VideoSegment::ChunkIterator iter(*video);
    1.960 +    while (!iter.IsEnded()) {
   1.961 +      ProcessVideoChunk(static_cast<VideoSessionConduit*>(conduit_.get()),
   1.962 +                        rate, *iter);
   1.963 +      iter.Next();
   1.964 +    }
   1.965 +#endif
   1.966 +  } else {
   1.967 +    // Ignore
   1.968 +  }
   1.969 +}
   1.970 +
   1.971 +void MediaPipelineTransmit::PipelineListener::ProcessAudioChunk(
   1.972 +    AudioSessionConduit *conduit,
   1.973 +    TrackRate rate,
   1.974 +    AudioChunk& chunk) {
   1.975 +  // TODO(ekr@rtfm.com): Do more than one channel
   1.976 +  nsAutoArrayPtr<int16_t> samples(new int16_t[chunk.mDuration]);
   1.977 +
   1.978 +  if (chunk.mBuffer) {
   1.979 +    switch (chunk.mBufferFormat) {
   1.980 +      case AUDIO_FORMAT_FLOAT32:
   1.981 +        {
   1.982 +          const float* buf = static_cast<const float *>(chunk.mChannelData[0]);
   1.983 +          ConvertAudioSamplesWithScale(buf, static_cast<int16_t*>(samples),
   1.984 +                                       chunk.mDuration, chunk.mVolume);
   1.985 +        }
   1.986 +        break;
   1.987 +      case AUDIO_FORMAT_S16:
   1.988 +        {
   1.989 +          const short* buf = static_cast<const short *>(chunk.mChannelData[0]);
   1.990 +          ConvertAudioSamplesWithScale(buf, samples, chunk.mDuration, chunk.mVolume);
   1.991 +        }
   1.992 +        break;
   1.993 +      case AUDIO_FORMAT_SILENCE:
   1.994 +        memset(samples, 0, chunk.mDuration * sizeof(samples[0]));
   1.995 +        break;
   1.996 +      default:
   1.997 +        MOZ_ASSERT(PR_FALSE);
   1.998 +        return;
   1.999 +        break;
  1.1000 +    }
  1.1001 +  } else {
  1.1002 +    // This means silence.
  1.1003 +    memset(samples, 0, chunk.mDuration * sizeof(samples[0]));
  1.1004 +  }
  1.1005 +
  1.1006 +  MOZ_ASSERT(!(rate%100)); // rate should be a multiple of 100
  1.1007 +
  1.1008 +  // Check if the rate has changed since the last time we came through
  1.1009 +  // I realize it may be overkill to check if the rate has changed, but
  1.1010 +  // I believe it is possible (e.g. if we change sources) and it costs us
  1.1011 +  // very little to handle this case
  1.1012 +
   1.1013 +  if (samplenum_10ms_ != rate/100) {
  1.1014 +    // Determine number of samples in 10 ms from the rate:
  1.1015 +    samplenum_10ms_ = rate/100;
  1.1016 +    // If we switch sample rates (e.g. if we switch codecs),
  1.1017 +    // we throw away what was in the sample_10ms_buffer at the old rate
  1.1018 +    samples_10ms_buffer_ = new int16_t[samplenum_10ms_];
  1.1019 +    buffer_current_ = 0;
  1.1020 +  }
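          +  // For example, at 48 kHz samplenum_10ms_ is 480 samples per 10 ms chunk.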
  1.1021 +
  1.1022 +  // Vars to handle the non-sunny-day case (where the audio chunks
  1.1023 +  // we got are not multiples of 10ms OR there were samples left over
  1.1024 +  // from the last run)
  1.1025 +  int64_t chunk_remaining;
  1.1026 +  int64_t tocpy;
  1.1027 +  int16_t *samples_tmp = samples.get();
  1.1028 +
  1.1029 +  chunk_remaining = chunk.mDuration;
  1.1030 +
  1.1031 +  MOZ_ASSERT(chunk_remaining >= 0);
  1.1032 +
  1.1033 +  if (buffer_current_) {
  1.1034 +    tocpy = std::min(chunk_remaining, samplenum_10ms_ - buffer_current_);
  1.1035 +    memcpy(&samples_10ms_buffer_[buffer_current_], samples_tmp, tocpy * sizeof(int16_t));
  1.1036 +    buffer_current_ += tocpy;
  1.1037 +    samples_tmp += tocpy;
  1.1038 +    chunk_remaining -= tocpy;
  1.1039 +
  1.1040 +    if (buffer_current_ == samplenum_10ms_) {
  1.1041 +      // Send out the audio buffer we just finished filling
  1.1042 +      conduit->SendAudioFrame(samples_10ms_buffer_, samplenum_10ms_, rate, 0);
  1.1043 +      buffer_current_ = 0;
  1.1044 +    } else {
  1.1045 +      // We still don't have enough data to send a buffer
  1.1046 +      return;
  1.1047 +    }
  1.1048 +  }
  1.1049 +
  1.1050 +  // Now send (more) frames if there is more than 10ms of input left
  1.1051 +  tocpy = (chunk_remaining / samplenum_10ms_) * samplenum_10ms_;
  1.1052 +  if (tocpy > 0) {
  1.1053 +    conduit->SendAudioFrame(samples_tmp, tocpy, rate, 0);
  1.1054 +    samples_tmp += tocpy;
  1.1055 +    chunk_remaining -= tocpy;
  1.1056 +  }
  1.1057 +  // Copy what remains for the next run
  1.1058 +
  1.1059 +  MOZ_ASSERT(chunk_remaining < samplenum_10ms_);
  1.1060 +
  1.1061 +  if (chunk_remaining) {
  1.1062 +    memcpy(samples_10ms_buffer_, samples_tmp, chunk_remaining * sizeof(int16_t));
  1.1063 +    buffer_current_ = chunk_remaining;
  1.1064 +  }
  1.1065 +
  1.1066 +}
  1.1067 +
  1.1068 +#ifdef MOZILLA_INTERNAL_API
  1.1069 +void MediaPipelineTransmit::PipelineListener::ProcessVideoChunk(
  1.1070 +    VideoSessionConduit* conduit,
  1.1071 +    TrackRate rate,
  1.1072 +    VideoChunk& chunk) {
  1.1073 +  layers::Image *img = chunk.mFrame.GetImage();
  1.1074 +
  1.1075 +  // We now need to send the video frame to the other side
  1.1076 +  if (!img) {
  1.1077 +    // segment.AppendFrame() allows null images, which show up here as null
  1.1078 +    return;
  1.1079 +  }
  1.1080 +
  1.1081 +  gfx::IntSize size = img->GetSize();
  1.1082 +  if ((size.width & 1) != 0 || (size.height & 1) != 0) {
  1.1083 +    MOZ_ASSERT(false, "Can't handle odd-sized images");
  1.1084 +    return;
  1.1085 +  }
  1.1086 +
  1.1087 +  if (chunk.mFrame.GetForceBlack()) {
  1.1088 +    uint32_t yPlaneLen = size.width*size.height;
  1.1089 +    uint32_t cbcrPlaneLen = yPlaneLen/2;
  1.1090 +    uint32_t length = yPlaneLen + cbcrPlaneLen;
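          +    // In I420, Cb and Cr are each quarter-resolution, so together they take
          +    // half the space of the Y plane.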
  1.1091 +
  1.1092 +    // Send a black image.
  1.1093 +    nsAutoArrayPtr<uint8_t> pixelData;
  1.1094 +    static const fallible_t fallible = fallible_t();
  1.1095 +    pixelData = new (fallible) uint8_t[length];
  1.1096 +    if (pixelData) {
  1.1097 +      memset(pixelData, 0x10, yPlaneLen);
  1.1098 +      // Fill Cb/Cr planes
  1.1099 +      memset(pixelData + yPlaneLen, 0x80, cbcrPlaneLen);
  1.1100 +
  1.1101 +      MOZ_MTLOG(ML_DEBUG, "Sending a black video frame");
  1.1102 +      conduit->SendVideoFrame(pixelData, length, size.width, size.height,
  1.1103 +                              mozilla::kVideoI420, 0);
  1.1104 +    }
  1.1105 +    return;
  1.1106 +  }
  1.1107 +
  1.1108 +  // We get passed duplicate frames every ~10ms even if there's no frame change!
  1.1109 +  int32_t serial = img->GetSerial();
  1.1110 +  if (serial == last_img_) {
  1.1111 +    return;
  1.1112 +  }
  1.1113 +  last_img_ = serial;
  1.1114 +
  1.1115 +  ImageFormat format = img->GetFormat();
  1.1116 +#ifdef WEBRTC_GONK
  1.1117 +  if (format == ImageFormat::GRALLOC_PLANAR_YCBCR) {
  1.1118 +    layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(img);
  1.1119 +    android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
  1.1120 +    void *basePtr;
  1.1121 +    graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &basePtr);
  1.1122 +    conduit->SendVideoFrame(static_cast<unsigned char*>(basePtr),
  1.1123 +                            (graphicBuffer->getWidth() * graphicBuffer->getHeight() * 3) / 2,
  1.1124 +                            graphicBuffer->getWidth(),
  1.1125 +                            graphicBuffer->getHeight(),
  1.1126 +                            mozilla::kVideoNV21, 0);
  1.1127 +    graphicBuffer->unlock();
  1.1128 +  } else
  1.1129 +#endif
  1.1130 +  if (format == ImageFormat::PLANAR_YCBCR) {
  1.1131 +    // Cast away constness b/c some of the accessors are non-const
   1.1132 +    layers::PlanarYCbCrImage* yuv =
   1.1133 +        const_cast<layers::PlanarYCbCrImage *>(
   1.1134 +            static_cast<const layers::PlanarYCbCrImage *>(img));
  1.1135 +    // Big-time assumption here that this is all contiguous data coming
  1.1136 +    // from getUserMedia or other sources.
  1.1137 +    const layers::PlanarYCbCrData *data = yuv->GetData();
  1.1138 +
  1.1139 +    uint8_t *y = data->mYChannel;
  1.1140 +#ifdef DEBUG
  1.1141 +    uint8_t *cb = data->mCbChannel;
  1.1142 +    uint8_t *cr = data->mCrChannel;
  1.1143 +#endif
  1.1144 +    uint32_t width = yuv->GetSize().width;
  1.1145 +    uint32_t height = yuv->GetSize().height;
  1.1146 +    uint32_t length = yuv->GetDataSize();
  1.1147 +
  1.1148 +    // SendVideoFrame only supports contiguous YCrCb 4:2:0 buffers
  1.1149 +    // Verify it's contiguous and in the right order
  1.1150 +    MOZ_ASSERT(cb == (y + width*height) &&
  1.1151 +               cr == (cb + width*height/4));
  1.1152 +    // XXX Consider making this a non-debug-only check if we ever implement
  1.1153 +    // any subclasses of PlanarYCbCrImage that allow disjoint buffers such
  1.1154 +    // that y+3(width*height)/2 might go outside the allocation.
  1.1155 +    // GrallocImage can have wider strides, and so in some cases
  1.1156 +    // would encode as garbage.  If we need to encode it we'll either want to
  1.1157 +    // modify SendVideoFrame or copy/move the data in the buffer.
  1.1158 +
  1.1159 +    // OK, pass it on to the conduit
  1.1160 +    MOZ_MTLOG(ML_DEBUG, "Sending a video frame");
  1.1161 +    // Not much for us to do with an error
  1.1162 +    conduit->SendVideoFrame(y, length, width, height, mozilla::kVideoI420, 0);
   1.1163 +  } else if (format == ImageFormat::CAIRO_SURFACE) {
   1.1164 +    layers::CairoImage* rgb =
   1.1165 +        const_cast<layers::CairoImage *>(
   1.1166 +            static_cast<const layers::CairoImage *>(img));
  1.1167 +
  1.1168 +    gfx::IntSize size = rgb->GetSize();
  1.1169 +    int half_width = (size.width + 1) >> 1;
  1.1170 +    int half_height = (size.height + 1) >> 1;
  1.1171 +    int c_size = half_width * half_height;
  1.1172 +    int buffer_size = size.width * size.height + 2 * c_size;
  1.1173 +    uint8* yuv = (uint8*) malloc(buffer_size);
  1.1174 +    if (!yuv)
  1.1175 +      return;
  1.1176 +
  1.1177 +    int cb_offset = size.width * size.height;
  1.1178 +    int cr_offset = cb_offset + c_size;
  1.1179 +    RefPtr<gfx::SourceSurface> tempSurf = rgb->GetAsSourceSurface();
  1.1180 +    RefPtr<gfx::DataSourceSurface> surf = tempSurf->GetDataSurface();
  1.1181 +
  1.1182 +    switch (surf->GetFormat()) {
  1.1183 +      case gfx::SurfaceFormat::B8G8R8A8:
  1.1184 +      case gfx::SurfaceFormat::B8G8R8X8:
  1.1185 +        libyuv::ARGBToI420(static_cast<uint8*>(surf->GetData()), surf->Stride(),
  1.1186 +                           yuv, size.width,
  1.1187 +                           yuv + cb_offset, half_width,
  1.1188 +                           yuv + cr_offset, half_width,
  1.1189 +                           size.width, size.height);
  1.1190 +        break;
  1.1191 +      case gfx::SurfaceFormat::R5G6B5:
  1.1192 +        libyuv::RGB565ToI420(static_cast<uint8*>(surf->GetData()), surf->Stride(),
  1.1193 +                             yuv, size.width,
  1.1194 +                             yuv + cb_offset, half_width,
  1.1195 +                             yuv + cr_offset, half_width,
  1.1196 +                             size.width, size.height);
  1.1197 +        break;
  1.1198 +      default:
  1.1199 +        MOZ_MTLOG(ML_ERROR, "Unsupported RGB video format");
  1.1200 +        MOZ_ASSERT(PR_FALSE);
  1.1201 +    }
   1.1202 +    conduit->SendVideoFrame(yuv, buffer_size, size.width, size.height, mozilla::kVideoI420, 0);
          +    free(yuv);  // The conduit copies the frame data, so release the temporary buffer.
  1.1203 +  } else {
  1.1204 +    MOZ_MTLOG(ML_ERROR, "Unsupported video format");
  1.1205 +    MOZ_ASSERT(PR_FALSE);
  1.1206 +    return;
  1.1207 +  }
  1.1208 +}
  1.1209 +#endif
  1.1210 +
  1.1211 +nsresult MediaPipelineReceiveAudio::Init() {
  1.1212 +  char track_id_string[11];
  1.1213 +  ASSERT_ON_THREAD(main_thread_);
  1.1214 +  MOZ_MTLOG(ML_DEBUG, __FUNCTION__);
  1.1215 +
  1.1216 +  // We can replace this when we are allowed to do streams or std::to_string
  1.1217 +  PR_snprintf(track_id_string, sizeof(track_id_string), "%d", track_id_);
  1.1218 +
  1.1219 +  description_ = pc_ + "| Receive audio[";
  1.1220 +  description_ += track_id_string;
  1.1221 +  description_ += "]";
  1.1222 +
  1.1223 +  listener_->AddSelf(new AudioSegment());
  1.1224 +
  1.1225 +  return MediaPipelineReceive::Init();
  1.1226 +}
  1.1227 +
  1.1228 +
  1.1229 +// Add a track and listener on the MSG thread using the MSG command queue
  1.1230 +static void AddTrackAndListener(MediaStream* source,
  1.1231 +                                TrackID track_id, TrackRate track_rate,
  1.1232 +                                MediaStreamListener* listener, MediaSegment* segment,
  1.1233 +                                const RefPtr<TrackAddedCallback>& completed) {
  1.1234 +  // This both adds the listener and the track
  1.1235 +#ifdef MOZILLA_INTERNAL_API
  1.1236 +  class Message : public ControlMessage {
  1.1237 +   public:
  1.1238 +    Message(MediaStream* stream, TrackID track, TrackRate rate,
  1.1239 +            MediaSegment* segment, MediaStreamListener* listener,
  1.1240 +            const RefPtr<TrackAddedCallback>& completed)
  1.1241 +      : ControlMessage(stream),
  1.1242 +        track_id_(track),
  1.1243 +        track_rate_(rate),
  1.1244 +        segment_(segment),
  1.1245 +        listener_(listener),
  1.1246 +        completed_(completed) {}
  1.1247 +
  1.1248 +    virtual void Run() MOZ_OVERRIDE {
  1.1249 +      StreamTime current_end = mStream->GetBufferEnd();
  1.1250 +      TrackTicks current_ticks = TimeToTicksRoundUp(track_rate_, current_end);
  1.1251 +
  1.1252 +      mStream->AddListenerImpl(listener_.forget());
  1.1253 +
  1.1254 +      // Add a track 'now' to avoid possible underrun, especially if we add
  1.1255 +      // a track "later".
  1.1256 +
  1.1257 +      if (current_end != 0L) {
  1.1258 +        MOZ_MTLOG(ML_DEBUG, "added track @ " << current_end <<
  1.1259 +                  " -> " << MediaTimeToSeconds(current_end));
  1.1260 +      }
  1.1261 +
  1.1262 +      // To avoid assertions, we need to insert a dummy segment that covers up
  1.1263 +      // to the "start" time for the track
  1.1264 +      segment_->AppendNullData(current_ticks);
  1.1265 +      mStream->AsSourceStream()->AddTrack(track_id_, track_rate_,
  1.1266 +                                          current_ticks, segment_);
  1.1267 +      // AdvanceKnownTracksTicksTime(HEAT_DEATH_OF_UNIVERSE) means that in
  1.1268 +      // theory per the API, we can't add more tracks before that
  1.1269 +      // time. However, the impl actually allows it, and it avoids a whole
  1.1270 +      // bunch of locking that would be required (and potential blocking)
  1.1271 +      // if we used smaller values and updated them on each NotifyPull.
  1.1272 +      mStream->AsSourceStream()->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  1.1273 +
  1.1274 +      // We need to know how much has been "inserted" because we're given absolute
  1.1275 +      // times in NotifyPull.
  1.1276 +      completed_->TrackAdded(current_ticks);
  1.1277 +    }
  1.1278 +   private:
  1.1279 +    TrackID track_id_;
  1.1280 +    TrackRate track_rate_;
  1.1281 +    MediaSegment* segment_;
  1.1282 +    nsRefPtr<MediaStreamListener> listener_;
  1.1283 +    const RefPtr<TrackAddedCallback> completed_;
  1.1284 +  };
  1.1285 +
  1.1286 +  MOZ_ASSERT(listener);
  1.1287 +
  1.1288 +  source->GraphImpl()->AppendMessage(new Message(source, track_id, track_rate, segment, listener, completed));
  1.1289 +#else
  1.1290 +  source->AddListener(listener);
  1.1291 +  source->AsSourceStream()->AddTrack(track_id, track_rate, 0, segment);
  1.1292 +#endif
  1.1293 +}
  1.1294 +
  1.1295 +void GenericReceiveListener::AddSelf(MediaSegment* segment) {
  1.1296 +  RefPtr<TrackAddedCallback> callback = new GenericReceiveCallback(this);
  1.1297 +  AddTrackAndListener(source_, track_id_, track_rate_, this, segment, callback);
  1.1298 +}
  1.1299 +
  1.1300 +MediaPipelineReceiveAudio::PipelineListener::PipelineListener(
  1.1301 +    SourceMediaStream * source, TrackID track_id,
  1.1302 +    const RefPtr<MediaSessionConduit>& conduit)
  1.1303 +  : GenericReceiveListener(source, track_id, 16000), // XXX rate assumption
  1.1304 +    conduit_(conduit)
  1.1305 +{
  1.1306 +  MOZ_ASSERT(track_rate_%100 == 0);
  1.1307 +}
  1.1308 +
  1.1309 +void MediaPipelineReceiveAudio::PipelineListener::
  1.1310 +NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) {
  1.1311 +  MOZ_ASSERT(source_);
  1.1312 +  if (!source_) {
  1.1313 +    MOZ_MTLOG(ML_ERROR, "NotifyPull() called from a non-SourceMediaStream");
  1.1314 +    return;
  1.1315 +  }
  1.1316 +
  1.1317 +  // This comparison is done in total time to avoid accumulated roundoff errors.
  1.1318 +  while (TicksToTimeRoundDown(track_rate_, played_ticks_) < desired_time) {
  1.1319 +    // TODO(ekr@rtfm.com): Is there a way to avoid mallocating here?  Or reduce the size?
  1.1320 +    // Max size given mono is 480*2*1 = 960 (48KHz)
  1.1321 +#define AUDIO_SAMPLE_BUFFER_MAX 1000
  1.1322 +    MOZ_ASSERT((track_rate_/100)*sizeof(uint16_t) <= AUDIO_SAMPLE_BUFFER_MAX);
  1.1323 +
  1.1324 +    nsRefPtr<SharedBuffer> samples = SharedBuffer::Create(AUDIO_SAMPLE_BUFFER_MAX);
  1.1325 +    int16_t *samples_data = static_cast<int16_t *>(samples->Data());
  1.1326 +    int samples_length;
  1.1327 +
  1.1328 +    // This fetches 10ms of data
  1.1329 +    MediaConduitErrorCode err =
  1.1330 +        static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
  1.1331 +            samples_data,
  1.1332 +            track_rate_,
  1.1333 +            0,  // TODO(ekr@rtfm.com): better estimate of "capture" (really playout) delay
  1.1334 +            samples_length);
  1.1335 +    MOZ_ASSERT(samples_length < AUDIO_SAMPLE_BUFFER_MAX);
  1.1336 +
  1.1337 +    if (err != kMediaConduitNoError) {
  1.1338 +      // Insert silence on conduit/GIPS failure (extremely unlikely)
  1.1339 +      MOZ_MTLOG(ML_ERROR, "Audio conduit failed (" << err
  1.1340 +                << ") to return data @ " << played_ticks_
  1.1341 +                << " (desired " << desired_time << " -> "
  1.1342 +                << MediaTimeToSeconds(desired_time) << ")");
  1.1343 +      MOZ_ASSERT(err == kMediaConduitNoError);
  1.1344 +      samples_length = track_rate_/100; // one 10ms chunk; if this is not enough we'll loop and provide more
  1.1345 +      memset(samples_data, '\0', samples_length * sizeof(int16_t));
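         +      // The zeroed chunk is appended below just like real data, so
         +      // playback continues with silence rather than stalling.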
  1.1346 +    }
  1.1347 +
  1.1348 +    MOZ_MTLOG(ML_DEBUG, "Audio conduit returned buffer of length "
  1.1349 +              << samples_length);
  1.1350 +
  1.1351 +    AudioSegment segment;
  1.1352 +    nsAutoTArray<const int16_t*,1> channels;
  1.1353 +    channels.AppendElement(samples_data);
  1.1354 +    segment.AppendFrames(samples.forget(), channels, samples_length);
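         +    // The segment takes ownership of the SharedBuffer (via forget()) and
         +    // now holds samples_length frames of a single (mono) channel.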
  1.1355 +
  1.1356 +    // Handle track not actually added yet or removed/finished
  1.1357 +    if (source_->AppendToTrack(track_id_, &segment)) {
  1.1358 +      played_ticks_ += track_rate_/100; // 10ms in TrackTicks
  1.1359 +    } else {
  1.1360 +      MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
  1.1361 +      // We can't un-read the data, but that's OK since we don't want to
  1.1362 +      // buffer anyway - just don't loop forever here.
  1.1363 +      return;
  1.1364 +    }
  1.1365 +  }
  1.1366 +}
  1.1367 +
  1.1368 +nsresult MediaPipelineReceiveVideo::Init() {
  1.1369 +  char track_id_string[11];
  1.1370 +  ASSERT_ON_THREAD(main_thread_);
  1.1371 +  MOZ_MTLOG(ML_DEBUG, __FUNCTION__);
  1.1372 +
  1.1373 +  // We can replace this when we are allowed to use string streams or std::to_string
  1.1374 +  PR_snprintf(track_id_string, sizeof(track_id_string), "%d", track_id_);
  1.1375 +
  1.1376 +  description_ = pc_ + "| Receive video[";
  1.1377 +  description_ += track_id_string;
  1.1378 +  description_ += "]";
  1.1379 +
  1.1380 +#ifdef MOZILLA_INTERNAL_API
  1.1381 +  listener_->AddSelf(new VideoSegment());
  1.1382 +#endif
  1.1383 +
  1.1384 +  // Always happens before we can DetachMediaStream()
  1.1385 +  static_cast<VideoSessionConduit *>(conduit_.get())->
  1.1386 +      AttachRenderer(renderer_);
  1.1387 +
  1.1388 +  return MediaPipelineReceive::Init();
  1.1389 +}
  1.1390 +
  1.1391 +MediaPipelineReceiveVideo::PipelineListener::PipelineListener(
  1.1392 +  SourceMediaStream* source, TrackID track_id)
  1.1393 +  : GenericReceiveListener(source, track_id, USECS_PER_S),
  1.1394 +    width_(640),
  1.1395 +    height_(480),
  1.1396 +#ifdef MOZILLA_INTERNAL_API
  1.1397 +    image_container_(),
  1.1398 +    image_(),
  1.1399 +#endif
  1.1400 +    monitor_("Video PipelineListener") {
  1.1401 +#ifdef MOZILLA_INTERNAL_API
  1.1402 +  image_container_ = layers::LayerManager::CreateImageContainer();
  1.1403 +#endif
  1.1404 +}
  1.1405 +
  1.1406 +void MediaPipelineReceiveVideo::PipelineListener::RenderVideoFrame(
  1.1407 +    const unsigned char* buffer,
  1.1408 +    unsigned int buffer_size,
  1.1409 +    uint32_t time_stamp,
  1.1410 +    int64_t render_time,
  1.1411 +    const RefPtr<layers::Image>& video_image) {
  1.1412 +#ifdef MOZILLA_INTERNAL_API
  1.1413 +  ReentrantMonitorAutoEnter enter(monitor_);
  1.1414 +
  1.1415 +  if (buffer) {
  1.1416 +    // Create a video frame using |buffer|.
  1.1417 +#ifdef MOZ_WIDGET_GONK
  1.1418 +    ImageFormat format = ImageFormat::GRALLOC_PLANAR_YCBCR;
  1.1419 +#else
  1.1420 +    ImageFormat format = ImageFormat::PLANAR_YCBCR;
  1.1421 +#endif
  1.1422 +    nsRefPtr<layers::Image> image = image_container_->CreateImage(format);
  1.1423 +    layers::PlanarYCbCrImage* yuvImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
  1.1424 +    uint8_t* frame = const_cast<uint8_t*>(static_cast<const uint8_t*>(buffer));
  1.1425 +    const uint8_t lumaBpp = 8;
  1.1426 +    const uint8_t chromaBpp = 4;
  1.1427 +
  1.1428 +    layers::PlanarYCbCrData yuvData;
  1.1429 +    yuvData.mYChannel = frame;
  1.1430 +    yuvData.mYSize = IntSize(width_, height_);
  1.1431 +    yuvData.mYStride = width_ * lumaBpp / 8;
  1.1432 +    yuvData.mCbCrStride = width_ * chromaBpp / 8;
  1.1433 +    yuvData.mCbChannel = frame + height_ * yuvData.mYStride;
  1.1434 +    yuvData.mCrChannel = yuvData.mCbChannel + height_ * yuvData.mCbCrStride / 2;
  1.1435 +    yuvData.mCbCrSize = IntSize(width_ / 2, height_ / 2);
  1.1436 +    yuvData.mPicX = 0;
  1.1437 +    yuvData.mPicY = 0;
  1.1438 +    yuvData.mPicSize = IntSize(width_, height_);
  1.1439 +    yuvData.mStereoMode = StereoMode::MONO;
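         +    // The layout above assumes a contiguous I420 buffer: for the default
         +    // 640x480 frame that is 307200 bytes of Y (stride 640) followed by
         +    // 76800 bytes each of Cb and Cr (stride 320, 240 rows each).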
  1.1440 +
  1.1441 +    yuvImage->SetData(yuvData);
  1.1442 +
  1.1443 +    image_ = image.forget();
  1.1444 +  }
  1.1445 +#ifdef WEBRTC_GONK
  1.1446 +  else {
  1.1447 +    // Decoder produced video frame that can be appended to the track directly.
  1.1448 +    MOZ_ASSERT(video_image);
  1.1449 +    image_ = video_image;
  1.1450 +  }
  1.1451 +#endif // WEBRTC_GONK
  1.1452 +#endif // MOZILLA_INTERNAL_API
  1.1453 +}
  1.1454 +
  1.1455 +void MediaPipelineReceiveVideo::PipelineListener::
  1.1456 +NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) {
  1.1457 +  ReentrantMonitorAutoEnter enter(monitor_);
  1.1458 +
  1.1459 +#ifdef MOZILLA_INTERNAL_API
  1.1460 +  nsRefPtr<layers::Image> image = image_;
  1.1461 +  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, desired_time);
  1.1462 +  TrackTicks delta = target - played_ticks_;
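         +  // track_rate_ is USECS_PER_S here, so ticks are microseconds; delta is
         +  // the amount of stream time we still need to cover with this frame.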
  1.1463 +
  1.1464 +  // Don't append if we've already provided a frame that supposedly
  1.1465 +  // goes past the current desired_time. Doing so means a negative
  1.1466 +  // delta and thus messes up handling of the graph.
  1.1467 +  if (delta > 0) {
  1.1468 +    VideoSegment segment;
  1.1469 +    segment.AppendFrame(image.forget(), delta, IntSize(width_, height_));
  1.1470 +    // Handle track not actually added yet or removed/finished
  1.1471 +    if (source_->AppendToTrack(track_id_, &segment)) {
  1.1472 +      played_ticks_ = target;
  1.1473 +    } else {
  1.1474 +      MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
  1.1475 +      return;
  1.1476 +    }
  1.1477 +  }
  1.1478 +#endif
  1.1479 +}
  1.1480 +
  1.1481 +
  1.1482 +}  // namespace mozilla
