media/webrtc/signaling/src/mediapipeline/MediaPipeline.h

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,707 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
     1.5 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.6 + * License, v. 2.0. If a copy of the MPL was not distributed with this file,
     1.7 + * You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.8 +
     1.9 +// Original author: ekr@rtfm.com
    1.10 +
    1.11 +#ifndef mediapipeline_h__
    1.12 +#define mediapipeline_h__
    1.13 +
    1.14 +#include "sigslot.h"
    1.15 +
    1.16 +#ifdef USE_FAKE_MEDIA_STREAMS
    1.17 +#include "FakeMediaStreams.h"
    1.18 +#else
    1.19 +#include "DOMMediaStream.h"
    1.20 +#include "MediaStreamGraph.h"
    1.21 +#include "VideoUtils.h"
    1.22 +#endif
    1.23 +#include "MediaConduitInterface.h"
    1.24 +#include "MediaPipelineFilter.h"
    1.25 +#include "AudioSegment.h"
    1.26 +#include "mozilla/ReentrantMonitor.h"
    1.27 +#include "SrtpFlow.h"
    1.28 +#include "databuffer.h"
    1.29 +#include "runnable_utils.h"
    1.30 +#include "transportflow.h"
    1.31 +
    1.32 +#ifdef MOZILLA_INTERNAL_API
    1.33 +#include "VideoSegment.h"
    1.34 +#endif
    1.35 +
    1.36 +#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
    1.37 +
    1.38 +namespace mozilla {
    1.39 +
     1.40 +// A class that represents the pipeline of audio and video data.
    1.41 +// The dataflow looks like:
    1.42 +//
    1.43 +// TRANSMIT
    1.44 +// CaptureDevice -> stream -> [us] -> conduit -> [us] -> transport -> network
    1.45 +//
    1.46 +// RECEIVE
    1.47 +// network -> transport -> [us] -> conduit -> [us] -> stream -> Playout
    1.48 +//
    1.49 +// The boxes labeled [us] are just bridge logic implemented in this class
    1.50 +//
    1.51 +// We have to deal with a number of threads:
    1.52 +//
    1.53 +// GSM:
    1.54 +//   * Assembles the pipeline
    1.55 +// SocketTransportService
    1.56 +//   * Receives notification that ICE and DTLS have completed
    1.57 +//   * Processes incoming network data and passes it to the conduit
    1.58 +//   * Processes outgoing RTP and RTCP
    1.59 +// MediaStreamGraph
    1.60 +//   * Receives outgoing data from the MediaStreamGraph
    1.61 +//   * Receives pull requests for more data from the
    1.62 +//     MediaStreamGraph
     1.63 +// One or another GIPS thread
    1.64 +//   * Receives RTCP messages to send to the other side
    1.65 +//   * Processes video frames GIPS wants to render
    1.66 +//
    1.67 +// For a transmitting conduit, "output" is RTP and "input" is RTCP.
    1.68 +// For a receiving conduit, "input" is RTP and "output" is RTCP.
    1.69 +//
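// As a rough orientation only (the actual wiring lives in MediaPipeline.cpp),
// the [us] boxes correspond to calls like the following, where `listener`,
// `transport` and `pipeline` are hypothetical handles to the classes declared
// below:
//
//   // TRANSMIT, MediaStreamGraph thread: stream -> conduit
//   listener->NotifyRealtimeData(graph, track, rate, offset, events, media);
//   // TRANSMIT, GIPS thread: conduit -> transport (hops to the STS thread)
//   transport->SendRtpPacket(data, len);
//
//   // RECEIVE, STS thread: transport -> conduit
//   pipeline->RtpPacketReceived(layer, data, len);
//   // RECEIVE, MediaStreamGraph thread: conduit -> stream
//   listener->NotifyPull(graph, desired_time);
//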
    1.70 +class MediaPipeline : public sigslot::has_slots<> {
    1.71 + public:
    1.72 +  enum Direction { TRANSMIT, RECEIVE };
    1.73 +  enum State { MP_CONNECTING, MP_OPEN, MP_CLOSED };
    1.74 +  MediaPipeline(const std::string& pc,
    1.75 +                Direction direction,
    1.76 +                nsCOMPtr<nsIEventTarget> main_thread,
    1.77 +                nsCOMPtr<nsIEventTarget> sts_thread,
    1.78 +                MediaStream *stream,
    1.79 +                TrackID track_id,
    1.80 +                int level,
    1.81 +                RefPtr<MediaSessionConduit> conduit,
    1.82 +                RefPtr<TransportFlow> rtp_transport,
    1.83 +                RefPtr<TransportFlow> rtcp_transport)
    1.84 +      : direction_(direction),
    1.85 +        stream_(stream),
    1.86 +        track_id_(track_id),
    1.87 +        level_(level),
    1.88 +        conduit_(conduit),
    1.89 +        rtp_(rtp_transport, rtcp_transport ? RTP : MUX),
    1.90 +        rtcp_(rtcp_transport ? rtcp_transport : rtp_transport,
    1.91 +              rtcp_transport ? RTCP : MUX),
    1.92 +        main_thread_(main_thread),
    1.93 +        sts_thread_(sts_thread),
    1.94 +        rtp_packets_sent_(0),
    1.95 +        rtcp_packets_sent_(0),
    1.96 +        rtp_packets_received_(0),
    1.97 +        rtcp_packets_received_(0),
    1.98 +        rtp_bytes_sent_(0),
    1.99 +        rtp_bytes_received_(0),
   1.100 +        pc_(pc),
   1.101 +        description_() {
    1.102 +      // To indicate rtcp-mux, pass nullptr for rtcp_transport.
    1.103 +      // It is therefore an error to pass the same flow for
    1.104 +      // both rtp and rtcp.
   1.105 +      MOZ_ASSERT(rtp_transport != rtcp_transport);
   1.106 +
   1.107 +      // PipelineTransport() will access this->sts_thread_; moved here for safety
   1.108 +      transport_ = new PipelineTransport(this);
   1.109 +  }
   1.110 +
   1.111 +  virtual ~MediaPipeline();
   1.112 +
   1.113 +  // Must be called on the STS thread.  Must be called after ShutdownMedia_m().
   1.114 +  void ShutdownTransport_s();
   1.115 +
   1.116 +  // Must be called on the main thread.
   1.117 +  void ShutdownMedia_m() {
   1.118 +    ASSERT_ON_THREAD(main_thread_);
   1.119 +
   1.120 +    if (stream_) {
   1.121 +      DetachMediaStream();
   1.122 +    }
   1.123 +  }
   1.124 +
   1.125 +  virtual nsresult Init();
   1.126 +
   1.127 +  // When we have offered bundle, the MediaPipelines are created in an
   1.128 +  // indeterminate state; we do not know whether the answerer will take us
   1.129 +  // up on our offer. In the meantime, we need to behave in a manner that
   1.130 +  // errs on the side of packet loss when it is unclear whether an arriving
   1.131 +  // packet is meant for us. We want to get out of this indeterminate state
   1.132 +  // ASAP, which is what this function can be used for.
   1.133 +  void SetUsingBundle_s(bool decision);
   1.134 +  MediaPipelineFilter* UpdateFilterFromRemoteDescription_s(
   1.135 +      nsAutoPtr<MediaPipelineFilter> filter);
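  // A minimal sketch of how a PeerConnection-like owner might resolve the
  // bundle ambiguity once the answer arrives. Only SetUsingBundle_s() and
  // UpdateFilterFromRemoteDescription_s() come from this header; the owner,
  // its sts_thread handle and the answer_accepted_bundle flag are
  // hypothetical, and the dispatch assumes the RUN_ON_THREAD/WrapRunnable
  // helpers from runnable_utils.h (included above):
  //
  //   RUN_ON_THREAD(sts_thread,
  //                 WrapRunnable(pipeline, &MediaPipeline::SetUsingBundle_s,
  //                              answer_accepted_bundle),
  //                 NS_DISPATCH_NORMAL);
  //   // A filter built from the remote description (e.g. its SSRCs) can be
  //   // merged in the same way via UpdateFilterFromRemoteDescription_s().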
   1.136 +
   1.137 +  virtual Direction direction() const { return direction_; }
   1.138 +  virtual TrackID trackid() const { return track_id_; }
   1.139 +  virtual int level() const { return level_; }
   1.140 +
   1.141 +  bool IsDoingRtcpMux() const {
   1.142 +    return (rtp_.type_ == MUX);
   1.143 +  }
   1.144 +
   1.145 +  int32_t rtp_packets_sent() const { return rtp_packets_sent_; }
   1.146 +  int64_t rtp_bytes_sent() const { return rtp_bytes_sent_; }
   1.147 +  int32_t rtcp_packets_sent() const { return rtcp_packets_sent_; }
   1.148 +  int32_t rtp_packets_received() const { return rtp_packets_received_; }
   1.149 +  int64_t rtp_bytes_received() const { return rtp_bytes_received_; }
   1.150 +  int32_t rtcp_packets_received() const { return rtcp_packets_received_; }
   1.151 +
   1.152 +  MediaSessionConduit *Conduit() const { return conduit_; }
   1.153 +
    1.154 +  // Thread-safe reference counting
   1.155 +  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaPipeline)
   1.156 +
   1.157 +  typedef enum {
   1.158 +    RTP,
   1.159 +    RTCP,
   1.160 +    MUX,
   1.161 +    MAX_RTP_TYPE
   1.162 +  } RtpType;
   1.163 +
   1.164 + protected:
   1.165 +  virtual void DetachMediaStream() {}
   1.166 +
   1.167 +  // Separate class to allow ref counting
   1.168 +  class PipelineTransport : public TransportInterface {
   1.169 +   public:
   1.170 +    // Implement the TransportInterface functions
   1.171 +    explicit PipelineTransport(MediaPipeline *pipeline)
   1.172 +        : pipeline_(pipeline),
   1.173 +          sts_thread_(pipeline->sts_thread_) {}
   1.174 +
   1.175 +    void Detach() { pipeline_ = nullptr; }
   1.176 +    MediaPipeline *pipeline() const { return pipeline_; }
   1.177 +
   1.178 +    virtual nsresult SendRtpPacket(const void* data, int len);
   1.179 +    virtual nsresult SendRtcpPacket(const void* data, int len);
   1.180 +
   1.181 +   private:
   1.182 +    virtual nsresult SendRtpPacket_s(nsAutoPtr<DataBuffer> data);
   1.183 +    virtual nsresult SendRtcpPacket_s(nsAutoPtr<DataBuffer> data);
   1.184 +
   1.185 +    MediaPipeline *pipeline_;  // Raw pointer to avoid cycles
   1.186 +    nsCOMPtr<nsIEventTarget> sts_thread_;
   1.187 +  };
   1.188 +  friend class PipelineTransport;
   1.189 +
   1.190 +  class TransportInfo {
   1.191 +    public:
   1.192 +      TransportInfo(RefPtr<TransportFlow> flow, RtpType type) :
   1.193 +        transport_(flow),
   1.194 +        state_(MP_CONNECTING),
   1.195 +        type_(type) {
   1.196 +        MOZ_ASSERT(flow);
   1.197 +      }
   1.198 +
   1.199 +      RefPtr<TransportFlow> transport_;
   1.200 +      State state_;
   1.201 +      RefPtr<SrtpFlow> send_srtp_;
   1.202 +      RefPtr<SrtpFlow> recv_srtp_;
   1.203 +      RtpType type_;
   1.204 +  };
   1.205 +
   1.206 +  // The transport is down
   1.207 +  virtual nsresult TransportFailed_s(TransportInfo &info);
   1.208 +  // The transport is ready
   1.209 +  virtual nsresult TransportReady_s(TransportInfo &info);
   1.210 +  void UpdateRtcpMuxState(TransportInfo &info);
   1.211 +
   1.212 +  // Unhooks from signals
   1.213 +  void DisconnectTransport_s(TransportInfo &info);
   1.214 +  nsresult ConnectTransport_s(TransportInfo &info);
   1.215 +
   1.216 +  TransportInfo* GetTransportInfo_s(TransportFlow *flow);
   1.217 +
   1.218 +  void increment_rtp_packets_sent(int bytes);
   1.219 +  void increment_rtcp_packets_sent();
   1.220 +  void increment_rtp_packets_received(int bytes);
   1.221 +  void increment_rtcp_packets_received();
   1.222 +
   1.223 +  virtual nsresult SendPacket(TransportFlow *flow, const void *data, int len);
   1.224 +
   1.225 +  // Process slots on transports
   1.226 +  void StateChange(TransportFlow *flow, TransportLayer::State);
   1.227 +  void RtpPacketReceived(TransportLayer *layer, const unsigned char *data,
   1.228 +                         size_t len);
   1.229 +  void RtcpPacketReceived(TransportLayer *layer, const unsigned char *data,
   1.230 +                          size_t len);
   1.231 +  void PacketReceived(TransportLayer *layer, const unsigned char *data,
   1.232 +                      size_t len);
   1.233 +
   1.234 +  Direction direction_;
   1.235 +  RefPtr<MediaStream> stream_;  // A pointer to the stream we are servicing.
   1.236 +                                // Written on the main thread.
   1.237 +                                // Used on STS and MediaStreamGraph threads.
   1.238 +  TrackID track_id_;            // The track on the stream.
    1.239 +                                // Written and used the same way as stream_.
   1.240 +  int level_; // The m-line index (starting at 1, to match convention)
   1.241 +  RefPtr<MediaSessionConduit> conduit_;  // Our conduit. Written on the main
   1.242 +                                         // thread. Read on STS thread.
   1.243 +
   1.244 +  // The transport objects. Read/written on STS thread.
   1.245 +  TransportInfo rtp_;
   1.246 +  TransportInfo rtcp_;
   1.247 +  // These are for bundle. We have a separate set because when we have offered
   1.248 +  // bundle, we do not know whether we will receive traffic on the transport
   1.249 +  // in this pipeline's m-line, or the transport in the "master" m-line for
   1.250 +  // the bundle. We need to be ready for either. Once this ambiguity is
   1.251 +  // resolved, the transport we know that we'll be using will be set in
   1.252 +  // rtp_transport_ and rtcp_transport_, and these will be unset.
   1.253 +  // TODO(bcampen@mozilla.com): I'm pretty sure this could be leveraged for
   1.254 +  // re-offer with a new address on an m-line too, with a little work.
   1.255 +  nsAutoPtr<TransportInfo> possible_bundle_rtp_;
   1.256 +  nsAutoPtr<TransportInfo> possible_bundle_rtcp_;
   1.257 +
   1.258 +  // Pointers to the threads we need. Initialized at creation
   1.259 +  // and used all over the place.
   1.260 +  nsCOMPtr<nsIEventTarget> main_thread_;
   1.261 +  nsCOMPtr<nsIEventTarget> sts_thread_;
   1.262 +
   1.263 +  // Created on Init. Referenced by the conduit and eventually
   1.264 +  // destroyed on the STS thread.
   1.265 +  RefPtr<PipelineTransport> transport_;
   1.266 +
   1.267 +  // Only safe to access from STS thread.
   1.268 +  // Build into TransportInfo?
   1.269 +  int32_t rtp_packets_sent_;
   1.270 +  int32_t rtcp_packets_sent_;
   1.271 +  int32_t rtp_packets_received_;
   1.272 +  int32_t rtcp_packets_received_;
   1.273 +  int64_t rtp_bytes_sent_;
   1.274 +  int64_t rtp_bytes_received_;
   1.275 +
   1.276 +  // Written on Init. Read on STS thread.
   1.277 +  std::string pc_;
   1.278 +  std::string description_;
   1.279 +
   1.280 +  // Written on Init, all following accesses are on the STS thread.
   1.281 +  nsAutoPtr<MediaPipelineFilter> filter_;
   1.282 +  nsAutoPtr<webrtc::RtpHeaderParser> rtp_parser_;
   1.283 +
   1.284 + private:
   1.285 +  nsresult Init_s();
   1.286 +
   1.287 +  bool IsRtp(const unsigned char *data, size_t len);
   1.288 +};
   1.289 +
   1.290 +class GenericReceiveListener : public MediaStreamListener
   1.291 +{
   1.292 + public:
   1.293 +  GenericReceiveListener(SourceMediaStream *source, TrackID track_id,
   1.294 +                         TrackRate track_rate)
   1.295 +    : source_(source),
   1.296 +      track_id_(track_id),
   1.297 +      track_rate_(track_rate),
   1.298 +      played_ticks_(0) {}
   1.299 +
   1.300 +  virtual ~GenericReceiveListener() {}
   1.301 +
   1.302 +  void AddSelf(MediaSegment* segment);
   1.303 +
   1.304 +  void SetPlayedTicks(TrackTicks time) {
   1.305 +    played_ticks_ = time;
   1.306 +  }
   1.307 +
   1.308 +  void EndTrack() {
   1.309 +    source_->EndTrack(track_id_);
   1.310 +  }
   1.311 +
   1.312 + protected:
   1.313 +  SourceMediaStream *source_;
   1.314 +  TrackID track_id_;
   1.315 +  TrackRate track_rate_;
   1.316 +  TrackTicks played_ticks_;
   1.317 +};
   1.318 +
   1.319 +class TrackAddedCallback {
   1.320 + public:
   1.321 +  virtual void TrackAdded(TrackTicks current_ticks) = 0;
   1.322 +
   1.323 +  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TrackAddedCallback);
   1.324 +
   1.325 + protected:
   1.326 +  virtual ~TrackAddedCallback() {}
   1.327 +};
   1.328 +
   1.329 +class GenericReceiveListener;
   1.330 +
   1.331 +class GenericReceiveCallback : public TrackAddedCallback
   1.332 +{
   1.333 + public:
   1.334 +  GenericReceiveCallback(GenericReceiveListener* listener)
   1.335 +    : listener_(listener) {}
   1.336 +
   1.337 +  void TrackAdded(TrackTicks time) {
   1.338 +    listener_->SetPlayedTicks(time);
   1.339 +  }
   1.340 +
   1.341 + private:
   1.342 +  RefPtr<GenericReceiveListener> listener_;
   1.343 +};
   1.344 +
   1.345 +class ConduitDeleteEvent: public nsRunnable
   1.346 +{
   1.347 +public:
   1.348 +  ConduitDeleteEvent(TemporaryRef<MediaSessionConduit> aConduit) :
   1.349 +    mConduit(aConduit) {}
   1.350 +
   1.351 +  /* we exist solely to proxy release of the conduit */
   1.352 +  NS_IMETHOD Run() { return NS_OK; }
   1.353 +private:
   1.354 +  RefPtr<MediaSessionConduit> mConduit;
   1.355 +};
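// An illustrative use of the proxy-release pattern above; this mirrors what
// the pipeline listeners below do in their destructors:
//
//   // In a destructor that may run off the main thread:
//   NS_DispatchToMainThread(new ConduitDeleteEvent(conduit_.forget()),
//                           NS_DISPATCH_NORMAL);
//   // Run() is a no-op; releasing the runnable on the main thread is what
//   // drops the conduit reference there.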
   1.356 +
   1.357 +// A specialization of pipeline for reading from an input device
   1.358 +// and transmitting to the network.
   1.359 +class MediaPipelineTransmit : public MediaPipeline {
   1.360 + public:
    1.361 +  // Set rtcp_transport to nullptr to use rtcp-mux; see the usage sketch after this class.
   1.362 +  MediaPipelineTransmit(const std::string& pc,
   1.363 +                        nsCOMPtr<nsIEventTarget> main_thread,
   1.364 +                        nsCOMPtr<nsIEventTarget> sts_thread,
   1.365 +                        DOMMediaStream *domstream,
   1.366 +                        TrackID track_id,
   1.367 +                        int level,
   1.368 +                        RefPtr<MediaSessionConduit> conduit,
   1.369 +                        RefPtr<TransportFlow> rtp_transport,
   1.370 +                        RefPtr<TransportFlow> rtcp_transport) :
   1.371 +      MediaPipeline(pc, TRANSMIT, main_thread, sts_thread,
   1.372 +                    domstream->GetStream(), track_id, level,
   1.373 +                    conduit, rtp_transport, rtcp_transport),
   1.374 +      listener_(new PipelineListener(conduit)),
   1.375 +      domstream_(domstream)
   1.376 +  {}
   1.377 +
   1.378 +  // Initialize (stuff here may fail)
   1.379 +  virtual nsresult Init();
   1.380 +
   1.381 +  // Called on the main thread.
   1.382 +  virtual void DetachMediaStream() {
   1.383 +    ASSERT_ON_THREAD(main_thread_);
   1.384 +    domstream_->RemoveDirectListener(listener_);
   1.385 +    domstream_ = nullptr;
   1.386 +    stream_->RemoveListener(listener_);
   1.387 +    // Let the listener be destroyed with the pipeline (or later).
   1.388 +    stream_ = nullptr;
   1.389 +  }
   1.390 +
   1.391 +  // Override MediaPipeline::TransportReady.
   1.392 +  virtual nsresult TransportReady_s(TransportInfo &info);
   1.393 +
   1.394 +  // Separate class to allow ref counting
   1.395 +  class PipelineListener : public MediaStreamDirectListener {
   1.396 +   friend class MediaPipelineTransmit;
   1.397 +   public:
   1.398 +    PipelineListener(const RefPtr<MediaSessionConduit>& conduit)
   1.399 +      : conduit_(conduit),
   1.400 +        active_(false),
   1.401 +        direct_connect_(false),
   1.402 +        samples_10ms_buffer_(nullptr),
   1.403 +        buffer_current_(0),
   1.404 +        samplenum_10ms_(0)
   1.405 +#ifdef MOZILLA_INTERNAL_API
   1.406 +        , last_img_(-1)
   1.407 +#endif // MOZILLA_INTERNAL_API
   1.408 +    {
   1.409 +    }
   1.410 +
   1.411 +    ~PipelineListener()
   1.412 +    {
    1.413 +      // Release the conduit on the main thread.  Must use forget()!
   1.414 +      nsresult rv = NS_DispatchToMainThread(new
   1.415 +        ConduitDeleteEvent(conduit_.forget()), NS_DISPATCH_NORMAL);
   1.416 +      MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
   1.417 +      if (NS_FAILED(rv)) {
   1.418 +        MOZ_CRASH();
   1.419 +      }
   1.420 +    }
   1.421 +
   1.422 +
   1.423 +    // XXX. This is not thread-safe but the hazard is just
   1.424 +    // that active_ = true takes a while to propagate. Revisit
   1.425 +    // when 823600 lands.
   1.426 +    void SetActive(bool active) { active_ = active; }
   1.427 +
   1.428 +    // Implement MediaStreamListener
   1.429 +    virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
   1.430 +                                          TrackRate rate,
   1.431 +                                          TrackTicks offset,
   1.432 +                                          uint32_t events,
   1.433 +                                          const MediaSegment& queued_media) MOZ_OVERRIDE;
   1.434 +    virtual void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) MOZ_OVERRIDE {}
   1.435 +
   1.436 +    // Implement MediaStreamDirectListener
   1.437 +    virtual void NotifyRealtimeData(MediaStreamGraph* graph, TrackID tid,
   1.438 +                                    TrackRate rate,
   1.439 +                                    TrackTicks offset,
   1.440 +                                    uint32_t events,
   1.441 +                                    const MediaSegment& media) MOZ_OVERRIDE;
   1.442 +
   1.443 +   private:
   1.444 +    void NewData(MediaStreamGraph* graph, TrackID tid,
   1.445 +                 TrackRate rate,
   1.446 +                 TrackTicks offset,
   1.447 +                 uint32_t events,
   1.448 +                 const MediaSegment& media);
   1.449 +
   1.450 +    virtual void ProcessAudioChunk(AudioSessionConduit *conduit,
   1.451 +                                   TrackRate rate, AudioChunk& chunk);
   1.452 +#ifdef MOZILLA_INTERNAL_API
   1.453 +    virtual void ProcessVideoChunk(VideoSessionConduit *conduit,
   1.454 +                                   TrackRate rate, VideoChunk& chunk);
   1.455 +#endif
   1.456 +    RefPtr<MediaSessionConduit> conduit_;
   1.457 +    volatile bool active_;
   1.458 +    bool direct_connect_;
   1.459 +
   1.460 +    // These vars handle breaking audio samples into exact 10ms chunks:
   1.461 +    // The buffer of 10ms audio samples that we will send once full
   1.462 +    // (can be carried over from one call to another).
   1.463 +    nsAutoArrayPtr<int16_t> samples_10ms_buffer_;
   1.464 +    // The location of the pointer within that buffer (in units of samples).
   1.465 +    int64_t buffer_current_;
   1.466 +    // The number of samples in a 10ms audio chunk.
   1.467 +    int64_t samplenum_10ms_;
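    // A sketch of the chunking these members support; the real logic lives
    // in ProcessAudioChunk() in MediaPipeline.cpp and its exact arithmetic
    // may differ:
    //
    //   samplenum_10ms_ = rate / 100;  // 10ms of samples at `rate` Hz
    //   // Arbitrary-length chunks are copied into samples_10ms_buffer_ at
    //   // offset buffer_current_; whenever the buffer holds a full
    //   // samplenum_10ms_ samples it is handed to the audio conduit and
    //   // buffer_current_ wraps back to 0.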
   1.468 +
   1.469 +#ifdef MOZILLA_INTERNAL_API
   1.470 +    int32_t last_img_; // serial number of last Image
   1.471 +#endif // MOZILLA_INTERNAL_API
   1.472 +  };
   1.473 +
   1.474 + private:
   1.475 +  RefPtr<PipelineListener> listener_;
   1.476 +  DOMMediaStream *domstream_;
   1.477 +};
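// A hypothetical construction/teardown sequence for the transmit pipeline
// (a sketch only; pc_handle, main_thread, sts_thread, dom_stream,
// audio_conduit and rtp_flow are placeholder names owned by a
// PeerConnection-like caller):
//
//   RefPtr<MediaPipelineTransmit> pipeline =
//     new MediaPipelineTransmit(pc_handle, main_thread, sts_thread,
//                               dom_stream, 1 /* track */, 1 /* level */,
//                               audio_conduit, rtp_flow,
//                               nullptr /* rtcp-mux: no separate RTCP flow */);
//   if (NS_FAILED(pipeline->Init())) { /* handle failure */ }
//   ...
//   // Teardown is two-phase and the order matters:
//   pipeline->ShutdownMedia_m();   // main thread
//   // ...then dispatch ShutdownTransport_s() to the STS thread.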
   1.478 +
   1.479 +
    1.480 +// A specialization of pipeline for reading from the network; the base
    1.481 +// class for the audio and video receive pipelines below.
   1.482 +class MediaPipelineReceive : public MediaPipeline {
   1.483 + public:
   1.484 +  // Set rtcp_transport to nullptr to use rtcp-mux
   1.485 +  MediaPipelineReceive(const std::string& pc,
   1.486 +                       nsCOMPtr<nsIEventTarget> main_thread,
   1.487 +                       nsCOMPtr<nsIEventTarget> sts_thread,
   1.488 +                       MediaStream *stream,
   1.489 +                       TrackID track_id,
   1.490 +                       int level,
   1.491 +                       RefPtr<MediaSessionConduit> conduit,
   1.492 +                       RefPtr<TransportFlow> rtp_transport,
   1.493 +                       RefPtr<TransportFlow> rtcp_transport,
   1.494 +                       RefPtr<TransportFlow> bundle_rtp_transport,
   1.495 +                       RefPtr<TransportFlow> bundle_rtcp_transport,
   1.496 +                       nsAutoPtr<MediaPipelineFilter> filter) :
   1.497 +      MediaPipeline(pc, RECEIVE, main_thread, sts_thread,
   1.498 +                    stream, track_id, level, conduit, rtp_transport,
   1.499 +                    rtcp_transport),
   1.500 +      segments_added_(0) {
   1.501 +    filter_ = filter;
   1.502 +    rtp_parser_ = webrtc::RtpHeaderParser::Create();
   1.503 +    if (bundle_rtp_transport) {
   1.504 +      if (bundle_rtcp_transport) {
   1.505 +        MOZ_ASSERT(bundle_rtp_transport != bundle_rtcp_transport);
   1.506 +        possible_bundle_rtp_ = new TransportInfo(bundle_rtp_transport, RTP);
   1.507 +        possible_bundle_rtcp_ = new TransportInfo(bundle_rtcp_transport, RTCP);
   1.508 +      } else {
   1.509 +        possible_bundle_rtp_ = new TransportInfo(bundle_rtp_transport, MUX);
   1.510 +        possible_bundle_rtcp_ = new TransportInfo(bundle_rtp_transport, MUX);
   1.511 +      }
   1.512 +    }
   1.513 +  }
   1.514 +
   1.515 +  int segments_added() const { return segments_added_; }
   1.516 +
   1.517 + protected:
   1.518 +  int segments_added_;
   1.519 +
   1.520 + private:
   1.521 +};
   1.522 +
   1.523 +
   1.524 +// A specialization of pipeline for reading from the network and
   1.525 +// rendering audio.
   1.526 +class MediaPipelineReceiveAudio : public MediaPipelineReceive {
   1.527 + public:
   1.528 +  MediaPipelineReceiveAudio(const std::string& pc,
   1.529 +                            nsCOMPtr<nsIEventTarget> main_thread,
   1.530 +                            nsCOMPtr<nsIEventTarget> sts_thread,
   1.531 +                            MediaStream *stream,
   1.532 +                            TrackID track_id,
   1.533 +                            int level,
   1.534 +                            RefPtr<AudioSessionConduit> conduit,
   1.535 +                            RefPtr<TransportFlow> rtp_transport,
   1.536 +                            RefPtr<TransportFlow> rtcp_transport,
   1.537 +                            RefPtr<TransportFlow> bundle_rtp_transport,
   1.538 +                            RefPtr<TransportFlow> bundle_rtcp_transport,
   1.539 +                            nsAutoPtr<MediaPipelineFilter> filter) :
   1.540 +      MediaPipelineReceive(pc, main_thread, sts_thread,
   1.541 +                           stream, track_id, level, conduit, rtp_transport,
   1.542 +                           rtcp_transport, bundle_rtp_transport,
   1.543 +                           bundle_rtcp_transport, filter),
   1.544 +      listener_(new PipelineListener(stream->AsSourceStream(),
   1.545 +                                     track_id, conduit)) {
   1.546 +  }
   1.547 +
   1.548 +  virtual void DetachMediaStream() {
   1.549 +    ASSERT_ON_THREAD(main_thread_);
   1.550 +    listener_->EndTrack();
   1.551 +    stream_->RemoveListener(listener_);
   1.552 +    stream_ = nullptr;
   1.553 +  }
   1.554 +
   1.555 +  virtual nsresult Init();
   1.556 +
   1.557 + private:
   1.558 +  // Separate class to allow ref counting
   1.559 +  class PipelineListener : public GenericReceiveListener {
   1.560 +   public:
   1.561 +    PipelineListener(SourceMediaStream * source, TrackID track_id,
   1.562 +                     const RefPtr<MediaSessionConduit>& conduit);
   1.563 +
   1.564 +    ~PipelineListener()
   1.565 +    {
    1.566 +      // Release the conduit on the main thread.  Must use forget()!
   1.567 +      nsresult rv = NS_DispatchToMainThread(new
   1.568 +        ConduitDeleteEvent(conduit_.forget()), NS_DISPATCH_NORMAL);
   1.569 +      MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
   1.570 +      if (NS_FAILED(rv)) {
   1.571 +        MOZ_CRASH();
   1.572 +      }
   1.573 +    }
   1.574 +
   1.575 +    // Implement MediaStreamListener
   1.576 +    virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
   1.577 +                                          TrackRate rate,
   1.578 +                                          TrackTicks offset,
   1.579 +                                          uint32_t events,
   1.580 +                                          const MediaSegment& queued_media) MOZ_OVERRIDE {}
   1.581 +    virtual void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) MOZ_OVERRIDE;
   1.582 +
   1.583 +   private:
   1.584 +    RefPtr<MediaSessionConduit> conduit_;
   1.585 +  };
   1.586 +
   1.587 +  RefPtr<PipelineListener> listener_;
   1.588 +};
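// A similar hypothetical sketch for the receive side, showing where the
// bundle transports and the filter plug in (lower-case names are
// placeholders; only the constructor shape comes from this header):
//
//   nsAutoPtr<MediaPipelineFilter> filter(new MediaPipelineFilter);
//   // ...populate the filter from the remote description (SSRCs etc.)...
//   RefPtr<MediaPipelineReceive> pipeline =
//     new MediaPipelineReceiveAudio(pc_handle, main_thread, sts_thread,
//                                   stream, 1 /* track */, 1 /* level */,
//                                   audio_conduit,
//                                   rtp_flow, nullptr /* rtcp-mux */,
//                                   bundle_rtp_flow, nullptr /* mux too */,
//                                   filter);
//   // If bundle was not offered, pass nullptr for both bundle flows; the
//   // ambiguity is later resolved with SetUsingBundle_s().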
   1.589 +
   1.590 +
   1.591 +// A specialization of pipeline for reading from the network and
   1.592 +// rendering video.
   1.593 +class MediaPipelineReceiveVideo : public MediaPipelineReceive {
   1.594 + public:
   1.595 +  MediaPipelineReceiveVideo(const std::string& pc,
   1.596 +                            nsCOMPtr<nsIEventTarget> main_thread,
   1.597 +                            nsCOMPtr<nsIEventTarget> sts_thread,
   1.598 +                            MediaStream *stream,
   1.599 +                            TrackID track_id,
   1.600 +                            int level,
   1.601 +                            RefPtr<VideoSessionConduit> conduit,
   1.602 +                            RefPtr<TransportFlow> rtp_transport,
   1.603 +                            RefPtr<TransportFlow> rtcp_transport,
   1.604 +                            RefPtr<TransportFlow> bundle_rtp_transport,
   1.605 +                            RefPtr<TransportFlow> bundle_rtcp_transport,
   1.606 +                            nsAutoPtr<MediaPipelineFilter> filter) :
   1.607 +      MediaPipelineReceive(pc, main_thread, sts_thread,
   1.608 +                           stream, track_id, level, conduit, rtp_transport,
   1.609 +                           rtcp_transport, bundle_rtp_transport,
   1.610 +                           bundle_rtcp_transport, filter),
   1.611 +      renderer_(new PipelineRenderer(MOZ_THIS_IN_INITIALIZER_LIST())),
   1.612 +      listener_(new PipelineListener(stream->AsSourceStream(), track_id)) {
   1.613 +  }
   1.614 +
   1.615 +  // Called on the main thread.
   1.616 +  virtual void DetachMediaStream() {
   1.617 +    ASSERT_ON_THREAD(main_thread_);
   1.618 +
   1.619 +    listener_->EndTrack();
   1.620 +    // stop generating video and thus stop invoking the PipelineRenderer
   1.621 +    // and PipelineListener - the renderer has a raw ptr to the Pipeline to
   1.622 +    // avoid cycles, and the render callbacks are invoked from a different
   1.623 +    // thread so simple null-checks would cause TSAN bugs without locks.
   1.624 +    static_cast<VideoSessionConduit*>(conduit_.get())->DetachRenderer();
   1.625 +    stream_->RemoveListener(listener_);
   1.626 +    stream_ = nullptr;
   1.627 +  }
   1.628 +
   1.629 +  virtual nsresult Init();
   1.630 +
   1.631 + private:
   1.632 +  class PipelineRenderer : public VideoRenderer {
   1.633 +   public:
   1.634 +    PipelineRenderer(MediaPipelineReceiveVideo *pipeline) :
   1.635 +      pipeline_(pipeline) {}
   1.636 +
   1.637 +    void Detach() { pipeline_ = nullptr; }
   1.638 +
   1.639 +    // Implement VideoRenderer
   1.640 +    virtual void FrameSizeChange(unsigned int width,
   1.641 +                                 unsigned int height,
   1.642 +                                 unsigned int number_of_streams) {
   1.643 +      pipeline_->listener_->FrameSizeChange(width, height, number_of_streams);
   1.644 +    }
   1.645 +
   1.646 +    virtual void RenderVideoFrame(const unsigned char* buffer,
   1.647 +                                  unsigned int buffer_size,
   1.648 +                                  uint32_t time_stamp,
   1.649 +                                  int64_t render_time,
   1.650 +                                  const ImageHandle& handle) {
   1.651 +      pipeline_->listener_->RenderVideoFrame(buffer, buffer_size, time_stamp,
   1.652 +                                             render_time,
   1.653 +                                             handle.GetImage());
   1.654 +    }
   1.655 +
   1.656 +   private:
   1.657 +    MediaPipelineReceiveVideo *pipeline_;  // Raw pointer to avoid cycles
   1.658 +  };
   1.659 +
   1.660 +  // Separate class to allow ref counting
   1.661 +  class PipelineListener : public GenericReceiveListener {
   1.662 +   public:
   1.663 +    PipelineListener(SourceMediaStream * source, TrackID track_id);
   1.664 +
   1.665 +    // Implement MediaStreamListener
   1.666 +    virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
   1.667 +                                          TrackRate rate,
   1.668 +                                          TrackTicks offset,
   1.669 +                                          uint32_t events,
   1.670 +                                          const MediaSegment& queued_media) MOZ_OVERRIDE {}
   1.671 +    virtual void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) MOZ_OVERRIDE;
   1.672 +
   1.673 +    // Accessors for external writes from the renderer
   1.674 +    void FrameSizeChange(unsigned int width,
   1.675 +                         unsigned int height,
   1.676 +                         unsigned int number_of_streams) {
   1.677 +      ReentrantMonitorAutoEnter enter(monitor_);
   1.678 +
   1.679 +      width_ = width;
   1.680 +      height_ = height;
   1.681 +    }
   1.682 +
   1.683 +    void RenderVideoFrame(const unsigned char* buffer,
   1.684 +                          unsigned int buffer_size,
   1.685 +                          uint32_t time_stamp,
   1.686 +                          int64_t render_time,
   1.687 +                          const RefPtr<layers::Image>& video_image);
   1.688 +
   1.689 +   private:
   1.690 +    int width_;
   1.691 +    int height_;
   1.692 +#ifdef MOZILLA_INTERNAL_API
   1.693 +    nsRefPtr<layers::ImageContainer> image_container_;
   1.694 +    nsRefPtr<layers::Image> image_;
   1.695 +#endif
   1.696 +    mozilla::ReentrantMonitor monitor_; // Monitor for processing WebRTC frames.
   1.697 +                                        // Protects image_ against:
   1.698 +                                        // - Writing from the GIPS thread
   1.699 +                                        // - Reading from the MSG thread
   1.700 +  };
   1.701 +
   1.702 +  friend class PipelineRenderer;
   1.703 +
   1.704 +  RefPtr<PipelineRenderer> renderer_;
   1.705 +  RefPtr<PipelineListener> listener_;
   1.706 +};
   1.707 +
   1.708 +
    1.709 +}  // namespace mozilla
   1.710 +#endif
