content/media/webrtc/MediaEngineWebRTCVideo.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/webrtc/MediaEngineWebRTCVideo.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,915 @@
     1.4 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.5 + * License, v. 2.0. If a copy of the MPL was not distributed with this file,
     1.6 + * You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.7 +
     1.8 +#include "MediaEngineWebRTC.h"
     1.9 +#include "Layers.h"
    1.10 +#include "ImageTypes.h"
    1.11 +#include "ImageContainer.h"
    1.12 +#include "mozilla/layers/GrallocTextureClient.h"
    1.13 +#include "nsMemory.h"
    1.14 +#include "mtransport/runnable_utils.h"
    1.15 +#include "MediaTrackConstraints.h"
    1.16 +
    1.17 +#ifdef MOZ_B2G_CAMERA
    1.18 +#include "GrallocImages.h"
    1.19 +#include "libyuv.h"
    1.20 +#include "mozilla/Hal.h"
    1.21 +#include "ScreenOrientation.h"
    1.22 +using namespace mozilla::dom;
    1.23 +#endif
    1.24 +namespace mozilla {
    1.25 +
    1.26 +using namespace mozilla::gfx;
    1.27 +using dom::ConstrainLongRange;
    1.28 +using dom::ConstrainDoubleRange;
    1.29 +using dom::MediaTrackConstraintSet;
    1.30 +
    1.31 +#ifdef PR_LOGGING
    1.32 +extern PRLogModuleInfo* GetMediaManagerLog();
    1.33 +#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
    1.34 +#define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
    1.35 +#else
    1.36 +#define LOG(msg)
    1.37 +#define LOGFRAME(msg)
    1.38 +#endif
    1.39 +
    1.40 +/**
    1.41 + * Webrtc video source.
    1.42 + */
    1.43 +#ifndef MOZ_B2G_CAMERA
    1.44 +NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
    1.45 +#else
    1.46 +NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
    1.47 +NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
    1.48 +NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
    1.49 +#endif
    1.50 +
    1.51 +// ViEExternalRenderer Callback.
    1.52 +#ifndef MOZ_B2G_CAMERA
int
MediaEngineWebRTCVideoSource::FrameSizeChange(
   unsigned int w, unsigned int h, unsigned int streams)
{
  // Record the new frame dimensions reported by the capture engine so that
  // DeliverFrame() can validate buffer sizes and NotifyPull() can size its
  // segments.  Always reports success (0) back to the engine.
  // NOTE(review): mWidth/mHeight are written here without holding mMonitor,
  // while NotifyPull() reads them under the lock — confirm the render-thread
  // callback ordering makes this safe.
  mWidth = w;
  mHeight = h;
  LOG(("Video FrameSizeChange: %ux%u", w, h));
  return 0;
}
    1.62 +
    1.63 +// ViEExternalRenderer Callback. Process every incoming frame here.
int
MediaEngineWebRTCVideoSource::DeliverFrame(
   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
   void *handle)
{
  // mInSnapshotMode can only be set before the camera is turned on and
  // the renderer is started, so this amounts to a 1-shot
  if (mInSnapshotMode) {
    // Set the condition variable to false and notify Snapshot().
    MonitorAutoLock lock(mMonitor);
    mInSnapshotMode = false;
    lock.Notify();
    return 0;
  }

  // Check for proper state.
  if (mState != kStarted) {
    LOG(("DeliverFrame: video not started"));
    return 0;
  }

  // The renderer was registered for webrtc::kVideoI420, so the buffer must
  // hold a full I420 frame: a w*h Y plane plus quarter-size Cb and Cr planes.
  MOZ_ASSERT(mWidth*mHeight*3/2 == size);
  if (mWidth*mHeight*3/2 != size) {
    // Dimensions and buffer size disagree (e.g. a FrameSizeChange raced this
    // delivery); drop the frame rather than read out of bounds.
    return 0;
  }

  // Create a video frame and append it to the track.
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);

  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  uint8_t* frame = static_cast<uint8_t*> (buffer);
  const uint8_t lumaBpp = 8;    // bits per luma sample
  const uint8_t chromaBpp = 4;  // bits per chroma sample, averaged over the frame

  // Describe the I420 layout of |buffer| for the layers image: Y plane first,
  // then Cb, then Cr, each chroma plane at half width and half height.
  layers::PlanarYCbCrData data;
  data.mYChannel = frame;
  data.mYSize = IntSize(mWidth, mHeight);
  data.mYStride = mWidth * lumaBpp/ 8;
  data.mCbCrStride = mWidth * chromaBpp / 8;
  data.mCbChannel = frame + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = IntSize(mWidth/ 2, mHeight/ 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = IntSize(mWidth, mHeight);
  data.mStereoMode = StereoMode::MONO;

  // NOTE(review): SetData() presumably copies the plane data, since |buffer|
  // is owned by the caller and not valid after we return — confirm.
  videoImage->SetData(data);

#ifdef DEBUG
  static uint32_t frame_num = 0;
  LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
            mWidth, mHeight, time_stamp, render_time));
#endif

  // we don't touch anything in 'this' until here (except for snapshot,
  // which has it's own lock)
  MonitorAutoLock lock(mMonitor);

  // implicitly releases last image
  mImage = image.forget();

  return 0;
}
   1.129 +#endif
   1.130 +
   1.131 +// Called if the graph thinks it's running out of buffered video; repeat
   1.132 +// the last frame for whatever minimum period it think it needs.  Note that
   1.133 +// this means that no *real* frame can be inserted during this period.
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream *aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks &aLastEndTime)
{
  VideoSegment segment;

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  // Note: we're not giving up mImage here
  nsRefPtr<layers::Image> image = mImage;
  // How far (in track ticks) the track must be extended to reach aDesiredTime.
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
            (int64_t) target, (int64_t) delta, image ? "" : "<null>"));

  // Bug 846188 We may want to limit incoming frames to the requested frame rate
  // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
  // cause issues.
  // We may want to signal if the actual frame rate is below mMinFPS -
  // cameras often don't return the requested frame rate especially in low
  // light; we should consider surfacing this so that we can switch to a
  // lower resolution (which may up the frame rate)

  // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
  // Doing so means a negative delta and thus messes up handling of the graph
  if (delta > 0) {
    // nullptr images are allowed
    IntSize size(image ? mWidth : 0, image ? mHeight : 0);
    // Repeat the most recent frame for the whole gap; real frames cannot be
    // interleaved into this span.
    segment.AppendFrame(image.forget(), delta, size);
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &(segment))) {
      aLastEndTime = target;
    }
  }
}
   1.175 +
   1.176 +static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
   1.177 +  return aRange.mMin <= n && n <= aRange.mMax;
   1.178 +}
   1.179 +
   1.180 +static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
   1.181 +  return aRange.mMin <= n && n <= aRange.mMax;
   1.182 +}
   1.183 +
   1.184 +static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
   1.185 +  return std::max(aRange.mMin, std::min(n, aRange.mMax));
   1.186 +}
   1.187 +
   1.188 +static bool
   1.189 +AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
   1.190 +  return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
   1.191 +}
   1.192 +
   1.193 +static bool
   1.194 +Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
   1.195 +  MOZ_ASSERT(AreIntersecting(aA, aB));
   1.196 +  aA.mMin = std::max(aA.mMin, aB.mMin);
   1.197 +  aA.mMax = std::min(aA.mMax, aB.mMax);
   1.198 +  return true;
   1.199 +}
   1.200 +
   1.201 +static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
   1.202 +                                 const webrtc::CaptureCapability& aCandidate) {
   1.203 +  if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
   1.204 +      !IsWithin(aCandidate.height, aConstraints.mHeight)) {
   1.205 +    return false;
   1.206 +  }
   1.207 +  if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
   1.208 +    return false;
   1.209 +  }
   1.210 +  return true;
   1.211 +}
   1.212 +
   1.213 +void
   1.214 +MediaEngineWebRTCVideoSource::ChooseCapability(
   1.215 +    const VideoTrackConstraintsN &aConstraints,
   1.216 +    const MediaEnginePrefs &aPrefs)
   1.217 +{
   1.218 +#ifdef MOZ_B2G_CAMERA
   1.219 +  return GuessCapability(aConstraints, aPrefs);
   1.220 +#else
   1.221 +  NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
   1.222 +  int num = mViECapture->NumberOfCapabilities(uniqueId.get(), KMaxUniqueIdLength);
   1.223 +  if (num <= 0) {
   1.224 +    // Mac doesn't support capabilities.
   1.225 +    return GuessCapability(aConstraints, aPrefs);
   1.226 +  }
   1.227 +
   1.228 +  // The rest is the full algorithm for cameras that can list their capabilities.
   1.229 +
   1.230 +  LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
   1.231 +       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
   1.232 +
   1.233 +  typedef nsTArray<uint8_t> SourceSet;
   1.234 +
   1.235 +  SourceSet candidateSet;
   1.236 +  for (int i = 0; i < num; i++) {
   1.237 +    candidateSet.AppendElement(i);
   1.238 +  }
   1.239 +
   1.240 +  // Pick among capabilities: First apply required constraints.
   1.241 +
   1.242 +  for (uint32_t i = 0; i < candidateSet.Length();) {
   1.243 +    webrtc::CaptureCapability cap;
   1.244 +    mViECapture->GetCaptureCapability(uniqueId.get(), KMaxUniqueIdLength,
   1.245 +                                      candidateSet[i], cap);
   1.246 +    if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
   1.247 +      candidateSet.RemoveElementAt(i);
   1.248 +    } else {
   1.249 +      ++i;
   1.250 +    }
   1.251 +  }
   1.252 +
   1.253 +  SourceSet tailSet;
   1.254 +
   1.255 +  // Then apply advanced (formerly known as optional) constraints.
   1.256 +
   1.257 +  if (aConstraints.mAdvanced.WasPassed()) {
   1.258 +    auto &array = aConstraints.mAdvanced.Value();
   1.259 +
   1.260 +    for (uint32_t i = 0; i < array.Length(); i++) {
   1.261 +      SourceSet rejects;
   1.262 +      for (uint32_t j = 0; j < candidateSet.Length();) {
   1.263 +        webrtc::CaptureCapability cap;
   1.264 +        mViECapture->GetCaptureCapability(uniqueId.get(), KMaxUniqueIdLength,
   1.265 +                                          candidateSet[j], cap);
   1.266 +        if (!SatisfyConstraintSet(array[i], cap)) {
   1.267 +          rejects.AppendElement(candidateSet[j]);
   1.268 +          candidateSet.RemoveElementAt(j);
   1.269 +        } else {
   1.270 +          ++j;
   1.271 +        }
   1.272 +      }
   1.273 +      (candidateSet.Length()? tailSet : candidateSet).MoveElementsFrom(rejects);
   1.274 +    }
   1.275 +  }
   1.276 +
   1.277 +  if (!candidateSet.Length()) {
   1.278 +    candidateSet.AppendElement(0);
   1.279 +  }
   1.280 +
   1.281 +  int prefWidth = aPrefs.GetWidth();
   1.282 +  int prefHeight = aPrefs.GetHeight();
   1.283 +
   1.284 +  // Default is closest to available capability but equal to or below;
   1.285 +  // otherwise closest above.  Since we handle the num=0 case above and
   1.286 +  // take the first entry always, we can never exit uninitialized.
   1.287 +
   1.288 +  webrtc::CaptureCapability cap;
   1.289 +  bool higher = true;
   1.290 +  for (uint32_t i = 0; i < candidateSet.Length(); i++) {
   1.291 +    mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
   1.292 +                                      KMaxUniqueIdLength, candidateSet[i], cap);
   1.293 +    if (higher) {
   1.294 +      if (i == 0 ||
   1.295 +          (mCapability.width > cap.width && mCapability.height > cap.height)) {
   1.296 +        // closer than the current choice
   1.297 +        mCapability = cap;
   1.298 +        // FIXME: expose expected capture delay?
   1.299 +      }
   1.300 +      if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
   1.301 +        higher = false;
   1.302 +      }
   1.303 +    } else {
   1.304 +      if (cap.width > (uint32_t) prefWidth || cap.height > (uint32_t) prefHeight ||
   1.305 +          cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
   1.306 +        continue;
   1.307 +      }
   1.308 +      if (mCapability.width < cap.width && mCapability.height < cap.height) {
   1.309 +        mCapability = cap;
   1.310 +        // FIXME: expose expected capture delay?
   1.311 +      }
   1.312 +    }
   1.313 +  }
   1.314 +  LOG(("chose cap %dx%d @%dfps",
   1.315 +       mCapability.width, mCapability.height, mCapability.maxFPS));
   1.316 +#endif
   1.317 +}
   1.318 +
   1.319 +// A special version of the algorithm for cameras that don't list capabilities.
   1.320 +
void
MediaEngineWebRTCVideoSource::GuessCapability(
    const VideoTrackConstraintsN &aConstraints,
    const MediaEnginePrefs &aPrefs)
{
  // Computes mCapability (width/height/maxFPS) purely from the constraints
  // and prefs, without querying the device for capabilities.
  LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));

  // In short: compound constraint-ranges and use pref as ideal.

  ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
  ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);

  if (aConstraints.mAdvanced.WasPassed()) {
    const auto& advanced = aConstraints.mAdvanced.Value();
    for (uint32_t i = 0; i < advanced.Length(); i++) {
      // Only fold in an advanced set if it still overlaps both compound
      // ranges; a non-overlapping set is skipped rather than emptying them.
      if (AreIntersecting(cWidth, advanced[i].mWidth) &&
          AreIntersecting(cHeight, advanced[i].mHeight)) {
        Intersect(cWidth, advanced[i].mWidth);
        Intersect(cHeight, advanced[i].mHeight);
      }
    }
  }
  // Detect Mac HD cams and give them some love in the form of a dynamic default
  // since that hardware switches between 4:3 at low res and 16:9 at higher res.
  //
  // Logic is: if we're relying on defaults in aPrefs, then
  // only use HD pref when non-HD pref is too small and HD pref isn't too big.

  bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
                mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
                (aPrefs.GetWidth() < cWidth.mMin ||
                 aPrefs.GetHeight() < cHeight.mMin) &&
                !(aPrefs.GetWidth(true) > cWidth.mMax ||
                  aPrefs.GetHeight(true) > cHeight.mMax));
  int prefWidth = aPrefs.GetWidth(macHD);
  int prefHeight = aPrefs.GetHeight(macHD);

  // Clamp width and height without distorting inherent aspect too much.

  if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
    // If both are within, we get the default (pref) aspect.
    // If neither are within, we get the aspect of the enclosing constraint.
    // Either are presumably reasonable (presuming constraints are sane).
    mCapability.width = Clamp(prefWidth, cWidth);
    mCapability.height = Clamp(prefHeight, cHeight);
  } else {
    // But if only one clips (e.g. width), the resulting skew is undesirable:
    //       .------------.
    //       | constraint |
    //  .----+------------+----.
    //  |    |            |    |
    //  |pref|  result    |    |   prefAspect != resultAspect
    //  |    |            |    |
    //  '----+------------+----'
    //       '------------'
    //  So in this case, preserve prefAspect instead:
    //  .------------.
    //  | constraint |
    //  .------------.
    //  |pref        |             prefAspect is unchanged
    //  '------------'
    //  |            |
    //  '------------'
    if (IsWithin(prefWidth, cWidth)) {
      // Height clipped: derive width from the clamped height at pref aspect.
      mCapability.height = Clamp(prefHeight, cHeight);
      mCapability.width = Clamp((mCapability.height * prefWidth) /
                                prefHeight, cWidth);
    } else {
      // Width clipped: derive height from the clamped width at pref aspect.
      mCapability.width = Clamp(prefWidth, cWidth);
      mCapability.height = Clamp((mCapability.width * prefHeight) /
                                 prefWidth, cHeight);
    }
  }
  mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
  LOG(("chose cap %dx%d @%dfps",
       mCapability.width, mCapability.height, mCapability.maxFPS));
}
   1.399 +
   1.400 +void
   1.401 +MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
   1.402 +{
   1.403 +  aName = mDeviceName;
   1.404 +}
   1.405 +
   1.406 +void
   1.407 +MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
   1.408 +{
   1.409 +  aUUID = mUniqueId;
   1.410 +}
   1.411 +
// Reserve the capture device for use, choosing mCapability from the
// constraints and prefs.  Safe to call when already allocated/shared; in that
// case the existing capability is kept (a later opener cannot change the
// resolution).  Returns NS_ERROR_FAILURE if the device cannot be allocated.
nsresult
MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
                                       const MediaEnginePrefs &aPrefs)
{
  LOG((__FUNCTION__));
#ifdef MOZ_B2G_CAMERA
  // B2G: allocation must run on the main thread.  Dispatch AllocImpl() and
  // block on mCallbackMonitor until it (or an error callback) notifies us,
  // then judge success by whether mState reached kAllocated.
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  if (mState == kReleased && mInitDone) {
    ChooseCapability(aConstraints, aPrefs);
    NS_DispatchToMainThread(WrapRunnable(this,
                                         &MediaEngineWebRTCVideoSource::AllocImpl));
    mCallbackMonitor.Wait();
    if (mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }
  }
#else
  if (mState == kReleased && mInitDone) {
    // Note: if shared, we don't allow a later opener to affect the resolution.
    // (This may change depending on spec changes for Constraints/settings)

    ChooseCapability(aConstraints, aPrefs);

    if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
                                           KMaxUniqueIdLength, mCaptureIndex)) {
      return NS_ERROR_FAILURE;
    }
    mState = kAllocated;
    LOG(("Video device %d allocated", mCaptureIndex));
  } else if (mSources.IsEmpty()) {
    LOG(("Video device %d reallocated", mCaptureIndex));
  } else {
    LOG(("Video device %d allocated shared", mCaptureIndex));
  }
#endif

  return NS_OK;
}
   1.450 +
// Release the capture device once the last consumer is gone.  If other
// SourceMediaStreams still reference this source, only log and succeed.
// Fails if called while capture is still running (state not kStopped or
// kAllocated).
nsresult
MediaEngineWebRTCVideoSource::Deallocate()
{
  LOG((__FUNCTION__));
  if (mSources.IsEmpty()) {
#ifdef MOZ_B2G_CAMERA
    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
    if (mState != kStopped && mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }
#ifdef MOZ_B2G_CAMERA
    // We do not register success callback here

    NS_DispatchToMainThread(WrapRunnable(this,
                                         &MediaEngineWebRTCVideoSource::DeallocImpl));
    mCallbackMonitor.Wait();
    if (mState != kReleased) {
      return NS_ERROR_FAILURE;
    }
#elif XP_MACOSX
    // NOTE(review): this relies on #elif evaluating an undefined XP_MACOSX
    // as 0 on non-Mac builds — consider #elif defined(XP_MACOSX).
    // Bug 829907 - on mac, in shutdown, the mainthread stops processing
    // 'native' events, and the QTKit code uses events to the main native CFRunLoop
    // in order to provide thread safety.  In order to avoid this locking us up,
    // release the ViE capture device synchronously on MainThread (so the native
    // event isn't needed).
    // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
    // XXX It might be nice to only do this if we're in shutdown...  Hard to be
    // sure when that is though.
    // Thread safety: a) we call this synchronously, and don't use ViECapture from
    // another thread anywhere else, b) ViEInputManager::DestroyCaptureDevice() grabs
    // an exclusive object lock and deletes it in a critical section, so all in all
    // this should be safe threadwise.
    NS_DispatchToMainThread(WrapRunnable(mViECapture,
                                         &webrtc::ViECapture::ReleaseCaptureDevice,
                                         mCaptureIndex),
                            NS_DISPATCH_SYNC);
#else
    mViECapture->ReleaseCaptureDevice(mCaptureIndex);
#endif
    mState = kReleased;
    LOG(("Video device %d deallocated", mCaptureIndex));
  } else {
    LOG(("Video device %d deallocated but still in use", mCaptureIndex));
  }
  return NS_OK;
}
   1.498 +
// Attach a consuming stream/track and, if this is the first consumer, start
// capture.  Subsequent consumers share the already-running capture.
nsresult
MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  LOG((__FUNCTION__));
#ifndef MOZ_B2G_CAMERA
  int error = 0;
#endif
  if (!mInitDone || !aStream) {
    return NS_ERROR_FAILURE;
  }

  // Register the consumer and create its video track up front, whether or
  // not capture is already running.
  mSources.AppendElement(aStream);

  aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
  aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);

#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif

  if (mState == kStarted) {
    return NS_OK;
  }
  mImageContainer = layers::LayerManager::CreateImageContainer();

#ifdef MOZ_B2G_CAMERA
  // B2G: start on the main thread and wait for OnHardwareStateChange() (or
  // OnError()) to notify the monitor; success means mState reached kStarted.
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StartImpl,
                                       mCapability));
  mCallbackMonitor.Wait();
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }
#else
  // Flip to kStarted before wiring the renderer so DeliverFrame() accepts the
  // very first frames.
  // NOTE(review): the error returns below leave mState at kStarted even
  // though capture never started — confirm callers recover via Stop()/
  // Shutdown().
  mState = kStarted;
  error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }

  error = mViERender->StartRender(mCaptureIndex);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }

  if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
    return NS_ERROR_FAILURE;
  }
#endif

  return NS_OK;
}
   1.551 +
// Detach one consuming stream.  Capture is only torn down when the last
// consumer goes away; stopping an already-stopped source is allowed.
nsresult
MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
  LOG((__FUNCTION__));
  if (!mSources.RemoveElement(aSource)) {
    // Already stopped - this is allowed
    return NS_OK;
  }
  if (!mSources.IsEmpty()) {
    // Other consumers remain; keep capturing for them.
    return NS_OK;
  }
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }

  {
    // Flip state and end the track under mMonitor so DeliverFrame()/
    // NotifyPull() observe the transition atomically.
    MonitorAutoLock lock(mMonitor);
    mState = kStopped;
    aSource->EndTrack(aID);
    // Drop any cached image so we don't start with a stale image on next
    // usage
    mImage = nullptr;
  }
#ifdef MOZ_B2G_CAMERA
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StopImpl));
#else
  mViERender->StopRender(mCaptureIndex);
  mViERender->RemoveRenderer(mCaptureIndex);
  mViECapture->StopCapture(mCaptureIndex);
#endif

  return NS_OK;
}
   1.589 +
// Still-image capture is not supported through this interface yet; both
// parameters are ignored.
nsresult
MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
  return NS_ERROR_NOT_IMPLEMENTED;
}
   1.595 +
   1.596 +/**
   1.597 + * Initialization and Shutdown functions for the video source, called by the
   1.598 + * constructor and destructor respectively.
   1.599 + */
   1.600 +
// Query this device's name and unique id — from the camera service on B2G,
// or from the ViE capture engine otherwise — and acquire the ViE interfaces.
// Sets mInitDone only when every step succeeds (each early return leaves it
// unset).
void
MediaEngineWebRTCVideoSource::Init()
{
#ifdef MOZ_B2G_CAMERA
  nsAutoCString deviceName;
  ICameraControl::GetCameraName(mCaptureIndex, deviceName);
  // B2G cameras have no separate unique id; reuse the name for both.
  CopyUTF8toUTF16(deviceName, mDeviceName);
  CopyUTF8toUTF16(deviceName, mUniqueId);
#else
  // fix compile warning for these being unused. (remove once used)
  (void) mFps;
  (void) mMinFps;

  LOG((__FUNCTION__));
  if (mVideoEngine == nullptr) {
    return;
  }

  mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
  if (mViEBase == nullptr) {
    return;
  }

  // Get interfaces for capture, render for now
  mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
  mViERender = webrtc::ViERender::GetInterface(mVideoEngine);

  if (mViECapture == nullptr || mViERender == nullptr) {
    return;
  }

  // NOTE(review): this local KMaxUniqueIdLength shadows the constant of the
  // same name used in ChooseCapability()/Allocate() — confirm the values
  // agree.
  const uint32_t KMaxDeviceNameLength = 128;
  const uint32_t KMaxUniqueIdLength = 256;
  char deviceName[KMaxDeviceNameLength];
  char uniqueId[KMaxUniqueIdLength];
  if (mViECapture->GetCaptureDevice(mCaptureIndex,
                                    deviceName, KMaxDeviceNameLength,
                                    uniqueId, KMaxUniqueIdLength)) {
    return;
  }

  CopyUTF8toUTF16(deviceName, mDeviceName);
  CopyUTF8toUTF16(uniqueId, mUniqueId);
#endif

  mInitDone = true;
}
   1.648 +
// Tear down in reverse order of setup: stop all remaining consumers,
// deallocate the device, then release the ViE interfaces acquired in Init().
// No-op if Init() never completed.
void
MediaEngineWebRTCVideoSource::Shutdown()
{
  LOG((__FUNCTION__));
  if (!mInitDone) {
    return;
  }
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  if (mState == kStarted) {
    while (!mSources.IsEmpty()) {
      Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
    }
    MOZ_ASSERT(mState == kStopped);
  }

  if (mState == kAllocated || mState == kStopped) {
    Deallocate();
  }
#ifndef MOZ_B2G_CAMERA
  mViECapture->Release();
  mViERender->Release();
  mViEBase->Release();
#endif
  mState = kReleased;
  mInitDone = false;
}
   1.677 +
   1.678 +#ifdef MOZ_B2G_CAMERA
   1.679 +
   1.680 +// All these functions must be run on MainThread!
   1.681 +void
   1.682 +MediaEngineWebRTCVideoSource::AllocImpl() {
   1.683 +  MOZ_ASSERT(NS_IsMainThread());
   1.684 +
   1.685 +  mCameraControl = ICameraControl::Create(mCaptureIndex);
   1.686 +  if (mCameraControl) {
   1.687 +    mState = kAllocated;
   1.688 +    // Add this as a listener for CameraControl events. We don't need
   1.689 +    // to explicitly remove this--destroying the CameraControl object
   1.690 +    // in DeallocImpl() will do that for us.
   1.691 +    mCameraControl->AddListener(this);
   1.692 +  }
   1.693 +
   1.694 +  mCallbackMonitor.Notify();
   1.695 +}
   1.696 +
// Main-thread half of Deallocate(): dropping the reference destroys the
// CameraControl, which also removes us as its listener (see AllocImpl()).
void
MediaEngineWebRTCVideoSource::DeallocImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  mCameraControl = nullptr;
}
   1.703 +
   1.704 +// The same algorithm from bug 840244
   1.705 +static int
   1.706 +GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
   1.707 +  int screenAngle = 0;
   1.708 +  switch (aScreen) {
   1.709 +    case eScreenOrientation_PortraitPrimary:
   1.710 +      screenAngle = 0;
   1.711 +      break;
   1.712 +    case eScreenOrientation_PortraitSecondary:
   1.713 +      screenAngle = 180;
   1.714 +      break;
   1.715 +   case eScreenOrientation_LandscapePrimary:
   1.716 +      screenAngle = 90;
   1.717 +      break;
   1.718 +   case eScreenOrientation_LandscapeSecondary:
   1.719 +      screenAngle = 270;
   1.720 +      break;
   1.721 +   default:
   1.722 +      MOZ_ASSERT(false);
   1.723 +      break;
   1.724 +  }
   1.725 +
   1.726 +  int result;
   1.727 +
   1.728 +  if (aBackCamera) {
   1.729 +    //back camera
   1.730 +    result = (aCameraMountAngle - screenAngle + 360) % 360;
   1.731 +  } else {
   1.732 +    //front camera
   1.733 +    result = (aCameraMountAngle + screenAngle) % 360;
   1.734 +  }
   1.735 +  return result;
   1.736 +}
   1.737 +
   1.738 +// undefine to remove on-the-fly rotation support
   1.739 +// #define DYNAMIC_GUM_ROTATION
   1.740 +
// hal screen-configuration observer: recompute the frame rotation when the
// screen orientation changes.  The body is compiled out unless
// DYNAMIC_GUM_ROTATION is defined above (on-the-fly rotation support).
void
MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
#ifdef DYNAMIC_GUM_ROTATION
  // mRotation is read by frame processing, so update it under mMonitor.
  MonitorAutoLock enter(mMonitor);
  mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);

  LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
       mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
#endif
}
   1.751 +
// Main-thread half of Start(): configure and start the camera in picture
// mode with a preview sized to the chosen capability, and begin observing
// orientation changes.  Completion is reported asynchronously — presumably
// via OnHardwareStateChange(), which sets kStarted and notifies the waiter.
void
MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
  MOZ_ASSERT(NS_IsMainThread());

  ICameraControl::Configuration config;
  config.mMode = ICameraControl::kPictureMode;
  config.mPreviewSize.width = aCapability.width;
  config.mPreviewSize.height = aCapability.height;
  mCameraControl->Start(&config);
  mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);

  hal::RegisterScreenConfigurationObserver(this);
}
   1.765 +
// Main-thread half of Stop(): stop observing orientation changes before
// shutting the camera down.
void
MediaEngineWebRTCVideoSource::StopImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  hal::UnregisterScreenConfigurationObserver(this);
  mCameraControl->Stop();
}
   1.773 +
// Main-thread snapshot trigger; the picture data arrives asynchronously via
// OnTakePictureComplete().
void
MediaEngineWebRTCVideoSource::SnapshotImpl() {
  MOZ_ASSERT(NS_IsMainThread());
  mCameraControl->TakePicture();
}
   1.779 +
// CameraControlListener callback: drives the allocation state machine from
// hardware open/close events and wakes threads blocked on mCallbackMonitor.
void
MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
{
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  if (aState == CameraControlListener::kHardwareClosed) {
    // When the first CameraControl listener is added, it gets pushed
    // the current state of the camera--normally 'closed'. We only
    // pay attention to that state if we've progressed out of the
    // allocated state.
    if (mState != kAllocated) {
      mState = kReleased;
      mCallbackMonitor.Notify();
    }
  } else {
    // Hardware is open: record the sensor mount angle, camera facing, and
    // current screen orientation so frames can be rotated correctly, then
    // mark the source started and wake the thread waiting in Start().
    mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
    MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
               mCameraAngle == 270);
    hal::ScreenConfiguration aConfig;
    hal::GetCurrentScreenConfiguration(&aConfig);

    nsCString deviceName;
    ICameraControl::GetCameraName(mCaptureIndex, deviceName);
    if (deviceName.EqualsASCII("back")) {
      mBackCamera = true;
    }

    mRotation = GetRotateAmount(aConfig.orientation(), mCameraAngle, mBackCamera);
    LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
         mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
    mState = kStarted;
    mCallbackMonitor.Notify();
  }
}
   1.813 +
void
MediaEngineWebRTCVideoSource::OnError(CameraErrorContext aContext, CameraError aError)
{
  // Camera reported an error: wake any thread blocked on mCallbackMonitor so
  // it can re-examine state instead of waiting forever.
  // NOTE(review): aContext/aError are dropped here — presumably the waiter
  // detects failure from the unchanged state; confirm nothing needs them.
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  mCallbackMonitor.Notify();
}
   1.820 +
   1.821 +void
   1.822 +MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
   1.823 +{
   1.824 +  mLastCapture =
   1.825 +    static_cast<nsIDOMFile*>(new nsDOMMemoryFile(static_cast<void*>(aData),
   1.826 +                                                 static_cast<uint64_t>(aLength),
   1.827 +                                                 aMimeType));
   1.828 +  mCallbackMonitor.Notify();
   1.829 +}
   1.830 +
void
MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
  // Copy the gralloc-backed preview frame into a freshly allocated planar
  // YCbCr image, rotating it by mRotation degrees, and publish the result in
  // mImage. Called from OnNewPreviewFrame() with mMonitor held.
  layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
  android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
  void *pMem = nullptr;
  // 4:2:0 buffer: full-size luma plane plus two quarter-size chroma planes
  // (source is NV21 — see the FOURCC passed to ConvertToI420 below).
  uint32_t size = aWidth * aHeight * 3 / 2;

  // Map the gralloc buffer for CPU reads; must be balanced by unlock() below.
  graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);

  uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
  // Create a video frame and append it to the track.
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  uint32_t dstWidth;
  uint32_t dstHeight;

  // A quarter-turn rotation swaps the output dimensions; the total byte
  // count ('size') is unchanged either way.
  if (mRotation == 90 || mRotation == 270) {
    dstWidth = aHeight;
    dstHeight = aWidth;
  } else {
    dstWidth = aWidth;
    dstHeight = aHeight;
  }

  uint32_t half_width = dstWidth / 2;
  uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
  // Convert NV21 -> contiguous I420 in dstPtr while rotating: Y plane first,
  // Cb at offset dstWidth*dstHeight, Cr at 5/4 of that offset; chroma planes
  // use half-width strides.
  libyuv::ConvertToI420(srcPtr, size,
                        dstPtr, dstWidth,
                        dstPtr + (dstWidth * dstHeight), half_width,
                        dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
                        0, 0,
                        aWidth, aHeight,
                        aWidth, aHeight,
                        static_cast<libyuv::RotationMode>(mRotation),
                        libyuv::FOURCC_NV21);
  graphicBuffer->unlock();

  // Bits per pixel of each plane, used to express strides in bytes.
  const uint8_t lumaBpp = 8;
  const uint8_t chromaBpp = 4;

  // Describe the I420 buffer just written so the image can adopt it without
  // another copy (SetDataNoCopy).
  layers::PlanarYCbCrData data;
  data.mYChannel = dstPtr;
  data.mYSize = IntSize(dstWidth, dstHeight);
  data.mYStride = dstWidth * lumaBpp / 8;
  data.mCbCrStride = dstWidth * chromaBpp / 8;
  data.mCbChannel = dstPtr + dstHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel +( dstHeight * data.mCbCrStride / 2);
  data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = IntSize(dstWidth, dstHeight);
  data.mStereoMode = StereoMode::MONO;

  videoImage->SetDataNoCopy(data);

  // implicitly releases last image
  mImage = image.forget();
}
   1.890 +
   1.891 +bool
   1.892 +MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
   1.893 +  {
   1.894 +    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
   1.895 +    if (mState == kStopped) {
   1.896 +      return false;
   1.897 +    }
   1.898 +  }
   1.899 +
   1.900 +  MonitorAutoLock enter(mMonitor);
   1.901 +  // Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
   1.902 +  RotateImage(aImage, aWidth, aHeight);
   1.903 +  if (mRotation != 0 && mRotation != 180) {
   1.904 +    uint32_t temp = aWidth;
   1.905 +    aWidth = aHeight;
   1.906 +    aHeight = temp;
   1.907 +  }
   1.908 +  if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
   1.909 +    mWidth = aWidth;
   1.910 +    mHeight = aHeight;
   1.911 +    LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
   1.912 +  }
   1.913 +
   1.914 +  return true; // return true because we're accepting the frame
   1.915 +}
   1.916 +#endif
   1.917 +
   1.918 +}

mercurial