content/media/webrtc/MediaEngineWebRTCVideo.cpp

Fri, 16 Jan 2015 04:50:19 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Fri, 16 Jan 2015 04:50:19 +0100
branch
TOR_BUG_9701
changeset 13
44a2da4a2ab2
permissions
-rw-r--r--

Replace accessor implementation with direct member state manipulation, by
request https://trac.torproject.org/projects/tor/ticket/9701#comment:32

     1 /* This Source Code Form is subject to the terms of the Mozilla Public
     2  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
     3  * You can obtain one at http://mozilla.org/MPL/2.0/. */
     5 #include "MediaEngineWebRTC.h"
     6 #include "Layers.h"
     7 #include "ImageTypes.h"
     8 #include "ImageContainer.h"
     9 #include "mozilla/layers/GrallocTextureClient.h"
    10 #include "nsMemory.h"
    11 #include "mtransport/runnable_utils.h"
    12 #include "MediaTrackConstraints.h"
    14 #ifdef MOZ_B2G_CAMERA
    15 #include "GrallocImages.h"
    16 #include "libyuv.h"
    17 #include "mozilla/Hal.h"
    18 #include "ScreenOrientation.h"
    19 using namespace mozilla::dom;
    20 #endif
    21 namespace mozilla {
    23 using namespace mozilla::gfx;
    24 using dom::ConstrainLongRange;
    25 using dom::ConstrainDoubleRange;
    26 using dom::MediaTrackConstraintSet;
    28 #ifdef PR_LOGGING
    29 extern PRLogModuleInfo* GetMediaManagerLog();
    30 #define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
    31 #define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
    32 #else
    33 #define LOG(msg)
    34 #define LOGFRAME(msg)
    35 #endif
    37 /**
    38  * Webrtc video source.
    39  */
// XPCOM boilerplate. On desktop the source implements plain refcounted
// nsIRunnable; on B2G it additionally inherits refcounting from
// CameraControlListener, so only QueryInterface is generated here and
// AddRef/Release are forwarded to the base class.
#ifndef MOZ_B2G_CAMERA
NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
#else
NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
#endif
    48 // ViEExternalRenderer Callback.
    49 #ifndef MOZ_B2G_CAMERA
// ViEExternalRenderer callback: the capture engine reports new frame
// dimensions.  Records them for DeliverFrame()/NotifyPull() to use.
// NOTE(review): mWidth/mHeight are written here without holding mMonitor
// while NotifyPull() reads them under the lock — presumably safe because
// size changes arrive on the same thread as DeliverFrame; confirm.
int
MediaEngineWebRTCVideoSource::FrameSizeChange(
   unsigned int w, unsigned int h, unsigned int streams)
{
  mWidth = w;
  mHeight = h;
  LOG(("Video FrameSizeChange: %ux%u", w, h));
  return 0;
}
    60 // ViEExternalRenderer Callback. Process every incoming frame here.
// ViEExternalRenderer callback.  Process every incoming frame here: wrap
// the raw I420 buffer in a PlanarYCbCr image and publish it as the current
// frame (mImage) for NotifyPull() to append to the track.  Returns 0 in
// all cases, including frames that are silently dropped.
int
MediaEngineWebRTCVideoSource::DeliverFrame(
   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
   void *handle)
{
  // mInSnapshotMode can only be set before the camera is turned on and
  // the renderer is started, so this amounts to a 1-shot
  if (mInSnapshotMode) {
    // Set the condition variable to false and notify Snapshot().
    MonitorAutoLock lock(mMonitor);
    mInSnapshotMode = false;
    lock.Notify();
    return 0;
  }

  // Check for proper state.
  if (mState != kStarted) {
    LOG(("DeliverFrame: video not started"));
    return 0;
  }

  // An I420 frame is width*height luma bytes plus two quarter-size chroma
  // planes; drop any frame whose size doesn't match the known geometry.
  MOZ_ASSERT(mWidth*mHeight*3/2 == size);
  if (mWidth*mHeight*3/2 != size) {
    return 0;
  }

  // Create a video frame and append it to the track.
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);

  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  uint8_t* frame = static_cast<uint8_t*> (buffer);
  const uint8_t lumaBpp = 8;
  const uint8_t chromaBpp = 4;

  // Describe the planar layout: full-resolution Y plane followed by
  // half-resolution Cb and Cr planes packed immediately after it.
  layers::PlanarYCbCrData data;
  data.mYChannel = frame;
  data.mYSize = IntSize(mWidth, mHeight);
  data.mYStride = mWidth * lumaBpp/ 8;
  data.mCbCrStride = mWidth * chromaBpp / 8;
  data.mCbChannel = frame + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = IntSize(mWidth/ 2, mHeight/ 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = IntSize(mWidth, mHeight);
  data.mStereoMode = StereoMode::MONO;

  // Hand the plane description to the image.
  videoImage->SetData(data);

#ifdef DEBUG
  static uint32_t frame_num = 0;
  LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
            mWidth, mHeight, time_stamp, render_time));
#endif

  // we don't touch anything in 'this' until here (except for snapshot,
  // which has it's own lock)
  MonitorAutoLock lock(mMonitor);

  // implicitly releases last image
  mImage = image.forget();

  return 0;
}
   126 #endif
   128 // Called if the graph thinks it's running out of buffered video; repeat
   129 // the last frame for whatever minimum period it think it needs.  Note that
   130 // this means that no *real* frame can be inserted during this period.
// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it think it needs.  Note that
// this means that no *real* frame can be inserted during this period.
//
// Appends the most recently delivered image (mImage, possibly null) to
// aSource's track aID for the interval [aLastEndTime, aDesiredTime), and
// advances aLastEndTime on success.  Runs under mMonitor so it can't race
// DeliverFrame()'s update of mImage.
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream *aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks &aLastEndTime)
{
  VideoSegment segment;

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  // Note: we're not giving up mImage here
  nsRefPtr<layers::Image> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
            (int64_t) target, (int64_t) delta, image ? "" : "<null>"));

  // Bug 846188 We may want to limit incoming frames to the requested frame rate
  // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
  // cause issues.
  // We may want to signal if the actual frame rate is below mMinFPS -
  // cameras often don't return the requested frame rate especially in low
  // light; we should consider surfacing this so that we can switch to a
  // lower resolution (which may up the frame rate)

  // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
  // Doing so means a negative delta and thus messes up handling of the graph
  if (delta > 0) {
    // nullptr images are allowed
    IntSize size(image ? mWidth : 0, image ? mHeight : 0);
    segment.AppendFrame(image.forget(), delta, size);
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &(segment))) {
      aLastEndTime = target;
    }
  }
}
   173 static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
   174   return aRange.mMin <= n && n <= aRange.mMax;
   175 }
   177 static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
   178   return aRange.mMin <= n && n <= aRange.mMax;
   179 }
   181 static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
   182   return std::max(aRange.mMin, std::min(n, aRange.mMax));
   183 }
   185 static bool
   186 AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
   187   return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
   188 }
   190 static bool
   191 Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
   192   MOZ_ASSERT(AreIntersecting(aA, aB));
   193   aA.mMin = std::max(aA.mMin, aB.mMin);
   194   aA.mMax = std::min(aA.mMax, aB.mMax);
   195   return true;
   196 }
   198 static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
   199                                  const webrtc::CaptureCapability& aCandidate) {
   200   if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
   201       !IsWithin(aCandidate.height, aConstraints.mHeight)) {
   202     return false;
   203   }
   204   if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
   205     return false;
   206   }
   207   return true;
   208 }
   210 void
   211 MediaEngineWebRTCVideoSource::ChooseCapability(
   212     const VideoTrackConstraintsN &aConstraints,
   213     const MediaEnginePrefs &aPrefs)
   214 {
   215 #ifdef MOZ_B2G_CAMERA
   216   return GuessCapability(aConstraints, aPrefs);
   217 #else
   218   NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
   219   int num = mViECapture->NumberOfCapabilities(uniqueId.get(), KMaxUniqueIdLength);
   220   if (num <= 0) {
   221     // Mac doesn't support capabilities.
   222     return GuessCapability(aConstraints, aPrefs);
   223   }
   225   // The rest is the full algorithm for cameras that can list their capabilities.
   227   LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
   228        aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
   230   typedef nsTArray<uint8_t> SourceSet;
   232   SourceSet candidateSet;
   233   for (int i = 0; i < num; i++) {
   234     candidateSet.AppendElement(i);
   235   }
   237   // Pick among capabilities: First apply required constraints.
   239   for (uint32_t i = 0; i < candidateSet.Length();) {
   240     webrtc::CaptureCapability cap;
   241     mViECapture->GetCaptureCapability(uniqueId.get(), KMaxUniqueIdLength,
   242                                       candidateSet[i], cap);
   243     if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
   244       candidateSet.RemoveElementAt(i);
   245     } else {
   246       ++i;
   247     }
   248   }
   250   SourceSet tailSet;
   252   // Then apply advanced (formerly known as optional) constraints.
   254   if (aConstraints.mAdvanced.WasPassed()) {
   255     auto &array = aConstraints.mAdvanced.Value();
   257     for (uint32_t i = 0; i < array.Length(); i++) {
   258       SourceSet rejects;
   259       for (uint32_t j = 0; j < candidateSet.Length();) {
   260         webrtc::CaptureCapability cap;
   261         mViECapture->GetCaptureCapability(uniqueId.get(), KMaxUniqueIdLength,
   262                                           candidateSet[j], cap);
   263         if (!SatisfyConstraintSet(array[i], cap)) {
   264           rejects.AppendElement(candidateSet[j]);
   265           candidateSet.RemoveElementAt(j);
   266         } else {
   267           ++j;
   268         }
   269       }
   270       (candidateSet.Length()? tailSet : candidateSet).MoveElementsFrom(rejects);
   271     }
   272   }
   274   if (!candidateSet.Length()) {
   275     candidateSet.AppendElement(0);
   276   }
   278   int prefWidth = aPrefs.GetWidth();
   279   int prefHeight = aPrefs.GetHeight();
   281   // Default is closest to available capability but equal to or below;
   282   // otherwise closest above.  Since we handle the num=0 case above and
   283   // take the first entry always, we can never exit uninitialized.
   285   webrtc::CaptureCapability cap;
   286   bool higher = true;
   287   for (uint32_t i = 0; i < candidateSet.Length(); i++) {
   288     mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
   289                                       KMaxUniqueIdLength, candidateSet[i], cap);
   290     if (higher) {
   291       if (i == 0 ||
   292           (mCapability.width > cap.width && mCapability.height > cap.height)) {
   293         // closer than the current choice
   294         mCapability = cap;
   295         // FIXME: expose expected capture delay?
   296       }
   297       if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
   298         higher = false;
   299       }
   300     } else {
   301       if (cap.width > (uint32_t) prefWidth || cap.height > (uint32_t) prefHeight ||
   302           cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
   303         continue;
   304       }
   305       if (mCapability.width < cap.width && mCapability.height < cap.height) {
   306         mCapability = cap;
   307         // FIXME: expose expected capture delay?
   308       }
   309     }
   310   }
   311   LOG(("chose cap %dx%d @%dfps",
   312        mCapability.width, mCapability.height, mCapability.maxFPS));
   313 #endif
   314 }
   316 // A special version of the algorithm for cameras that don't list capabilities.
// A special version of the algorithm for cameras that don't list
// capabilities: compound the required and advanced constraint ranges, then
// clamp the preferred size into them while preserving the preferred aspect
// ratio when only one dimension would otherwise clip.  Writes the result
// into mCapability (maxFPS falls back to the engine default).
void
MediaEngineWebRTCVideoSource::GuessCapability(
    const VideoTrackConstraintsN &aConstraints,
    const MediaEnginePrefs &aPrefs)
{
  LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));

  // In short: compound constraint-ranges and use pref as ideal.

  ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
  ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);

  if (aConstraints.mAdvanced.WasPassed()) {
    const auto& advanced = aConstraints.mAdvanced.Value();
    for (uint32_t i = 0; i < advanced.Length(); i++) {
      // Only fold in an advanced set when it overlaps both current ranges;
      // a non-overlapping set is skipped rather than emptying the range.
      if (AreIntersecting(cWidth, advanced[i].mWidth) &&
          AreIntersecting(cHeight, advanced[i].mHeight)) {
        Intersect(cWidth, advanced[i].mWidth);
        Intersect(cHeight, advanced[i].mHeight);
      }
    }
  }
  // Detect Mac HD cams and give them some love in the form of a dynamic default
  // since that hardware switches between 4:3 at low res and 16:9 at higher res.
  //
  // Logic is: if we're relying on defaults in aPrefs, then
  // only use HD pref when non-HD pref is too small and HD pref isn't too big.

  bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
                mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
                (aPrefs.GetWidth() < cWidth.mMin ||
                 aPrefs.GetHeight() < cHeight.mMin) &&
                !(aPrefs.GetWidth(true) > cWidth.mMax ||
                  aPrefs.GetHeight(true) > cHeight.mMax));
  int prefWidth = aPrefs.GetWidth(macHD);
  int prefHeight = aPrefs.GetHeight(macHD);

  // Clamp width and height without distorting inherent aspect too much.

  if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
    // If both are within, we get the default (pref) aspect.
    // If neither are within, we get the aspect of the enclosing constraint.
    // Either are presumably reasonable (presuming constraints are sane).
    mCapability.width = Clamp(prefWidth, cWidth);
    mCapability.height = Clamp(prefHeight, cHeight);
  } else {
    // But if only one clips (e.g. width), the resulting skew is undesirable:
    //       .------------.
    //       | constraint |
    //  .----+------------+----.
    //  |    |            |    |
    //  |pref|  result    |    |   prefAspect != resultAspect
    //  |    |            |    |
    //  '----+------------+----'
    //       '------------'
    //  So in this case, preserve prefAspect instead:
    //  .------------.
    //  | constraint |
    //  .------------.
    //  |pref        |             prefAspect is unchanged
    //  '------------'
    //  |            |
    //  '------------'
    if (IsWithin(prefWidth, cWidth)) {
      mCapability.height = Clamp(prefHeight, cHeight);
      mCapability.width = Clamp((mCapability.height * prefWidth) /
                                prefHeight, cWidth);
    } else {
      mCapability.width = Clamp(prefWidth, cWidth);
      mCapability.height = Clamp((mCapability.width * prefHeight) /
                                 prefWidth, cHeight);
    }
  }
  mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
  LOG(("chose cap %dx%d @%dfps",
       mCapability.width, mCapability.height, mCapability.maxFPS));
}
   397 void
   398 MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
   399 {
   400   aName = mDeviceName;
   401 }
   403 void
   404 MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
   405 {
   406   aUUID = mUniqueId;
   407 }
// Reserve the capture device for use, choosing mCapability from the given
// constraints and prefs.  Only a kReleased+initialized source actually
// (re)allocates hardware; subsequent callers share the existing allocation
// and may not change the resolution.  On B2G allocation is dispatched to
// MainThread (AllocImpl) and this blocks on mCallbackMonitor for the
// result.  Returns NS_ERROR_FAILURE if the device can't be acquired.
nsresult
MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
                                       const MediaEnginePrefs &aPrefs)
{
  LOG((__FUNCTION__));
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  if (mState == kReleased && mInitDone) {
    ChooseCapability(aConstraints, aPrefs);
    NS_DispatchToMainThread(WrapRunnable(this,
                                         &MediaEngineWebRTCVideoSource::AllocImpl));
    // Wait for AllocImpl (or an error callback) to notify us with the result.
    mCallbackMonitor.Wait();
    if (mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }
  }
#else
  if (mState == kReleased && mInitDone) {
    // Note: if shared, we don't allow a later opener to affect the resolution.
    // (This may change depending on spec changes for Constraints/settings)

    ChooseCapability(aConstraints, aPrefs);

    // AllocateCaptureDevice returns non-zero on failure.
    if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
                                           KMaxUniqueIdLength, mCaptureIndex)) {
      return NS_ERROR_FAILURE;
    }
    mState = kAllocated;
    LOG(("Video device %d allocated", mCaptureIndex));
  } else if (mSources.IsEmpty()) {
    LOG(("Video device %d reallocated", mCaptureIndex));
  } else {
    LOG(("Video device %d allocated shared", mCaptureIndex));
  }
#endif

  return NS_OK;
}
// Release the capture device once no streams are using it.  If other
// sources still reference the device this is a no-op (success).  Requires
// the source to be kStopped or kAllocated; transitions it to kReleased.
// Platform quirks: B2G releases on MainThread via DeallocImpl and waits on
// mCallbackMonitor; Mac must release synchronously on MainThread (see the
// bug 829907 comment below).
nsresult
MediaEngineWebRTCVideoSource::Deallocate()
{
  LOG((__FUNCTION__));
  if (mSources.IsEmpty()) {
#ifdef MOZ_B2G_CAMERA
    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
    if (mState != kStopped && mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }
#ifdef MOZ_B2G_CAMERA
    // We do not register success callback here

    NS_DispatchToMainThread(WrapRunnable(this,
                                         &MediaEngineWebRTCVideoSource::DeallocImpl));
    mCallbackMonitor.Wait();
    if (mState != kReleased) {
      return NS_ERROR_FAILURE;
    }
#elif XP_MACOSX
    // Bug 829907 - on mac, in shutdown, the mainthread stops processing
    // 'native' events, and the QTKit code uses events to the main native CFRunLoop
    // in order to provide thread safety.  In order to avoid this locking us up,
    // release the ViE capture device synchronously on MainThread (so the native
    // event isn't needed).
    // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
    // XXX It might be nice to only do this if we're in shutdown...  Hard to be
    // sure when that is though.
    // Thread safety: a) we call this synchronously, and don't use ViECapture from
    // another thread anywhere else, b) ViEInputManager::DestroyCaptureDevice() grabs
    // an exclusive object lock and deletes it in a critical section, so all in all
    // this should be safe threadwise.
    NS_DispatchToMainThread(WrapRunnable(mViECapture,
                                         &webrtc::ViECapture::ReleaseCaptureDevice,
                                         mCaptureIndex),
                            NS_DISPATCH_SYNC);
#else
    mViECapture->ReleaseCaptureDevice(mCaptureIndex);
#endif
    mState = kReleased;
    LOG(("Video device %d deallocated", mCaptureIndex));
  } else {
    LOG(("Video device %d deallocated but still in use", mCaptureIndex));
  }
  return NS_OK;
}
// Begin delivering frames into aStream's track aID.  Registers the stream,
// creates the video track, and — for the first starter only — spins up the
// capture pipeline (renderer + capture on desktop; StartImpl on MainThread
// for B2G, blocking on mCallbackMonitor).  Later callers just piggyback on
// the running capture.
// NOTE(review): on the desktop path mState is set to kStarted before
// AddRenderer/StartRender/StartCapture succeed, so a failure here leaves
// mState == kStarted while returning NS_ERROR_FAILURE — confirm callers
// tolerate that.
nsresult
MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  LOG((__FUNCTION__));
#ifndef MOZ_B2G_CAMERA
  int error = 0;
#endif
  if (!mInitDone || !aStream) {
    return NS_ERROR_FAILURE;
  }

  mSources.AppendElement(aStream);

  aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
  aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);

#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif

  // Already capturing for an earlier stream: nothing more to set up.
  if (mState == kStarted) {
    return NS_OK;
  }
  mImageContainer = layers::LayerManager::CreateImageContainer();

#ifdef MOZ_B2G_CAMERA
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StartImpl,
                                       mCapability));
  mCallbackMonitor.Wait();
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }
#else
  mState = kStarted;
  error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }

  error = mViERender->StartRender(mCaptureIndex);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }

  if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
    return NS_ERROR_FAILURE;
  }
#endif

  return NS_OK;
}
// Stop delivering frames to aSource's track aID.  Removing an unknown
// stream is allowed (already stopped).  Capture is only torn down when the
// last stream goes away; the track is ended and the cached frame dropped
// under mMonitor before the engine-side shutdown.
nsresult
MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
  LOG((__FUNCTION__));
  if (!mSources.RemoveElement(aSource)) {
    // Already stopped - this is allowed
    return NS_OK;
  }
  if (!mSources.IsEmpty()) {
    // Other streams still consuming; keep capturing.
    return NS_OK;
  }
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }

  {
    MonitorAutoLock lock(mMonitor);
    mState = kStopped;
    aSource->EndTrack(aID);
    // Drop any cached image so we don't start with a stale image on next
    // usage
    mImage = nullptr;
  }
#ifdef MOZ_B2G_CAMERA
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StopImpl));
#else
  mViERender->StopRender(mCaptureIndex);
  mViERender->RemoveRenderer(mCaptureIndex);
  mViECapture->StopCapture(mCaptureIndex);
#endif

  return NS_OK;
}
// Still-image capture entry point; not implemented for this source, so
// callers always receive NS_ERROR_NOT_IMPLEMENTED (aDuration/aFile unused).
nsresult
MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
  return NS_ERROR_NOT_IMPLEMENTED;
}
   593 /**
   594  * Initialization and Shutdown functions for the video source, called by the
   595  * constructor and destructor respectively.
   596  */
// One-time initialization called from the constructor.  Looks up the
// device/unique names (via ICameraControl on B2G, via ViECapture
// elsewhere) and acquires the ViE interfaces.  Any failure returns early,
// leaving mInitDone false so Allocate()/Start() will refuse to run.
void
MediaEngineWebRTCVideoSource::Init()
{
#ifdef MOZ_B2G_CAMERA
  nsAutoCString deviceName;
  ICameraControl::GetCameraName(mCaptureIndex, deviceName);
  CopyUTF8toUTF16(deviceName, mDeviceName);
  CopyUTF8toUTF16(deviceName, mUniqueId);
#else
  // fix compile warning for these being unused. (remove once used)
  (void) mFps;
  (void) mMinFps;

  LOG((__FUNCTION__));
  if (mVideoEngine == nullptr) {
    return;
  }

  mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
  if (mViEBase == nullptr) {
    return;
  }

  // Get interfaces for capture, render for now
  mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
  mViERender = webrtc::ViERender::GetInterface(mVideoEngine);

  if (mViECapture == nullptr || mViERender == nullptr) {
    return;
  }

  const uint32_t KMaxDeviceNameLength = 128;
  const uint32_t KMaxUniqueIdLength = 256;
  char deviceName[KMaxDeviceNameLength];
  char uniqueId[KMaxUniqueIdLength];
  // GetCaptureDevice returns non-zero on failure.
  if (mViECapture->GetCaptureDevice(mCaptureIndex,
                                    deviceName, KMaxDeviceNameLength,
                                    uniqueId, KMaxUniqueIdLength)) {
    return;
  }

  CopyUTF8toUTF16(deviceName, mDeviceName);
  CopyUTF8toUTF16(uniqueId, mUniqueId);
#endif

  mInitDone = true;
}
// Counterpart to Init(), called from the destructor.  Stops any remaining
// streams, deallocates the device, releases the ViE interfaces (desktop
// only) and resets the source to kReleased/uninitialized.  No-op if Init()
// never completed.
void
MediaEngineWebRTCVideoSource::Shutdown()
{
  LOG((__FUNCTION__));
  if (!mInitDone) {
    return;
  }
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  if (mState == kStarted) {
    while (!mSources.IsEmpty()) {
      Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
    }
    MOZ_ASSERT(mState == kStopped);
  }

  if (mState == kAllocated || mState == kStopped) {
    Deallocate();
  }
#ifndef MOZ_B2G_CAMERA
  mViECapture->Release();
  mViERender->Release();
  mViEBase->Release();
#endif
  mState = kReleased;
  mInitDone = false;
}
   675 #ifdef MOZ_B2G_CAMERA
   677 // All these functions must be run on MainThread!
// All these functions must be run on MainThread!

// MainThread half of Allocate() on B2G: create the CameraControl, move to
// kAllocated on success, and wake the thread blocked in Allocate() either
// way.
void
MediaEngineWebRTCVideoSource::AllocImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  mCameraControl = ICameraControl::Create(mCaptureIndex);
  if (mCameraControl) {
    mState = kAllocated;
    // Add this as a listener for CameraControl events. We don't need
    // to explicitly remove this--destroying the CameraControl object
    // in DeallocImpl() will do that for us.
    mCameraControl->AddListener(this);
  }

  // Notify even on failure so Allocate() doesn't wait forever; it checks
  // mState to distinguish success from failure.
  mCallbackMonitor.Notify();
}
// MainThread half of Deallocate() on B2G: dropping the CameraControl
// reference releases the camera (and our listener registration with it).
void
MediaEngineWebRTCVideoSource::DeallocImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  mCameraControl = nullptr;
}
   701 // The same algorithm from bug 840244
   702 static int
   703 GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
   704   int screenAngle = 0;
   705   switch (aScreen) {
   706     case eScreenOrientation_PortraitPrimary:
   707       screenAngle = 0;
   708       break;
   709     case eScreenOrientation_PortraitSecondary:
   710       screenAngle = 180;
   711       break;
   712    case eScreenOrientation_LandscapePrimary:
   713       screenAngle = 90;
   714       break;
   715    case eScreenOrientation_LandscapeSecondary:
   716       screenAngle = 270;
   717       break;
   718    default:
   719       MOZ_ASSERT(false);
   720       break;
   721   }
   723   int result;
   725   if (aBackCamera) {
   726     //back camera
   727     result = (aCameraMountAngle - screenAngle + 360) % 360;
   728   } else {
   729     //front camera
   730     result = (aCameraMountAngle + screenAngle) % 360;
   731   }
   732   return result;
   733 }
   735 // undefine to remove on-the-fly rotation support
   736 // #define DYNAMIC_GUM_ROTATION
// hal screen-configuration observer callback.  When on-the-fly rotation is
// compiled in (DYNAMIC_GUM_ROTATION), recompute mRotation under mMonitor
// for the new orientation; otherwise this is a no-op.
void
MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
#ifdef DYNAMIC_GUM_ROTATION
  MonitorAutoLock enter(mMonitor);
  mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);

  LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
       mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
#endif
}
// MainThread half of Start() on B2G: configure and start the camera in
// picture mode with the chosen preview size, and register for screen
// orientation changes.  The kStarted transition happens later in
// OnHardwareStateChange(), which notifies the waiting Start().
void
MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
  MOZ_ASSERT(NS_IsMainThread());

  ICameraControl::Configuration config;
  config.mMode = ICameraControl::kPictureMode;
  config.mPreviewSize.width = aCapability.width;
  config.mPreviewSize.height = aCapability.height;
  mCameraControl->Start(&config);
  mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);

  hal::RegisterScreenConfigurationObserver(this);
}
// MainThread half of Stop() on B2G: unhook the orientation observer and
// stop the camera hardware.
void
MediaEngineWebRTCVideoSource::StopImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  hal::UnregisterScreenConfigurationObserver(this);
  mCameraControl->Stop();
}
// MainThread helper: trigger an asynchronous still capture; the result
// arrives in OnTakePictureComplete().
void
MediaEngineWebRTCVideoSource::SnapshotImpl() {
  MOZ_ASSERT(NS_IsMainThread());
  mCameraControl->TakePicture();
}
// CameraControl listener: camera hardware opened or closed.  On close
// (after we've left kAllocated) move to kReleased; on open, read the
// sensor mount angle and camera facing, compute the initial rotation, and
// move to kStarted.  Either transition wakes whoever is blocked on
// mCallbackMonitor (Start()/Deallocate()).
void
MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
{
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  if (aState == CameraControlListener::kHardwareClosed) {
    // When the first CameraControl listener is added, it gets pushed
    // the current state of the camera--normally 'closed'. We only
    // pay attention to that state if we've progressed out of the
    // allocated state.
    if (mState != kAllocated) {
      mState = kReleased;
      mCallbackMonitor.Notify();
    }
  } else {
    mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
    MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
               mCameraAngle == 270);
    hal::ScreenConfiguration aConfig;
    hal::GetCurrentScreenConfiguration(&aConfig);

    nsCString deviceName;
    ICameraControl::GetCameraName(mCaptureIndex, deviceName);
    if (deviceName.EqualsASCII("back")) {
      mBackCamera = true;
    }

    mRotation = GetRotateAmount(aConfig.orientation(), mCameraAngle, mBackCamera);
    LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
         mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
    mState = kStarted;
    mCallbackMonitor.Notify();
  }
}
// CameraControl error callback.  Just wakes any thread blocked on
// mCallbackMonitor (e.g. Allocate()/Start(), which then inspect mState);
// the error details themselves are not recorded here.
void
MediaEngineWebRTCVideoSource::OnError(CameraErrorContext aContext, CameraError aError)
{
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  mCallbackMonitor.Notify();
}
// CameraControl callback with the encoded still image.  Wraps the bytes in
// a DOM memory file, stashes it in mLastCapture, and wakes the waiter.
// NOTE(review): nsDOMMemoryFile appears to take ownership of aData here —
// confirm the caller does not also free the buffer.
void
MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
{
  mLastCapture =
    static_cast<nsIDOMFile*>(new nsDOMMemoryFile(static_cast<void*>(aData),
                                                 static_cast<uint64_t>(aLength),
                                                 aMimeType));
  mCallbackMonitor.Notify();
}
// Convert an incoming gralloc-backed NV21 preview frame into an I420
// PlanarYCbCr image, rotating by mRotation in the process, and publish it
// as mImage.  For 90/270 rotation the destination width/height swap.
// NOTE(review): the libyuv call's return value and dstPtr from
// AllocateAndGetNewBuffer() are unchecked, and graphicBuffer->unlock() is
// only reached on the straight-line path — presumably these can't fail
// here, but confirm.
void
MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
  layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
  android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
  void *pMem = nullptr;
  // NV21 (like I420) occupies width*height*3/2 bytes.
  uint32_t size = aWidth * aHeight * 3 / 2;

  graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);

  uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
  // Create a video frame and append it to the track.
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  uint32_t dstWidth;
  uint32_t dstHeight;

  // Quarter-turn rotations transpose the frame dimensions.
  if (mRotation == 90 || mRotation == 270) {
    dstWidth = aHeight;
    dstHeight = aWidth;
  } else {
    dstWidth = aWidth;
    dstHeight = aHeight;
  }

  uint32_t half_width = dstWidth / 2;
  uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
  // Destination plane pointers follow I420 layout: Y, then U at
  // dstWidth*dstHeight, then V another quarter-frame later.
  libyuv::ConvertToI420(srcPtr, size,
                        dstPtr, dstWidth,
                        dstPtr + (dstWidth * dstHeight), half_width,
                        dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
                        0, 0,
                        aWidth, aHeight,
                        aWidth, aHeight,
                        static_cast<libyuv::RotationMode>(mRotation),
                        libyuv::FOURCC_NV21);
  graphicBuffer->unlock();

  const uint8_t lumaBpp = 8;
  const uint8_t chromaBpp = 4;

  // Describe the freshly converted I420 planes for the image wrapper.
  layers::PlanarYCbCrData data;
  data.mYChannel = dstPtr;
  data.mYSize = IntSize(dstWidth, dstHeight);
  data.mYStride = dstWidth * lumaBpp / 8;
  data.mCbCrStride = dstWidth * chromaBpp / 8;
  data.mCbChannel = dstPtr + dstHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel +( dstHeight * data.mCbCrStride / 2);
  data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = IntSize(dstWidth, dstHeight);
  data.mStereoMode = StereoMode::MONO;

  // SetDataNoCopy: the image adopts the buffer we already wrote into.
  videoImage->SetDataNoCopy(data);

  // implicitly releases last image
  mImage = image.forget();
}
// CameraControl preview-frame callback.  Rejects frames once stopped;
// otherwise rotates/converts the frame into mImage under mMonitor and
// updates the cached dimensions (swapped for 90/270 rotation).  Returns
// true to indicate the frame was consumed.
bool
MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
  {
    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
    if (mState == kStopped) {
      return false;
    }
  }

  MonitorAutoLock enter(mMonitor);
  // Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
  RotateImage(aImage, aWidth, aHeight);
  if (mRotation != 0 && mRotation != 180) {
    uint32_t temp = aWidth;
    aWidth = aHeight;
    aHeight = temp;
  }
  if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
    mWidth = aWidth;
    mHeight = aHeight;
    LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
  }

  return true; // return true because we're accepting the frame
}
   913 #endif
   915 }

mercurial