/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "MediaEngineWebRTC.h"
#include "Layers.h"
#include "ImageTypes.h"
#include "ImageContainer.h"
#include "mozilla/layers/GrallocTextureClient.h"
#include "nsMemory.h"
#include "mtransport/runnable_utils.h"
#include "MediaTrackConstraints.h"

#ifdef MOZ_B2G_CAMERA
#include "GrallocImages.h"
#include "libyuv.h"
#include "mozilla/Hal.h"
#include "ScreenOrientation.h"
using namespace mozilla::dom;
#endif

namespace mozilla {

using namespace mozilla::gfx;
using dom::ConstrainLongRange;
using dom::ConstrainDoubleRange;
using dom::MediaTrackConstraintSet;

#ifdef PR_LOGGING
extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
#define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
#else
#define LOG(msg)
#define LOGFRAME(msg)
#endif

/**
 * Webrtc video source.
 */
#ifndef MOZ_B2G_CAMERA
NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
#else
NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
#endif

// ViEExternalRenderer Callback.
#ifndef MOZ_B2G_CAMERA
int
MediaEngineWebRTCVideoSource::FrameSizeChange(
   unsigned int w, unsigned int h, unsigned int streams)
{
  mWidth = w;
  mHeight = h;
  LOG(("Video FrameSizeChange: %ux%u", w, h));
  return 0;
}

// ViEExternalRenderer Callback. Process every incoming frame here.
int
MediaEngineWebRTCVideoSource::DeliverFrame(
   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
   void *handle)
{
  // mInSnapshotMode can only be set before the camera is turned on and
  // the renderer is started, so this amounts to a 1-shot
  if (mInSnapshotMode) {
    // Set the condition variable to false and notify Snapshot().
    MonitorAutoLock lock(mMonitor);
    mInSnapshotMode = false;
    lock.Notify();
    return 0;
  }

  // Check for proper state.
  if (mState != kStarted) {
    LOG(("DeliverFrame: video not started"));
    return 0;
  }

  MOZ_ASSERT(mWidth*mHeight*3/2 == size);
  if (mWidth*mHeight*3/2 != size) {
    return 0;
  }

  // Create a video frame and append it to the track.
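  // The buffer is I420 (we register for webrtc::kVideoI420 in Start()): a
  // full-resolution Y plane at 8 bits per pixel, then quarter-resolution Cb
  // and Cr planes at 4 bits per pixel each, which is where the
  // width*height*3/2 size check above comes from. E.g. a 640x480 frame is
  // 307200 Y bytes + 76800 Cb + 76800 Cr = 460800 bytes, putting Cb at
  // offset 307200 and Cr at offset 384000, matching the stride math below.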
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);

  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  uint8_t* frame = static_cast<uint8_t*>(buffer);
  const uint8_t lumaBpp = 8;
  const uint8_t chromaBpp = 4;

  layers::PlanarYCbCrData data;
  data.mYChannel = frame;
  data.mYSize = IntSize(mWidth, mHeight);
  data.mYStride = mWidth * lumaBpp / 8;
  data.mCbCrStride = mWidth * chromaBpp / 8;
  data.mCbChannel = frame + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = IntSize(mWidth / 2, mHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = IntSize(mWidth, mHeight);
  data.mStereoMode = StereoMode::MONO;

  videoImage->SetData(data);

#ifdef DEBUG
  static uint32_t frame_num = 0;
  LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
            mWidth, mHeight, time_stamp, render_time));
#endif

  // we don't touch anything in 'this' until here (except for snapshot,
  // which has its own lock)
  MonitorAutoLock lock(mMonitor);

  // implicitly releases last image
  mImage = image.forget();

  return 0;
}
#endif

// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it thinks it needs. Note that
// this means that no *real* frame can be inserted during this period.
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream *aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks &aLastEndTime)
{
  VideoSegment segment;

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  // Note: we're not giving up mImage here
  nsRefPtr<layers::Image> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
            (int64_t) target, (int64_t) delta, image ? "" : "<null>"));

  // Bug 846188 We may want to limit incoming frames to the requested frame rate
  // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
  // cause issues.
  // We may want to signal if the actual frame rate is below mMinFPS -
  // cameras often don't return the requested frame rate especially in low
  // light; we should consider surfacing this so that we can switch to a
  // lower resolution (which may up the frame rate)

  // Don't append if we've already provided a frame that supposedly goes past
  // the current aDesiredTime. Doing so means a negative delta and thus messes
  // up handling of the graph.
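  // Worked example: if this pull wants media up to tick 100 (target) and we
  // last appended up to tick 90 (aLastEndTime), delta is 10 and we repeat the
  // cached image for those 10 ticks; if we had already appended up to 105,
  // delta would be -5 and we must append nothing.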
  if (delta > 0) {
    // nullptr images are allowed
    IntSize size(image ? mWidth : 0, image ? mHeight : 0);
    segment.AppendFrame(image.forget(), delta, size);
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &(segment))) {
      aLastEndTime = target;
    }
  }
}

static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
  return aRange.mMin <= n && n <= aRange.mMax;
}

static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
  return aRange.mMin <= n && n <= aRange.mMax;
}

static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
  return std::max(aRange.mMin, std::min(n, aRange.mMax));
}

static bool
AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
  return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
}

static bool
Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
  MOZ_ASSERT(AreIntersecting(aA, aB));
  aA.mMin = std::max(aA.mMin, aB.mMin);
  aA.mMax = std::min(aA.mMax, aB.mMax);
  return true;
}

static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
                                 const webrtc::CaptureCapability& aCandidate) {
  if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
      !IsWithin(aCandidate.height, aConstraints.mHeight)) {
    return false;
  }
  if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
    return false;
  }
  return true;
}

void
MediaEngineWebRTCVideoSource::ChooseCapability(
  const VideoTrackConstraintsN &aConstraints,
  const MediaEnginePrefs &aPrefs)
{
#ifdef MOZ_B2G_CAMERA
  return GuessCapability(aConstraints, aPrefs);
#else
  NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
  int num = mViECapture->NumberOfCapabilities(uniqueId.get(), KMaxUniqueIdLength);
  if (num <= 0) {
    // Mac doesn't support capabilities.
    return GuessCapability(aConstraints, aPrefs);
  }

  // The rest is the full algorithm for cameras that can list their capabilities.

  LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));

  // A set of capability indices (element type reconstructed; the indices
  // returned by NumberOfCapabilities() are ints).
  typedef nsTArray<int> SourceSet;

  SourceSet candidateSet;
  for (int i = 0; i < num; i++) {
    candidateSet.AppendElement(i);
  }

  // Pick among capabilities: First apply required constraints.

  for (uint32_t i = 0; i < candidateSet.Length();) {
    webrtc::CaptureCapability cap;
    mViECapture->GetCaptureCapability(uniqueId.get(), KMaxUniqueIdLength,
                                      candidateSet[i], cap);
    if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
      candidateSet.RemoveElementAt(i);
    } else {
      ++i;
    }
  }

  SourceSet tailSet;

  // Then apply advanced (formerly known as optional) constraints.
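  // Each advanced constraint set either narrows candidateSet (if at least one
  // candidate satisfies it) or is effectively skipped: when a set would reject
  // every remaining candidate, its rejects are moved straight back into
  // candidateSet; otherwise the rejects are parked in tailSet. For example,
  // with candidates {640x480, 1280x720} and advanced sets [{width >= 1280},
  // {height <= 480}], the first set keeps only 1280x720, and the second then
  // matches nothing, so it is ignored rather than emptying the set.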
  if (aConstraints.mAdvanced.WasPassed()) {
    auto &array = aConstraints.mAdvanced.Value();

    for (uint32_t i = 0; i < array.Length(); i++) {
      SourceSet rejects;
      for (uint32_t j = 0; j < candidateSet.Length();) {
        webrtc::CaptureCapability cap;
        mViECapture->GetCaptureCapability(uniqueId.get(), KMaxUniqueIdLength,
                                          candidateSet[j], cap);
        if (!SatisfyConstraintSet(array[i], cap)) {
          rejects.AppendElement(candidateSet[j]);
          candidateSet.RemoveElementAt(j);
        } else {
          ++j;
        }
      }
      (candidateSet.Length() ? tailSet : candidateSet).MoveElementsFrom(rejects);
    }
  }

  if (!candidateSet.Length()) {
    candidateSet.AppendElement(0);
  }

  int prefWidth = aPrefs.GetWidth();
  int prefHeight = aPrefs.GetHeight();

  // Default is closest to available capability but equal to or below;
  // otherwise closest above. Since we handle the num=0 case above and
  // take the first entry always, we can never exit uninitialized.

  webrtc::CaptureCapability cap;
  bool higher = true;
  for (uint32_t i = 0; i < candidateSet.Length(); i++) {
    mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
                                      KMaxUniqueIdLength, candidateSet[i], cap);
    if (higher) {
      if (i == 0 ||
          (mCapability.width > cap.width && mCapability.height > cap.height)) {
        // closer than the current choice
        mCapability = cap;
        // FIXME: expose expected capture delay?
      }
      if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
        higher = false;
      }
    } else {
      if (cap.width > (uint32_t) prefWidth || cap.height > (uint32_t) prefHeight ||
          cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
        continue;
      }
      if (mCapability.width < cap.width && mCapability.height < cap.height) {
        mCapability = cap;
        // FIXME: expose expected capture delay?
      }
    }
  }
  LOG(("chose cap %dx%d @%dfps",
       mCapability.width, mCapability.height, mCapability.maxFPS));
#endif
}

// A special version of the algorithm for cameras that don't list capabilities.

void
MediaEngineWebRTCVideoSource::GuessCapability(
  const VideoTrackConstraintsN &aConstraints,
  const MediaEnginePrefs &aPrefs)
{
  LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));

  // In short: compound constraint-ranges and use pref as ideal.
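  // E.g. if the required width range is [640, 1280] and an advanced set asks
  // for width [800, 1920], the compounded width range becomes [800, 1280],
  // while an advanced set that doesn't intersect it (say [1400, 1920]) is
  // skipped. The pref is then treated as the ideal value and clamped into the
  // compounded ranges below.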
  ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
  ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);

  if (aConstraints.mAdvanced.WasPassed()) {
    const auto& advanced = aConstraints.mAdvanced.Value();
    for (uint32_t i = 0; i < advanced.Length(); i++) {
      if (AreIntersecting(cWidth, advanced[i].mWidth) &&
          AreIntersecting(cHeight, advanced[i].mHeight)) {
        Intersect(cWidth, advanced[i].mWidth);
        Intersect(cHeight, advanced[i].mHeight);
      }
    }
  }

  // Detect Mac HD cams and give them some love in the form of a dynamic default
  // since that hardware switches between 4:3 at low res and 16:9 at higher res.
  //
  // Logic is: if we're relying on defaults in aPrefs, then
  // only use HD pref when non-HD pref is too small and HD pref isn't too big.

  bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
                mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
                (aPrefs.GetWidth() < cWidth.mMin ||
                 aPrefs.GetHeight() < cHeight.mMin) &&
                !(aPrefs.GetWidth(true) > cWidth.mMax ||
                  aPrefs.GetHeight(true) > cHeight.mMax));
  int prefWidth = aPrefs.GetWidth(macHD);
  int prefHeight = aPrefs.GetHeight(macHD);

  // Clamp width and height without distorting inherent aspect too much.

  if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
    // If both are within, we get the default (pref) aspect.
    // If neither is within, we get the aspect of the enclosing constraint.
    // Either is presumably reasonable (presuming constraints are sane).
    mCapability.width = Clamp(prefWidth, cWidth);
    mCapability.height = Clamp(prefHeight, cHeight);
  } else {
    // But if only one clips (e.g. width), the resulting skew is undesirable:
    //       .------------.
    //       | constraint |
    //  .----+------------+----.
    //  |    |            |    |
    //  |pref|   result   |    |    prefAspect != resultAspect
    //  |    |            |    |
    //  '----+------------+----'
    //       '------------'
    // So in this case, preserve prefAspect instead:
    //  .------------.
    //  | constraint |
    //  .------------.
    //  |pref        |    prefAspect is unchanged
    //  '------------'
    //  |            |
    //  '------------'
    if (IsWithin(prefWidth, cWidth)) {
      mCapability.height = Clamp(prefHeight, cHeight);
      mCapability.width = Clamp((mCapability.height * prefWidth) /
                                prefHeight, cWidth);
    } else {
      mCapability.width = Clamp(prefWidth, cWidth);
      mCapability.height = Clamp((mCapability.width * prefHeight) /
                                 prefWidth, cHeight);
    }
  }
  mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
  LOG(("chose cap %dx%d @%dfps",
       mCapability.width, mCapability.height, mCapability.maxFPS));
}

void
MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
{
  aName = mDeviceName;
}

void
MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
{
  aUUID = mUniqueId;
}

nsresult
MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
                                       const MediaEnginePrefs &aPrefs)
{
  LOG((__FUNCTION__));
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  if (mState == kReleased && mInitDone) {
    ChooseCapability(aConstraints, aPrefs);
    NS_DispatchToMainThread(WrapRunnable(this,
                                         &MediaEngineWebRTCVideoSource::AllocImpl));
    mCallbackMonitor.Wait();
    if (mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }
  }
#else
  if (mState == kReleased && mInitDone) {
    // Note: if shared, we don't allow a later opener to affect the resolution.
    // (This may change depending on spec changes for Constraints/settings)

    ChooseCapability(aConstraints, aPrefs);

    if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
                                           KMaxUniqueIdLength, mCaptureIndex)) {
      return NS_ERROR_FAILURE;
    }
    mState = kAllocated;
    LOG(("Video device %d allocated", mCaptureIndex));
  } else if (mSources.IsEmpty()) {
    LOG(("Video device %d reallocated", mCaptureIndex));
  } else {
    LOG(("Video device %d allocated shared", mCaptureIndex));
  }
#endif

  return NS_OK;
}

nsresult
MediaEngineWebRTCVideoSource::Deallocate()
{
  LOG((__FUNCTION__));
  if (mSources.IsEmpty()) {
#ifdef MOZ_B2G_CAMERA
    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
    if (mState != kStopped && mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }
#ifdef MOZ_B2G_CAMERA
    // We do not register success callback here

    NS_DispatchToMainThread(WrapRunnable(this,
                                         &MediaEngineWebRTCVideoSource::DeallocImpl));
    mCallbackMonitor.Wait();
    if (mState != kReleased) {
      return NS_ERROR_FAILURE;
    }
#elif XP_MACOSX
    // Bug 829907 - on mac, in shutdown, the mainthread stops processing
    // 'native' events, and the QTKit code uses events to the main native
    // CFRunLoop in order to provide thread safety.
    // In order to avoid this locking us up, release the ViE capture device
    // synchronously on MainThread (so the native event isn't needed).
    // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
    // XXX It might be nice to only do this if we're in shutdown... Hard to be
    // sure when that is though.
    // Thread safety: a) we call this synchronously, and don't use ViECapture from
    // another thread anywhere else, b) ViEInputManager::DestroyCaptureDevice() grabs
    // an exclusive object lock and deletes it in a critical section, so all in all
    // this should be safe threadwise.
    NS_DispatchToMainThread(WrapRunnable(mViECapture,
                                         &webrtc::ViECapture::ReleaseCaptureDevice,
                                         mCaptureIndex),
                            NS_DISPATCH_SYNC);
#else
    mViECapture->ReleaseCaptureDevice(mCaptureIndex);
#endif
    mState = kReleased;
    LOG(("Video device %d deallocated", mCaptureIndex));
  } else {
    LOG(("Video device %d deallocated but still in use", mCaptureIndex));
  }
  return NS_OK;
}

nsresult
MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  LOG((__FUNCTION__));
#ifndef MOZ_B2G_CAMERA
  int error = 0;
#endif
  if (!mInitDone || !aStream) {
    return NS_ERROR_FAILURE;
  }

  mSources.AppendElement(aStream);

  aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
  aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);

#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif

  if (mState == kStarted) {
    return NS_OK;
  }
  mImageContainer = layers::LayerManager::CreateImageContainer();

#ifdef MOZ_B2G_CAMERA
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StartImpl,
                                       mCapability));
  mCallbackMonitor.Wait();
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }
#else
  mState = kStarted;
  error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }

  error = mViERender->StartRender(mCaptureIndex);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }

  if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
    return NS_ERROR_FAILURE;
  }
#endif

  return NS_OK;
}

nsresult
MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
  LOG((__FUNCTION__));
  if (!mSources.RemoveElement(aSource)) {
    // Already stopped - this is allowed
    return NS_OK;
  }
  if (!mSources.IsEmpty()) {
    return NS_OK;
  }
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }

  {
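    // Hold mMonitor while flipping state and dropping the cached image, so
    // NotifyPull() and the frame-delivery path (which take the same monitor)
    // never observe a half-stopped source.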
    MonitorAutoLock lock(mMonitor);
    mState = kStopped;
    aSource->EndTrack(aID);
    // Drop any cached image so we don't start with a stale image on next
    // usage
    mImage = nullptr;
  }
#ifdef MOZ_B2G_CAMERA
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StopImpl));
#else
  mViERender->StopRender(mCaptureIndex);
  mViERender->RemoveRenderer(mCaptureIndex);
  mViECapture->StopCapture(mCaptureIndex);
#endif

  return NS_OK;
}

nsresult
MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
  return NS_ERROR_NOT_IMPLEMENTED;
}

/**
 * Initialization and Shutdown functions for the video source, called by the
 * constructor and destructor respectively.
 */

void
MediaEngineWebRTCVideoSource::Init()
{
#ifdef MOZ_B2G_CAMERA
  nsAutoCString deviceName;
  ICameraControl::GetCameraName(mCaptureIndex, deviceName);
  CopyUTF8toUTF16(deviceName, mDeviceName);
  CopyUTF8toUTF16(deviceName, mUniqueId);
#else
  // fix compile warning for these being unused. (remove once used)
  (void) mFps;
  (void) mMinFps;

  LOG((__FUNCTION__));
  if (mVideoEngine == nullptr) {
    return;
  }

  mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
  if (mViEBase == nullptr) {
    return;
  }

  // Get interfaces for capture, render for now
  mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
  mViERender = webrtc::ViERender::GetInterface(mVideoEngine);

  if (mViECapture == nullptr || mViERender == nullptr) {
    return;
  }

  const uint32_t KMaxDeviceNameLength = 128;
  const uint32_t KMaxUniqueIdLength = 256;
  char deviceName[KMaxDeviceNameLength];
  char uniqueId[KMaxUniqueIdLength];
  if (mViECapture->GetCaptureDevice(mCaptureIndex,
                                    deviceName, KMaxDeviceNameLength,
                                    uniqueId, KMaxUniqueIdLength)) {
    return;
  }

  CopyUTF8toUTF16(deviceName, mDeviceName);
  CopyUTF8toUTF16(uniqueId, mUniqueId);
#endif

  mInitDone = true;
}

void
MediaEngineWebRTCVideoSource::Shutdown()
{
  LOG((__FUNCTION__));
  if (!mInitDone) {
    return;
  }
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  if (mState == kStarted) {
    while (!mSources.IsEmpty()) {
      Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
    }
    MOZ_ASSERT(mState == kStopped);
  }

  if (mState == kAllocated || mState == kStopped) {
    Deallocate();
  }
#ifndef MOZ_B2G_CAMERA
  mViECapture->Release();
  mViERender->Release();
  mViEBase->Release();
#endif
  mState = kReleased;
  mInitDone = false;
}
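
// Lifecycle sketch (informational; mirrors the state checks above): a source
// normally moves kReleased -> kAllocated (Allocate) -> kStarted (Start) ->
// kStopped (Stop, once the last stream is removed) -> kReleased (Deallocate),
// and Shutdown() drives whatever remains of that sequence. A typical caller
// (names illustrative) does:
//
//   source->Allocate(constraints, prefs);  // kReleased  -> kAllocated
//   source->Start(stream, kVideoTrack);    // kAllocated -> kStarted
//   source->Stop(stream, kVideoTrack);     // kStarted   -> kStopped
//   source->Deallocate();                  // kStopped   -> kReleased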

#ifdef MOZ_B2G_CAMERA

// All these functions must be run on MainThread!
void
MediaEngineWebRTCVideoSource::AllocImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  mCameraControl = ICameraControl::Create(mCaptureIndex);
  if (mCameraControl) {
    mState = kAllocated;
    // Add this as a listener for CameraControl events. We don't need
    // to explicitly remove this--destroying the CameraControl object
    // in DeallocImpl() will do that for us.
    mCameraControl->AddListener(this);
  }

  mCallbackMonitor.Notify();
}

void
MediaEngineWebRTCVideoSource::DeallocImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  mCameraControl = nullptr;
}

// The same algorithm from bug 840244
static int
GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
  int screenAngle = 0;
  switch (aScreen) {
    case eScreenOrientation_PortraitPrimary:
      screenAngle = 0;
      break;
    case eScreenOrientation_PortraitSecondary:
      screenAngle = 180;
      break;
    case eScreenOrientation_LandscapePrimary:
      screenAngle = 90;
      break;
    case eScreenOrientation_LandscapeSecondary:
      screenAngle = 270;
      break;
    default:
      MOZ_ASSERT(false);
      break;
  }

  int result;

  if (aBackCamera) {
    // back camera
    result = (aCameraMountAngle - screenAngle + 360) % 360;
  } else {
    // front camera
    result = (aCameraMountAngle + screenAngle) % 360;
  }
  return result;
}

// define to enable on-the-fly rotation support
// #define DYNAMIC_GUM_ROTATION

void
MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
#ifdef DYNAMIC_GUM_ROTATION
  MonitorAutoLock enter(mMonitor);
  mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);

  LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
       mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
#endif
}

void
MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
  MOZ_ASSERT(NS_IsMainThread());

  ICameraControl::Configuration config;
  config.mMode = ICameraControl::kPictureMode;
  config.mPreviewSize.width = aCapability.width;
  config.mPreviewSize.height = aCapability.height;
  mCameraControl->Start(&config);
  mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);

  hal::RegisterScreenConfigurationObserver(this);
}

void
MediaEngineWebRTCVideoSource::StopImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  hal::UnregisterScreenConfigurationObserver(this);
  mCameraControl->Stop();
}

void
MediaEngineWebRTCVideoSource::SnapshotImpl() {
  MOZ_ASSERT(NS_IsMainThread());
  mCameraControl->TakePicture();
}
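
// Worked example of the GetRotateAmount() math used below (values for
// illustration only): a back camera mounted at 90 degrees with the screen in
// landscape-primary (screenAngle 90) needs (90 - 90 + 360) % 360 = 0 degrees
// of rotation, while a front camera with the same mount angle needs
// (90 + 90) % 360 = 180, since the front sensor is mirrored relative to the
// screen.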
void
MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
{
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  if (aState == CameraControlListener::kHardwareClosed) {
    // When the first CameraControl listener is added, it gets pushed
    // the current state of the camera--normally 'closed'. We only
    // pay attention to that state if we've progressed out of the
    // allocated state.
    if (mState != kAllocated) {
      mState = kReleased;
      mCallbackMonitor.Notify();
    }
  } else {
    mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
    MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
               mCameraAngle == 270);
    hal::ScreenConfiguration aConfig;
    hal::GetCurrentScreenConfiguration(&aConfig);

    nsCString deviceName;
    ICameraControl::GetCameraName(mCaptureIndex, deviceName);
    if (deviceName.EqualsASCII("back")) {
      mBackCamera = true;
    }

    mRotation = GetRotateAmount(aConfig.orientation(), mCameraAngle, mBackCamera);
    LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
         mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
    mState = kStarted;
    mCallbackMonitor.Notify();
  }
}

void
MediaEngineWebRTCVideoSource::OnError(CameraErrorContext aContext, CameraError aError)
{
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  mCallbackMonitor.Notify();
}

void
MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
{
  mLastCapture =
    static_cast<nsIDOMFile*>(new nsDOMMemoryFile(static_cast<void*>(aData),
                                                 static_cast<uint64_t>(aLength),
                                                 aMimeType));
  mCallbackMonitor.Notify();
}

void
MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
  layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
  android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
  void *pMem = nullptr;
  uint32_t size = aWidth * aHeight * 3 / 2;

  graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);

  uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
  // Create a video frame and append it to the track.
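  // Note the destination stays I420, so the total byte count is unchanged by
  // rotation (aWidth * aHeight * 3 / 2); only the plane dimensions swap for
  // the 90- and 270-degree rotations below.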
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  uint32_t dstWidth;
  uint32_t dstHeight;

  if (mRotation == 90 || mRotation == 270) {
    dstWidth = aHeight;
    dstHeight = aWidth;
  } else {
    dstWidth = aWidth;
    dstHeight = aHeight;
  }

  uint32_t half_width = dstWidth / 2;
  uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
  libyuv::ConvertToI420(srcPtr, size,
                        dstPtr, dstWidth,
                        dstPtr + (dstWidth * dstHeight), half_width,
                        dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
                        0, 0,
                        aWidth, aHeight,
                        aWidth, aHeight,
                        static_cast<libyuv::RotationMode>(mRotation),
                        libyuv::FOURCC_NV21);
  graphicBuffer->unlock();

  const uint8_t lumaBpp = 8;
  const uint8_t chromaBpp = 4;

  layers::PlanarYCbCrData data;
  data.mYChannel = dstPtr;
  data.mYSize = IntSize(dstWidth, dstHeight);
  data.mYStride = dstWidth * lumaBpp / 8;
  data.mCbCrStride = dstWidth * chromaBpp / 8;
  data.mCbChannel = dstPtr + dstHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + (dstHeight * data.mCbCrStride / 2);
  data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = IntSize(dstWidth, dstHeight);
  data.mStereoMode = StereoMode::MONO;

  videoImage->SetDataNoCopy(data);

  // implicitly releases last image
  mImage = image.forget();
}

bool
MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
  {
    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
    if (mState == kStopped) {
      return false;
    }
  }

  MonitorAutoLock enter(mMonitor);
  // Bug XXX we'd prefer to avoid converting if mRotation == 0, but that
  // causes problems in UpdateImage()
  RotateImage(aImage, aWidth, aHeight);
  if (mRotation != 0 && mRotation != 180) {
    uint32_t temp = aWidth;
    aWidth = aHeight;
    aHeight = temp;
  }
  if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
    mWidth = aWidth;
    mHeight = aHeight;
    LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
  }

  return true; // return true because we're accepting the frame
}
#endif

}