Fri, 16 Jan 2015 04:50:19 +0100
Replace accessor implementation with direct member state manipulation, as
requested in https://trac.torproject.org/projects/tor/ticket/9701#comment:32
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "MediaEngineWebRTC.h"
#include "Layers.h"
#include "ImageTypes.h"
#include "ImageContainer.h"
#include "mozilla/layers/GrallocTextureClient.h"
#include "nsMemory.h"
#include "mtransport/runnable_utils.h"
#include "MediaTrackConstraints.h"

#ifdef MOZ_B2G_CAMERA
#include "GrallocImages.h"
#include "libyuv.h"
#include "mozilla/Hal.h"
#include "ScreenOrientation.h"
using namespace mozilla::dom;
#endif

namespace mozilla {

using namespace mozilla::gfx;
using dom::ConstrainLongRange;
using dom::ConstrainDoubleRange;
using dom::MediaTrackConstraintSet;

#ifdef PR_LOGGING
extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
#define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
#else
#define LOG(msg)
#define LOGFRAME(msg)
#endif

/**
 * Webrtc video source.
 */
#ifndef MOZ_B2G_CAMERA
NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
#else
NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
#endif

// ViEExternalRenderer Callback.
#ifndef MOZ_B2G_CAMERA
int
MediaEngineWebRTCVideoSource::FrameSizeChange(
    unsigned int w, unsigned int h, unsigned int streams)
{
  mWidth = w;
  mHeight = h;
  LOG(("Video FrameSizeChange: %ux%u", w, h));
  return 0;
}

// ViEExternalRenderer Callback. Process every incoming frame here.
int
MediaEngineWebRTCVideoSource::DeliverFrame(
    unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
    void *handle)
{
  // mInSnapshotMode can only be set before the camera is turned on and
  // the renderer is started, so this amounts to a one-shot.
  if (mInSnapshotMode) {
    // Set the condition variable to false and notify Snapshot().
    MonitorAutoLock lock(mMonitor);
    mInSnapshotMode = false;
    lock.Notify();
    return 0;
  }

  // Check for proper state.
  if (mState != kStarted) {
    LOG(("DeliverFrame: video not started"));
    return 0;
  }

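  // An I420 frame is a full-resolution Y plane followed by quarter-resolution
  // Cb and Cr planes, so a valid frame occupies width * height * 3/2 bytes
  // (e.g. 640 * 480 * 3/2 = 460800 bytes for a 640x480 frame).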
  MOZ_ASSERT(mWidth * mHeight * 3 / 2 == size);
  if (mWidth * mHeight * 3 / 2 != size) {
    return 0;
  }

  // Create a video frame and append it to the track.
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);

  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  uint8_t* frame = static_cast<uint8_t*>(buffer);
  const uint8_t lumaBpp = 8;
  const uint8_t chromaBpp = 4;

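  // Describe the I420 layout to the compositor: luma is 8 bits per pixel, and
  // 4:2:0 chroma averages out to 4 bits per pixel, so each chroma stride is
  // half the luma stride. Cb starts right after the Y plane, and Cr after the
  // height/2 rows of Cb.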
  layers::PlanarYCbCrData data;
  data.mYChannel = frame;
  data.mYSize = IntSize(mWidth, mHeight);
  data.mYStride = mWidth * lumaBpp / 8;
  data.mCbCrStride = mWidth * chromaBpp / 8;
  data.mCbChannel = frame + mHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + mHeight * data.mCbCrStride / 2;
  data.mCbCrSize = IntSize(mWidth / 2, mHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = IntSize(mWidth, mHeight);
  data.mStereoMode = StereoMode::MONO;

  videoImage->SetData(data);

#ifdef DEBUG
  static uint32_t frame_num = 0;
  LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
            mWidth, mHeight, time_stamp, render_time));
#endif

  // We don't touch anything in 'this' until here (except for snapshot,
  // which has its own lock).
  MonitorAutoLock lock(mMonitor);

  // implicitly releases last image
  mImage = image.forget();

  return 0;
}
#endif

// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it thinks it needs. Note that
// this means that no *real* frame can be inserted during this period.
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream *aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks &aLastEndTime)
{
  VideoSegment segment;

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  // Note: we're not giving up mImage here
  nsRefPtr<layers::Image> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
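  // Illustrative arithmetic: at a track rate of USECS_PER_S a tick is one
  // microsecond, so a desired time of half a second corresponds to a target
  // of 500000 ticks; delta is however many of those we still owe the track.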
  LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
            (int64_t) target, (int64_t) delta, image ? "" : "<null>"));

  // Bug 846188 We may want to limit incoming frames to the requested frame rate
  // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
  // cause issues.
  // We may want to signal if the actual frame rate is below mMinFPS -
  // cameras often don't return the requested frame rate especially in low
  // light; we should consider surfacing this so that we can switch to a
  // lower resolution (which may up the frame rate)

  // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime.
  // Doing so means a negative delta and thus messes up handling of the graph.
  if (delta > 0) {
    // nullptr images are allowed
    IntSize size(image ? mWidth : 0, image ? mHeight : 0);
    segment.AppendFrame(image.forget(), delta, size);
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &segment)) {
      aLastEndTime = target;
    }
  }
}

static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
  return aRange.mMin <= n && n <= aRange.mMax;
}

static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
  return aRange.mMin <= n && n <= aRange.mMax;
}

static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
  return std::max(aRange.mMin, std::min(n, aRange.mMax));
}

static bool
AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
  return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
}

static bool
Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
  MOZ_ASSERT(AreIntersecting(aA, aB));
  aA.mMin = std::max(aA.mMin, aB.mMin);
  aA.mMax = std::min(aA.mMax, aB.mMax);
  return true;
}
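// Illustrative examples of the helpers above: Clamp(1000, {320..640}) == 640,
// IsWithin(500, {320..640}) == true, and intersecting {320..640} with
// {480..1280} narrows the first range to {480..640}.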

static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
                                 const webrtc::CaptureCapability& aCandidate) {
  if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
      !IsWithin(aCandidate.height, aConstraints.mHeight)) {
    return false;
  }
  if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
    return false;
  }
  return true;
}

void
MediaEngineWebRTCVideoSource::ChooseCapability(
    const VideoTrackConstraintsN &aConstraints,
    const MediaEnginePrefs &aPrefs)
{
#ifdef MOZ_B2G_CAMERA
  return GuessCapability(aConstraints, aPrefs);
#else
  NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
  int num = mViECapture->NumberOfCapabilities(uniqueId.get(), KMaxUniqueIdLength);
  if (num <= 0) {
    // Mac doesn't support capabilities.
    return GuessCapability(aConstraints, aPrefs);
  }

  // The rest is the full algorithm for cameras that can list their capabilities.

  LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));

  typedef nsTArray<uint8_t> SourceSet;

  SourceSet candidateSet;
  for (int i = 0; i < num; i++) {
    candidateSet.AppendElement(i);
  }

  // Pick among capabilities: First apply required constraints.

  for (uint32_t i = 0; i < candidateSet.Length();) {
    webrtc::CaptureCapability cap;
    mViECapture->GetCaptureCapability(uniqueId.get(), KMaxUniqueIdLength,
                                      candidateSet[i], cap);
    if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
      candidateSet.RemoveElementAt(i);
    } else {
      ++i;
    }
  }

  SourceSet tailSet;

  // Then apply advanced (formerly known as optional) constraints.

  if (aConstraints.mAdvanced.WasPassed()) {
    auto &array = aConstraints.mAdvanced.Value();

    for (uint32_t i = 0; i < array.Length(); i++) {
      SourceSet rejects;
      for (uint32_t j = 0; j < candidateSet.Length();) {
        webrtc::CaptureCapability cap;
        mViECapture->GetCaptureCapability(uniqueId.get(), KMaxUniqueIdLength,
                                          candidateSet[j], cap);
        if (!SatisfyConstraintSet(array[i], cap)) {
          rejects.AppendElement(candidateSet[j]);
          candidateSet.RemoveElementAt(j);
        } else {
          ++j;
        }
      }
      (candidateSet.Length() ? tailSet : candidateSet).MoveElementsFrom(rejects);
    }
  }

  if (!candidateSet.Length()) {
    candidateSet.AppendElement(0);
  }

  int prefWidth = aPrefs.GetWidth();
  int prefHeight = aPrefs.GetHeight();

  // Default to the closest capability at or below the preferred size;
  // otherwise the closest one above it. Since we handle the num=0 case above
  // and always take the first entry, we can never exit uninitialized.
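  // Illustrative walk-through with a hypothetical capability list of
  // 1280x720, 640x480 and 320x240 and prefs of 640x480: 1280x720 is taken as
  // the initial fallback, 640x480 replaces it because it fits at or below the
  // preferred size, and 320x240 is then skipped because it is no larger than
  // the current pick.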

  webrtc::CaptureCapability cap;
  bool higher = true;
  for (uint32_t i = 0; i < candidateSet.Length(); i++) {
    mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
                                      KMaxUniqueIdLength, candidateSet[i], cap);
    if (higher) {
      if (i == 0 ||
          (mCapability.width > cap.width && mCapability.height > cap.height)) {
        // closer than the current choice
        mCapability = cap;
        // FIXME: expose expected capture delay?
      }
      if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
        higher = false;
      }
    } else {
      if (cap.width > (uint32_t) prefWidth || cap.height > (uint32_t) prefHeight ||
          cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
        continue;
      }
      if (mCapability.width < cap.width && mCapability.height < cap.height) {
        mCapability = cap;
        // FIXME: expose expected capture delay?
      }
    }
  }
  LOG(("chose cap %dx%d @%dfps",
       mCapability.width, mCapability.height, mCapability.maxFPS));
#endif
}

// A special version of the algorithm for cameras that don't list capabilities.

void
MediaEngineWebRTCVideoSource::GuessCapability(
    const VideoTrackConstraintsN &aConstraints,
    const MediaEnginePrefs &aPrefs)
{
  LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));

  // In short: compound constraint-ranges and use pref as ideal.

  ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
  ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);

  if (aConstraints.mAdvanced.WasPassed()) {
    const auto& advanced = aConstraints.mAdvanced.Value();
    for (uint32_t i = 0; i < advanced.Length(); i++) {
      if (AreIntersecting(cWidth, advanced[i].mWidth) &&
          AreIntersecting(cHeight, advanced[i].mHeight)) {
        Intersect(cWidth, advanced[i].mWidth);
        Intersect(cHeight, advanced[i].mHeight);
      }
    }
  }
  // Detect Mac HD cams and give them some love in the form of a dynamic default
  // since that hardware switches between 4:3 at low res and 16:9 at higher res.
  //
  // Logic is: if we're relying on defaults in aPrefs, then
  // only use HD pref when non-HD pref is too small and HD pref isn't too big.

  bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
                mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
                (aPrefs.GetWidth() < cWidth.mMin ||
                 aPrefs.GetHeight() < cHeight.mMin) &&
                !(aPrefs.GetWidth(true) > cWidth.mMax ||
                  aPrefs.GetHeight(true) > cHeight.mMax));
  int prefWidth = aPrefs.GetWidth(macHD);
  int prefHeight = aPrefs.GetHeight(macHD);

  // Clamp width and height without distorting inherent aspect too much.

  if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
    // If both are within, we get the default (pref) aspect.
    // If neither is within, we get the aspect of the enclosing constraint.
    // Either is presumably reasonable (presuming constraints are sane).
    mCapability.width = Clamp(prefWidth, cWidth);
    mCapability.height = Clamp(prefHeight, cHeight);
  } else {
    // But if only one clips (e.g. width), the resulting skew is undesirable:
    //       .------------.
    //       | constraint |
    //  .----+------------+----.
    //  |    |            |    |
    //  |pref|   result   |    |   prefAspect != resultAspect
    //  |    |            |    |
    //  '----+------------+----'
    //       '------------'
    //  So in this case, preserve prefAspect instead:
    //  .------------.
    //  | constraint |
    //  .------------.
    //  |pref        |   prefAspect is unchanged
    //  '------------'
    //  |            |
    //  '------------'
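    // Worked example with hypothetical numbers: prefs of 640x480 where only
    // the width is clipped (cWidth capped at 500) give width = 500 and
    // height = 500 * 480 / 640 = 375, preserving the preferred aspect ratio.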
    if (IsWithin(prefWidth, cWidth)) {
      mCapability.height = Clamp(prefHeight, cHeight);
      mCapability.width = Clamp((mCapability.height * prefWidth) /
                                prefHeight, cWidth);
    } else {
      mCapability.width = Clamp(prefWidth, cWidth);
      mCapability.height = Clamp((mCapability.width * prefHeight) /
                                 prefWidth, cHeight);
    }
  }
  mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
  LOG(("chose cap %dx%d @%dfps",
       mCapability.width, mCapability.height, mCapability.maxFPS));
}

void
MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
{
  aName = mDeviceName;
}

void
MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
{
  aUUID = mUniqueId;
}

nsresult
MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
                                       const MediaEnginePrefs &aPrefs)
{
  LOG((__FUNCTION__));
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  if (mState == kReleased && mInitDone) {
    ChooseCapability(aConstraints, aPrefs);
    NS_DispatchToMainThread(WrapRunnable(this,
                                         &MediaEngineWebRTCVideoSource::AllocImpl));
    mCallbackMonitor.Wait();
    if (mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }
  }
#else
  if (mState == kReleased && mInitDone) {
    // Note: if shared, we don't allow a later opener to affect the resolution.
    // (This may change depending on spec changes for Constraints/settings)

    ChooseCapability(aConstraints, aPrefs);

    if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
                                           KMaxUniqueIdLength, mCaptureIndex)) {
      return NS_ERROR_FAILURE;
    }
    mState = kAllocated;
    LOG(("Video device %d allocated", mCaptureIndex));
  } else if (mSources.IsEmpty()) {
    LOG(("Video device %d reallocated", mCaptureIndex));
  } else {
    LOG(("Video device %d allocated shared", mCaptureIndex));
  }
#endif

  return NS_OK;
}

nsresult
MediaEngineWebRTCVideoSource::Deallocate()
{
  LOG((__FUNCTION__));
  if (mSources.IsEmpty()) {
#ifdef MOZ_B2G_CAMERA
    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
    if (mState != kStopped && mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }
#ifdef MOZ_B2G_CAMERA
    // We do not register success callback here

    NS_DispatchToMainThread(WrapRunnable(this,
                                         &MediaEngineWebRTCVideoSource::DeallocImpl));
    mCallbackMonitor.Wait();
    if (mState != kReleased) {
      return NS_ERROR_FAILURE;
    }
#elif XP_MACOSX
    // Bug 829907 - on mac, in shutdown, the mainthread stops processing
    // 'native' events, and the QTKit code uses events to the main native CFRunLoop
    // in order to provide thread safety. In order to avoid this locking us up,
    // release the ViE capture device synchronously on MainThread (so the native
    // event isn't needed).
    // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
    // XXX It might be nice to only do this if we're in shutdown... Hard to be
    // sure when that is though.
    // Thread safety: a) we call this synchronously, and don't use ViECapture from
    // another thread anywhere else, b) ViEInputManager::DestroyCaptureDevice() grabs
    // an exclusive object lock and deletes it in a critical section, so all in all
    // this should be safe threadwise.
    NS_DispatchToMainThread(WrapRunnable(mViECapture,
                                         &webrtc::ViECapture::ReleaseCaptureDevice,
                                         mCaptureIndex),
                            NS_DISPATCH_SYNC);
#else
    mViECapture->ReleaseCaptureDevice(mCaptureIndex);
#endif
    mState = kReleased;
    LOG(("Video device %d deallocated", mCaptureIndex));
  } else {
    LOG(("Video device %d deallocated but still in use", mCaptureIndex));
  }
  return NS_OK;
}

nsresult
MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  LOG((__FUNCTION__));
#ifndef MOZ_B2G_CAMERA
  int error = 0;
#endif
  if (!mInitDone || !aStream) {
    return NS_ERROR_FAILURE;
  }

  mSources.AppendElement(aStream);

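  // The track runs at USECS_PER_S ticks per second, i.e. one tick per
  // microsecond; NotifyPull's TimeToTicksRoundUp above assumes the same rate.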
  aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
  aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);

#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif

  if (mState == kStarted) {
    return NS_OK;
  }
  mImageContainer = layers::LayerManager::CreateImageContainer();

#ifdef MOZ_B2G_CAMERA
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StartImpl,
                                       mCapability));
  mCallbackMonitor.Wait();
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }
#else
  mState = kStarted;
  error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }

  error = mViERender->StartRender(mCaptureIndex);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }

  if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
    return NS_ERROR_FAILURE;
  }
#endif

  return NS_OK;
}

nsresult
MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
  LOG((__FUNCTION__));
  if (!mSources.RemoveElement(aSource)) {
    // Already stopped - this is allowed
    return NS_OK;
  }
  if (!mSources.IsEmpty()) {
    return NS_OK;
  }
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }

  {
    MonitorAutoLock lock(mMonitor);
    mState = kStopped;
    aSource->EndTrack(aID);
    // Drop any cached image so we don't start with a stale image on next
    // usage
    mImage = nullptr;
  }
#ifdef MOZ_B2G_CAMERA
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StopImpl));
#else
  mViERender->StopRender(mCaptureIndex);
  mViERender->RemoveRenderer(mCaptureIndex);
  mViECapture->StopCapture(mCaptureIndex);
#endif

  return NS_OK;
}

nsresult
MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
  return NS_ERROR_NOT_IMPLEMENTED;
}

/**
 * Initialization and Shutdown functions for the video source, called by the
 * constructor and destructor respectively.
 */

void
MediaEngineWebRTCVideoSource::Init()
{
#ifdef MOZ_B2G_CAMERA
  nsAutoCString deviceName;
  ICameraControl::GetCameraName(mCaptureIndex, deviceName);
  CopyUTF8toUTF16(deviceName, mDeviceName);
  CopyUTF8toUTF16(deviceName, mUniqueId);
#else
  // fix compile warning for these being unused. (remove once used)
  (void) mFps;
  (void) mMinFps;

  LOG((__FUNCTION__));
  if (mVideoEngine == nullptr) {
    return;
  }

  mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
  if (mViEBase == nullptr) {
    return;
  }

  // Get interfaces for capture, render for now
  mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
  mViERender = webrtc::ViERender::GetInterface(mVideoEngine);

  if (mViECapture == nullptr || mViERender == nullptr) {
    return;
  }

  const uint32_t KMaxDeviceNameLength = 128;
  const uint32_t KMaxUniqueIdLength = 256;
  char deviceName[KMaxDeviceNameLength];
  char uniqueId[KMaxUniqueIdLength];
  if (mViECapture->GetCaptureDevice(mCaptureIndex,
                                    deviceName, KMaxDeviceNameLength,
                                    uniqueId, KMaxUniqueIdLength)) {
    return;
  }

  CopyUTF8toUTF16(deviceName, mDeviceName);
  CopyUTF8toUTF16(uniqueId, mUniqueId);
#endif

  mInitDone = true;
}

void
MediaEngineWebRTCVideoSource::Shutdown()
{
  LOG((__FUNCTION__));
  if (!mInitDone) {
    return;
  }
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  if (mState == kStarted) {
    while (!mSources.IsEmpty()) {
      Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
    }
    MOZ_ASSERT(mState == kStopped);
  }

  if (mState == kAllocated || mState == kStopped) {
    Deallocate();
  }
#ifndef MOZ_B2G_CAMERA
  mViECapture->Release();
  mViERender->Release();
  mViEBase->Release();
#endif
  mState = kReleased;
  mInitDone = false;
}

#ifdef MOZ_B2G_CAMERA

// All these functions must be run on MainThread!
void
MediaEngineWebRTCVideoSource::AllocImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  mCameraControl = ICameraControl::Create(mCaptureIndex);
  if (mCameraControl) {
    mState = kAllocated;
    // Add this as a listener for CameraControl events. We don't need
    // to explicitly remove this--destroying the CameraControl object
    // in DeallocImpl() will do that for us.
    mCameraControl->AddListener(this);
  }

  mCallbackMonitor.Notify();
}

void
MediaEngineWebRTCVideoSource::DeallocImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  mCameraControl = nullptr;
}

// The same algorithm from bug 840244
static int
GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
  int screenAngle = 0;
  switch (aScreen) {
    case eScreenOrientation_PortraitPrimary:
      screenAngle = 0;
      break;
    case eScreenOrientation_PortraitSecondary:
      screenAngle = 180;
      break;
    case eScreenOrientation_LandscapePrimary:
      screenAngle = 90;
      break;
    case eScreenOrientation_LandscapeSecondary:
      screenAngle = 270;
      break;
    default:
      MOZ_ASSERT(false);
      break;
  }

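  // Worked example: a back camera mounted at 90 degrees with the screen in
  // landscape-primary (90 degrees) needs (90 - 90 + 360) % 360 = 0 degrees of
  // rotation; a front camera on the same screen needs (90 + 90) % 360 = 180.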
  int result;

  if (aBackCamera) {
    // back camera
    result = (aCameraMountAngle - screenAngle + 360) % 360;
  } else {
    // front camera
    result = (aCameraMountAngle + screenAngle) % 360;
  }
  return result;
}

// undefine to remove on-the-fly rotation support
// #define DYNAMIC_GUM_ROTATION

void
MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
#ifdef DYNAMIC_GUM_ROTATION
  MonitorAutoLock enter(mMonitor);
  mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);

  LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
       mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
#endif
}

void
MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
  MOZ_ASSERT(NS_IsMainThread());

  ICameraControl::Configuration config;
  config.mMode = ICameraControl::kPictureMode;
  config.mPreviewSize.width = aCapability.width;
  config.mPreviewSize.height = aCapability.height;
  mCameraControl->Start(&config);
  mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);

  hal::RegisterScreenConfigurationObserver(this);
}

void
MediaEngineWebRTCVideoSource::StopImpl() {
  MOZ_ASSERT(NS_IsMainThread());

  hal::UnregisterScreenConfigurationObserver(this);
  mCameraControl->Stop();
}

void
MediaEngineWebRTCVideoSource::SnapshotImpl() {
  MOZ_ASSERT(NS_IsMainThread());
  mCameraControl->TakePicture();
}

void
MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
{
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  if (aState == CameraControlListener::kHardwareClosed) {
    // When the first CameraControl listener is added, it gets pushed
    // the current state of the camera--normally 'closed'. We only
    // pay attention to that state if we've progressed out of the
    // allocated state.
    if (mState != kAllocated) {
      mState = kReleased;
      mCallbackMonitor.Notify();
    }
  } else {
    mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
    MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
               mCameraAngle == 270);
    hal::ScreenConfiguration aConfig;
    hal::GetCurrentScreenConfiguration(&aConfig);

    nsCString deviceName;
    ICameraControl::GetCameraName(mCaptureIndex, deviceName);
    if (deviceName.EqualsASCII("back")) {
      mBackCamera = true;
    }

    mRotation = GetRotateAmount(aConfig.orientation(), mCameraAngle, mBackCamera);
    LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
         mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
    mState = kStarted;
    mCallbackMonitor.Notify();
  }
}

void
MediaEngineWebRTCVideoSource::OnError(CameraErrorContext aContext, CameraError aError)
{
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
  mCallbackMonitor.Notify();
}

void
MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
{
  mLastCapture =
    static_cast<nsIDOMFile*>(new nsDOMMemoryFile(static_cast<void*>(aData),
                                                 static_cast<uint64_t>(aLength),
                                                 aMimeType));
  mCallbackMonitor.Notify();
}

void
MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
  layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
  android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
  void *pMem = nullptr;
  uint32_t size = aWidth * aHeight * 3 / 2;

  graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);

  uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
  // Create a video frame and append it to the track.
  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());

  uint32_t dstWidth;
  uint32_t dstHeight;

  if (mRotation == 90 || mRotation == 270) {
    dstWidth = aHeight;
    dstHeight = aWidth;
  } else {
    dstWidth = aWidth;
    dstHeight = aHeight;
  }

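  // The destination buffer is one contiguous I420 allocation: a
  // dstWidth x dstHeight Y plane, a quarter-size Cb plane at offset
  // dstWidth * dstHeight, and a quarter-size Cr plane at offset
  // dstWidth * dstHeight * 5/4. libyuv converts the NV21 source and rotates
  // it in a single pass.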
  uint32_t half_width = dstWidth / 2;
  uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
  libyuv::ConvertToI420(srcPtr, size,
                        dstPtr, dstWidth,
                        dstPtr + (dstWidth * dstHeight), half_width,
                        dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
                        0, 0,
                        aWidth, aHeight,
                        aWidth, aHeight,
                        static_cast<libyuv::RotationMode>(mRotation),
                        libyuv::FOURCC_NV21);
  graphicBuffer->unlock();

  const uint8_t lumaBpp = 8;
  const uint8_t chromaBpp = 4;

  layers::PlanarYCbCrData data;
  data.mYChannel = dstPtr;
  data.mYSize = IntSize(dstWidth, dstHeight);
  data.mYStride = dstWidth * lumaBpp / 8;
  data.mCbCrStride = dstWidth * chromaBpp / 8;
  data.mCbChannel = dstPtr + dstHeight * data.mYStride;
  data.mCrChannel = data.mCbChannel + (dstHeight * data.mCbCrStride / 2);
  data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
  data.mPicX = 0;
  data.mPicY = 0;
  data.mPicSize = IntSize(dstWidth, dstHeight);
  data.mStereoMode = StereoMode::MONO;

  videoImage->SetDataNoCopy(data);

  // implicitly releases last image
  mImage = image.forget();
}

bool
MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
  {
    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
    if (mState == kStopped) {
      return false;
    }
  }

  MonitorAutoLock enter(mMonitor);
  // Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
  RotateImage(aImage, aWidth, aHeight);
  if (mRotation != 0 && mRotation != 180) {
    uint32_t temp = aWidth;
    aWidth = aHeight;
    aHeight = temp;
  }
  if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
    mWidth = aWidth;
    mHeight = aHeight;
    LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
  }

  return true; // return true because we're accepting the frame
}
#endif

} // namespace mozilla