Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
michael@0 | 3 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
michael@0 | 5 | * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
#include "MediaPluginReader.h"

#include <algorithm>  // for std::max

#include "AbstractMediaDecoder.h"
#include "gfx2DGlue.h"
#include "ImageContainer.h"
#include "MediaDecoderStateMachine.h"
#include "MediaPluginDecoder.h"
#include "MediaPluginHost.h"
#include "MediaResource.h"
#include "mozilla/dom/TimeRanges.h"
#include "mozilla/gfx/Point.h"
#include "mozilla/TimeStamp.h"
#include "VideoUtils.h"
michael@0 | 18 | |
michael@0 | 19 | namespace mozilla { |
michael@0 | 20 | |
michael@0 | 21 | using namespace mozilla::gfx; |
michael@0 | 22 | |
michael@0 | 23 | typedef mozilla::layers::Image Image; |
michael@0 | 24 | typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage; |
michael@0 | 25 | |
// Construct a reader that decodes |aContentType| media through an external
// media plugin. The plugin decoder itself is created lazily in
// ReadMetadata(), so mPlugin starts out null. The seek-time members use -1
// as a sentinel meaning "no seek pending".
MediaPluginReader::MediaPluginReader(AbstractMediaDecoder *aDecoder,
                                     const nsACString& aContentType) :
  MediaDecoderReader(aDecoder),
  mType(aContentType),
  mPlugin(nullptr),       // created on first ReadMetadata() call
  mHasAudio(false),
  mHasVideo(false),
  mVideoSeekTimeUs(-1),   // -1 == no pending video seek
  mAudioSeekTimeUs(-1)    // -1 == no pending audio seek
{
}
michael@0 | 37 | |
MediaPluginReader::~MediaPluginReader()
{
  // Releases the buffered last video frame and destroys the plugin
  // decoder (if one was created) via ResetDecode().
  ResetDecode();
}
michael@0 | 42 | |
// No per-reader initialization is needed here: the plugin decoder is
// created lazily in ReadMetadata(). aCloneDonor is intentionally unused.
nsresult MediaPluginReader::Init(MediaDecoderReader* aCloneDonor)
{
  return NS_OK;
}
michael@0 | 47 | |
michael@0 | 48 | nsresult MediaPluginReader::ReadMetadata(MediaInfo* aInfo, |
michael@0 | 49 | MetadataTags** aTags) |
michael@0 | 50 | { |
michael@0 | 51 | NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); |
michael@0 | 52 | |
michael@0 | 53 | if (!mPlugin) { |
michael@0 | 54 | mPlugin = GetMediaPluginHost()->CreateDecoder(mDecoder->GetResource(), mType); |
michael@0 | 55 | if (!mPlugin) { |
michael@0 | 56 | return NS_ERROR_FAILURE; |
michael@0 | 57 | } |
michael@0 | 58 | } |
michael@0 | 59 | |
michael@0 | 60 | // Set the total duration (the max of the audio and video track). |
michael@0 | 61 | int64_t durationUs; |
michael@0 | 62 | mPlugin->GetDuration(mPlugin, &durationUs); |
michael@0 | 63 | if (durationUs) { |
michael@0 | 64 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 65 | mDecoder->SetMediaDuration(durationUs); |
michael@0 | 66 | } |
michael@0 | 67 | |
michael@0 | 68 | if (mPlugin->HasVideo(mPlugin)) { |
michael@0 | 69 | int32_t width, height; |
michael@0 | 70 | mPlugin->GetVideoParameters(mPlugin, &width, &height); |
michael@0 | 71 | nsIntRect pictureRect(0, 0, width, height); |
michael@0 | 72 | |
michael@0 | 73 | // Validate the container-reported frame and pictureRect sizes. This ensures |
michael@0 | 74 | // that our video frame creation code doesn't overflow. |
michael@0 | 75 | nsIntSize displaySize(width, height); |
michael@0 | 76 | nsIntSize frameSize(width, height); |
michael@0 | 77 | if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) { |
michael@0 | 78 | return NS_ERROR_FAILURE; |
michael@0 | 79 | } |
michael@0 | 80 | |
michael@0 | 81 | // Video track's frame sizes will not overflow. Activate the video track. |
michael@0 | 82 | mHasVideo = mInfo.mVideo.mHasVideo = true; |
michael@0 | 83 | mInfo.mVideo.mDisplay = displaySize; |
michael@0 | 84 | mPicture = pictureRect; |
michael@0 | 85 | mInitialFrame = frameSize; |
michael@0 | 86 | VideoFrameContainer* container = mDecoder->GetVideoFrameContainer(); |
michael@0 | 87 | if (container) { |
michael@0 | 88 | container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height), |
michael@0 | 89 | nullptr, |
michael@0 | 90 | mozilla::TimeStamp::Now()); |
michael@0 | 91 | } |
michael@0 | 92 | } |
michael@0 | 93 | |
michael@0 | 94 | if (mPlugin->HasAudio(mPlugin)) { |
michael@0 | 95 | int32_t numChannels, sampleRate; |
michael@0 | 96 | mPlugin->GetAudioParameters(mPlugin, &numChannels, &sampleRate); |
michael@0 | 97 | mHasAudio = mInfo.mAudio.mHasAudio = true; |
michael@0 | 98 | mInfo.mAudio.mChannels = numChannels; |
michael@0 | 99 | mInfo.mAudio.mRate = sampleRate; |
michael@0 | 100 | } |
michael@0 | 101 | |
michael@0 | 102 | *aInfo = mInfo; |
michael@0 | 103 | *aTags = nullptr; |
michael@0 | 104 | return NS_OK; |
michael@0 | 105 | } |
michael@0 | 106 | |
michael@0 | 107 | // Resets all state related to decoding, emptying all buffers etc. |
michael@0 | 108 | nsresult MediaPluginReader::ResetDecode() |
michael@0 | 109 | { |
michael@0 | 110 | if (mLastVideoFrame) { |
michael@0 | 111 | mLastVideoFrame = nullptr; |
michael@0 | 112 | } |
michael@0 | 113 | if (mPlugin) { |
michael@0 | 114 | GetMediaPluginHost()->DestroyDecoder(mPlugin); |
michael@0 | 115 | mPlugin = nullptr; |
michael@0 | 116 | } |
michael@0 | 117 | |
michael@0 | 118 | return NS_OK; |
michael@0 | 119 | } |
michael@0 | 120 | |
michael@0 | 121 | bool MediaPluginReader::DecodeVideoFrame(bool &aKeyframeSkip, |
michael@0 | 122 | int64_t aTimeThreshold) |
michael@0 | 123 | { |
michael@0 | 124 | // Record number of frames decoded and parsed. Automatically update the |
michael@0 | 125 | // stats counters using the AutoNotifyDecoded stack-based class. |
michael@0 | 126 | uint32_t parsed = 0, decoded = 0; |
michael@0 | 127 | AbstractMediaDecoder::AutoNotifyDecoded autoNotify(mDecoder, parsed, decoded); |
michael@0 | 128 | |
michael@0 | 129 | // Throw away the currently buffered frame if we are seeking. |
michael@0 | 130 | if (mLastVideoFrame && mVideoSeekTimeUs != -1) { |
michael@0 | 131 | mLastVideoFrame = nullptr; |
michael@0 | 132 | } |
michael@0 | 133 | |
michael@0 | 134 | ImageBufferCallback bufferCallback(mDecoder->GetImageContainer()); |
michael@0 | 135 | nsRefPtr<Image> currentImage; |
michael@0 | 136 | |
michael@0 | 137 | // Read next frame |
michael@0 | 138 | while (true) { |
michael@0 | 139 | MPAPI::VideoFrame frame; |
michael@0 | 140 | if (!mPlugin->ReadVideo(mPlugin, &frame, mVideoSeekTimeUs, &bufferCallback)) { |
michael@0 | 141 | // We reached the end of the video stream. If we have a buffered |
michael@0 | 142 | // video frame, push it the video queue using the total duration |
michael@0 | 143 | // of the video as the end time. |
michael@0 | 144 | if (mLastVideoFrame) { |
michael@0 | 145 | int64_t durationUs; |
michael@0 | 146 | mPlugin->GetDuration(mPlugin, &durationUs); |
michael@0 | 147 | durationUs = std::max<int64_t>(durationUs - mLastVideoFrame->mTime, 0); |
michael@0 | 148 | mVideoQueue.Push(VideoData::ShallowCopyUpdateDuration(mLastVideoFrame, |
michael@0 | 149 | durationUs)); |
michael@0 | 150 | mLastVideoFrame = nullptr; |
michael@0 | 151 | } |
michael@0 | 152 | return false; |
michael@0 | 153 | } |
michael@0 | 154 | mVideoSeekTimeUs = -1; |
michael@0 | 155 | |
michael@0 | 156 | if (aKeyframeSkip) { |
michael@0 | 157 | // Disable keyframe skipping for now as |
michael@0 | 158 | // stagefright doesn't seem to be telling us |
michael@0 | 159 | // when a frame is a keyframe. |
michael@0 | 160 | #if 0 |
michael@0 | 161 | if (!frame.mKeyFrame) { |
michael@0 | 162 | ++parsed; |
michael@0 | 163 | continue; |
michael@0 | 164 | } |
michael@0 | 165 | #endif |
michael@0 | 166 | aKeyframeSkip = false; |
michael@0 | 167 | } |
michael@0 | 168 | |
michael@0 | 169 | if (frame.mSize == 0) |
michael@0 | 170 | return true; |
michael@0 | 171 | |
michael@0 | 172 | currentImage = bufferCallback.GetImage(); |
michael@0 | 173 | int64_t pos = mDecoder->GetResource()->Tell(); |
michael@0 | 174 | IntRect picture = ToIntRect(mPicture); |
michael@0 | 175 | |
michael@0 | 176 | nsAutoPtr<VideoData> v; |
michael@0 | 177 | if (currentImage) { |
michael@0 | 178 | gfx::IntSize frameSize = currentImage->GetSize(); |
michael@0 | 179 | if (frameSize.width != mInitialFrame.width || |
michael@0 | 180 | frameSize.height != mInitialFrame.height) { |
michael@0 | 181 | // Frame size is different from what the container reports. This is legal, |
michael@0 | 182 | // and we will preserve the ratio of the crop rectangle as it |
michael@0 | 183 | // was reported relative to the picture size reported by the container. |
michael@0 | 184 | picture.x = (mPicture.x * frameSize.width) / mInitialFrame.width; |
michael@0 | 185 | picture.y = (mPicture.y * frameSize.height) / mInitialFrame.height; |
michael@0 | 186 | picture.width = (frameSize.width * mPicture.width) / mInitialFrame.width; |
michael@0 | 187 | picture.height = (frameSize.height * mPicture.height) / mInitialFrame.height; |
michael@0 | 188 | } |
michael@0 | 189 | |
michael@0 | 190 | v = VideoData::CreateFromImage(mInfo.mVideo, |
michael@0 | 191 | mDecoder->GetImageContainer(), |
michael@0 | 192 | pos, |
michael@0 | 193 | frame.mTimeUs, |
michael@0 | 194 | 1, // We don't know the duration yet. |
michael@0 | 195 | currentImage, |
michael@0 | 196 | frame.mKeyFrame, |
michael@0 | 197 | -1, |
michael@0 | 198 | picture); |
michael@0 | 199 | } else { |
michael@0 | 200 | // Assume YUV |
michael@0 | 201 | VideoData::YCbCrBuffer b; |
michael@0 | 202 | b.mPlanes[0].mData = static_cast<uint8_t *>(frame.Y.mData); |
michael@0 | 203 | b.mPlanes[0].mStride = frame.Y.mStride; |
michael@0 | 204 | b.mPlanes[0].mHeight = frame.Y.mHeight; |
michael@0 | 205 | b.mPlanes[0].mWidth = frame.Y.mWidth; |
michael@0 | 206 | b.mPlanes[0].mOffset = frame.Y.mOffset; |
michael@0 | 207 | b.mPlanes[0].mSkip = frame.Y.mSkip; |
michael@0 | 208 | |
michael@0 | 209 | b.mPlanes[1].mData = static_cast<uint8_t *>(frame.Cb.mData); |
michael@0 | 210 | b.mPlanes[1].mStride = frame.Cb.mStride; |
michael@0 | 211 | b.mPlanes[1].mHeight = frame.Cb.mHeight; |
michael@0 | 212 | b.mPlanes[1].mWidth = frame.Cb.mWidth; |
michael@0 | 213 | b.mPlanes[1].mOffset = frame.Cb.mOffset; |
michael@0 | 214 | b.mPlanes[1].mSkip = frame.Cb.mSkip; |
michael@0 | 215 | |
michael@0 | 216 | b.mPlanes[2].mData = static_cast<uint8_t *>(frame.Cr.mData); |
michael@0 | 217 | b.mPlanes[2].mStride = frame.Cr.mStride; |
michael@0 | 218 | b.mPlanes[2].mHeight = frame.Cr.mHeight; |
michael@0 | 219 | b.mPlanes[2].mWidth = frame.Cr.mWidth; |
michael@0 | 220 | b.mPlanes[2].mOffset = frame.Cr.mOffset; |
michael@0 | 221 | b.mPlanes[2].mSkip = frame.Cr.mSkip; |
michael@0 | 222 | |
michael@0 | 223 | if (frame.Y.mWidth != mInitialFrame.width || |
michael@0 | 224 | frame.Y.mHeight != mInitialFrame.height) { |
michael@0 | 225 | |
michael@0 | 226 | // Frame size is different from what the container reports. This is legal, |
michael@0 | 227 | // and we will preserve the ratio of the crop rectangle as it |
michael@0 | 228 | // was reported relative to the picture size reported by the container. |
michael@0 | 229 | picture.x = (mPicture.x * frame.Y.mWidth) / mInitialFrame.width; |
michael@0 | 230 | picture.y = (mPicture.y * frame.Y.mHeight) / mInitialFrame.height; |
michael@0 | 231 | picture.width = (frame.Y.mWidth * mPicture.width) / mInitialFrame.width; |
michael@0 | 232 | picture.height = (frame.Y.mHeight * mPicture.height) / mInitialFrame.height; |
michael@0 | 233 | } |
michael@0 | 234 | |
michael@0 | 235 | // This is the approximate byte position in the stream. |
michael@0 | 236 | v = VideoData::Create(mInfo.mVideo, |
michael@0 | 237 | mDecoder->GetImageContainer(), |
michael@0 | 238 | pos, |
michael@0 | 239 | frame.mTimeUs, |
michael@0 | 240 | 1, // We don't know the duration yet. |
michael@0 | 241 | b, |
michael@0 | 242 | frame.mKeyFrame, |
michael@0 | 243 | -1, |
michael@0 | 244 | picture); |
michael@0 | 245 | } |
michael@0 | 246 | |
michael@0 | 247 | if (!v) { |
michael@0 | 248 | return false; |
michael@0 | 249 | } |
michael@0 | 250 | parsed++; |
michael@0 | 251 | decoded++; |
michael@0 | 252 | NS_ASSERTION(decoded <= parsed, "Expect to decode fewer frames than parsed in MediaPlugin..."); |
michael@0 | 253 | |
michael@0 | 254 | // Since MPAPI doesn't give us the end time of frames, we keep one frame |
michael@0 | 255 | // buffered in MediaPluginReader and push it into the queue as soon |
michael@0 | 256 | // we read the following frame so we can use that frame's start time as |
michael@0 | 257 | // the end time of the buffered frame. |
michael@0 | 258 | if (!mLastVideoFrame) { |
michael@0 | 259 | mLastVideoFrame = v; |
michael@0 | 260 | continue; |
michael@0 | 261 | } |
michael@0 | 262 | |
michael@0 | 263 | // Calculate the duration as the timestamp of the current frame minus the |
michael@0 | 264 | // timestamp of the previous frame. We can then return the previously |
michael@0 | 265 | // decoded frame, and it will have a valid timestamp. |
michael@0 | 266 | int64_t duration = v->mTime - mLastVideoFrame->mTime; |
michael@0 | 267 | mLastVideoFrame = VideoData::ShallowCopyUpdateDuration(mLastVideoFrame, duration); |
michael@0 | 268 | |
michael@0 | 269 | // We have the start time of the next frame, so we can push the previous |
michael@0 | 270 | // frame into the queue, except if the end time is below the threshold, |
michael@0 | 271 | // in which case it wouldn't be displayed anyway. |
michael@0 | 272 | if (mLastVideoFrame->GetEndTime() < aTimeThreshold) { |
michael@0 | 273 | mLastVideoFrame = nullptr; |
michael@0 | 274 | continue; |
michael@0 | 275 | } |
michael@0 | 276 | |
michael@0 | 277 | mVideoQueue.Push(mLastVideoFrame.forget()); |
michael@0 | 278 | |
michael@0 | 279 | // Buffer the current frame we just decoded. |
michael@0 | 280 | mLastVideoFrame = v; |
michael@0 | 281 | |
michael@0 | 282 | break; |
michael@0 | 283 | } |
michael@0 | 284 | |
michael@0 | 285 | return true; |
michael@0 | 286 | } |
michael@0 | 287 | |
michael@0 | 288 | bool MediaPluginReader::DecodeAudioData() |
michael@0 | 289 | { |
michael@0 | 290 | NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); |
michael@0 | 291 | |
michael@0 | 292 | // This is the approximate byte position in the stream. |
michael@0 | 293 | int64_t pos = mDecoder->GetResource()->Tell(); |
michael@0 | 294 | |
michael@0 | 295 | // Read next frame |
michael@0 | 296 | MPAPI::AudioFrame source; |
michael@0 | 297 | if (!mPlugin->ReadAudio(mPlugin, &source, mAudioSeekTimeUs)) { |
michael@0 | 298 | return false; |
michael@0 | 299 | } |
michael@0 | 300 | mAudioSeekTimeUs = -1; |
michael@0 | 301 | |
michael@0 | 302 | // Ignore empty buffers which stagefright media read will sporadically return |
michael@0 | 303 | if (source.mSize == 0) |
michael@0 | 304 | return true; |
michael@0 | 305 | |
michael@0 | 306 | uint32_t frames = source.mSize / (source.mAudioChannels * |
michael@0 | 307 | sizeof(AudioDataValue)); |
michael@0 | 308 | |
michael@0 | 309 | typedef AudioCompactor::NativeCopy MPCopy; |
michael@0 | 310 | return mAudioCompactor.Push(pos, |
michael@0 | 311 | source.mTimeUs, |
michael@0 | 312 | source.mAudioSampleRate, |
michael@0 | 313 | frames, |
michael@0 | 314 | source.mAudioChannels, |
michael@0 | 315 | MPCopy(static_cast<uint8_t *>(source.mData), |
michael@0 | 316 | source.mSize, |
michael@0 | 317 | source.mAudioChannels)); |
michael@0 | 318 | } |
michael@0 | 319 | |
michael@0 | 320 | nsresult MediaPluginReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime) |
michael@0 | 321 | { |
michael@0 | 322 | NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); |
michael@0 | 323 | |
michael@0 | 324 | mVideoQueue.Reset(); |
michael@0 | 325 | mAudioQueue.Reset(); |
michael@0 | 326 | |
michael@0 | 327 | mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget; |
michael@0 | 328 | |
michael@0 | 329 | return NS_OK; |
michael@0 | 330 | } |
michael@0 | 331 | |
// Callback object handed to the plugin so it can allocate decode buffers
// backed by layers images. Holds a non-owning pointer to the decoder's
// image container; may be null (checked before use in operator()).
MediaPluginReader::ImageBufferCallback::ImageBufferCallback(mozilla::layers::ImageContainer *aImageContainer) :
  mImageContainer(aImageContainer)
{
}
michael@0 | 336 | |
michael@0 | 337 | void * |
michael@0 | 338 | MediaPluginReader::ImageBufferCallback::operator()(size_t aWidth, size_t aHeight, |
michael@0 | 339 | MPAPI::ColorFormat aColorFormat) |
michael@0 | 340 | { |
michael@0 | 341 | if (!mImageContainer) { |
michael@0 | 342 | NS_WARNING("No image container to construct an image"); |
michael@0 | 343 | return nullptr; |
michael@0 | 344 | } |
michael@0 | 345 | |
michael@0 | 346 | nsRefPtr<Image> image; |
michael@0 | 347 | switch(aColorFormat) { |
michael@0 | 348 | case MPAPI::RGB565: |
michael@0 | 349 | image = mozilla::layers::CreateSharedRGBImage(mImageContainer, |
michael@0 | 350 | nsIntSize(aWidth, aHeight), |
michael@0 | 351 | gfxImageFormat::RGB16_565); |
michael@0 | 352 | if (!image) { |
michael@0 | 353 | NS_WARNING("Could not create rgb image"); |
michael@0 | 354 | return nullptr; |
michael@0 | 355 | } |
michael@0 | 356 | |
michael@0 | 357 | mImage = image; |
michael@0 | 358 | return image->AsSharedImage()->GetBuffer(); |
michael@0 | 359 | case MPAPI::I420: |
michael@0 | 360 | return CreateI420Image(aWidth, aHeight); |
michael@0 | 361 | default: |
michael@0 | 362 | NS_NOTREACHED("Color format not supported"); |
michael@0 | 363 | return nullptr; |
michael@0 | 364 | } |
michael@0 | 365 | } |
michael@0 | 366 | |
michael@0 | 367 | uint8_t * |
michael@0 | 368 | MediaPluginReader::ImageBufferCallback::CreateI420Image(size_t aWidth, |
michael@0 | 369 | size_t aHeight) |
michael@0 | 370 | { |
michael@0 | 371 | mImage = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR); |
michael@0 | 372 | PlanarYCbCrImage *yuvImage = static_cast<PlanarYCbCrImage *>(mImage.get()); |
michael@0 | 373 | |
michael@0 | 374 | if (!yuvImage) { |
michael@0 | 375 | NS_WARNING("Could not create I420 image"); |
michael@0 | 376 | return nullptr; |
michael@0 | 377 | } |
michael@0 | 378 | |
michael@0 | 379 | size_t frameSize = aWidth * aHeight; |
michael@0 | 380 | |
michael@0 | 381 | // Allocate enough for one full resolution Y plane |
michael@0 | 382 | // and two quarter resolution Cb/Cr planes. |
michael@0 | 383 | uint8_t *buffer = yuvImage->AllocateAndGetNewBuffer(frameSize * 3 / 2); |
michael@0 | 384 | |
michael@0 | 385 | mozilla::layers::PlanarYCbCrData frameDesc; |
michael@0 | 386 | |
michael@0 | 387 | frameDesc.mYChannel = buffer; |
michael@0 | 388 | frameDesc.mCbChannel = buffer + frameSize; |
michael@0 | 389 | frameDesc.mCrChannel = buffer + frameSize * 5 / 4; |
michael@0 | 390 | |
michael@0 | 391 | frameDesc.mYSize = IntSize(aWidth, aHeight); |
michael@0 | 392 | frameDesc.mCbCrSize = IntSize(aWidth / 2, aHeight / 2); |
michael@0 | 393 | |
michael@0 | 394 | frameDesc.mYStride = aWidth; |
michael@0 | 395 | frameDesc.mCbCrStride = aWidth / 2; |
michael@0 | 396 | |
michael@0 | 397 | frameDesc.mYSkip = 0; |
michael@0 | 398 | frameDesc.mCbSkip = 0; |
michael@0 | 399 | frameDesc.mCrSkip = 0; |
michael@0 | 400 | |
michael@0 | 401 | frameDesc.mPicX = 0; |
michael@0 | 402 | frameDesc.mPicY = 0; |
michael@0 | 403 | frameDesc.mPicSize = IntSize(aWidth, aHeight); |
michael@0 | 404 | |
michael@0 | 405 | yuvImage->SetDataNoCopy(frameDesc); |
michael@0 | 406 | |
michael@0 | 407 | return buffer; |
michael@0 | 408 | } |
michael@0 | 409 | |
// Transfers ownership of the most recently allocated image to the caller.
// After this call mImage is null, so a subsequent call returns nullptr
// until the plugin allocates a new buffer via operator().
already_AddRefed<Image>
MediaPluginReader::ImageBufferCallback::GetImage()
{
  return mImage.forget();
}
michael@0 | 415 | |
michael@0 | 416 | } // namespace mozilla |