Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
michael@0 | 3 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "MediaTaskQueue.h" |
michael@0 | 8 | #include "nsThreadUtils.h" |
michael@0 | 9 | #include "nsAutoPtr.h" |
michael@0 | 10 | #include "ImageContainer.h" |
michael@0 | 11 | |
michael@0 | 12 | #include "mp4_demuxer/mp4_demuxer.h" |
michael@0 | 13 | #include "FFmpegRuntimeLinker.h" |
michael@0 | 14 | |
michael@0 | 15 | #include "FFmpegH264Decoder.h" |
michael@0 | 16 | |
michael@0 | 17 | #define GECKO_FRAME_TYPE 0x00093CC0 |
michael@0 | 18 | |
michael@0 | 19 | typedef mozilla::layers::Image Image; |
michael@0 | 20 | typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage; |
michael@0 | 21 | |
michael@0 | 22 | typedef mp4_demuxer::MP4Sample MP4Sample; |
michael@0 | 23 | |
michael@0 | 24 | namespace mozilla |
michael@0 | 25 | { |
michael@0 | 26 | |
// Constructs an H.264 decoder that routes work through |aTaskQueue| and
// reports results via |aCallback|.
//
// @param aTaskQueue       Queue on which DecodeFrame/OutputDelayedFrames run
//                         (forwarded to the FFmpegDataDecoder base).
// @param aCallback        Receives decoded frames, errors, and
//                         input-exhausted notifications.
// @param aConfig          Demuxer-provided video configuration, stored in
//                         mConfig (copy vs. reference depends on the member
//                         declaration, which is not visible here).
// @param aImageContainer  Used by AllocateYUV420PVideoBuffer to create the
//                         images the decoder writes into.
FFmpegH264Decoder::FFmpegH264Decoder(
  MediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
  const mp4_demuxer::VideoDecoderConfig &aConfig,
  ImageContainer* aImageContainer)
  : FFmpegDataDecoder(aTaskQueue, AV_CODEC_ID_H264)
  , mConfig(aConfig)
  , mCallback(aCallback)
  , mImageContainer(aImageContainer)
{
  // Leak-checking instrumentation; paired with MOZ_COUNT_DTOR in the dtor.
  MOZ_COUNT_CTOR(FFmpegH264Decoder);
}
michael@0 | 38 | |
michael@0 | 39 | nsresult |
michael@0 | 40 | FFmpegH264Decoder::Init() |
michael@0 | 41 | { |
michael@0 | 42 | nsresult rv = FFmpegDataDecoder::Init(); |
michael@0 | 43 | NS_ENSURE_SUCCESS(rv, rv); |
michael@0 | 44 | |
michael@0 | 45 | mCodecContext.get_buffer = AllocateBufferCb; |
michael@0 | 46 | |
michael@0 | 47 | return NS_OK; |
michael@0 | 48 | } |
michael@0 | 49 | |
michael@0 | 50 | void |
michael@0 | 51 | FFmpegH264Decoder::DecodeFrame(mp4_demuxer::MP4Sample* aSample) |
michael@0 | 52 | { |
michael@0 | 53 | AVPacket packet; |
michael@0 | 54 | av_init_packet(&packet); |
michael@0 | 55 | |
michael@0 | 56 | packet.data = &(*aSample->data)[0]; |
michael@0 | 57 | packet.size = aSample->data->size(); |
michael@0 | 58 | packet.dts = aSample->decode_timestamp; |
michael@0 | 59 | packet.pts = aSample->composition_timestamp; |
michael@0 | 60 | packet.flags = aSample->is_sync_point ? AV_PKT_FLAG_KEY : 0; |
michael@0 | 61 | packet.pos = aSample->byte_offset; |
michael@0 | 62 | |
michael@0 | 63 | nsAutoPtr<AVFrame> frame(avcodec_alloc_frame()); |
michael@0 | 64 | avcodec_get_frame_defaults(frame); |
michael@0 | 65 | |
michael@0 | 66 | int decoded; |
michael@0 | 67 | int bytesConsumed = |
michael@0 | 68 | avcodec_decode_video2(&mCodecContext, frame, &decoded, &packet); |
michael@0 | 69 | |
michael@0 | 70 | if (bytesConsumed < 0) { |
michael@0 | 71 | NS_WARNING("FFmpeg video decoder error."); |
michael@0 | 72 | mCallback->Error(); |
michael@0 | 73 | return; |
michael@0 | 74 | } |
michael@0 | 75 | |
michael@0 | 76 | if (!decoded) { |
michael@0 | 77 | // The decoder doesn't have enough data to decode a frame yet. |
michael@0 | 78 | return; |
michael@0 | 79 | } |
michael@0 | 80 | |
michael@0 | 81 | nsAutoPtr<VideoData> data; |
michael@0 | 82 | |
michael@0 | 83 | VideoInfo info; |
michael@0 | 84 | info.mDisplay = nsIntSize(mCodecContext.width, mCodecContext.height); |
michael@0 | 85 | info.mStereoMode = StereoMode::MONO; |
michael@0 | 86 | info.mHasVideo = true; |
michael@0 | 87 | |
michael@0 | 88 | data = VideoData::CreateFromImage( |
michael@0 | 89 | info, mImageContainer, aSample->byte_offset, aSample->composition_timestamp, |
michael@0 | 90 | aSample->duration, mCurrentImage, aSample->is_sync_point, -1, |
michael@0 | 91 | gfx::IntRect(0, 0, mCodecContext.width, mCodecContext.height)); |
michael@0 | 92 | |
michael@0 | 93 | // Insert the frame into the heap for reordering. |
michael@0 | 94 | mDelayedFrames.Push(data.forget()); |
michael@0 | 95 | |
michael@0 | 96 | // Reorder video frames from decode order to presentation order. The minimum |
michael@0 | 97 | // size of the heap comes from one P frame + |max_b_frames| B frames, which |
michael@0 | 98 | // is the maximum number of frames in a row which will be out-of-order. |
michael@0 | 99 | if (mDelayedFrames.Length() > (uint32_t)mCodecContext.max_b_frames + 1) { |
michael@0 | 100 | VideoData* d = mDelayedFrames.Pop(); |
michael@0 | 101 | mCallback->Output(d); |
michael@0 | 102 | } |
michael@0 | 103 | |
michael@0 | 104 | if (mTaskQueue->IsEmpty()) { |
michael@0 | 105 | mCallback->InputExhausted(); |
michael@0 | 106 | } |
michael@0 | 107 | } |
michael@0 | 108 | |
michael@0 | 109 | static void |
michael@0 | 110 | PlanarYCbCrDataFromAVFrame(mozilla::layers::PlanarYCbCrData &aData, |
michael@0 | 111 | AVFrame* aFrame) |
michael@0 | 112 | { |
michael@0 | 113 | aData.mPicX = aData.mPicY = 0; |
michael@0 | 114 | aData.mPicSize = mozilla::gfx::IntSize(aFrame->width, aFrame->height); |
michael@0 | 115 | aData.mStereoMode = StereoMode::MONO; |
michael@0 | 116 | |
michael@0 | 117 | aData.mYChannel = aFrame->data[0]; |
michael@0 | 118 | aData.mYStride = aFrame->linesize[0]; |
michael@0 | 119 | aData.mYSize = aData.mPicSize; |
michael@0 | 120 | aData.mYSkip = 0; |
michael@0 | 121 | |
michael@0 | 122 | aData.mCbChannel = aFrame->data[1]; |
michael@0 | 123 | aData.mCrChannel = aFrame->data[2]; |
michael@0 | 124 | aData.mCbCrStride = aFrame->linesize[1]; |
michael@0 | 125 | aData.mCbSkip = aData.mCrSkip = 0; |
michael@0 | 126 | aData.mCbCrSize = |
michael@0 | 127 | mozilla::gfx::IntSize((aFrame->width + 1) / 2, (aFrame->height + 1) / 2); |
michael@0 | 128 | } |
michael@0 | 129 | |
michael@0 | 130 | /* static */ int |
michael@0 | 131 | FFmpegH264Decoder::AllocateBufferCb(AVCodecContext* aCodecContext, |
michael@0 | 132 | AVFrame* aFrame) |
michael@0 | 133 | { |
michael@0 | 134 | MOZ_ASSERT(aCodecContext->codec_type == AVMEDIA_TYPE_VIDEO); |
michael@0 | 135 | |
michael@0 | 136 | FFmpegH264Decoder* self = |
michael@0 | 137 | reinterpret_cast<FFmpegH264Decoder*>(aCodecContext->opaque); |
michael@0 | 138 | |
michael@0 | 139 | switch (aCodecContext->pix_fmt) { |
michael@0 | 140 | case PIX_FMT_YUV420P: |
michael@0 | 141 | return self->AllocateYUV420PVideoBuffer(aCodecContext, aFrame); |
michael@0 | 142 | default: |
michael@0 | 143 | return avcodec_default_get_buffer(aCodecContext, aFrame); |
michael@0 | 144 | } |
michael@0 | 145 | } |
michael@0 | 146 | |
// Custom buffer allocation for YUV420P pictures: allocates a
// PlanarYCbCrImage sized for the padded/aligned decode dimensions and points
// the AVFrame's plane pointers into it, so FFmpeg decodes directly into the
// image we will later hand to the compositor.
//
// @param aCodecContext  Provides picture dimensions and pixel format.
// @param aFrame         Frame whose data/linesize/extended_data we fill in.
// @return 0 on success, -1 if the image buffer could not be allocated.
int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated *around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = reinterpret_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    // (data[i] currently holds a nullptr-relative offset, so this addition
    // turns it into an absolute address inside |buffer|.)
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  // Describe the plane layout to the image; SetDataNoCopy indicates the image
  // references the decoder's buffer rather than copying it.
  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  // Remember the image so DecodeFrame() can wrap it in a VideoData once the
  // picture is fully decoded.
  mCurrentImage.swap(image);

  return 0;
}
michael@0 | 213 | |
michael@0 | 214 | nsresult |
michael@0 | 215 | FFmpegH264Decoder::Input(mp4_demuxer::MP4Sample* aSample) |
michael@0 | 216 | { |
michael@0 | 217 | mTaskQueue->Dispatch( |
michael@0 | 218 | NS_NewRunnableMethodWithArg<nsAutoPtr<mp4_demuxer::MP4Sample> >( |
michael@0 | 219 | this, &FFmpegH264Decoder::DecodeFrame, |
michael@0 | 220 | nsAutoPtr<mp4_demuxer::MP4Sample>(aSample))); |
michael@0 | 221 | |
michael@0 | 222 | return NS_OK; |
michael@0 | 223 | } |
michael@0 | 224 | |
michael@0 | 225 | void |
michael@0 | 226 | FFmpegH264Decoder::OutputDelayedFrames() |
michael@0 | 227 | { |
michael@0 | 228 | while (!mDelayedFrames.IsEmpty()) { |
michael@0 | 229 | mCallback->Output(mDelayedFrames.Pop()); |
michael@0 | 230 | } |
michael@0 | 231 | } |
michael@0 | 232 | |
michael@0 | 233 | nsresult |
michael@0 | 234 | FFmpegH264Decoder::Drain() |
michael@0 | 235 | { |
michael@0 | 236 | // The maximum number of frames that can be waiting to be decoded is |
michael@0 | 237 | // max_b_frames + 1: One P frame and max_b_frames B frames. |
michael@0 | 238 | for (int32_t i = 0; i <= mCodecContext.max_b_frames; i++) { |
michael@0 | 239 | // An empty frame tells FFmpeg to decode the next delayed frame it has in |
michael@0 | 240 | // its queue, if it has any. |
michael@0 | 241 | nsAutoPtr<MP4Sample> empty(new MP4Sample(0 /* dts */, 0 /* cts */, |
michael@0 | 242 | 0 /* duration */, 0 /* offset */, |
michael@0 | 243 | new std::vector<uint8_t>(), |
michael@0 | 244 | mp4_demuxer::kVideo, nullptr, |
michael@0 | 245 | false)); |
michael@0 | 246 | |
michael@0 | 247 | nsresult rv = Input(empty.forget()); |
michael@0 | 248 | NS_ENSURE_SUCCESS(rv, rv); |
michael@0 | 249 | } |
michael@0 | 250 | |
michael@0 | 251 | mTaskQueue->Dispatch( |
michael@0 | 252 | NS_NewRunnableMethod(this, &FFmpegH264Decoder::OutputDelayedFrames)); |
michael@0 | 253 | |
michael@0 | 254 | return NS_OK; |
michael@0 | 255 | } |
michael@0 | 256 | |
michael@0 | 257 | nsresult |
michael@0 | 258 | FFmpegH264Decoder::Flush() |
michael@0 | 259 | { |
michael@0 | 260 | nsresult rv = FFmpegDataDecoder::Flush(); |
michael@0 | 261 | // Even if the above fails we may as well clear our frame queue. |
michael@0 | 262 | mDelayedFrames.Clear(); |
michael@0 | 263 | return rv; |
michael@0 | 264 | } |
michael@0 | 265 | |
michael@0 | 266 | FFmpegH264Decoder::~FFmpegH264Decoder() { |
michael@0 | 267 | MOZ_COUNT_DTOR(FFmpegH264Decoder); |
michael@0 | 268 | MOZ_ASSERT(mDelayedFrames.IsEmpty()); |
michael@0 | 269 | } |
michael@0 | 270 | |
michael@0 | 271 | } // namespace mozilla |