Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory-only storage according to the privacy.thirdparty.isolate preference.
This resolves Tor bug #9701 and complies with the disk-avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "MediaTaskQueue.h"
8 #include "nsThreadUtils.h"
9 #include "nsAutoPtr.h"
10 #include "ImageContainer.h"
12 #include "mp4_demuxer/mp4_demuxer.h"
13 #include "FFmpegRuntimeLinker.h"
15 #include "FFmpegH264Decoder.h"
// Arbitrary sentinel stored in AVFrame::type for buffers we allocate
// ourselves in AllocateYUV420PVideoBuffer; per the comment at the use site,
// ffmpeg only requires the value to be non-zero.
#define GECKO_FRAME_TYPE 0x00093CC0

// Local shorthands for the layers/demuxer types used throughout this file.
typedef mozilla::layers::Image Image;
typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;

typedef mp4_demuxer::MP4Sample MP4Sample;
24 namespace mozilla
25 {
/**
 * Constructs an H.264 decoder backed by ffmpeg's libavcodec.
 *
 * @param aTaskQueue       Queue all decoding work is dispatched to.
 * @param aCallback        Receives decoded frames, errors, and
 *                         input-exhausted notifications.
 * @param aConfig          Demuxer-provided video configuration (stored,
 *                         not consumed here).
 * @param aImageContainer  Container used to allocate the images ffmpeg
 *                         decodes into (see AllocateYUV420PVideoBuffer).
 */
FFmpegH264Decoder::FFmpegH264Decoder(
  MediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
  const mp4_demuxer::VideoDecoderConfig &aConfig,
  ImageContainer* aImageContainer)
  : FFmpegDataDecoder(aTaskQueue, AV_CODEC_ID_H264)
  , mConfig(aConfig)
  , mCallback(aCallback)
  , mImageContainer(aImageContainer)
{
  MOZ_COUNT_CTOR(FFmpegH264Decoder);
}
/**
 * Initializes the base decoder, then installs our custom buffer-allocation
 * callback so decoded frames are written directly into ImageContainer-backed
 * buffers (see AllocateBufferCb / AllocateYUV420PVideoBuffer).
 *
 * @return NS_OK on success, or the base class's failure code.
 */
nsresult
FFmpegH264Decoder::Init()
{
  nsresult rv = FFmpegDataDecoder::Init();
  NS_ENSURE_SUCCESS(rv, rv);

  // Route ffmpeg's frame-buffer allocation through our static callback.
  mCodecContext.get_buffer = AllocateBufferCb;

  return NS_OK;
}
50 void
51 FFmpegH264Decoder::DecodeFrame(mp4_demuxer::MP4Sample* aSample)
52 {
53 AVPacket packet;
54 av_init_packet(&packet);
56 packet.data = &(*aSample->data)[0];
57 packet.size = aSample->data->size();
58 packet.dts = aSample->decode_timestamp;
59 packet.pts = aSample->composition_timestamp;
60 packet.flags = aSample->is_sync_point ? AV_PKT_FLAG_KEY : 0;
61 packet.pos = aSample->byte_offset;
63 nsAutoPtr<AVFrame> frame(avcodec_alloc_frame());
64 avcodec_get_frame_defaults(frame);
66 int decoded;
67 int bytesConsumed =
68 avcodec_decode_video2(&mCodecContext, frame, &decoded, &packet);
70 if (bytesConsumed < 0) {
71 NS_WARNING("FFmpeg video decoder error.");
72 mCallback->Error();
73 return;
74 }
76 if (!decoded) {
77 // The decoder doesn't have enough data to decode a frame yet.
78 return;
79 }
81 nsAutoPtr<VideoData> data;
83 VideoInfo info;
84 info.mDisplay = nsIntSize(mCodecContext.width, mCodecContext.height);
85 info.mStereoMode = StereoMode::MONO;
86 info.mHasVideo = true;
88 data = VideoData::CreateFromImage(
89 info, mImageContainer, aSample->byte_offset, aSample->composition_timestamp,
90 aSample->duration, mCurrentImage, aSample->is_sync_point, -1,
91 gfx::IntRect(0, 0, mCodecContext.width, mCodecContext.height));
93 // Insert the frame into the heap for reordering.
94 mDelayedFrames.Push(data.forget());
96 // Reorder video frames from decode order to presentation order. The minimum
97 // size of the heap comes from one P frame + |max_b_frames| B frames, which
98 // is the maximum number of frames in a row which will be out-of-order.
99 if (mDelayedFrames.Length() > (uint32_t)mCodecContext.max_b_frames + 1) {
100 VideoData* d = mDelayedFrames.Pop();
101 mCallback->Output(d);
102 }
104 if (mTaskQueue->IsEmpty()) {
105 mCallback->InputExhausted();
106 }
107 }
109 static void
110 PlanarYCbCrDataFromAVFrame(mozilla::layers::PlanarYCbCrData &aData,
111 AVFrame* aFrame)
112 {
113 aData.mPicX = aData.mPicY = 0;
114 aData.mPicSize = mozilla::gfx::IntSize(aFrame->width, aFrame->height);
115 aData.mStereoMode = StereoMode::MONO;
117 aData.mYChannel = aFrame->data[0];
118 aData.mYStride = aFrame->linesize[0];
119 aData.mYSize = aData.mPicSize;
120 aData.mYSkip = 0;
122 aData.mCbChannel = aFrame->data[1];
123 aData.mCrChannel = aFrame->data[2];
124 aData.mCbCrStride = aFrame->linesize[1];
125 aData.mCbSkip = aData.mCrSkip = 0;
126 aData.mCbCrSize =
127 mozilla::gfx::IntSize((aFrame->width + 1) / 2, (aFrame->height + 1) / 2);
128 }
130 /* static */ int
131 FFmpegH264Decoder::AllocateBufferCb(AVCodecContext* aCodecContext,
132 AVFrame* aFrame)
133 {
134 MOZ_ASSERT(aCodecContext->codec_type == AVMEDIA_TYPE_VIDEO);
136 FFmpegH264Decoder* self =
137 reinterpret_cast<FFmpegH264Decoder*>(aCodecContext->opaque);
139 switch (aCodecContext->pix_fmt) {
140 case PIX_FMT_YUV420P:
141 return self->AllocateYUV420PVideoBuffer(aCodecContext, aFrame);
142 default:
143 return avcodec_default_get_buffer(aCodecContext, aFrame);
144 }
145 }
/**
 * Custom get_buffer implementation for PIX_FMT_YUV420P frames.
 *
 * Allocates a PlanarYCbCrImage from |mImageContainer| sized to hold the
 * decoded frame (plus the edge padding old ffmpeg requires), points
 * |aFrame|'s plane pointers into it, and stashes the image in
 * |mCurrentImage| for DecodeFrame to pick up. This lets ffmpeg decode
 * directly into compositor-visible memory.
 *
 * @return 0 on success, -1 if the image buffer could not be allocated.
 */
int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated* around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = reinterpret_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    // |aFrame->data[i]| currently holds a byte offset relative to the nullptr
    // base passed to av_image_fill_pointers, so this addition rebases it onto
    // |buffer|.
    // NOTE(review): this loop also adjusts data pointers for planes beyond
    // the three YUV420P uses — presumably ffmpeg ignores those entries, but
    // confirm they are not dereferenced.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  // Publish the plane layout to the image (no pixel copy) and hand the image
  // to DecodeFrame via |mCurrentImage|.
  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  mCurrentImage.swap(image);

  return 0;
}
/**
 * Queues one compressed sample for decoding.
 *
 * Takes ownership of |aSample|: the nsAutoPtr travels with the runnable and
 * frees the sample after DecodeFrame has run on the task queue.
 *
 * @return NS_OK (dispatch failures are not surfaced here).
 */
nsresult
FFmpegH264Decoder::Input(mp4_demuxer::MP4Sample* aSample)
{
  mTaskQueue->Dispatch(
    NS_NewRunnableMethodWithArg<nsAutoPtr<mp4_demuxer::MP4Sample> >(
      this, &FFmpegH264Decoder::DecodeFrame,
      nsAutoPtr<mp4_demuxer::MP4Sample>(aSample)));

  return NS_OK;
}
225 void
226 FFmpegH264Decoder::OutputDelayedFrames()
227 {
228 while (!mDelayedFrames.IsEmpty()) {
229 mCallback->Output(mDelayedFrames.Pop());
230 }
231 }
/**
 * Flushes ffmpeg's internal delay queue by feeding it empty samples, then
 * schedules OutputDelayedFrames so every reordered frame reaches the
 * callback.
 *
 * @return NS_OK, or the first failure code returned by Input().
 */
nsresult
FFmpegH264Decoder::Drain()
{
  // The maximum number of frames that can be waiting to be decoded is
  // max_b_frames + 1: One P frame and max_b_frames B frames.
  for (int32_t i = 0; i <= mCodecContext.max_b_frames; i++) {
    // An empty frame tells FFmpeg to decode the next delayed frame it has in
    // its queue, if it has any.
    nsAutoPtr<MP4Sample> empty(new MP4Sample(0 /* dts */, 0 /* cts */,
                                             0 /* duration */, 0 /* offset */,
                                             new std::vector<uint8_t>(),
                                             mp4_demuxer::kVideo, nullptr,
                                             false));

    nsresult rv = Input(empty.forget());
    NS_ENSURE_SUCCESS(rv, rv);
  }

  // Once the empty samples above have been decoded, push out whatever still
  // sits in |mDelayedFrames|.
  mTaskQueue->Dispatch(
    NS_NewRunnableMethod(this, &FFmpegH264Decoder::OutputDelayedFrames));

  return NS_OK;
}
257 nsresult
258 FFmpegH264Decoder::Flush()
259 {
260 nsresult rv = FFmpegDataDecoder::Flush();
261 // Even if the above fails we may as well clear our frame queue.
262 mDelayedFrames.Clear();
263 return rv;
264 }
266 FFmpegH264Decoder::~FFmpegH264Decoder() {
267 MOZ_COUNT_DTOR(FFmpegH264Decoder);
268 MOZ_ASSERT(mDelayedFrames.IsEmpty());
269 }
271 } // namespace mozilla