content/media/fmp4/ffmpeg/FFmpegH264Decoder.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/fmp4/ffmpeg/FFmpegH264Decoder.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,271 @@
     1.4 +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
     1.5 +/* vim:set ts=2 sw=2 sts=2 et cindent: */
     1.6 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#include "MediaTaskQueue.h"
    1.11 +#include "nsThreadUtils.h"
    1.12 +#include "nsAutoPtr.h"
    1.13 +#include "ImageContainer.h"
    1.14 +
    1.15 +#include "mp4_demuxer/mp4_demuxer.h"
    1.16 +#include "FFmpegRuntimeLinker.h"
    1.17 +
    1.18 +#include "FFmpegH264Decoder.h"
    1.19 +
    1.20 +#define GECKO_FRAME_TYPE 0x00093CC0
    1.21 +
    1.22 +typedef mozilla::layers::Image Image;
    1.23 +typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
    1.24 +
    1.25 +typedef mp4_demuxer::MP4Sample MP4Sample;
    1.26 +
    1.27 +namespace mozilla
    1.28 +{
    1.29 +
// Construct a decoder for one H.264 video track. |aTaskQueue| runs the
// decode jobs, |aCallback| receives decoded frames and status events, and
// |aImageContainer| supplies the images that decoded YUV planes are written
// into (see AllocateYUV420PVideoBuffer). Codec setup happens later, in Init().
FFmpegH264Decoder::FFmpegH264Decoder(
  MediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
  const mp4_demuxer::VideoDecoderConfig &aConfig,
  ImageContainer* aImageContainer)
  : FFmpegDataDecoder(aTaskQueue, AV_CODEC_ID_H264)
  , mConfig(aConfig)
  , mCallback(aCallback)
  , mImageContainer(aImageContainer)
{
  MOZ_COUNT_CTOR(FFmpegH264Decoder);
}
    1.41 +
// Open the codec via the shared FFmpeg wrapper, then install our custom
// frame-buffer allocator so decoded frames land directly in a layers image
// (handed off without a copy via SetDataNoCopy — see
// AllocateYUV420PVideoBuffer). Returns the base-class Init() status.
nsresult
FFmpegH264Decoder::Init()
{
  nsresult rv = FFmpegDataDecoder::Init();
  NS_ENSURE_SUCCESS(rv, rv);

  // NOTE(review): AllocateBufferCb recovers |this| from
  // mCodecContext.opaque; presumably FFmpegDataDecoder::Init() sets
  // opaque — confirm, otherwise the callback dereferences garbage.
  mCodecContext.get_buffer = AllocateBufferCb;

  return NS_OK;
}
    1.52 +
    1.53 +void
    1.54 +FFmpegH264Decoder::DecodeFrame(mp4_demuxer::MP4Sample* aSample)
    1.55 +{
    1.56 +  AVPacket packet;
    1.57 +  av_init_packet(&packet);
    1.58 +
    1.59 +  packet.data = &(*aSample->data)[0];
    1.60 +  packet.size = aSample->data->size();
    1.61 +  packet.dts = aSample->decode_timestamp;
    1.62 +  packet.pts = aSample->composition_timestamp;
    1.63 +  packet.flags = aSample->is_sync_point ? AV_PKT_FLAG_KEY : 0;
    1.64 +  packet.pos = aSample->byte_offset;
    1.65 +
    1.66 +  nsAutoPtr<AVFrame> frame(avcodec_alloc_frame());
    1.67 +  avcodec_get_frame_defaults(frame);
    1.68 +
    1.69 +  int decoded;
    1.70 +  int bytesConsumed =
    1.71 +    avcodec_decode_video2(&mCodecContext, frame, &decoded, &packet);
    1.72 +
    1.73 +  if (bytesConsumed < 0) {
    1.74 +    NS_WARNING("FFmpeg video decoder error.");
    1.75 +    mCallback->Error();
    1.76 +    return;
    1.77 +  }
    1.78 +
    1.79 +  if (!decoded) {
    1.80 +    // The decoder doesn't have enough data to decode a frame yet.
    1.81 +    return;
    1.82 +  }
    1.83 +
    1.84 +  nsAutoPtr<VideoData> data;
    1.85 +
    1.86 +  VideoInfo info;
    1.87 +  info.mDisplay = nsIntSize(mCodecContext.width, mCodecContext.height);
    1.88 +  info.mStereoMode = StereoMode::MONO;
    1.89 +  info.mHasVideo = true;
    1.90 +
    1.91 +  data = VideoData::CreateFromImage(
    1.92 +    info, mImageContainer, aSample->byte_offset, aSample->composition_timestamp,
    1.93 +    aSample->duration, mCurrentImage, aSample->is_sync_point, -1,
    1.94 +    gfx::IntRect(0, 0, mCodecContext.width, mCodecContext.height));
    1.95 +
    1.96 +  // Insert the frame into the heap for reordering.
    1.97 +  mDelayedFrames.Push(data.forget());
    1.98 +
    1.99 +  // Reorder video frames from decode order to presentation order. The minimum
   1.100 +  // size of the heap comes from one P frame + |max_b_frames| B frames, which
   1.101 +  // is the maximum number of frames in a row which will be out-of-order.
   1.102 +  if (mDelayedFrames.Length() > (uint32_t)mCodecContext.max_b_frames + 1) {
   1.103 +    VideoData* d = mDelayedFrames.Pop();
   1.104 +    mCallback->Output(d);
   1.105 +  }
   1.106 +
   1.107 +  if (mTaskQueue->IsEmpty()) {
   1.108 +    mCallback->InputExhausted();
   1.109 +  }
   1.110 +}
   1.111 +
   1.112 +static void
   1.113 +PlanarYCbCrDataFromAVFrame(mozilla::layers::PlanarYCbCrData &aData,
   1.114 +                           AVFrame* aFrame)
   1.115 +{
   1.116 +  aData.mPicX = aData.mPicY = 0;
   1.117 +  aData.mPicSize = mozilla::gfx::IntSize(aFrame->width, aFrame->height);
   1.118 +  aData.mStereoMode = StereoMode::MONO;
   1.119 +
   1.120 +  aData.mYChannel = aFrame->data[0];
   1.121 +  aData.mYStride = aFrame->linesize[0];
   1.122 +  aData.mYSize = aData.mPicSize;
   1.123 +  aData.mYSkip = 0;
   1.124 +
   1.125 +  aData.mCbChannel = aFrame->data[1];
   1.126 +  aData.mCrChannel = aFrame->data[2];
   1.127 +  aData.mCbCrStride = aFrame->linesize[1];
   1.128 +  aData.mCbSkip = aData.mCrSkip = 0;
   1.129 +  aData.mCbCrSize =
   1.130 +    mozilla::gfx::IntSize((aFrame->width + 1) / 2, (aFrame->height + 1) / 2);
   1.131 +}
   1.132 +
   1.133 +/* static */ int
   1.134 +FFmpegH264Decoder::AllocateBufferCb(AVCodecContext* aCodecContext,
   1.135 +                                    AVFrame* aFrame)
   1.136 +{
   1.137 +  MOZ_ASSERT(aCodecContext->codec_type == AVMEDIA_TYPE_VIDEO);
   1.138 +
   1.139 +  FFmpegH264Decoder* self =
   1.140 +    reinterpret_cast<FFmpegH264Decoder*>(aCodecContext->opaque);
   1.141 +
   1.142 +  switch (aCodecContext->pix_fmt) {
   1.143 +  case PIX_FMT_YUV420P:
   1.144 +    return self->AllocateYUV420PVideoBuffer(aCodecContext, aFrame);
   1.145 +  default:
   1.146 +    return avcodec_default_get_buffer(aCodecContext, aFrame);
   1.147 +  }
   1.148 +}
   1.149 +
// Custom get_buffer implementation for YUV420P. Instead of letting FFmpeg
// allocate its own planes, we decode straight into a PlanarYCbCrImage from
// the ImageContainer so the frame reaches the compositor without an extra
// copy (SetDataNoCopy below). Returns 0 on success, -1 if the image buffer
// could not be allocated.
int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated* around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  // NOTE(review): av_image_fill_pointers() returns a negative error code on
  // failure, which would wrap around in this size_t — confirm it cannot
  // fail for YUV420P with dimensions produced above.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = reinterpret_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    // (aFrame->data[i] currently holds a plane *offset*, since the fill call
    // above used a null base address.)
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  // Point the image at our freshly filled-in planes; SetDataNoCopy aliases
  // |buffer| rather than copying it.
  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  // Stash the image so DecodeFrame() can wrap it in a VideoData once the
  // decode call reports a completed frame.
  mCurrentImage.swap(image);

  return 0;
}
   1.216 +
// Queue |aSample| for decoding on the task queue. Ownership of the sample
// transfers to the runnable's nsAutoPtr argument, which deletes it after
// DecodeFrame() has run (or if the runnable is dropped).
nsresult
FFmpegH264Decoder::Input(mp4_demuxer::MP4Sample* aSample)
{
  mTaskQueue->Dispatch(
    NS_NewRunnableMethodWithArg<nsAutoPtr<mp4_demuxer::MP4Sample> >(
      this, &FFmpegH264Decoder::DecodeFrame,
      nsAutoPtr<mp4_demuxer::MP4Sample>(aSample)));

  return NS_OK;
}
   1.227 +
   1.228 +void
   1.229 +FFmpegH264Decoder::OutputDelayedFrames()
   1.230 +{
   1.231 +  while (!mDelayedFrames.IsEmpty()) {
   1.232 +    mCallback->Output(mDelayedFrames.Pop());
   1.233 +  }
   1.234 +}
   1.235 +
   1.236 +nsresult
   1.237 +FFmpegH264Decoder::Drain()
   1.238 +{
   1.239 +  // The maximum number of frames that can be waiting to be decoded is
   1.240 +  // max_b_frames + 1: One P frame and max_b_frames B frames.
   1.241 +  for (int32_t i = 0; i <= mCodecContext.max_b_frames; i++) {
   1.242 +    // An empty frame tells FFmpeg to decode the next delayed frame it has in
   1.243 +    // its queue, if it has any.
   1.244 +    nsAutoPtr<MP4Sample> empty(new MP4Sample(0 /* dts */, 0 /* cts */,
   1.245 +                                              0 /* duration */, 0 /* offset */,
   1.246 +                                              new std::vector<uint8_t>(),
   1.247 +                                              mp4_demuxer::kVideo, nullptr,
   1.248 +                                              false));
   1.249 +
   1.250 +    nsresult rv = Input(empty.forget());
   1.251 +    NS_ENSURE_SUCCESS(rv, rv);
   1.252 +  }
   1.253 +
   1.254 +  mTaskQueue->Dispatch(
   1.255 +    NS_NewRunnableMethod(this, &FFmpegH264Decoder::OutputDelayedFrames));
   1.256 +
   1.257 +  return NS_OK;
   1.258 +}
   1.259 +
   1.260 +nsresult
   1.261 +FFmpegH264Decoder::Flush()
   1.262 +{
   1.263 +  nsresult rv = FFmpegDataDecoder::Flush();
   1.264 +  // Even if the above fails we may as well clear our frame queue.
   1.265 +  mDelayedFrames.Clear();
   1.266 +  return rv;
   1.267 +}
   1.268 +
   1.269 +FFmpegH264Decoder::~FFmpegH264Decoder() {
   1.270 +  MOZ_COUNT_DTOR(FFmpegH264Decoder);
   1.271 +  MOZ_ASSERT(mDelayedFrames.IsEmpty());
   1.272 +}
   1.273 +
   1.274 +} // namespace mozilla

mercurial