/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "MediaTaskQueue.h"
#include "nsThreadUtils.h"
#include "nsAutoPtr.h"
#include "ImageContainer.h"

#include "mp4_demuxer/mp4_demuxer.h"
#include "FFmpegRuntimeLinker.h"

#include "FFmpegH264Decoder.h"

#define GECKO_FRAME_TYPE 0x00093CC0

typedef mozilla::layers::Image Image;
typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;

typedef mp4_demuxer::MP4Sample MP4Sample;

namespace mozilla
{

FFmpegH264Decoder::FFmpegH264Decoder(
  MediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
  const mp4_demuxer::VideoDecoderConfig& aConfig,
  ImageContainer* aImageContainer)
  : FFmpegDataDecoder(aTaskQueue, AV_CODEC_ID_H264)
  , mConfig(aConfig)
  , mCallback(aCallback)
  , mImageContainer(aImageContainer)
{
  MOZ_COUNT_CTOR(FFmpegH264Decoder);
}

nsresult
FFmpegH264Decoder::Init()
{
  nsresult rv = FFmpegDataDecoder::Init();
  NS_ENSURE_SUCCESS(rv, rv);

  // Install our custom buffer allocator so decoded frames land directly in
  // gfx images (see AllocateBufferCb below).
  mCodecContext.get_buffer = AllocateBufferCb;

  return NS_OK;
}

void
FFmpegH264Decoder::DecodeFrame(mp4_demuxer::MP4Sample* aSample)
{
  AVPacket packet;
  av_init_packet(&packet);

  packet.data = &(*aSample->data)[0];
  packet.size = aSample->data->size();
  packet.dts = aSample->decode_timestamp;
  packet.pts = aSample->composition_timestamp;
  packet.flags = aSample->is_sync_point ? AV_PKT_FLAG_KEY : 0;
  packet.pos = aSample->byte_offset;

  nsAutoPtr<AVFrame> frame(avcodec_alloc_frame());
  avcodec_get_frame_defaults(frame);

  int decoded;
  int bytesConsumed =
    avcodec_decode_video2(&mCodecContext, frame, &decoded, &packet);

  if (bytesConsumed < 0) {
    NS_WARNING("FFmpeg video decoder error.");
    mCallback->Error();
    return;
  }

  if (!decoded) {
    // The decoder doesn't have enough data to decode a frame yet.
    return;
  }

  nsAutoPtr<VideoData> data;

  VideoInfo info;
  info.mDisplay = nsIntSize(mCodecContext.width, mCodecContext.height);
  info.mStereoMode = StereoMode::MONO;
  info.mHasVideo = true;

  data = VideoData::CreateFromImage(
    info, mImageContainer, aSample->byte_offset, aSample->composition_timestamp,
    aSample->duration, mCurrentImage, aSample->is_sync_point, -1,
    gfx::IntRect(0, 0, mCodecContext.width, mCodecContext.height));

  // Insert the frame into the heap for reordering.
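  // (mDelayedFrames is assumed to be a priority queue keyed on presentation
  // timestamp, so the Pop() below always yields the earliest remaining frame.)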
  mDelayedFrames.Push(data.forget());

  // Reorder video frames from decode order to presentation order. The minimum
  // size of the heap comes from one P frame + |max_b_frames| B frames, which
  // is the maximum number of frames in a row which will be out-of-order.
  if (mDelayedFrames.Length() > (uint32_t)mCodecContext.max_b_frames + 1) {
    VideoData* d = mDelayedFrames.Pop();
    mCallback->Output(d);
  }

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}

static void
PlanarYCbCrDataFromAVFrame(mozilla::layers::PlanarYCbCrData& aData,
                           AVFrame* aFrame)
{
  aData.mPicX = aData.mPicY = 0;
  aData.mPicSize = mozilla::gfx::IntSize(aFrame->width, aFrame->height);
  aData.mStereoMode = StereoMode::MONO;

  aData.mYChannel = aFrame->data[0];
  aData.mYStride = aFrame->linesize[0];
  aData.mYSize = aData.mPicSize;
  aData.mYSkip = 0;

  aData.mCbChannel = aFrame->data[1];
  aData.mCrChannel = aFrame->data[2];
  aData.mCbCrStride = aFrame->linesize[1];
  aData.mCbSkip = aData.mCrSkip = 0;
  aData.mCbCrSize =
    mozilla::gfx::IntSize((aFrame->width + 1) / 2, (aFrame->height + 1) / 2);
}

/* static */ int
FFmpegH264Decoder::AllocateBufferCb(AVCodecContext* aCodecContext,
                                    AVFrame* aFrame)
{
  MOZ_ASSERT(aCodecContext->codec_type == AVMEDIA_TYPE_VIDEO);

  FFmpegH264Decoder* self =
    reinterpret_cast<FFmpegH264Decoder*>(aCodecContext->opaque);

  switch (aCodecContext->pix_fmt) {
  case PIX_FMT_YUV420P:
    return self->AllocateYUV420PVideoBuffer(aCodecContext, aFrame);
  default:
    return avcodec_default_get_buffer(aCodecContext, aFrame);
  }
}

int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated *around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
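  // With a NULL base, av_image_fill_pointers() leaves each plane's byte offset
  // in |aFrame->data| and returns the total buffer size needed for all planes.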
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = reinterpret_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  mCurrentImage.swap(image);

  return 0;
}

nsresult
FFmpegH264Decoder::Input(mp4_demuxer::MP4Sample* aSample)
{
  mTaskQueue->Dispatch(
    NS_NewRunnableMethodWithArg<nsAutoPtr<MP4Sample> >(
      this, &FFmpegH264Decoder::DecodeFrame,
      nsAutoPtr<MP4Sample>(aSample)));

  return NS_OK;
}

void
FFmpegH264Decoder::OutputDelayedFrames()
{
  while (!mDelayedFrames.IsEmpty()) {
    mCallback->Output(mDelayedFrames.Pop());
  }
}

nsresult
FFmpegH264Decoder::Drain()
{
  // The maximum number of frames that can be waiting to be decoded is
  // max_b_frames + 1: One P frame and max_b_frames B frames.
  for (int32_t i = 0; i <= mCodecContext.max_b_frames; i++) {
    // An empty frame tells FFmpeg to decode the next delayed frame it has in
    // its queue, if it has any.
    nsAutoPtr<MP4Sample> empty(new MP4Sample(0 /* dts */, 0 /* cts */,
                                             0 /* duration */, 0 /* offset */,
                                             new std::vector<uint8_t>(),
                                             mp4_demuxer::kVideo, nullptr,
                                             false));

    nsresult rv = Input(empty.forget());
    NS_ENSURE_SUCCESS(rv, rv);
  }

  mTaskQueue->Dispatch(
    NS_NewRunnableMethod(this, &FFmpegH264Decoder::OutputDelayedFrames));

  return NS_OK;
}

nsresult
FFmpegH264Decoder::Flush()
{
  nsresult rv = FFmpegDataDecoder::Flush();
  // Even if the above fails we may as well clear our frame queue.
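  // Frames still waiting in the reorder queue are simply dropped; after a
  // flush (typically a seek) they are no longer wanted.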
  mDelayedFrames.Clear();
  return rv;
}

FFmpegH264Decoder::~FFmpegH264Decoder() {
  MOZ_COUNT_DTOR(FFmpegH264Decoder);
  MOZ_ASSERT(mDelayedFrames.IsEmpty());
}

} // namespace mozilla