/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsError.h"
#include "nsMimeTypes.h"
#include "MediaDecoderStateMachine.h"
#include "AbstractMediaDecoder.h"
#include "MediaResource.h"
#include "GStreamerReader.h"
#if GST_VERSION_MAJOR >= 1
#include "GStreamerAllocator.h"
#endif
#include "GStreamerFormatHelper.h"
#include "VideoUtils.h"
#include "mozilla/dom/TimeRanges.h"
#include "mozilla/Endian.h"
#include "mozilla/Preferences.h"
#include "mozilla/unused.h"
#include "GStreamerLoader.h"
#include "gfx2DGlue.h"

namespace mozilla {

using namespace gfx;
using namespace layers;

// Uncomment to enable logging of seek bisections.
//#define SEEK_LOGGING

#ifdef PR_LOGGING
extern PRLogModuleInfo* gMediaDecoderLog;
#define LOG(type, msg, ...) \
  PR_LOG(gMediaDecoderLog, type, ("GStreamerReader(%p) " msg, this, ##__VA_ARGS__))
#else
#define LOG(type, msg, ...)
#endif

#ifdef DEBUG
static const unsigned int MAX_CHANNELS = 4;
#endif
// Let the demuxer work in pull mode for short files. This used to be a micro
// optimization giving more accurate durations for Ogg files in mochitests.
// Since we no longer use GStreamer to demux Ogg, and demuxers working in pull
// mode over HTTP are slower (pull mode assumes near-zero latency), the
// constant is set to 0 for now, which effectively disables it.
static const int SHORT_FILE_SIZE = 0;
// The default resource->Read() size when working in push mode
static const int DEFAULT_SOURCE_READ_SIZE = 50 * 1024;

typedef enum {
  GST_PLAY_FLAG_VIDEO = (1 << 0),
  GST_PLAY_FLAG_AUDIO = (1 << 1),
  GST_PLAY_FLAG_TEXT = (1 << 2),
  GST_PLAY_FLAG_VIS = (1 << 3),
  GST_PLAY_FLAG_SOFT_VOLUME = (1 << 4),
  GST_PLAY_FLAG_NATIVE_AUDIO = (1 << 5),
  GST_PLAY_FLAG_NATIVE_VIDEO = (1 << 6),
  GST_PLAY_FLAG_DOWNLOAD = (1 << 7),
  GST_PLAY_FLAG_BUFFERING = (1 << 8),
  GST_PLAY_FLAG_DEINTERLACE = (1 << 9),
  GST_PLAY_FLAG_SOFT_COLORBALANCE = (1 << 10)
} PlayFlags;

GStreamerReader::GStreamerReader(AbstractMediaDecoder* aDecoder)
  : MediaDecoderReader(aDecoder),
  mMP3FrameParser(aDecoder->GetResource()->GetLength()),
  mDataOffset(0),
  mUseParserDuration(false),
#if GST_VERSION_MAJOR >= 1
  mAllocator(nullptr),
  mBufferPool(nullptr),
#endif
  mPlayBin(nullptr),
  mBus(nullptr),
  mSource(nullptr),
  mVideoSink(nullptr),
  mVideoAppSink(nullptr),
  mAudioSink(nullptr),
  mAudioAppSink(nullptr),
  mFormat(GST_VIDEO_FORMAT_UNKNOWN),
  mVideoSinkBufferCount(0),
  mAudioSinkBufferCount(0),
  mGstThreadsMonitor("media.gst.threads"),
  mReachedAudioEos(false),
  mReachedVideoEos(false),
#if GST_VERSION_MAJOR >= 1
  mConfigureAlignment(true),
#endif
  fpsNum(0),
  fpsDen(0)
{
  MOZ_COUNT_CTOR(GStreamerReader);

  mSrcCallbacks.need_data = GStreamerReader::NeedDataCb;
  mSrcCallbacks.enough_data = GStreamerReader::EnoughDataCb;
  mSrcCallbacks.seek_data = GStreamerReader::SeekDataCb;

  mSinkCallbacks.eos = GStreamerReader::EosCb;
  mSinkCallbacks.new_preroll = GStreamerReader::NewPrerollCb;
#if GST_VERSION_MAJOR >= 1
  mSinkCallbacks.new_sample = GStreamerReader::NewBufferCb;
#else
  mSinkCallbacks.new_buffer = GStreamerReader::NewBufferCb;
  mSinkCallbacks.new_buffer_list = nullptr;
#endif

  gst_segment_init(&mVideoSegment, GST_FORMAT_UNDEFINED);
  gst_segment_init(&mAudioSegment, GST_FORMAT_UNDEFINED);
}

GStreamerReader::~GStreamerReader()
{
  MOZ_COUNT_DTOR(GStreamerReader);
  ResetDecode();

  if (mPlayBin) {
    /* Only signal EOS if the source was actually created; calling
     * gst_app_src_end_of_stream() on a null source would crash. */
    if (mSource) {
      gst_app_src_end_of_stream(mSource);
      gst_object_unref(mSource);
    }
    gst_element_set_state(mPlayBin, GST_STATE_NULL);
    gst_object_unref(mPlayBin);
    mPlayBin = nullptr;
    mVideoSink = nullptr;
    mVideoAppSink = nullptr;
    mAudioSink = nullptr;
    mAudioAppSink = nullptr;
    gst_object_unref(mBus);
    mBus = nullptr;
#if GST_VERSION_MAJOR >= 1
    g_object_unref(mAllocator);
    g_object_unref(mBufferPool);
#endif
  }
}

nsresult GStreamerReader::Init(MediaDecoderReader* aCloneDonor)
{
  GStreamerFormatHelper::Instance();

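  /* With GStreamer 1.x we install a custom allocator/buffer pool pair (see
   * GStreamerAllocator.h) so that decoders can write frames directly into
   * memory backed by a PlanarYCbCrImage; GetImageFromBuffer() below can then
   * hand those frames to the compositor without an extra copy. The pair is
   * offered to upstream elements in the GST_QUERY_ALLOCATION handler. */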
#if GST_VERSION_MAJOR >= 1
  mAllocator = static_cast<GstAllocator*>(g_object_new(GST_TYPE_MOZ_GFX_MEMORY_ALLOCATOR, nullptr));
  moz_gfx_memory_allocator_set_reader(mAllocator, this);

  mBufferPool = static_cast<GstBufferPool*>(g_object_new(GST_TYPE_MOZ_GFX_BUFFER_POOL, nullptr));
#endif

#if GST_VERSION_MAJOR >= 1
  mPlayBin = gst_element_factory_make("playbin", nullptr);
#else
  mPlayBin = gst_element_factory_make("playbin2", nullptr);
#endif
  if (!mPlayBin) {
    LOG(PR_LOG_ERROR, "couldn't create playbin");
    return NS_ERROR_FAILURE;
  }
  g_object_set(mPlayBin, "buffer-size", 0, nullptr);
  mBus = gst_pipeline_get_bus(GST_PIPELINE(mPlayBin));

  mVideoSink = gst_parse_bin_from_description("capsfilter name=filter ! "
      "appsink name=videosink sync=false max-buffers=1 "
#if GST_VERSION_MAJOR >= 1
      "caps=video/x-raw,format=I420"
#else
      "caps=video/x-raw-yuv,format=(fourcc)I420"
#endif
      , TRUE, nullptr);
  mVideoAppSink = GST_APP_SINK(gst_bin_get_by_name(GST_BIN(mVideoSink),
        "videosink"));
  mAudioSink = gst_parse_bin_from_description("capsfilter name=filter ! "
        "appsink name=audiosink sync=false max-buffers=1", TRUE, nullptr);
  mAudioAppSink = GST_APP_SINK(gst_bin_get_by_name(GST_BIN(mAudioSink),
        "audiosink"));
  GstCaps* caps = BuildAudioSinkCaps();
  g_object_set(mAudioAppSink, "caps", caps, nullptr);
  gst_caps_unref(caps);

  gst_app_sink_set_callbacks(mVideoAppSink, &mSinkCallbacks,
      (gpointer) this, nullptr);
  gst_app_sink_set_callbacks(mAudioAppSink, &mSinkCallbacks,
      (gpointer) this, nullptr);
  InstallPadCallbacks();

  g_object_set(mPlayBin, "uri", "appsrc://",
               "video-sink", mVideoSink,
               "audio-sink", mAudioSink,
               nullptr);

  g_signal_connect(G_OBJECT(mPlayBin), "notify::source",
                   G_CALLBACK(GStreamerReader::PlayBinSourceSetupCb), this);
  g_signal_connect(G_OBJECT(mPlayBin), "element-added",
                   G_CALLBACK(GStreamerReader::PlayElementAddedCb), this);

  return NS_OK;
}

GstBusSyncReply
GStreamerReader::ErrorCb(GstBus *aBus, GstMessage *aMessage, gpointer aUserData)
{
  return static_cast<GStreamerReader*>(aUserData)->Error(aBus, aMessage);
}

GstBusSyncReply
GStreamerReader::Error(GstBus *aBus, GstMessage *aMessage)
{
  if (GST_MESSAGE_TYPE(aMessage) == GST_MESSAGE_ERROR) {
    Eos();
  }

  return GST_BUS_PASS;
}

void GStreamerReader::PlayBinSourceSetupCb(GstElement* aPlayBin,
                                           GParamSpec* pspec,
                                           gpointer aUserData)
{
  GstElement *source;
  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);

  g_object_get(aPlayBin, "source", &source, nullptr);
  reader->PlayBinSourceSetup(GST_APP_SRC(source));
}

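/* The "appsrc://" URI handed to playbin in Init() makes it instantiate an
 * appsrc as its source element; once that element exists, "notify::source"
 * fires and this setup hook configures it (callbacks, length, pull vs push
 * streaming mode). */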
void GStreamerReader::PlayBinSourceSetup(GstAppSrc* aSource)
{
  mSource = GST_APP_SRC(aSource);
  gst_app_src_set_callbacks(mSource, &mSrcCallbacks, (gpointer) this, nullptr);
  MediaResource* resource = mDecoder->GetResource();

  /* do a short read to trigger a network request so that GetLength() below
   * returns something meaningful and not -1
   */
  char buf[512];
  unsigned int size = 0;
  resource->Read(buf, sizeof(buf), &size);
  resource->Seek(SEEK_SET, 0);

  /* now we should have a length */
  int64_t resourceLength = GetDataLength();
  gst_app_src_set_size(mSource, resourceLength);
  if (resource->IsDataCachedToEndOfResource(0) ||
      (resourceLength != -1 && resourceLength <= SHORT_FILE_SIZE)) {
    /* let the demuxer work in pull mode for local files (or very short files)
     * so that we get optimal seeking accuracy/performance
     */
    LOG(PR_LOG_DEBUG, "configuring random access, len %lld", resourceLength);
    gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_RANDOM_ACCESS);
  } else {
    /* make the demuxer work in push mode so that seeking is kept to a minimum
     */
    LOG(PR_LOG_DEBUG, "configuring push mode, len %lld", resourceLength);
    gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_SEEKABLE);
  }

  // Set the source MIME type to stop typefind trying every. single. format.
  GstCaps *caps =
    GStreamerFormatHelper::ConvertFormatsToCaps(mDecoder->GetResource()->GetContentType().get(),
                                                nullptr);

  gst_app_src_set_caps(aSource, caps);
  gst_caps_unref(caps);
}

/**
 * If this stream is an MP3, we want to parse the headers to estimate the
 * stream duration.
 */
nsresult GStreamerReader::ParseMP3Headers()
{
  MediaResource *resource = mDecoder->GetResource();

  const uint32_t MAX_READ_BYTES = 4096;

  uint64_t offset = 0;
  char bytes[MAX_READ_BYTES];
  uint32_t bytesRead;
  do {
    nsresult rv = resource->ReadAt(offset, bytes, MAX_READ_BYTES, &bytesRead);
    NS_ENSURE_SUCCESS(rv, rv);
    NS_ENSURE_TRUE(bytesRead, NS_ERROR_FAILURE);

    mMP3FrameParser.Parse(bytes, bytesRead, offset);
    offset += bytesRead;
  } while (!mMP3FrameParser.ParsedHeaders());

  if (mMP3FrameParser.IsMP3()) {
    mLastParserDuration = mMP3FrameParser.GetDuration();
    mDataOffset = mMP3FrameParser.GetMP3Offset();

    // Update GStreamer's stream length in case we found any ID3 headers to
    // ignore.
    gst_app_src_set_size(mSource, GetDataLength());
  }

  return NS_OK;
}

int64_t
GStreamerReader::GetDataLength()
{
  int64_t streamLen = mDecoder->GetResource()->GetLength();

  if (streamLen < 0) {
    return streamLen;
  }

  return streamLen - mDataOffset;
}

nsresult GStreamerReader::ReadMetadata(MediaInfo* aInfo,
                                       MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  nsresult ret = NS_OK;

  /*
   * Parse MP3 headers before we kick off the GStreamer pipeline; otherwise
   * there might be concurrent stream operations on both the decode and
   * GStreamer threads, which would confuse the GStreamer state machine.
   */
  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3) {
    ParseMP3Headers();
  }

  /* We make 3 attempts here: decoding audio and video, decoding video only,
   * and decoding audio only. This allows us to play streams that have one
   * broken stream but are otherwise decodeable.
   */
  guint flags[3] = {GST_PLAY_FLAG_VIDEO|GST_PLAY_FLAG_AUDIO,
    static_cast<guint>(~GST_PLAY_FLAG_AUDIO), static_cast<guint>(~GST_PLAY_FLAG_VIDEO)};
  guint default_flags, current_flags;
  g_object_get(mPlayBin, "flags", &default_flags, nullptr);

  GstMessage* message = nullptr;
  for (unsigned int i = 0; i < G_N_ELEMENTS(flags); i++) {
    current_flags = default_flags & flags[i];
    g_object_set(G_OBJECT(mPlayBin), "flags", current_flags, nullptr);

    /* reset filter caps to ANY */
    GstCaps* caps = gst_caps_new_any();
    GstElement* filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
    g_object_set(filter, "caps", caps, nullptr);
    gst_object_unref(filter);

    filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");
    g_object_set(filter, "caps", caps, nullptr);
    gst_object_unref(filter);
    gst_caps_unref(caps);
    filter = nullptr;

    if (!(current_flags & GST_PLAY_FLAG_AUDIO))
      filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
    else if (!(current_flags & GST_PLAY_FLAG_VIDEO))
      filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");

    if (filter) {
      /* Little trick: set the target caps to "skip" so that playbin2 fails to
       * find a decoder for the stream we want to skip.
       */
      GstCaps* filterCaps = gst_caps_new_simple("skip", nullptr, nullptr);
      g_object_set(filter, "caps", filterCaps, nullptr);
      gst_caps_unref(filterCaps);
      gst_object_unref(filter);
    }

    LOG(PR_LOG_DEBUG, "starting metadata pipeline");
    if (gst_element_set_state(mPlayBin, GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) {
      LOG(PR_LOG_DEBUG, "metadata pipeline state change failed");
      ret = NS_ERROR_FAILURE;
      continue;
    }

    /* Wait for ASYNC_DONE, which is emitted when the pipeline is built,
     * prerolled and ready to play. Also watch for errors.
     */
    message = gst_bus_timed_pop_filtered(mBus, GST_CLOCK_TIME_NONE,
                 (GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
    if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ASYNC_DONE) {
      LOG(PR_LOG_DEBUG, "read metadata pipeline prerolled");
      gst_message_unref(message);
      ret = NS_OK;
      break;
    } else {
      LOG(PR_LOG_DEBUG, "read metadata pipeline failed to preroll: %s",
            gst_message_type_get_name(GST_MESSAGE_TYPE(message)));

      if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ERROR) {
        GError* error;
        gchar* debug;
        gst_message_parse_error(message, &error, &debug);
        LOG(PR_LOG_ERROR, "read metadata error: %s: %s", error->message, debug);
        g_error_free(error);
        g_free(debug);
      }
      /* Unexpected stream close/EOS or other error. We'll give up if all
       * streams are in error/eos. */
      gst_element_set_state(mPlayBin, GST_STATE_NULL);
      gst_message_unref(message);
      ret = NS_ERROR_FAILURE;
    }
  }

  if (NS_SUCCEEDED(ret))
    ret = CheckSupportedFormats();

  if (NS_FAILED(ret))
    /* we couldn't get this to play */
    return ret;

  /* report the duration */
  gint64 duration;

  if (isMP3 && mMP3FrameParser.IsMP3()) {
    // The MP3FrameParser has reported a duration; use that over the gstreamer
    // reported duration for inter-platform consistency.
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mUseParserDuration = true;
    mLastParserDuration = mMP3FrameParser.GetDuration();
    mDecoder->SetMediaDuration(mLastParserDuration);
  } else {
    LOG(PR_LOG_DEBUG, "querying duration");
    // Otherwise use the gstreamer duration.
#if GST_VERSION_MAJOR >= 1
    if (gst_element_query_duration(GST_ELEMENT(mPlayBin),
          GST_FORMAT_TIME, &duration)) {
#else
    GstFormat format = GST_FORMAT_TIME;
    if (gst_element_query_duration(GST_ELEMENT(mPlayBin),
          &format, &duration) && format == GST_FORMAT_TIME) {
#endif
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      LOG(PR_LOG_DEBUG, "have duration %" GST_TIME_FORMAT, GST_TIME_ARGS(duration));
      duration = GST_TIME_AS_USECONDS(duration);
      mDecoder->SetMediaDuration(duration);
    } else {
      mDecoder->SetMediaSeekable(false);
    }
  }

  int n_video = 0, n_audio = 0;
  g_object_get(mPlayBin, "n-video", &n_video, "n-audio", &n_audio, nullptr);
  mInfo.mVideo.mHasVideo = n_video != 0;
  mInfo.mAudio.mHasAudio = n_audio != 0;

  *aInfo = mInfo;

  *aTags = nullptr;

  // Watch the pipeline for fatal errors
#if GST_VERSION_MAJOR >= 1
  gst_bus_set_sync_handler(mBus, GStreamerReader::ErrorCb, this, nullptr);
#else
  gst_bus_set_sync_handler(mBus, GStreamerReader::ErrorCb, this);
#endif

  /* set the pipeline to PLAYING so that it starts decoding and queueing data
   * in the appsinks */
  gst_element_set_state(mPlayBin, GST_STATE_PLAYING);

  return NS_OK;
}

nsresult GStreamerReader::CheckSupportedFormats()
{
  bool done = false;
  bool unsupported = false;

  GstIterator* it = gst_bin_iterate_recurse(GST_BIN(mPlayBin));
  while (!done) {
    GstIteratorResult res;
    GstElement* element;

#if GST_VERSION_MAJOR >= 1
    GValue value = {0,};
    res = gst_iterator_next(it, &value);
#else
    res = gst_iterator_next(it, (void **) &element);
#endif
    switch(res) {
      case GST_ITERATOR_OK:
      {
#if GST_VERSION_MAJOR >= 1
        element = GST_ELEMENT(g_value_get_object(&value));
#endif
        GstElementFactory* factory = gst_element_get_factory(element);
        if (factory) {
          const char* klass = gst_element_factory_get_klass(factory);
          GstPad* pad = gst_element_get_static_pad(element, "sink");
          if (pad) {
            GstCaps* caps;

#if GST_VERSION_MAJOR >= 1
            caps = gst_pad_get_current_caps(pad);
#else
            caps = gst_pad_get_negotiated_caps(pad);
#endif

            if (caps) {
              /* check for demuxers but ignore elements like id3demux */
              if (strstr(klass, "Demuxer") && !strstr(klass, "Metadata"))
                unsupported = !GStreamerFormatHelper::Instance()->CanHandleContainerCaps(caps);
              else if (strstr(klass, "Decoder") && !strstr(klass, "Generic"))
                unsupported = !GStreamerFormatHelper::Instance()->CanHandleCodecCaps(caps);

              gst_caps_unref(caps);
            }
            gst_object_unref(pad);
          }
        }

#if GST_VERSION_MAJOR >= 1
        g_value_unset(&value);
#else
        gst_object_unref(element);
#endif
        done = unsupported;
        break;
      }
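      /* RESYNC means the bin mutated while we were iterating, so any result
       * computed so far is stale; resynchronize the iterator and re-check
       * every element from the start. */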
      case GST_ITERATOR_RESYNC:
        gst_iterator_resync(it);
        unsupported = false;
        done = false;
        break;
      case GST_ITERATOR_ERROR:
        done = true;
        break;
      case GST_ITERATOR_DONE:
        done = true;
        break;
    }
  }

  gst_iterator_free(it);

  return unsupported ? NS_ERROR_FAILURE : NS_OK;
}

nsresult GStreamerReader::ResetDecode()
{
  nsresult res = NS_OK;

  LOG(PR_LOG_DEBUG, "reset decode");

  if (NS_FAILED(MediaDecoderReader::ResetDecode())) {
    res = NS_ERROR_FAILURE;
  }

  mVideoQueue.Reset();
  mAudioQueue.Reset();

  mVideoSinkBufferCount = 0;
  mAudioSinkBufferCount = 0;
  mReachedAudioEos = false;
  mReachedVideoEos = false;
#if GST_VERSION_MAJOR >= 1
  mConfigureAlignment = true;
#endif

  LOG(PR_LOG_DEBUG, "reset decode done");

  return res;
}

bool GStreamerReader::DecodeAudioData()
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  GstBuffer *buffer = nullptr;

  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);

    if (mReachedAudioEos && !mAudioSinkBufferCount) {
      return false;
    }

    /* Wait for something to be decoded before returning or continuing */
    if (!mAudioSinkBufferCount) {
      if (!mVideoSinkBufferCount) {
        /* We have nothing decoded, so it makes no sense to return to the
         * state machine: it would call us back immediately, we'd return
         * again, and so on, wasting CPU cycles for no work done. Instead,
         * block here until either video or audio data is available.
         */
        mon.Wait();
        if (!mAudioSinkBufferCount) {
          /* There is still no audio data available, so either there is video
           * data or something else has happened (EOS, etc.). Return to the
           * state machine to process it.
           */
          return true;
        }
      }
      else {
        return true;
      }
    }

#if GST_VERSION_MAJOR >= 1
    GstSample *sample = gst_app_sink_pull_sample(mAudioAppSink);
    buffer = gst_buffer_ref(gst_sample_get_buffer(sample));
    gst_sample_unref(sample);
#else
    buffer = gst_app_sink_pull_buffer(mAudioAppSink);
#endif

    mAudioSinkBufferCount--;
  }

  int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
  timestamp = gst_segment_to_stream_time(&mAudioSegment,
      GST_FORMAT_TIME, timestamp);

  timestamp = GST_TIME_AS_USECONDS(timestamp);

  int64_t offset = GST_BUFFER_OFFSET(buffer);
  guint8* data;
#if GST_VERSION_MAJOR >= 1
  GstMapInfo info;
  gst_buffer_map(buffer, &info, GST_MAP_READ);
  unsigned int size = info.size;
  data = info.data;
#else
  unsigned int size = GST_BUFFER_SIZE(buffer);
  data = GST_BUFFER_DATA(buffer);
#endif
  int32_t frames = (size / sizeof(AudioDataValue)) / mInfo.mAudio.mChannels;

  typedef AudioCompactor::NativeCopy GstCopy;
  mAudioCompactor.Push(offset,
                       timestamp,
                       mInfo.mAudio.mRate,
                       frames,
                       mInfo.mAudio.mChannels,
                       GstCopy(data,
                               size,
                               mInfo.mAudio.mChannels));
#if GST_VERSION_MAJOR >= 1
  gst_buffer_unmap(buffer, &info);
#endif

  gst_buffer_unref(buffer);

  return true;
}

bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
                                       int64_t aTimeThreshold)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  GstBuffer *buffer = nullptr;

  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);

    if (mReachedVideoEos && !mVideoSinkBufferCount) {
      return false;
    }

    /* Wait for something to be decoded before returning or continuing */
    if (!mVideoSinkBufferCount) {
      if (!mAudioSinkBufferCount) {
        /* We have nothing decoded, so it makes no sense to return to the
         * state machine: it would call us back immediately, we'd return
         * again, and so on, wasting CPU cycles for no work done. Instead,
         * block here until either video or audio data is available.
         */
        mon.Wait();
        if (!mVideoSinkBufferCount) {
          /* There is still no video data available, so either there is audio
           * data or something else has happened (EOS, etc.). Return to the
           * state machine to process it.
           */
          return true;
        }
      }
      else {
        return true;
      }
    }

    mDecoder->NotifyDecodedFrames(0, 1);

#if GST_VERSION_MAJOR >= 1
    GstSample *sample = gst_app_sink_pull_sample(mVideoAppSink);
    buffer = gst_buffer_ref(gst_sample_get_buffer(sample));
    gst_sample_unref(sample);
#else
    buffer = gst_app_sink_pull_buffer(mVideoAppSink);
#endif
    mVideoSinkBufferCount--;
  }

  if (!buffer)
    /* no more frames */
    return true;

  bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  if (aKeyFrameSkip && !isKeyframe) {
    gst_buffer_unref(buffer);
    return true;
  }

  int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
    timestamp = gst_segment_to_stream_time(&mVideoSegment,
        GST_FORMAT_TIME, timestamp);
  }
  NS_ASSERTION(GST_CLOCK_TIME_IS_VALID(timestamp),
               "frame has invalid timestamp");

  timestamp = GST_TIME_AS_USECONDS(timestamp);
  int64_t duration = 0;
  if (GST_CLOCK_TIME_IS_VALID(GST_BUFFER_DURATION(buffer)))
    duration = GST_TIME_AS_USECONDS(GST_BUFFER_DURATION(buffer));
  else if (fpsNum && fpsDen)
    /* add 1-frame duration */
    duration = gst_util_uint64_scale(GST_USECOND, fpsDen, fpsNum);

  if (timestamp < aTimeThreshold) {
    LOG(PR_LOG_DEBUG, "skipping frame %" GST_TIME_FORMAT
                      " threshold %" GST_TIME_FORMAT,
                      GST_TIME_ARGS(timestamp * 1000),
                      GST_TIME_ARGS(aTimeThreshold * 1000));
    gst_buffer_unref(buffer);
    return true;
  }

#if GST_VERSION_MAJOR >= 1
  if (mConfigureAlignment && buffer->pool) {
    GstStructure *config = gst_buffer_pool_get_config(buffer->pool);
    GstVideoAlignment align;
    if (gst_buffer_pool_config_get_video_alignment(config, &align))
      gst_video_info_align(&mVideoInfo, &align);
    gst_structure_free(config);
    mConfigureAlignment = false;
  }
#endif

  nsRefPtr<PlanarYCbCrImage> image = GetImageFromBuffer(buffer);
  if (!image) {
    /* Ugh, upstream is not calling gst_pad_alloc_buffer(). Fall back to
     * allocating a PlanarYCbCrImage-backed GstBuffer here and memcpy.
     */
    GstBuffer* tmp = nullptr;
    CopyIntoImageBuffer(buffer, &tmp, image);
    gst_buffer_unref(buffer);
    buffer = tmp;
  }

  int64_t offset = mDecoder->GetResource()->Tell(); // Estimate location in media.
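  /* Note: this byte offset is only an estimate of where the frame lives in
   * the resource (the demuxer's actual read position may differ); upper
   * layers presumably treat it as a hint rather than an exact position. */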
  VideoData* video = VideoData::CreateFromImage(mInfo.mVideo,
                                                mDecoder->GetImageContainer(),
                                                offset, timestamp, duration,
                                                static_cast<Image*>(image.get()),
                                                isKeyframe, -1, mPicture);
  mVideoQueue.Push(video);

  gst_buffer_unref(buffer);

  return true;
}

nsresult GStreamerReader::Seek(int64_t aTarget,
                               int64_t aStartTime,
                               int64_t aEndTime,
                               int64_t aCurrentTime)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  gint64 seekPos = aTarget * GST_USECOND;
  LOG(PR_LOG_DEBUG, "%p About to seek to %" GST_TIME_FORMAT,
      mDecoder, GST_TIME_ARGS(seekPos));

  int flags = GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_KEY_UNIT;
  if (!gst_element_seek_simple(mPlayBin,
                               GST_FORMAT_TIME,
                               static_cast<GstSeekFlags>(flags),
                               seekPos)) {
    LOG(PR_LOG_ERROR, "seek failed");
    return NS_ERROR_FAILURE;
  }
  LOG(PR_LOG_DEBUG, "seek succeeded");
  GstMessage* message = gst_bus_timed_pop_filtered(mBus, GST_CLOCK_TIME_NONE,
               (GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR));
  gst_message_unref(message);
  LOG(PR_LOG_DEBUG, "seek completed");

  return NS_OK;
}

nsresult GStreamerReader::GetBuffered(dom::TimeRanges* aBuffered,
                                      int64_t aStartTime)
{
  if (!mInfo.HasValidMedia()) {
    return NS_OK;
  }

#if GST_VERSION_MAJOR == 0
  GstFormat format = GST_FORMAT_TIME;
#endif
  MediaResource* resource = mDecoder->GetResource();
  nsTArray<MediaByteRange> ranges;
  resource->GetCachedRanges(ranges);

  if (resource->IsDataCachedToEndOfResource(0)) {
    /* fast path for local or completely cached files */
    gint64 duration = 0;

    {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      duration = mDecoder->GetMediaDuration();
    }

    double end = (double) duration / GST_MSECOND;
    LOG(PR_LOG_DEBUG, "complete range [0, %f] for [0, %li]",
        end, GetDataLength());
    aBuffered->Add(0, end);
    return NS_OK;
  }

  for (uint32_t index = 0; index < ranges.Length(); index++) {
    int64_t startOffset = ranges[index].mStart;
    int64_t endOffset = ranges[index].mEnd;
    gint64 startTime, endTime;

#if GST_VERSION_MAJOR >= 1
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
      startOffset, GST_FORMAT_TIME, &startTime))
      continue;
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
      endOffset, GST_FORMAT_TIME, &endTime))
      continue;
#else
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
      startOffset, &format, &startTime) || format != GST_FORMAT_TIME)
      continue;
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
      endOffset, &format, &endTime) || format != GST_FORMAT_TIME)
      continue;
#endif

    double start = (double) GST_TIME_AS_USECONDS(startTime) / GST_MSECOND;
    double end = (double) GST_TIME_AS_USECONDS(endTime) / GST_MSECOND;
    LOG(PR_LOG_DEBUG, "adding range [%f, %f] for [%li %li] size %li",
        start, end, startOffset, endOffset, GetDataLength());
    aBuffered->Add(start, end);
  }

  return NS_OK;
}

void GStreamerReader::ReadAndPushData(guint aLength)
{
  MediaResource* resource = mDecoder->GetResource();
  NS_ASSERTION(resource, "Decoder has no media resource");
  int64_t offset1 = resource->Tell();
  unused << offset1;
  nsresult rv = NS_OK;

  GstBuffer* buffer = gst_buffer_new_and_alloc(aLength);
#if GST_VERSION_MAJOR >= 1
  GstMapInfo info;
  gst_buffer_map(buffer, &info, GST_MAP_WRITE);
  guint8 *data = info.data;
#else
  guint8* data = GST_BUFFER_DATA(buffer);
#endif
  uint32_t size = 0, bytesRead = 0;
  while (bytesRead < aLength) {
    rv = resource->Read(reinterpret_cast<char*>(data + bytesRead),
        aLength - bytesRead, &size);
    if (NS_FAILED(rv) || size == 0)
      break;

    bytesRead += size;
  }

  int64_t offset2 = resource->Tell();
  unused << offset2;

#if GST_VERSION_MAJOR >= 1
  gst_buffer_unmap(buffer, &info);
  gst_buffer_set_size(buffer, bytesRead);
#else
  GST_BUFFER_SIZE(buffer) = bytesRead;
#endif

  GstFlowReturn ret = gst_app_src_push_buffer(mSource, gst_buffer_ref(buffer));
  if (ret != GST_FLOW_OK) {
    LOG(PR_LOG_ERROR, "ReadAndPushData push ret %s(%d)", gst_flow_get_name(ret), ret);
  }

  if (NS_FAILED(rv)) {
    /* Terminate the stream if there is a read error */
    LOG(PR_LOG_ERROR, "ReadAndPushData read error, rv=%x", rv);
    gst_app_src_end_of_stream(mSource);
  } else if (bytesRead < aLength) {
    /* If we read less than we wanted, we reached the end */
    LOG(PR_LOG_WARNING, "ReadAndPushData read underflow, "
        "bytesRead=%u, aLength=%u, offset(%lld,%lld)",
        bytesRead, aLength, offset1, offset2);
    gst_app_src_end_of_stream(mSource);
  }

  gst_buffer_unref(buffer);

  /* Ensure the offset change is consistent within this function.
   * If other stream operations happen on another thread at the same time,
   * they will disturb the GStreamer state machine.
   */
  MOZ_ASSERT(offset1 + bytesRead == offset2);
}

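/* appsrc callbacks: in push mode the source element requests data with
 * need-data (NeedDataCb -> NeedData -> ReadAndPushData above) and requests
 * repositioning with seek-data (SeekDataCb -> SeekData). These run on
 * GStreamer threads, not on the decode thread. */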
void GStreamerReader::NeedDataCb(GstAppSrc* aSrc,
                                 guint aLength,
                                 gpointer aUserData)
{
  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
  reader->NeedData(aSrc, aLength);
}

void GStreamerReader::NeedData(GstAppSrc* aSrc, guint aLength)
{
  if (aLength == static_cast<guint>(-1))
    aLength = DEFAULT_SOURCE_READ_SIZE;
  ReadAndPushData(aLength);
}

void GStreamerReader::EnoughDataCb(GstAppSrc* aSrc, gpointer aUserData)
{
  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
  reader->EnoughData(aSrc);
}

void GStreamerReader::EnoughData(GstAppSrc* aSrc)
{
}

gboolean GStreamerReader::SeekDataCb(GstAppSrc* aSrc,
                                     guint64 aOffset,
                                     gpointer aUserData)
{
  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
  return reader->SeekData(aSrc, aOffset);
}

gboolean GStreamerReader::SeekData(GstAppSrc* aSrc, guint64 aOffset)
{
  aOffset += mDataOffset;

  ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
  MediaResource* resource = mDecoder->GetResource();
  int64_t resourceLength = resource->GetLength();

  if (gst_app_src_get_size(mSource) == -1) {
    /* It's possible that we didn't know the length when we initialized
     * mSource, but maybe we do now.
     */
    gst_app_src_set_size(mSource, GetDataLength());
  }

  nsresult rv = NS_ERROR_FAILURE;
  if (aOffset < static_cast<guint64>(resourceLength)) {
    rv = resource->Seek(SEEK_SET, aOffset);
  }

  if (NS_FAILED(rv)) {
    LOG(PR_LOG_ERROR, "seek at %lu failed", aOffset);
  } else {
    MOZ_ASSERT(aOffset == static_cast<guint64>(resource->Tell()));
  }

  return NS_SUCCEEDED(rv);
}

GstFlowReturn GStreamerReader::NewPrerollCb(GstAppSink* aSink,
                                            gpointer aUserData)
{
  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);

  if (aSink == reader->mVideoAppSink)
    reader->VideoPreroll();
  else
    reader->AudioPreroll();
  return GST_FLOW_OK;
}

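/* The preroll callbacks run on GStreamer streaming threads once the first
 * buffer reaches a sink. We only read the negotiated caps here to fill in
 * mInfo; no data is pulled until DecodeAudioData()/DecodeVideoFrame(). */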
void GStreamerReader::AudioPreroll()
{
  /* The first audio buffer has reached the audio sink. Get rate and channels */
  LOG(PR_LOG_DEBUG, "Audio preroll");
  GstPad* sinkpad = gst_element_get_static_pad(GST_ELEMENT(mAudioAppSink), "sink");
#if GST_VERSION_MAJOR >= 1
  GstCaps *caps = gst_pad_get_current_caps(sinkpad);
#else
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
#endif
  GstStructure* s = gst_caps_get_structure(caps, 0);
  mInfo.mAudio.mRate = mInfo.mAudio.mChannels = 0;
  gst_structure_get_int(s, "rate", (gint*) &mInfo.mAudio.mRate);
  gst_structure_get_int(s, "channels", (gint*) &mInfo.mAudio.mChannels);
  NS_ASSERTION(mInfo.mAudio.mRate != 0, "audio rate is zero");
  NS_ASSERTION(mInfo.mAudio.mChannels != 0, "audio channels is zero");
  NS_ASSERTION(mInfo.mAudio.mChannels > 0 && mInfo.mAudio.mChannels <= MAX_CHANNELS,
               "invalid audio channels number");
  mInfo.mAudio.mHasAudio = true;
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}

void GStreamerReader::VideoPreroll()
{
  /* The first video buffer has reached the video sink. Get width and height */
  LOG(PR_LOG_DEBUG, "Video preroll");
  GstPad* sinkpad = gst_element_get_static_pad(GST_ELEMENT(mVideoAppSink), "sink");
  int PARNumerator, PARDenominator;
#if GST_VERSION_MAJOR >= 1
  GstCaps* caps = gst_pad_get_current_caps(sinkpad);
  memset(&mVideoInfo, 0, sizeof(mVideoInfo));
  gst_video_info_from_caps(&mVideoInfo, caps);
  mFormat = mVideoInfo.finfo->format;
  mPicture.width = mVideoInfo.width;
  mPicture.height = mVideoInfo.height;
  PARNumerator = GST_VIDEO_INFO_PAR_N(&mVideoInfo);
  PARDenominator = GST_VIDEO_INFO_PAR_D(&mVideoInfo);
#else
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
  gst_video_format_parse_caps(caps, &mFormat, &mPicture.width, &mPicture.height);
  if (!gst_video_parse_caps_pixel_aspect_ratio(caps, &PARNumerator, &PARDenominator)) {
    PARNumerator = 1;
    PARDenominator = 1;
  }
#endif
  NS_ASSERTION(mPicture.width && mPicture.height, "invalid video resolution");

  // Calculate the display size according to the pixel aspect ratio.
  nsIntRect pictureRect(0, 0, mPicture.width, mPicture.height);
  nsIntSize frameSize = nsIntSize(mPicture.width, mPicture.height);
  nsIntSize displaySize = nsIntSize(mPicture.width, mPicture.height);
  ScaleDisplayByAspectRatio(displaySize, float(PARNumerator) / float(PARDenominator));

  // If the video frame size overflows, stop playing.
  if (IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GstStructure* structure = gst_caps_get_structure(caps, 0);
    gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);
    mInfo.mVideo.mDisplay = ThebesIntSize(displaySize.ToIntSize());
    mInfo.mVideo.mHasVideo = true;
  } else {
    LOG(PR_LOG_DEBUG, "invalid video region");
    Eos();
  }
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}

GstFlowReturn GStreamerReader::NewBufferCb(GstAppSink* aSink,
                                           gpointer aUserData)
{
  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);

  if (aSink == reader->mVideoAppSink)
    reader->NewVideoBuffer();
  else
    reader->NewAudioBuffer();

  return GST_FLOW_OK;
}

void GStreamerReader::NewVideoBuffer()
{
  ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
  /* We have a new video buffer queued in the video sink. Increment the counter
   * and notify the decode thread potentially blocked in DecodeVideoFrame.
   */

  mDecoder->NotifyDecodedFrames(1, 0);
  mVideoSinkBufferCount++;
  mon.NotifyAll();
}

void GStreamerReader::NewAudioBuffer()
{
  ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
  /* We have a new audio buffer queued in the audio sink. Increment the counter
   * and notify the decode thread potentially blocked in DecodeAudioData.
   */
  mAudioSinkBufferCount++;
  mon.NotifyAll();
}

void GStreamerReader::EosCb(GstAppSink* aSink, gpointer aUserData)
{
  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
  reader->Eos(aSink);
}

void GStreamerReader::Eos(GstAppSink* aSink)
{
  /* We reached the end of the stream */
  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
    /* Potentially unblock DecodeVideoFrame and DecodeAudioData */
    if (aSink == mVideoAppSink) {
      mReachedVideoEos = true;
    } else if (aSink == mAudioAppSink) {
      mReachedAudioEos = true;
    } else {
      // Assume this is an error causing an EOS.
      mReachedAudioEos = true;
      mReachedVideoEos = true;
    }
    mon.NotifyAll();
  }

  {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    /* Potentially unblock the decode thread in ::DecodeLoop */
    mon.NotifyAll();
  }
}

/**
 * This callback is called while the pipeline is automatically built, after a
 * new element has been added to the pipeline. We use it to find the
 * uridecodebin instance used by playbin and connect to it to apply our
 * whitelist.
 */
void
GStreamerReader::PlayElementAddedCb(GstBin *aBin, GstElement *aElement,
                                    gpointer *aUserData)
{
  const static char sUriDecodeBinPrefix[] = "uridecodebin";
  gchar *name = gst_element_get_name(aElement);

  // Attach this callback to uridecodebin, child of playbin.
  if (!strncmp(name, sUriDecodeBinPrefix, sizeof(sUriDecodeBinPrefix) - 1)) {
    g_signal_connect(G_OBJECT(aElement), "autoplug-sort",
                     G_CALLBACK(GStreamerReader::AutoplugSortCb), aUserData);
  }

  g_free(name);
}

bool
GStreamerReader::ShouldAutoplugFactory(GstElementFactory* aFactory, GstCaps* aCaps)
{
  bool autoplug;
  const gchar *klass = gst_element_factory_get_klass(aFactory);
  if (strstr(klass, "Demuxer") && !strstr(klass, "Metadata")) {
    autoplug = GStreamerFormatHelper::Instance()->CanHandleContainerCaps(aCaps);
  } else if (strstr(klass, "Decoder") && !strstr(klass, "Generic")) {
    autoplug = GStreamerFormatHelper::Instance()->CanHandleCodecCaps(aCaps);
  } else {
    /* we only filter demuxers and decoders; let everything else be autoplugged */
    autoplug = true;
  }

  return autoplug;
}

/**
 * This is called by uridecodebin (running inside playbin), after it has found
 * candidate factories to continue decoding the stream. We apply the whitelist
 * here, allowing only demuxers and decoders that output the formats we want to
 * support.
 */
GValueArray*
GStreamerReader::AutoplugSortCb(GstElement* aElement, GstPad* aPad,
                                GstCaps* aCaps, GValueArray* aFactories)
{
  if (!aFactories->n_values) {
    return nullptr;
  }

  /* aFactories[0] is the element factory that is going to be used to
   * create the next element needed to demux or decode the stream.
   */
  GstElementFactory *factory = (GstElementFactory*) g_value_get_object(g_value_array_get_nth(aFactories, 0));
  if (!ShouldAutoplugFactory(factory, aCaps)) {
    /* We don't support this factory. Return an empty array to signal that we
     * don't want to continue decoding this (sub)stream.
     */
    return g_value_array_new(0);
  }

  /* nullptr means that we're OK with the candidates and don't need to apply
   * any sorting/filtering.
   */
  return nullptr;
}

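/* The practical effect of the whitelist above: when AutoplugSortCb returns an
 * empty factory array, uridecodebin has no usable element for the offending
 * caps, so the unsupported (sub)stream simply fails to build instead of being
 * decoded by whatever plugin happens to be installed on the system. */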
/**
 * If this is an MP3 stream, pass any new data we get to the MP3 frame parser
 * for duration estimation.
 */
void GStreamerReader::NotifyDataArrived(const char *aBuffer,
                                        uint32_t aLength,
                                        int64_t aOffset)
{
  MOZ_ASSERT(NS_IsMainThread());

  if (HasVideo()) {
    return;
  }

  if (!mMP3FrameParser.NeedsData()) {
    return;
  }

  mMP3FrameParser.Parse(aBuffer, aLength, aOffset);

  int64_t duration = mMP3FrameParser.GetDuration();
  if (duration != mLastParserDuration && mUseParserDuration) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mLastParserDuration = duration;
    mDecoder->UpdateEstimatedMediaDuration(mLastParserDuration);
  }
}

#if GST_VERSION_MAJOR >= 1
GstCaps* GStreamerReader::BuildAudioSinkCaps()
{
  GstCaps* caps = gst_caps_from_string("audio/x-raw, channels={1,2}");
  const char* format;
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
#if MOZ_LITTLE_ENDIAN
  format = "F32LE";
#else
  format = "F32BE";
#endif
#else /* !MOZ_SAMPLE_TYPE_FLOAT32 */
#if MOZ_LITTLE_ENDIAN
  format = "S16LE";
#else
  format = "S16BE";
#endif
#endif
  gst_caps_set_simple(caps, "format", G_TYPE_STRING, format, nullptr);

  return caps;
}

void GStreamerReader::InstallPadCallbacks()
{
  GstPad* sinkpad = gst_element_get_static_pad(GST_ELEMENT(mVideoAppSink), "sink");

  gst_pad_add_probe(sinkpad,
      (GstPadProbeType) (GST_PAD_PROBE_TYPE_SCHEDULING |
        GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM |
        GST_PAD_PROBE_TYPE_EVENT_UPSTREAM |
        GST_PAD_PROBE_TYPE_EVENT_FLUSH),
      &GStreamerReader::EventProbeCb, this, nullptr);
  gst_pad_add_probe(sinkpad, GST_PAD_PROBE_TYPE_QUERY_DOWNSTREAM,
      GStreamerReader::QueryProbeCb, nullptr, nullptr);

  gst_pad_set_element_private(sinkpad, this);
  gst_object_unref(sinkpad);

  sinkpad = gst_element_get_static_pad(GST_ELEMENT(mAudioAppSink), "sink");
  gst_pad_add_probe(sinkpad,
      (GstPadProbeType) (GST_PAD_PROBE_TYPE_SCHEDULING |
        GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM |
        GST_PAD_PROBE_TYPE_EVENT_UPSTREAM |
        GST_PAD_PROBE_TYPE_EVENT_FLUSH),
      &GStreamerReader::EventProbeCb, this, nullptr);
  gst_object_unref(sinkpad);
}

GstPadProbeReturn GStreamerReader::EventProbeCb(GstPad *aPad,
                                                GstPadProbeInfo *aInfo,
                                                gpointer aUserData)
{
  GStreamerReader *reader = (GStreamerReader *) aUserData;
  GstEvent *aEvent = (GstEvent *)aInfo->data;
  return reader->EventProbe(aPad, aEvent);
}

GstPadProbeReturn GStreamerReader::EventProbe(GstPad *aPad, GstEvent *aEvent)
{
  GstElement* parent = GST_ELEMENT(gst_pad_get_parent(aPad));

  LOG(PR_LOG_DEBUG, "event probe %s", GST_EVENT_TYPE_NAME(aEvent));

  switch (GST_EVENT_TYPE(aEvent)) {
    case GST_EVENT_SEGMENT:
    {
      const GstSegment *newSegment;
      GstSegment* segment;

      /* Store the segments so we can convert timestamps to stream time,
       * which is what the upper layers sync on.
       */
      ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
#if GST_VERSION_MINOR <= 1 && GST_VERSION_MICRO < 1
      ResetDecode();
#endif
      gst_event_parse_segment(aEvent, &newSegment);
      if (parent == GST_ELEMENT(mVideoAppSink))
        segment = &mVideoSegment;
      else
        segment = &mAudioSegment;
      gst_segment_copy_into(newSegment, segment);
      break;
    }
    case GST_EVENT_FLUSH_STOP:
      /* Reset on seeks */
      ResetDecode();
      break;
    default:
      break;
  }
  gst_object_unref(parent);

  return GST_PAD_PROBE_OK;
}

GstPadProbeReturn GStreamerReader::QueryProbeCb(GstPad* aPad, GstPadProbeInfo* aInfo, gpointer aUserData)
{
  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(gst_pad_get_element_private(aPad));
  return reader->QueryProbe(aPad, aInfo, aUserData);
}

GstPadProbeReturn GStreamerReader::QueryProbe(GstPad* aPad, GstPadProbeInfo* aInfo, gpointer aUserData)
{
  GstQuery *query = gst_pad_probe_info_get_query(aInfo);
  GstPadProbeReturn ret = GST_PAD_PROBE_OK;

  switch (GST_QUERY_TYPE(query)) {
    case GST_QUERY_ALLOCATION:
      GstCaps *caps;
      GstVideoInfo info;
      gboolean need_pool;

      gst_query_parse_allocation(query, &caps, &need_pool);
      gst_video_info_init(&info);
      gst_video_info_from_caps(&info, caps);
      /* Offer our PlanarYCbCrImage-backed allocator and buffer pool to the
       * upstream decoder so it can decode directly into images. */
      gst_query_add_allocation_param(query, mAllocator, nullptr);
      gst_query_add_allocation_pool(query, mBufferPool, info.size, 0, 0);
      break;
    default:
      break;
  }

  return ret;
}

void GStreamerReader::ImageDataFromVideoFrame(GstVideoFrame *aFrame,
                                              PlanarYCbCrImage::Data *aData)
{
  NS_ASSERTION(GST_VIDEO_INFO_IS_YUV(&mVideoInfo),
               "Non-YUV video frame formats not supported");
  NS_ASSERTION(GST_VIDEO_FRAME_N_COMPONENTS(aFrame) == 3,
               "Unsupported number of components in video frame");

  aData->mPicX = aData->mPicY = 0;
  aData->mPicSize = gfx::IntSize(mPicture.width, mPicture.height);
  aData->mStereoMode = StereoMode::MONO;

  aData->mYChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 0);
  aData->mYStride = GST_VIDEO_FRAME_COMP_STRIDE(aFrame, 0);
  aData->mYSize = gfx::IntSize(GST_VIDEO_FRAME_COMP_WIDTH(aFrame, 0),
                               GST_VIDEO_FRAME_COMP_HEIGHT(aFrame, 0));
  aData->mYSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 0) - 1;
  aData->mCbCrStride = GST_VIDEO_FRAME_COMP_STRIDE(aFrame, 1);
  aData->mCbCrSize = gfx::IntSize(GST_VIDEO_FRAME_COMP_WIDTH(aFrame, 1),
                                  GST_VIDEO_FRAME_COMP_HEIGHT(aFrame, 1));
  aData->mCbChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 1);
  aData->mCrChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 2);
  aData->mCbSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 1) - 1;
  aData->mCrSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 2) - 1;
}

nsRefPtr<PlanarYCbCrImage> GStreamerReader::GetImageFromBuffer(GstBuffer* aBuffer)
{
  nsRefPtr<PlanarYCbCrImage> image = nullptr;

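  /* Zero-copy path: if the buffer holds a single GstMemory that came from our
   * allocator, the decoded frame already lives in a PlanarYCbCrImage, so we
   * only map the planes and point the image at them. */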
  if (gst_buffer_n_memory(aBuffer) == 1) {
    GstMemory* mem = gst_buffer_peek_memory(aBuffer, 0);
    if (GST_IS_MOZ_GFX_MEMORY_ALLOCATOR(mem->allocator)) {
      image = moz_gfx_memory_get_image(mem);

      GstVideoFrame frame;
      gst_video_frame_map(&frame, &mVideoInfo, aBuffer, GST_MAP_READ);
      PlanarYCbCrImage::Data data;
      ImageDataFromVideoFrame(&frame, &data);
      image->SetDataNoCopy(data);
      gst_video_frame_unmap(&frame);
    }
  }

  return image;
}

void GStreamerReader::CopyIntoImageBuffer(GstBuffer* aBuffer,
                                          GstBuffer** aOutBuffer,
                                          nsRefPtr<PlanarYCbCrImage> &image)
{
  *aOutBuffer = gst_buffer_new_allocate(mAllocator, gst_buffer_get_size(aBuffer), nullptr);
  GstMemory *mem = gst_buffer_peek_memory(*aOutBuffer, 0);
  GstMapInfo map_info;
  gst_memory_map(mem, &map_info, GST_MAP_WRITE);
  gst_buffer_extract(aBuffer, 0, map_info.data, gst_buffer_get_size(aBuffer));
  gst_memory_unmap(mem, &map_info);

  /* create a new gst buffer with the newly created memory and copy the
   * metadata over from the incoming buffer */
  gst_buffer_copy_into(*aOutBuffer, aBuffer,
      (GstBufferCopyFlags)(GST_BUFFER_COPY_METADATA), 0, -1);
  image = GetImageFromBuffer(*aOutBuffer);
}
#endif

} // namespace mozilla