/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "MediaOmxReader.h"

#include "MediaDecoderStateMachine.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/dom/TimeRanges.h"
#include "MediaResource.h"
#include "VideoUtils.h"
#include "MediaOmxDecoder.h"
#include "AbstractMediaDecoder.h"
#include "AudioChannelService.h"
#include "OmxDecoder.h"
#include "MPAPI.h"
#include "gfx2DGlue.h"

#ifdef MOZ_AUDIO_OFFLOAD
// NOTE(review): the annotate dump lost the angle-bracketed header names here.
// Restored from the symbols used below: canOffloadStream() lives in
// stagefright/Utils.h, property_get() in cutils/properties.h, and MetaData in
// stagefright/MetaData.h — confirm against the build.
#include <stagefright/Utils.h>
#include <cutils/properties.h>
#include <stagefright/MetaData.h>
#endif

// Give up skipping after this many consecutively dropped frames.
#define MAX_DROPPED_FRAMES 25
// Try not to spend more than this much time in a single call to DecodeVideoFrame.
michael@0: #define MAX_VIDEO_DECODE_SECONDS 0.1 michael@0: michael@0: using namespace mozilla::gfx; michael@0: using namespace android; michael@0: michael@0: namespace mozilla { michael@0: michael@0: #ifdef PR_LOGGING michael@0: extern PRLogModuleInfo* gMediaDecoderLog; michael@0: #define DECODER_LOG(type, msg) PR_LOG(gMediaDecoderLog, type, msg) michael@0: #else michael@0: #define DECODER_LOG(type, msg) michael@0: #endif michael@0: michael@0: MediaOmxReader::MediaOmxReader(AbstractMediaDecoder *aDecoder) michael@0: : MediaDecoderReader(aDecoder) michael@0: , mHasVideo(false) michael@0: , mHasAudio(false) michael@0: , mVideoSeekTimeUs(-1) michael@0: , mAudioSeekTimeUs(-1) michael@0: , mSkipCount(0) michael@0: #ifdef DEBUG michael@0: , mIsActive(true) michael@0: #endif michael@0: { michael@0: #ifdef PR_LOGGING michael@0: if (!gMediaDecoderLog) { michael@0: gMediaDecoderLog = PR_NewLogModule("MediaDecoder"); michael@0: } michael@0: #endif michael@0: michael@0: mAudioChannel = dom::AudioChannelService::GetDefaultAudioChannel(); michael@0: } michael@0: michael@0: MediaOmxReader::~MediaOmxReader() michael@0: { michael@0: ReleaseMediaResources(); michael@0: ReleaseDecoder(); michael@0: mOmxDecoder.clear(); michael@0: } michael@0: michael@0: nsresult MediaOmxReader::Init(MediaDecoderReader* aCloneDonor) michael@0: { michael@0: return NS_OK; michael@0: } michael@0: michael@0: bool MediaOmxReader::IsWaitingMediaResources() michael@0: { michael@0: if (!mOmxDecoder.get()) { michael@0: return false; michael@0: } michael@0: return mOmxDecoder->IsWaitingMediaResources(); michael@0: } michael@0: michael@0: bool MediaOmxReader::IsDormantNeeded() michael@0: { michael@0: if (!mOmxDecoder.get()) { michael@0: return false; michael@0: } michael@0: return mOmxDecoder->IsDormantNeeded(); michael@0: } michael@0: michael@0: void MediaOmxReader::ReleaseMediaResources() michael@0: { michael@0: ResetDecode(); michael@0: // Before freeing a video codec, all video buffers needed to be released 
michael@0: // even from graphics pipeline. michael@0: VideoFrameContainer* container = mDecoder->GetVideoFrameContainer(); michael@0: if (container) { michael@0: container->ClearCurrentFrame(); michael@0: } michael@0: if (mOmxDecoder.get()) { michael@0: mOmxDecoder->ReleaseMediaResources(); michael@0: } michael@0: } michael@0: michael@0: void MediaOmxReader::ReleaseDecoder() michael@0: { michael@0: if (mOmxDecoder.get()) { michael@0: mOmxDecoder->ReleaseDecoder(); michael@0: } michael@0: } michael@0: michael@0: nsresult MediaOmxReader::InitOmxDecoder() michael@0: { michael@0: if (!mOmxDecoder.get()) { michael@0: //register sniffers, if they are not registered in this process. michael@0: DataSource::RegisterDefaultSniffers(); michael@0: mDecoder->GetResource()->SetReadMode(MediaCacheStream::MODE_METADATA); michael@0: michael@0: sp dataSource = new MediaStreamSource(mDecoder->GetResource(), mDecoder); michael@0: dataSource->initCheck(); michael@0: michael@0: mExtractor = MediaExtractor::Create(dataSource); michael@0: if (!mExtractor.get()) { michael@0: return NS_ERROR_FAILURE; michael@0: } michael@0: mOmxDecoder = new OmxDecoder(mDecoder->GetResource(), mDecoder); michael@0: if (!mOmxDecoder->Init(mExtractor)) { michael@0: return NS_ERROR_FAILURE; michael@0: } michael@0: } michael@0: return NS_OK; michael@0: } michael@0: michael@0: nsresult MediaOmxReader::ReadMetadata(MediaInfo* aInfo, michael@0: MetadataTags** aTags) michael@0: { michael@0: NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); michael@0: MOZ_ASSERT(mIsActive); michael@0: michael@0: *aTags = nullptr; michael@0: michael@0: // Initialize the internal OMX Decoder. 
michael@0: nsresult rv = InitOmxDecoder(); michael@0: if (NS_FAILED(rv)) { michael@0: return rv; michael@0: } michael@0: michael@0: if (!mOmxDecoder->TryLoad()) { michael@0: return NS_ERROR_FAILURE; michael@0: } michael@0: michael@0: #ifdef MOZ_AUDIO_OFFLOAD michael@0: CheckAudioOffload(); michael@0: #endif michael@0: michael@0: if (IsWaitingMediaResources()) { michael@0: return NS_OK; michael@0: } michael@0: michael@0: // Set the total duration (the max of the audio and video track). michael@0: int64_t durationUs; michael@0: mOmxDecoder->GetDuration(&durationUs); michael@0: if (durationUs) { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: mDecoder->SetMediaDuration(durationUs); michael@0: } michael@0: michael@0: // Check the MediaExtract flag if the source is seekable. michael@0: mDecoder->SetMediaSeekable(mExtractor->flags() & MediaExtractor::CAN_SEEK); michael@0: michael@0: if (mOmxDecoder->HasVideo()) { michael@0: int32_t displayWidth, displayHeight, width, height; michael@0: mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight, michael@0: &width, &height); michael@0: nsIntRect pictureRect(0, 0, width, height); michael@0: michael@0: // Validate the container-reported frame and pictureRect sizes. This ensures michael@0: // that our video frame creation code doesn't overflow. michael@0: nsIntSize displaySize(displayWidth, displayHeight); michael@0: nsIntSize frameSize(width, height); michael@0: if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) { michael@0: return NS_ERROR_FAILURE; michael@0: } michael@0: michael@0: // Video track's frame sizes will not overflow. Activate the video track. 
michael@0: mHasVideo = mInfo.mVideo.mHasVideo = true; michael@0: mInfo.mVideo.mDisplay = displaySize; michael@0: mPicture = pictureRect; michael@0: mInitialFrame = frameSize; michael@0: VideoFrameContainer* container = mDecoder->GetVideoFrameContainer(); michael@0: if (container) { michael@0: container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height), michael@0: nullptr, michael@0: mozilla::TimeStamp::Now()); michael@0: } michael@0: } michael@0: michael@0: if (mOmxDecoder->HasAudio()) { michael@0: int32_t numChannels, sampleRate; michael@0: mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate); michael@0: mHasAudio = mInfo.mAudio.mHasAudio = true; michael@0: mInfo.mAudio.mChannels = numChannels; michael@0: mInfo.mAudio.mRate = sampleRate; michael@0: } michael@0: michael@0: *aInfo = mInfo; michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: bool MediaOmxReader::DecodeVideoFrame(bool &aKeyframeSkip, michael@0: int64_t aTimeThreshold) michael@0: { michael@0: MOZ_ASSERT(mIsActive); michael@0: michael@0: // Record number of frames decoded and parsed. Automatically update the michael@0: // stats counters using the AutoNotifyDecoded stack-based class. michael@0: uint32_t parsed = 0, decoded = 0; michael@0: AbstractMediaDecoder::AutoNotifyDecoded autoNotify(mDecoder, parsed, decoded); michael@0: michael@0: bool doSeek = mVideoSeekTimeUs != -1; michael@0: if (doSeek) { michael@0: aTimeThreshold = mVideoSeekTimeUs; michael@0: } michael@0: michael@0: TimeStamp start = TimeStamp::Now(); michael@0: michael@0: // Read next frame. Don't let this loop run for too long. 
michael@0: while ((TimeStamp::Now() - start) < TimeDuration::FromSeconds(MAX_VIDEO_DECODE_SECONDS)) { michael@0: MPAPI::VideoFrame frame; michael@0: frame.mGraphicBuffer = nullptr; michael@0: frame.mShouldSkip = false; michael@0: if (!mOmxDecoder->ReadVideo(&frame, aTimeThreshold, aKeyframeSkip, doSeek)) { michael@0: return false; michael@0: } michael@0: doSeek = false; michael@0: michael@0: // Ignore empty buffer which stagefright media read will sporadically return michael@0: if (frame.mSize == 0 && !frame.mGraphicBuffer) { michael@0: continue; michael@0: } michael@0: michael@0: parsed++; michael@0: if (frame.mShouldSkip && mSkipCount < MAX_DROPPED_FRAMES) { michael@0: mSkipCount++; michael@0: continue; michael@0: } michael@0: michael@0: mSkipCount = 0; michael@0: michael@0: mVideoSeekTimeUs = -1; michael@0: aKeyframeSkip = false; michael@0: michael@0: IntRect picture = ToIntRect(mPicture); michael@0: if (frame.Y.mWidth != mInitialFrame.width || michael@0: frame.Y.mHeight != mInitialFrame.height) { michael@0: michael@0: // Frame size is different from what the container reports. This is legal, michael@0: // and we will preserve the ratio of the crop rectangle as it michael@0: // was reported relative to the picture size reported by the container. michael@0: picture.x = (mPicture.x * frame.Y.mWidth) / mInitialFrame.width; michael@0: picture.y = (mPicture.y * frame.Y.mHeight) / mInitialFrame.height; michael@0: picture.width = (frame.Y.mWidth * mPicture.width) / mInitialFrame.width; michael@0: picture.height = (frame.Y.mHeight * mPicture.height) / mInitialFrame.height; michael@0: } michael@0: michael@0: // This is the approximate byte position in the stream. 
michael@0: int64_t pos = mDecoder->GetResource()->Tell(); michael@0: michael@0: VideoData *v; michael@0: if (!frame.mGraphicBuffer) { michael@0: michael@0: VideoData::YCbCrBuffer b; michael@0: b.mPlanes[0].mData = static_cast(frame.Y.mData); michael@0: b.mPlanes[0].mStride = frame.Y.mStride; michael@0: b.mPlanes[0].mHeight = frame.Y.mHeight; michael@0: b.mPlanes[0].mWidth = frame.Y.mWidth; michael@0: b.mPlanes[0].mOffset = frame.Y.mOffset; michael@0: b.mPlanes[0].mSkip = frame.Y.mSkip; michael@0: michael@0: b.mPlanes[1].mData = static_cast(frame.Cb.mData); michael@0: b.mPlanes[1].mStride = frame.Cb.mStride; michael@0: b.mPlanes[1].mHeight = frame.Cb.mHeight; michael@0: b.mPlanes[1].mWidth = frame.Cb.mWidth; michael@0: b.mPlanes[1].mOffset = frame.Cb.mOffset; michael@0: b.mPlanes[1].mSkip = frame.Cb.mSkip; michael@0: michael@0: b.mPlanes[2].mData = static_cast(frame.Cr.mData); michael@0: b.mPlanes[2].mStride = frame.Cr.mStride; michael@0: b.mPlanes[2].mHeight = frame.Cr.mHeight; michael@0: b.mPlanes[2].mWidth = frame.Cr.mWidth; michael@0: b.mPlanes[2].mOffset = frame.Cr.mOffset; michael@0: b.mPlanes[2].mSkip = frame.Cr.mSkip; michael@0: michael@0: v = VideoData::Create(mInfo.mVideo, michael@0: mDecoder->GetImageContainer(), michael@0: pos, michael@0: frame.mTimeUs, michael@0: 1, // We don't know the duration. michael@0: b, michael@0: frame.mKeyFrame, michael@0: -1, michael@0: picture); michael@0: } else { michael@0: v = VideoData::Create(mInfo.mVideo, michael@0: mDecoder->GetImageContainer(), michael@0: pos, michael@0: frame.mTimeUs, michael@0: 1, // We don't know the duration. 
michael@0: frame.mGraphicBuffer, michael@0: frame.mKeyFrame, michael@0: -1, michael@0: picture); michael@0: } michael@0: michael@0: if (!v) { michael@0: NS_WARNING("Unable to create VideoData"); michael@0: return false; michael@0: } michael@0: michael@0: decoded++; michael@0: NS_ASSERTION(decoded <= parsed, "Expect to decode fewer frames than parsed in MediaPlugin..."); michael@0: michael@0: mVideoQueue.Push(v); michael@0: michael@0: break; michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: void MediaOmxReader::NotifyDataArrived(const char* aBuffer, uint32_t aLength, int64_t aOffset) michael@0: { michael@0: android::OmxDecoder *omxDecoder = mOmxDecoder.get(); michael@0: michael@0: if (omxDecoder) { michael@0: omxDecoder->NotifyDataArrived(aBuffer, aLength, aOffset); michael@0: } michael@0: } michael@0: michael@0: bool MediaOmxReader::DecodeAudioData() michael@0: { michael@0: NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); michael@0: MOZ_ASSERT(mIsActive); michael@0: michael@0: // This is the approximate byte position in the stream. 
michael@0: int64_t pos = mDecoder->GetResource()->Tell(); michael@0: michael@0: // Read next frame michael@0: MPAPI::AudioFrame source; michael@0: if (!mOmxDecoder->ReadAudio(&source, mAudioSeekTimeUs)) { michael@0: return false; michael@0: } michael@0: mAudioSeekTimeUs = -1; michael@0: michael@0: // Ignore empty buffer which stagefright media read will sporadically return michael@0: if (source.mSize == 0) { michael@0: return true; michael@0: } michael@0: michael@0: uint32_t frames = source.mSize / (source.mAudioChannels * michael@0: sizeof(AudioDataValue)); michael@0: michael@0: typedef AudioCompactor::NativeCopy OmxCopy; michael@0: return mAudioCompactor.Push(pos, michael@0: source.mTimeUs, michael@0: source.mAudioSampleRate, michael@0: frames, michael@0: source.mAudioChannels, michael@0: OmxCopy(static_cast(source.mData), michael@0: source.mSize, michael@0: source.mAudioChannels)); michael@0: } michael@0: michael@0: nsresult MediaOmxReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime) michael@0: { michael@0: NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); michael@0: MOZ_ASSERT(mIsActive); michael@0: michael@0: ResetDecode(); michael@0: VideoFrameContainer* container = mDecoder->GetVideoFrameContainer(); michael@0: if (container && container->GetImageContainer()) { michael@0: container->GetImageContainer()->ClearAllImagesExceptFront(); michael@0: } michael@0: michael@0: if (mHasAudio && mHasVideo) { michael@0: // The OMXDecoder seeks/demuxes audio and video streams separately. So if michael@0: // we seek both audio and video to aTarget, the audio stream can typically michael@0: // seek closer to the seek target, since typically every audio block is michael@0: // a sync point, whereas for video there are only keyframes once every few michael@0: // seconds. 
So if we have both audio and video, we must seek the video michael@0: // stream to the preceeding keyframe first, get the stream time, and then michael@0: // seek the audio stream to match the video stream's time. Otherwise, the michael@0: // audio and video streams won't be in sync after the seek. michael@0: mVideoSeekTimeUs = aTarget; michael@0: const VideoData* v = DecodeToFirstVideoData(); michael@0: mAudioSeekTimeUs = v ? v->mTime : aTarget; michael@0: } else { michael@0: mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget; michael@0: } michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: static uint64_t BytesToTime(int64_t offset, uint64_t length, uint64_t durationUs) { michael@0: double perc = double(offset) / double(length); michael@0: if (perc > 1.0) michael@0: perc = 1.0; michael@0: return uint64_t(double(durationUs) * perc); michael@0: } michael@0: michael@0: void MediaOmxReader::SetIdle() { michael@0: #ifdef DEBUG michael@0: mIsActive = false; michael@0: #endif michael@0: if (!mOmxDecoder.get()) { michael@0: return; michael@0: } michael@0: mOmxDecoder->Pause(); michael@0: } michael@0: michael@0: void MediaOmxReader::SetActive() { michael@0: #ifdef DEBUG michael@0: mIsActive = true; michael@0: #endif michael@0: if (!mOmxDecoder.get()) { michael@0: return; michael@0: } michael@0: DebugOnly result = mOmxDecoder->Play(); michael@0: NS_ASSERTION(result == NS_OK, "OmxDecoder should be in play state to continue decoding"); michael@0: } michael@0: michael@0: #ifdef MOZ_AUDIO_OFFLOAD michael@0: void MediaOmxReader::CheckAudioOffload() michael@0: { michael@0: NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread."); michael@0: michael@0: char offloadProp[128]; michael@0: property_get("audio.offload.disable", offloadProp, "0"); michael@0: bool offloadDisable = atoi(offloadProp) != 0; michael@0: if (offloadDisable) { michael@0: return; michael@0: } michael@0: michael@0: mAudioOffloadTrack = mOmxDecoder->GetAudioOffloadTrack(); michael@0: 
sp meta = (mAudioOffloadTrack.get()) ? michael@0: mAudioOffloadTrack->getFormat() : nullptr; michael@0: michael@0: // Supporting audio offload only when there is no video, no streaming michael@0: bool hasNoVideo = !mOmxDecoder->HasVideo(); michael@0: bool isNotStreaming michael@0: = mDecoder->GetResource()->IsDataCachedToEndOfResource(0); michael@0: michael@0: // Not much benefit in trying to offload other channel types. Most of them michael@0: // aren't supported and also duration would be less than a minute michael@0: bool isTypeMusic = mAudioChannel == dom::AudioChannel::Content; michael@0: michael@0: DECODER_LOG(PR_LOG_DEBUG, ("%s meta %p, no video %d, no streaming %d," michael@0: " channel type %d", __FUNCTION__, meta.get(), hasNoVideo, michael@0: isNotStreaming, mAudioChannel)); michael@0: michael@0: if ((meta.get()) && hasNoVideo && isNotStreaming && isTypeMusic && michael@0: canOffloadStream(meta, false, false, AUDIO_STREAM_MUSIC)) { michael@0: DECODER_LOG(PR_LOG_DEBUG, ("Can offload this audio stream")); michael@0: mDecoder->SetCanOffloadAudio(true); michael@0: } michael@0: } michael@0: #endif michael@0: michael@0: } // namespace mozilla