michael@0: /* vim:set ts=2 sw=2 sts=2 et cindent: */ michael@0: /* This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #ifdef XP_WIN michael@0: // Include Windows headers required for enabling high precision timers. michael@0: #include "windows.h" michael@0: #include "mmsystem.h" michael@0: #endif michael@0: michael@0: #include "mozilla/DebugOnly.h" michael@0: #include michael@0: michael@0: #include "MediaDecoderStateMachine.h" michael@0: #include "AudioStream.h" michael@0: #include "nsTArray.h" michael@0: #include "MediaDecoder.h" michael@0: #include "MediaDecoderReader.h" michael@0: #include "mozilla/mozalloc.h" michael@0: #include "VideoUtils.h" michael@0: #include "mozilla/dom/TimeRanges.h" michael@0: #include "nsDeque.h" michael@0: #include "AudioSegment.h" michael@0: #include "VideoSegment.h" michael@0: #include "ImageContainer.h" michael@0: #include "nsComponentManagerUtils.h" michael@0: #include "nsITimer.h" michael@0: #include "nsContentUtils.h" michael@0: #include "MediaShutdownManager.h" michael@0: #include "SharedThreadPool.h" michael@0: #include "MediaTaskQueue.h" michael@0: #include "nsIEventTarget.h" michael@0: #include "prenv.h" michael@0: #include "mozilla/Preferences.h" michael@0: #include "gfx2DGlue.h" michael@0: michael@0: #include michael@0: michael@0: namespace mozilla { michael@0: michael@0: using namespace mozilla::layers; michael@0: using namespace mozilla::dom; michael@0: using namespace mozilla::gfx; michael@0: michael@0: // avoid redefined macro in unified build michael@0: #undef DECODER_LOG michael@0: #undef VERBOSE_LOG michael@0: michael@0: #ifdef PR_LOGGING michael@0: extern PRLogModuleInfo* gMediaDecoderLog; michael@0: #define DECODER_LOG(type, msg, ...) 
\ michael@0: PR_LOG(gMediaDecoderLog, type, ("Decoder=%p " msg, mDecoder.get(), ##__VA_ARGS__)) michael@0: #define VERBOSE_LOG(msg, ...) \ michael@0: PR_BEGIN_MACRO \ michael@0: if (!PR_GetEnv("MOZ_QUIET")) { \ michael@0: DECODER_LOG(PR_LOG_DEBUG, msg, ##__VA_ARGS__); \ michael@0: } \ michael@0: PR_END_MACRO michael@0: #else michael@0: #define DECODER_LOG(type, msg, ...) michael@0: #define VERBOSE_LOG(msg, ...) michael@0: #endif michael@0: michael@0: // GetCurrentTime is defined in winbase.h as zero argument macro forwarding to michael@0: // GetTickCount() and conflicts with MediaDecoderStateMachine::GetCurrentTime michael@0: // implementation. With unified builds, putting this in headers is not enough. michael@0: #ifdef GetCurrentTime michael@0: #undef GetCurrentTime michael@0: #endif michael@0: michael@0: // Wait this number of seconds when buffering, then leave and play michael@0: // as best as we can if the required amount of data hasn't been michael@0: // retrieved. michael@0: static const uint32_t BUFFERING_WAIT_S = 30; michael@0: michael@0: // If audio queue has less than this many usecs of decoded audio, we won't risk michael@0: // trying to decode the video, we'll skip decoding video up to the next michael@0: // keyframe. We may increase this value for an individual decoder if we michael@0: // encounter video frames which take a long time to decode. michael@0: static const uint32_t LOW_AUDIO_USECS = 300000; michael@0: michael@0: // If more than this many usecs of decoded audio is queued, we'll hold off michael@0: // decoding more audio. If we increase the low audio threshold (see michael@0: // LOW_AUDIO_USECS above) we'll also increase this value to ensure it's not michael@0: // less than the low audio threshold. michael@0: const int64_t AMPLE_AUDIO_USECS = 1000000; michael@0: michael@0: // When we're only playing audio and we don't have a video stream, we divide michael@0: // AMPLE_AUDIO_USECS and LOW_AUDIO_USECS by the following value. 
This reduces michael@0: // the amount of decoded audio we buffer, reducing our memory usage. We only michael@0: // need to decode far ahead when we're decoding video using software decoding, michael@0: // as otherwise a long video decode could cause an audio underrun. michael@0: const int64_t NO_VIDEO_AMPLE_AUDIO_DIVISOR = 8; michael@0: michael@0: // Maximum number of bytes we'll allocate and write at once to the audio michael@0: // hardware when the audio stream contains missing frames and we're michael@0: // writing silence in order to fill the gap. We limit our silence-writes michael@0: // to 32KB in order to avoid allocating an impossibly large chunk of michael@0: // memory if we encounter a large chunk of silence. michael@0: const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024; michael@0: michael@0: // If we have fewer than LOW_VIDEO_FRAMES decoded frames, and michael@0: // we're not "prerolling video", we'll skip the video up to the next keyframe michael@0: // which is at or after the current playback position. michael@0: static const uint32_t LOW_VIDEO_FRAMES = 1; michael@0: michael@0: // Arbitrary "frame duration" when playing only audio. michael@0: static const int AUDIO_DURATION_USECS = 40000; michael@0: michael@0: // If we increase our "low audio threshold" (see LOW_AUDIO_USECS above), we michael@0: // use this as a factor in all our calculations. Increasing this will cause michael@0: // us to be more likely to increase our low audio threshold, and to michael@0: // increase it by more. michael@0: static const int THRESHOLD_FACTOR = 2; michael@0: michael@0: // If we have less than this much undecoded data available, we'll consider michael@0: // ourselves to be running low on undecoded data. We determine how much michael@0: // undecoded data we have remaining using the reader's GetBuffered() michael@0: // implementation. 
michael@0: static const int64_t LOW_DATA_THRESHOLD_USECS = 5000000; michael@0: michael@0: // LOW_DATA_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS, otherwise michael@0: // the skip-to-keyframe logic can activate when we're running low on data. michael@0: static_assert(LOW_DATA_THRESHOLD_USECS > AMPLE_AUDIO_USECS, michael@0: "LOW_DATA_THRESHOLD_USECS is too small"); michael@0: michael@0: // Amount of excess usecs of data to add in to the "should we buffer" calculation. michael@0: static const uint32_t EXHAUSTED_DATA_MARGIN_USECS = 60000; michael@0: michael@0: // If we enter buffering within QUICK_BUFFER_THRESHOLD_USECS seconds of starting michael@0: // decoding, we'll enter "quick buffering" mode, which exits a lot sooner than michael@0: // normal buffering mode. This exists so that if the decode-ahead exhausts the michael@0: // downloaded data while decode/playback is just starting up (for example michael@0: // after a seek while the media is still playing, or when playing a media michael@0: // as soon as it's load started), we won't necessarily stop for 30s and wait michael@0: // for buffering. We may actually be able to playback in this case, so exit michael@0: // buffering early and try to play. If it turns out we can't play, we'll fall michael@0: // back to buffering normally. michael@0: static const uint32_t QUICK_BUFFER_THRESHOLD_USECS = 2000000; michael@0: michael@0: // If we're quick buffering, we'll remain in buffering mode while we have less than michael@0: // QUICK_BUFFERING_LOW_DATA_USECS of decoded data available. michael@0: static const uint32_t QUICK_BUFFERING_LOW_DATA_USECS = 1000000; michael@0: michael@0: // If QUICK_BUFFERING_LOW_DATA_USECS is > AMPLE_AUDIO_USECS, we won't exit michael@0: // quick buffering in a timely fashion, as the decode pauses when it michael@0: // reaches AMPLE_AUDIO_USECS decoded data, and thus we'll never reach michael@0: // QUICK_BUFFERING_LOW_DATA_USECS. 
michael@0: static_assert(QUICK_BUFFERING_LOW_DATA_USECS <= AMPLE_AUDIO_USECS, michael@0: "QUICK_BUFFERING_LOW_DATA_USECS is too large"); michael@0: michael@0: // This value has been chosen empirically. michael@0: static const uint32_t AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS = 200000; michael@0: michael@0: // The amount of instability we tollerate in calls to michael@0: // MediaDecoderStateMachine::UpdateEstimatedDuration(); changes of duration michael@0: // less than this are ignored, as they're assumed to be the result of michael@0: // instability in the duration estimation. michael@0: static const int64_t ESTIMATED_DURATION_FUZZ_FACTOR_USECS = USECS_PER_S / 2; michael@0: michael@0: static TimeDuration UsecsToDuration(int64_t aUsecs) { michael@0: return TimeDuration::FromMilliseconds(static_cast(aUsecs) / USECS_PER_MS); michael@0: } michael@0: michael@0: static int64_t DurationToUsecs(TimeDuration aDuration) { michael@0: return static_cast(aDuration.ToSeconds() * USECS_PER_S); michael@0: } michael@0: michael@0: MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder, michael@0: MediaDecoderReader* aReader, michael@0: bool aRealTime) : michael@0: mDecoder(aDecoder), michael@0: mState(DECODER_STATE_DECODING_METADATA), michael@0: mInRunningStateMachine(false), michael@0: mSyncPointInMediaStream(-1), michael@0: mSyncPointInDecodedStream(-1), michael@0: mResetPlayStartTime(false), michael@0: mPlayDuration(0), michael@0: mStartTime(-1), michael@0: mEndTime(-1), michael@0: mFragmentEndTime(-1), michael@0: mReader(aReader), michael@0: mCurrentFrameTime(0), michael@0: mAudioStartTime(-1), michael@0: mAudioEndTime(-1), michael@0: mVideoFrameEndTime(-1), michael@0: mVolume(1.0), michael@0: mPlaybackRate(1.0), michael@0: mPreservesPitch(true), michael@0: mBasePosition(0), michael@0: mAmpleVideoFrames(2), michael@0: mLowAudioThresholdUsecs(LOW_AUDIO_USECS), michael@0: mAmpleAudioThresholdUsecs(AMPLE_AUDIO_USECS), michael@0: mDispatchedAudioDecodeTask(false), 
michael@0: mDispatchedVideoDecodeTask(false), michael@0: mIsReaderIdle(false), michael@0: mAudioCaptured(false), michael@0: mTransportSeekable(true), michael@0: mMediaSeekable(true), michael@0: mPositionChangeQueued(false), michael@0: mAudioCompleted(false), michael@0: mGotDurationFromMetaData(false), michael@0: mDispatchedEventToDecode(false), michael@0: mStopAudioThread(true), michael@0: mQuickBuffering(false), michael@0: mMinimizePreroll(false), michael@0: mDecodeThreadWaiting(false), michael@0: mRealTime(aRealTime), michael@0: mLastFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED), michael@0: mTimerId(0) michael@0: { michael@0: MOZ_COUNT_CTOR(MediaDecoderStateMachine); michael@0: NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); michael@0: michael@0: // Only enable realtime mode when "media.realtime_decoder.enabled" is true. michael@0: if (Preferences::GetBool("media.realtime_decoder.enabled", false) == false) michael@0: mRealTime = false; michael@0: michael@0: mAmpleVideoFrames = michael@0: std::max(Preferences::GetUint("media.video-queue.default-size", 10), 3); michael@0: michael@0: mBufferingWait = mRealTime ? 0 : BUFFERING_WAIT_S; michael@0: mLowDataThresholdUsecs = mRealTime ? 0 : LOW_DATA_THRESHOLD_USECS; michael@0: michael@0: mVideoPrerollFrames = mRealTime ? 0 : mAmpleVideoFrames / 2; michael@0: mAudioPrerollUsecs = mRealTime ? 0 : LOW_AUDIO_USECS * 2; michael@0: michael@0: #ifdef XP_WIN michael@0: // Ensure high precision timers are enabled on Windows, otherwise the state michael@0: // machine thread isn't woken up at reliable intervals to set the next frame, michael@0: // and we drop frames while painting. Note that multiple calls to this michael@0: // function per-process is OK, provided each call is matched by a corresponding michael@0: // timeEndPeriod() call. 
michael@0: timeBeginPeriod(1); michael@0: #endif michael@0: } michael@0: michael@0: MediaDecoderStateMachine::~MediaDecoderStateMachine() michael@0: { michael@0: MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread."); michael@0: MOZ_COUNT_DTOR(MediaDecoderStateMachine); michael@0: NS_ASSERTION(!mPendingWakeDecoder.get(), michael@0: "WakeDecoder should have been revoked already"); michael@0: michael@0: MOZ_ASSERT(!mDecodeTaskQueue, "Should be released in SHUTDOWN"); michael@0: // No need to cancel the timer here for we've done that in SHUTDOWN. michael@0: MOZ_ASSERT(!mTimer, "Should be released in SHUTDOWN"); michael@0: mReader = nullptr; michael@0: michael@0: #ifdef XP_WIN michael@0: timeEndPeriod(1); michael@0: #endif michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::HasFutureAudio() { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(HasAudio(), "Should only call HasFutureAudio() when we have audio"); michael@0: // We've got audio ready to play if: michael@0: // 1. We've not completed playback of audio, and michael@0: // 2. we either have more than the threshold of decoded audio available, or michael@0: // we've completely decoded all audio (but not finished playing it yet michael@0: // as per 1). 
michael@0: return !mAudioCompleted && michael@0: (AudioDecodedUsecs() > LOW_AUDIO_USECS * mPlaybackRate || AudioQueue().IsFinished()); michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::HaveNextFrameData() { michael@0: AssertCurrentThreadInMonitor(); michael@0: return (!HasAudio() || HasFutureAudio()) && michael@0: (!HasVideo() || VideoQueue().GetSize() > 0); michael@0: } michael@0: michael@0: int64_t MediaDecoderStateMachine::GetDecodedAudioDuration() { michael@0: NS_ASSERTION(OnDecodeThread() || OnStateMachineThread(), michael@0: "Should be on decode thread or state machine thread"); michael@0: AssertCurrentThreadInMonitor(); michael@0: int64_t audioDecoded = AudioQueue().Duration(); michael@0: if (mAudioEndTime != -1) { michael@0: audioDecoded += mAudioEndTime - GetMediaTime(); michael@0: } michael@0: return audioDecoded; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SendStreamAudio(AudioData* aAudio, michael@0: DecodedStreamData* aStream, michael@0: AudioSegment* aOutput) michael@0: { michael@0: NS_ASSERTION(OnDecodeThread() || michael@0: OnStateMachineThread(), "Should be on decode thread or state machine thread"); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (aAudio->mTime <= aStream->mLastAudioPacketTime) { michael@0: // ignore packet that we've already processed michael@0: return; michael@0: } michael@0: aStream->mLastAudioPacketTime = aAudio->mTime; michael@0: aStream->mLastAudioPacketEndTime = aAudio->GetEndTime(); michael@0: michael@0: // This logic has to mimic AudioLoop closely to make sure we write michael@0: // the exact same silences michael@0: CheckedInt64 audioWrittenOffset = UsecsToFrames(mInfo.mAudio.mRate, michael@0: aStream->mInitialTime + mStartTime) + aStream->mAudioFramesWritten; michael@0: CheckedInt64 frameOffset = UsecsToFrames(mInfo.mAudio.mRate, aAudio->mTime); michael@0: if (!audioWrittenOffset.isValid() || !frameOffset.isValid()) michael@0: return; michael@0: if 
(audioWrittenOffset.value() < frameOffset.value()) { michael@0: // Write silence to catch up michael@0: VERBOSE_LOG("writing %d frames of silence to MediaStream", michael@0: int32_t(frameOffset.value() - audioWrittenOffset.value())); michael@0: AudioSegment silence; michael@0: silence.InsertNullDataAtStart(frameOffset.value() - audioWrittenOffset.value()); michael@0: aStream->mAudioFramesWritten += silence.GetDuration(); michael@0: aOutput->AppendFrom(&silence); michael@0: } michael@0: michael@0: int64_t offset; michael@0: if (aStream->mAudioFramesWritten == 0) { michael@0: NS_ASSERTION(frameOffset.value() <= audioWrittenOffset.value(), michael@0: "Otherwise we'd have taken the write-silence path"); michael@0: // We're starting in the middle of a packet. Split the packet. michael@0: offset = audioWrittenOffset.value() - frameOffset.value(); michael@0: } else { michael@0: // Write the entire packet. michael@0: offset = 0; michael@0: } michael@0: michael@0: if (offset >= aAudio->mFrames) michael@0: return; michael@0: michael@0: aAudio->EnsureAudioBuffer(); michael@0: nsRefPtr buffer = aAudio->mAudioBuffer; michael@0: AudioDataValue* bufferData = static_cast(buffer->Data()); michael@0: nsAutoTArray channels; michael@0: for (uint32_t i = 0; i < aAudio->mChannels; ++i) { michael@0: channels.AppendElement(bufferData + i*aAudio->mFrames + offset); michael@0: } michael@0: aOutput->AppendFrames(buffer.forget(), channels, aAudio->mFrames); michael@0: VERBOSE_LOG("writing %d frames of data to MediaStream for AudioData at %lld", michael@0: aAudio->mFrames - int32_t(offset), aAudio->mTime); michael@0: aStream->mAudioFramesWritten += aAudio->mFrames - int32_t(offset); michael@0: } michael@0: michael@0: static void WriteVideoToMediaStream(layers::Image* aImage, michael@0: int64_t aDuration, michael@0: const IntSize& aIntrinsicSize, michael@0: VideoSegment* aOutput) michael@0: { michael@0: nsRefPtr image = aImage; michael@0: aOutput->AppendFrame(image.forget(), aDuration, 
aIntrinsicSize); michael@0: } michael@0: michael@0: static const TrackID TRACK_AUDIO = 1; michael@0: static const TrackID TRACK_VIDEO = 2; michael@0: static const TrackRate RATE_VIDEO = USECS_PER_S; michael@0: michael@0: void MediaDecoderStateMachine::SendStreamData() michael@0: { michael@0: NS_ASSERTION(OnDecodeThread() || michael@0: OnStateMachineThread(), "Should be on decode thread or state machine thread"); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: DecodedStreamData* stream = mDecoder->GetDecodedStream(); michael@0: if (!stream) michael@0: return; michael@0: michael@0: if (mState == DECODER_STATE_DECODING_METADATA) michael@0: return; michael@0: michael@0: // If there's still an audio thread alive, then we can't send any stream michael@0: // data yet since both SendStreamData and the audio thread want to be in michael@0: // charge of popping the audio queue. We're waiting for the audio thread michael@0: // to die before sending anything to our stream. michael@0: if (mAudioThread) michael@0: return; michael@0: michael@0: int64_t minLastAudioPacketTime = INT64_MAX; michael@0: bool finished = michael@0: (!mInfo.HasAudio() || AudioQueue().IsFinished()) && michael@0: (!mInfo.HasVideo() || VideoQueue().IsFinished()); michael@0: if (mDecoder->IsSameOriginMedia()) { michael@0: SourceMediaStream* mediaStream = stream->mStream; michael@0: StreamTime endPosition = 0; michael@0: michael@0: if (!stream->mStreamInitialized) { michael@0: if (mInfo.HasAudio()) { michael@0: AudioSegment* audio = new AudioSegment(); michael@0: mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudio.mRate, 0, audio); michael@0: stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO, michael@0: GetStateMachineThread(), GetWakeDecoderRunnable()); michael@0: } michael@0: if (mInfo.HasVideo()) { michael@0: VideoSegment* video = new VideoSegment(); michael@0: mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video); michael@0: 
stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO, michael@0: GetStateMachineThread(), GetWakeDecoderRunnable()); michael@0: } michael@0: stream->mStreamInitialized = true; michael@0: } michael@0: michael@0: if (mInfo.HasAudio()) { michael@0: nsAutoTArray audio; michael@0: // It's OK to hold references to the AudioData because while audio michael@0: // is captured, only the decoder thread pops from the queue (see below). michael@0: AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio); michael@0: AudioSegment output; michael@0: for (uint32_t i = 0; i < audio.Length(); ++i) { michael@0: SendStreamAudio(audio[i], stream, &output); michael@0: } michael@0: if (output.GetDuration() > 0) { michael@0: mediaStream->AppendToTrack(TRACK_AUDIO, &output); michael@0: } michael@0: if (AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) { michael@0: mediaStream->EndTrack(TRACK_AUDIO); michael@0: stream->mHaveSentFinishAudio = true; michael@0: } michael@0: minLastAudioPacketTime = std::min(minLastAudioPacketTime, stream->mLastAudioPacketTime); michael@0: endPosition = std::max(endPosition, michael@0: TicksToTimeRoundDown(mInfo.mAudio.mRate, stream->mAudioFramesWritten)); michael@0: } michael@0: michael@0: if (mInfo.HasVideo()) { michael@0: nsAutoTArray video; michael@0: // It's OK to hold references to the VideoData only the decoder thread michael@0: // pops from the queue. michael@0: VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video); michael@0: VideoSegment output; michael@0: for (uint32_t i = 0; i < video.Length(); ++i) { michael@0: VideoData* v = video[i]; michael@0: if (stream->mNextVideoTime < v->mTime) { michael@0: VERBOSE_LOG("writing last video to MediaStream %p for %lldus", michael@0: mediaStream, v->mTime - stream->mNextVideoTime); michael@0: // Write last video frame to catch up. mLastVideoImage can be null here michael@0: // which is fine, it just means there's no video. 
michael@0: WriteVideoToMediaStream(stream->mLastVideoImage, michael@0: v->mTime - stream->mNextVideoTime, stream->mLastVideoImageDisplaySize, michael@0: &output); michael@0: stream->mNextVideoTime = v->mTime; michael@0: } michael@0: if (stream->mNextVideoTime < v->GetEndTime()) { michael@0: VERBOSE_LOG("writing video frame %lldus to MediaStream %p for %lldus", michael@0: v->mTime, mediaStream, v->GetEndTime() - stream->mNextVideoTime); michael@0: WriteVideoToMediaStream(v->mImage, michael@0: v->GetEndTime() - stream->mNextVideoTime, v->mDisplay, michael@0: &output); michael@0: stream->mNextVideoTime = v->GetEndTime(); michael@0: stream->mLastVideoImage = v->mImage; michael@0: stream->mLastVideoImageDisplaySize = v->mDisplay; michael@0: } else { michael@0: VERBOSE_LOG("skipping writing video frame %lldus (end %lldus) to MediaStream", michael@0: v->mTime, v->GetEndTime()); michael@0: } michael@0: } michael@0: if (output.GetDuration() > 0) { michael@0: mediaStream->AppendToTrack(TRACK_VIDEO, &output); michael@0: } michael@0: if (VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) { michael@0: mediaStream->EndTrack(TRACK_VIDEO); michael@0: stream->mHaveSentFinishVideo = true; michael@0: } michael@0: endPosition = std::max(endPosition, michael@0: TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime - stream->mInitialTime)); michael@0: } michael@0: michael@0: if (!stream->mHaveSentFinish) { michael@0: stream->mStream->AdvanceKnownTracksTime(endPosition); michael@0: } michael@0: michael@0: if (finished && !stream->mHaveSentFinish) { michael@0: stream->mHaveSentFinish = true; michael@0: stream->mStream->Finish(); michael@0: } michael@0: } michael@0: michael@0: if (mAudioCaptured) { michael@0: // Discard audio packets that are no longer needed. 
michael@0: while (true) { michael@0: const AudioData* a = AudioQueue().PeekFront(); michael@0: // Packet times are not 100% reliable so this may discard packets that michael@0: // actually contain data for mCurrentFrameTime. This means if someone might michael@0: // create a new output stream and we actually don't have the audio for the michael@0: // very start. That's OK, we'll play silence instead for a brief moment. michael@0: // That's OK. Seeking to this time would have a similar issue for such michael@0: // badly muxed resources. michael@0: if (!a || a->GetEndTime() >= minLastAudioPacketTime) michael@0: break; michael@0: mAudioEndTime = std::max(mAudioEndTime, a->GetEndTime()); michael@0: delete AudioQueue().PopFront(); michael@0: } michael@0: michael@0: if (finished) { michael@0: mAudioCompleted = true; michael@0: UpdateReadyState(); michael@0: } michael@0: } michael@0: } michael@0: michael@0: MediaDecoderStateMachine::WakeDecoderRunnable* michael@0: MediaDecoderStateMachine::GetWakeDecoderRunnable() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (!mPendingWakeDecoder.get()) { michael@0: mPendingWakeDecoder = new WakeDecoderRunnable(this); michael@0: } michael@0: return mPendingWakeDecoder.get(); michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs) michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (AudioQueue().GetSize() == 0 || michael@0: GetDecodedAudioDuration() < aAmpleAudioUSecs) { michael@0: return false; michael@0: } michael@0: if (!mAudioCaptured) { michael@0: return true; michael@0: } michael@0: michael@0: DecodedStreamData* stream = mDecoder->GetDecodedStream(); michael@0: if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishAudio) { michael@0: if (!stream->mStream->HaveEnoughBuffered(TRACK_AUDIO)) { michael@0: return false; michael@0: } michael@0: stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO, 
michael@0: GetStateMachineThread(), GetWakeDecoderRunnable()); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::HaveEnoughDecodedVideo() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (static_cast(VideoQueue().GetSize()) < mAmpleVideoFrames * mPlaybackRate) { michael@0: return false; michael@0: } michael@0: michael@0: DecodedStreamData* stream = mDecoder->GetDecodedStream(); michael@0: if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishVideo) { michael@0: if (!stream->mStream->HaveEnoughBuffered(TRACK_VIDEO)) { michael@0: return false; michael@0: } michael@0: stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO, michael@0: GetStateMachineThread(), GetWakeDecoderRunnable()); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: MediaDecoderStateMachine::NeedToDecodeVideo() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine or decode thread."); michael@0: return mIsVideoDecoding && michael@0: !mMinimizePreroll && michael@0: !HaveEnoughDecodedVideo(); michael@0: } michael@0: michael@0: void michael@0: MediaDecoderStateMachine::DecodeVideo() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); michael@0: michael@0: if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) { michael@0: mDispatchedVideoDecodeTask = false; michael@0: return; michael@0: } michael@0: EnsureActive(); michael@0: michael@0: // We don't want to consider skipping to the next keyframe if we've michael@0: // only just started up the decode loop, so wait until we've decoded michael@0: // some frames before enabling the keyframe skip logic on video. 
michael@0: if (mIsVideoPrerolling && michael@0: (static_cast(VideoQueue().GetSize()) michael@0: >= mVideoPrerollFrames * mPlaybackRate)) michael@0: { michael@0: mIsVideoPrerolling = false; michael@0: } michael@0: michael@0: // We'll skip the video decode to the nearest keyframe if we're low on michael@0: // audio, or if we're low on video, provided we're not running low on michael@0: // data to decode. If we're running low on downloaded data to decode, michael@0: // we won't start keyframe skipping, as we'll be pausing playback to buffer michael@0: // soon anyway and we'll want to be able to display frames immediately michael@0: // after buffering finishes. michael@0: if (mState == DECODER_STATE_DECODING && michael@0: !mSkipToNextKeyFrame && michael@0: mIsVideoDecoding && michael@0: ((!mIsAudioPrerolling && mIsAudioDecoding && michael@0: GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) || michael@0: (!mIsVideoPrerolling && mIsVideoDecoding && michael@0: // don't skip frame when |clock time| <= |mVideoFrameEndTime| for michael@0: // we are still in the safe range without underrunning video frames michael@0: GetClock() > mVideoFrameEndTime && michael@0: (static_cast(VideoQueue().GetSize()) michael@0: < LOW_VIDEO_FRAMES * mPlaybackRate))) && michael@0: !HasLowUndecodedData()) michael@0: { michael@0: mSkipToNextKeyFrame = true; michael@0: DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe"); michael@0: } michael@0: michael@0: // Time the video decode, so that if it's slow, we can increase our low michael@0: // audio threshold to reduce the chance of an audio underrun while we're michael@0: // waiting for a video decode to complete. 
michael@0: TimeDuration decodeTime; michael@0: { michael@0: int64_t currentTime = GetMediaTime(); michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: TimeStamp start = TimeStamp::Now(); michael@0: mIsVideoDecoding = mReader->DecodeVideoFrame(mSkipToNextKeyFrame, currentTime); michael@0: decodeTime = TimeStamp::Now() - start; michael@0: } michael@0: if (!mIsVideoDecoding) { michael@0: // Playback ended for this stream, close the sample queue. michael@0: VideoQueue().Finish(); michael@0: CheckIfDecodeComplete(); michael@0: } michael@0: michael@0: if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs && michael@0: !HasLowUndecodedData()) michael@0: { michael@0: mLowAudioThresholdUsecs = michael@0: std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS); michael@0: mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs, michael@0: mAmpleAudioThresholdUsecs); michael@0: DECODER_LOG(PR_LOG_DEBUG, "Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld", michael@0: mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs); michael@0: } michael@0: michael@0: SendStreamData(); michael@0: michael@0: // The ready state can change when we've decoded data, so update the michael@0: // ready state, so that DOM events can fire. 
michael@0: UpdateReadyState(); michael@0: michael@0: mDispatchedVideoDecodeTask = false; michael@0: DispatchDecodeTasksIfNeeded(); michael@0: } michael@0: michael@0: bool michael@0: MediaDecoderStateMachine::NeedToDecodeAudio() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine or decode thread."); michael@0: return mIsAudioDecoding && michael@0: !mMinimizePreroll && michael@0: !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate); michael@0: } michael@0: michael@0: void michael@0: MediaDecoderStateMachine::DecodeAudio() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); michael@0: michael@0: if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) { michael@0: mDispatchedAudioDecodeTask = false; michael@0: return; michael@0: } michael@0: EnsureActive(); michael@0: michael@0: // We don't want to consider skipping to the next keyframe if we've michael@0: // only just started up the decode loop, so wait until we've decoded michael@0: // some audio data before enabling the keyframe skip logic on audio. michael@0: if (mIsAudioPrerolling && michael@0: GetDecodedAudioDuration() >= mAudioPrerollUsecs * mPlaybackRate) { michael@0: mIsAudioPrerolling = false; michael@0: } michael@0: michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: mIsAudioDecoding = mReader->DecodeAudioData(); michael@0: } michael@0: if (!mIsAudioDecoding) { michael@0: // Playback ended for this stream, close the sample queue. michael@0: AudioQueue().Finish(); michael@0: CheckIfDecodeComplete(); michael@0: } michael@0: michael@0: SendStreamData(); michael@0: michael@0: // Notify to ensure that the AudioLoop() is not waiting, in case it was michael@0: // waiting for more audio to be decoded. 
michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: michael@0: // The ready state can change when we've decoded data, so update the michael@0: // ready state, so that DOM events can fire. michael@0: UpdateReadyState(); michael@0: michael@0: mDispatchedAudioDecodeTask = false; michael@0: DispatchDecodeTasksIfNeeded(); michael@0: } michael@0: michael@0: void michael@0: MediaDecoderStateMachine::CheckIfDecodeComplete() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: if (mState == DECODER_STATE_SHUTDOWN || michael@0: mState == DECODER_STATE_SEEKING || michael@0: mState == DECODER_STATE_COMPLETED) { michael@0: // Don't change our state if we've already been shutdown, or we're seeking, michael@0: // since we don't want to abort the shutdown or seek processes. michael@0: return; michael@0: } michael@0: MOZ_ASSERT(!AudioQueue().IsFinished() || !mIsAudioDecoding); michael@0: MOZ_ASSERT(!VideoQueue().IsFinished() || !mIsVideoDecoding); michael@0: if (!mIsVideoDecoding && !mIsAudioDecoding) { michael@0: // We've finished decoding all active streams, michael@0: // so move to COMPLETED state. michael@0: mState = DECODER_STATE_COMPLETED; michael@0: DispatchDecodeTasksIfNeeded(); michael@0: ScheduleStateMachine(); michael@0: } michael@0: DECODER_LOG(PR_LOG_DEBUG, "CheckIfDecodeComplete %scompleted", michael@0: ((mState == DECODER_STATE_COMPLETED) ? "" : "NOT ")); michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::IsPlaying() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: return !mPlayStartTime.IsNull(); michael@0: } michael@0: michael@0: // If we have already written enough frames to the AudioStream, start the michael@0: // playback. michael@0: static void michael@0: StartAudioStreamPlaybackIfNeeded(AudioStream* aStream) michael@0: { michael@0: // We want to have enough data in the buffer to start the stream. 
michael@0: if (static_cast(aStream->GetWritten()) / aStream->GetRate() >= michael@0: static_cast(AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS) / USECS_PER_S) { michael@0: aStream->Start(); michael@0: } michael@0: } michael@0: michael@0: static void WriteSilence(AudioStream* aStream, uint32_t aFrames) michael@0: { michael@0: uint32_t numSamples = aFrames * aStream->GetChannels(); michael@0: nsAutoTArray buf; michael@0: buf.SetLength(numSamples); michael@0: memset(buf.Elements(), 0, numSamples * sizeof(AudioDataValue)); michael@0: aStream->Write(buf.Elements(), aFrames); michael@0: michael@0: StartAudioStreamPlaybackIfNeeded(aStream); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::AudioLoop() michael@0: { michael@0: NS_ASSERTION(OnAudioThread(), "Should be on audio thread."); michael@0: DECODER_LOG(PR_LOG_DEBUG, "Begun audio thread/loop"); michael@0: int64_t audioDuration = 0; michael@0: int64_t audioStartTime = -1; michael@0: uint32_t channels, rate; michael@0: double volume = -1; michael@0: bool setVolume; michael@0: double playbackRate = -1; michael@0: bool setPlaybackRate; michael@0: bool preservesPitch; michael@0: bool setPreservesPitch; michael@0: AudioChannel audioChannel; michael@0: michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: mAudioCompleted = false; michael@0: audioStartTime = mAudioStartTime; michael@0: NS_ASSERTION(audioStartTime != -1, "Should have audio start time by now"); michael@0: channels = mInfo.mAudio.mChannels; michael@0: rate = mInfo.mAudio.mRate; michael@0: michael@0: audioChannel = mDecoder->GetAudioChannel(); michael@0: volume = mVolume; michael@0: preservesPitch = mPreservesPitch; michael@0: playbackRate = mPlaybackRate; michael@0: } michael@0: michael@0: { michael@0: // AudioStream initialization can block for extended periods in unusual michael@0: // circumstances, so we take care to drop the decoder monitor while michael@0: // initializing. 
michael@0: RefPtr audioStream(new AudioStream()); michael@0: audioStream->Init(channels, rate, audioChannel, AudioStream::HighLatency); michael@0: audioStream->SetVolume(volume); michael@0: if (audioStream->SetPreservesPitch(preservesPitch) != NS_OK) { michael@0: NS_WARNING("Setting the pitch preservation failed at AudioLoop start."); michael@0: } michael@0: if (playbackRate != 1.0) { michael@0: NS_ASSERTION(playbackRate != 0, michael@0: "Don't set the playbackRate to 0 on an AudioStream."); michael@0: if (audioStream->SetPlaybackRate(playbackRate) != NS_OK) { michael@0: NS_WARNING("Setting the playback rate failed at AudioLoop start."); michael@0: } michael@0: } michael@0: michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: mAudioStream = audioStream.forget(); michael@0: } michael@0: } michael@0: michael@0: while (1) { michael@0: // Wait while we're not playing, and we're not shutting down, or we're michael@0: // playing and we've got no audio to play. michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(mState != DECODER_STATE_DECODING_METADATA, michael@0: "Should have meta data before audio started playing."); michael@0: while (mState != DECODER_STATE_SHUTDOWN && michael@0: !mStopAudioThread && michael@0: (!IsPlaying() || michael@0: mState == DECODER_STATE_BUFFERING || michael@0: (AudioQueue().GetSize() == 0 && michael@0: !AudioQueue().AtEndOfStream()))) michael@0: { michael@0: if (!IsPlaying() && !mAudioStream->IsPaused()) { michael@0: mAudioStream->Pause(); michael@0: } michael@0: mon.Wait(); michael@0: } michael@0: michael@0: // If we're shutting down, break out and exit the audio thread. michael@0: // Also break out if audio is being captured. 
michael@0: if (mState == DECODER_STATE_SHUTDOWN || michael@0: mStopAudioThread || michael@0: AudioQueue().AtEndOfStream()) michael@0: { michael@0: break; michael@0: } michael@0: michael@0: // We only want to go to the expense of changing the volume if michael@0: // the volume has changed. michael@0: setVolume = volume != mVolume; michael@0: volume = mVolume; michael@0: michael@0: // Same for the playbackRate. michael@0: setPlaybackRate = playbackRate != mPlaybackRate; michael@0: playbackRate = mPlaybackRate; michael@0: michael@0: // Same for the pitch preservation. michael@0: setPreservesPitch = preservesPitch != mPreservesPitch; michael@0: preservesPitch = mPreservesPitch; michael@0: michael@0: if (IsPlaying() && mAudioStream->IsPaused()) { michael@0: mAudioStream->Resume(); michael@0: } michael@0: } michael@0: michael@0: if (setVolume) { michael@0: mAudioStream->SetVolume(volume); michael@0: } michael@0: if (setPlaybackRate) { michael@0: NS_ASSERTION(playbackRate != 0, michael@0: "Don't set the playbackRate to 0 in the AudioStreams"); michael@0: if (mAudioStream->SetPlaybackRate(playbackRate) != NS_OK) { michael@0: NS_WARNING("Setting the playback rate failed in AudioLoop."); michael@0: } michael@0: } michael@0: if (setPreservesPitch) { michael@0: if (mAudioStream->SetPreservesPitch(preservesPitch) != NS_OK) { michael@0: NS_WARNING("Setting the pitch preservation failed in AudioLoop."); michael@0: } michael@0: } michael@0: NS_ASSERTION(AudioQueue().GetSize() > 0, michael@0: "Should have data to play"); michael@0: // See if there's a gap in the audio. If there is, push silence into the michael@0: // audio hardware, so we can play across the gap. michael@0: const AudioData* s = AudioQueue().PeekFront(); michael@0: michael@0: // Calculate the number of frames that have been pushed onto the audio michael@0: // hardware. 
michael@0: CheckedInt64 playedFrames = UsecsToFrames(audioStartTime, rate) + michael@0: audioDuration; michael@0: // Calculate the timestamp of the next chunk of audio in numbers of michael@0: // samples. michael@0: CheckedInt64 sampleTime = UsecsToFrames(s->mTime, rate); michael@0: CheckedInt64 missingFrames = sampleTime - playedFrames; michael@0: if (!missingFrames.isValid() || !sampleTime.isValid()) { michael@0: NS_WARNING("Int overflow adding in AudioLoop()"); michael@0: break; michael@0: } michael@0: michael@0: int64_t framesWritten = 0; michael@0: if (missingFrames.value() > 0) { michael@0: // The next audio chunk begins some time after the end of the last chunk michael@0: // we pushed to the audio hardware. We must push silence into the audio michael@0: // hardware so that the next audio chunk begins playback at the correct michael@0: // time. michael@0: missingFrames = std::min(UINT32_MAX, missingFrames.value()); michael@0: VERBOSE_LOG("playing %d frames of silence", int32_t(missingFrames.value())); michael@0: framesWritten = PlaySilence(static_cast(missingFrames.value()), michael@0: channels, playedFrames.value()); michael@0: } else { michael@0: framesWritten = PlayFromAudioQueue(sampleTime.value(), channels); michael@0: } michael@0: audioDuration += framesWritten; michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: CheckedInt64 playedUsecs = FramesToUsecs(audioDuration, rate) + audioStartTime; michael@0: if (!playedUsecs.isValid()) { michael@0: NS_WARNING("Int overflow calculating audio end time"); michael@0: break; michael@0: } michael@0: mAudioEndTime = playedUsecs.value(); michael@0: } michael@0: } michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: if (AudioQueue().AtEndOfStream() && michael@0: mState != DECODER_STATE_SHUTDOWN && michael@0: !mStopAudioThread) michael@0: { michael@0: // If the media was too short to trigger the start of the audio stream, 
michael@0: // start it now. michael@0: mAudioStream->Start(); michael@0: // Last frame pushed to audio hardware, wait for the audio to finish, michael@0: // before the audio thread terminates. michael@0: bool seeking = false; michael@0: { michael@0: int64_t oldPosition = -1; michael@0: int64_t position = GetMediaTime(); michael@0: while (oldPosition != position && michael@0: mAudioEndTime - position > 0 && michael@0: mState != DECODER_STATE_SEEKING && michael@0: mState != DECODER_STATE_SHUTDOWN) michael@0: { michael@0: const int64_t DRAIN_BLOCK_USECS = 100000; michael@0: Wait(std::min(mAudioEndTime - position, DRAIN_BLOCK_USECS)); michael@0: oldPosition = position; michael@0: position = GetMediaTime(); michael@0: } michael@0: seeking = mState == DECODER_STATE_SEEKING; michael@0: } michael@0: michael@0: if (!seeking && !mAudioStream->IsPaused()) { michael@0: { michael@0: ReentrantMonitorAutoExit exit(mDecoder->GetReentrantMonitor()); michael@0: mAudioStream->Drain(); michael@0: } michael@0: } michael@0: } michael@0: } michael@0: DECODER_LOG(PR_LOG_DEBUG, "Reached audio stream end."); michael@0: { michael@0: // Must hold lock while shutting down and anulling the audio stream to prevent michael@0: // state machine thread trying to use it while we're destroying it. michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: mAudioStream->Shutdown(); michael@0: mAudioStream = nullptr; michael@0: if (!mAudioCaptured) { michael@0: mAudioCompleted = true; michael@0: UpdateReadyState(); michael@0: // Kick the decode thread; it may be sleeping waiting for this to finish. 
michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: } michael@0: } michael@0: michael@0: DECODER_LOG(PR_LOG_DEBUG, "Audio stream finished playing, audio thread exit"); michael@0: } michael@0: michael@0: uint32_t MediaDecoderStateMachine::PlaySilence(uint32_t aFrames, michael@0: uint32_t aChannels, michael@0: uint64_t aFrameOffset) michael@0: michael@0: { michael@0: NS_ASSERTION(OnAudioThread(), "Only call on audio thread."); michael@0: NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); michael@0: uint32_t maxFrames = SILENCE_BYTES_CHUNK / aChannels / sizeof(AudioDataValue); michael@0: uint32_t frames = std::min(aFrames, maxFrames); michael@0: WriteSilence(mAudioStream, frames); michael@0: return frames; michael@0: } michael@0: michael@0: uint32_t MediaDecoderStateMachine::PlayFromAudioQueue(uint64_t aFrameOffset, michael@0: uint32_t aChannels) michael@0: { michael@0: NS_ASSERTION(OnAudioThread(), "Only call on audio thread."); michael@0: NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); michael@0: nsAutoPtr audio(AudioQueue().PopFront()); michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_WARN_IF_FALSE(IsPlaying(), "Should be playing"); michael@0: // Awaken the decode loop if it's waiting for space to free up in the michael@0: // audio queue. 
michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: } michael@0: int64_t offset = -1; michael@0: uint32_t frames = 0; michael@0: VERBOSE_LOG("playing %d frames of data to stream for AudioData at %lld", michael@0: audio->mFrames, audio->mTime); michael@0: mAudioStream->Write(audio->mAudioData, michael@0: audio->mFrames); michael@0: michael@0: aChannels = mAudioStream->GetOutChannels(); michael@0: michael@0: StartAudioStreamPlaybackIfNeeded(mAudioStream); michael@0: michael@0: offset = audio->mOffset; michael@0: frames = audio->mFrames; michael@0: michael@0: if (offset != -1) { michael@0: mDecoder->UpdatePlaybackOffset(offset); michael@0: } michael@0: return frames; michael@0: } michael@0: michael@0: nsresult MediaDecoderStateMachine::Init(MediaDecoderStateMachine* aCloneDonor) michael@0: { michael@0: MOZ_ASSERT(NS_IsMainThread()); michael@0: michael@0: RefPtr decodePool( michael@0: SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"), michael@0: Preferences::GetUint("media.num-decode-threads", 25))); michael@0: NS_ENSURE_TRUE(decodePool, NS_ERROR_FAILURE); michael@0: michael@0: RefPtr stateMachinePool( michael@0: SharedThreadPool::Get(NS_LITERAL_CSTRING("Media State Machine"), 1)); michael@0: NS_ENSURE_TRUE(stateMachinePool, NS_ERROR_FAILURE); michael@0: michael@0: mDecodeTaskQueue = new MediaTaskQueue(decodePool.forget()); michael@0: NS_ENSURE_TRUE(mDecodeTaskQueue, NS_ERROR_FAILURE); michael@0: michael@0: MediaDecoderReader* cloneReader = nullptr; michael@0: if (aCloneDonor) { michael@0: cloneReader = aCloneDonor->mReader; michael@0: } michael@0: michael@0: mStateMachineThreadPool = stateMachinePool; michael@0: michael@0: nsresult rv; michael@0: mTimer = do_CreateInstance("@mozilla.org/timer;1", &rv); michael@0: NS_ENSURE_SUCCESS(rv, rv); michael@0: rv = mTimer->SetTarget(GetStateMachineThread()); michael@0: NS_ENSURE_SUCCESS(rv, rv); michael@0: michael@0: return mReader->Init(cloneReader); michael@0: } michael@0: michael@0: void 
MediaDecoderStateMachine::StopPlayback() michael@0: { michael@0: DECODER_LOG(PR_LOG_DEBUG, "StopPlayback()"); michael@0: michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: mDecoder->NotifyPlaybackStopped(); michael@0: michael@0: if (IsPlaying()) { michael@0: mPlayDuration = GetClock(); michael@0: mPlayStartTime = TimeStamp(); michael@0: } michael@0: // Notify the audio thread, so that it notices that we've stopped playing, michael@0: // so it can pause audio playback. michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: NS_ASSERTION(!IsPlaying(), "Should report not playing at end of StopPlayback()"); michael@0: mDecoder->UpdateStreamBlockingForStateMachinePlaying(); michael@0: michael@0: DispatchDecodeTasksIfNeeded(); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetSyncPointForMediaStream() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: DecodedStreamData* stream = mDecoder->GetDecodedStream(); michael@0: if (!stream) { michael@0: return; michael@0: } michael@0: michael@0: mSyncPointInMediaStream = stream->GetLastOutputTime(); michael@0: mSyncPointInDecodedStream = mStartTime + mPlayDuration; michael@0: } michael@0: michael@0: int64_t MediaDecoderStateMachine::GetCurrentTimeViaMediaStreamSync() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(mSyncPointInDecodedStream >= 0, "Should have set up sync point"); michael@0: DecodedStreamData* stream = mDecoder->GetDecodedStream(); michael@0: StreamTime streamDelta = stream->GetLastOutputTime() - mSyncPointInMediaStream; michael@0: return mSyncPointInDecodedStream + MediaTimeToMicroseconds(streamDelta); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::StartPlayback() michael@0: { michael@0: DECODER_LOG(PR_LOG_DEBUG, "StartPlayback()"); michael@0: michael@0: NS_ASSERTION(!IsPlaying(), "Shouldn't be playing when StartPlayback() is called"); michael@0: AssertCurrentThreadInMonitor(); michael@0: 
michael@0: mDecoder->NotifyPlaybackStarted(); michael@0: mPlayStartTime = TimeStamp::Now(); michael@0: michael@0: NS_ASSERTION(IsPlaying(), "Should report playing by end of StartPlayback()"); michael@0: if (NS_FAILED(StartAudioThread())) { michael@0: NS_WARNING("Failed to create audio thread"); michael@0: } michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: mDecoder->UpdateStreamBlockingForStateMachinePlaying(); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime) michael@0: { michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: NS_ASSERTION(mStartTime >= 0, "Should have positive mStartTime"); michael@0: mCurrentFrameTime = aTime - mStartTime; michael@0: NS_ASSERTION(mCurrentFrameTime >= 0, "CurrentTime should be positive!"); michael@0: if (aTime > mEndTime) { michael@0: NS_ASSERTION(mCurrentFrameTime > GetDuration(), michael@0: "CurrentTime must be after duration if aTime > endTime!"); michael@0: mEndTime = aTime; michael@0: nsCOMPtr event = michael@0: NS_NewRunnableMethod(mDecoder, &MediaDecoder::DurationChanged); michael@0: NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL); michael@0: } michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime) michael@0: { michael@0: UpdatePlaybackPositionInternal(aTime); michael@0: michael@0: bool fragmentEnded = mFragmentEndTime >= 0 && GetMediaTime() >= mFragmentEndTime; michael@0: if (!mPositionChangeQueued || fragmentEnded) { michael@0: mPositionChangeQueued = true; michael@0: nsCOMPtr event = michael@0: NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackPositionChanged); michael@0: NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL); michael@0: } michael@0: michael@0: mMetadataManager.DispatchMetadataIfNeeded(mDecoder, aTime); michael@0: michael@0: if (fragmentEnded) { 
michael@0: StopPlayback(); michael@0: } michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::ClearPositionChangeFlag() michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: mPositionChangeQueued = false; michael@0: } michael@0: michael@0: MediaDecoderOwner::NextFrameStatus MediaDecoderStateMachine::GetNextFrameStatus() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: if (IsBuffering() || IsSeeking()) { michael@0: return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING; michael@0: } else if (HaveNextFrameData()) { michael@0: return MediaDecoderOwner::NEXT_FRAME_AVAILABLE; michael@0: } michael@0: return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetVolume(double volume) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: mVolume = volume; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: if (!mAudioCaptured && aCaptured && !mStopAudioThread) { michael@0: // Make sure the state machine runs as soon as possible. That will michael@0: // stop the audio thread. michael@0: // If mStopAudioThread is true then we're already stopping the audio thread michael@0: // and since we set mAudioCaptured to true, nothing can start it again. 
michael@0: ScheduleStateMachine(); michael@0: } michael@0: mAudioCaptured = aCaptured; michael@0: } michael@0: michael@0: double MediaDecoderStateMachine::GetCurrentTime() const michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread() || michael@0: OnStateMachineThread() || michael@0: OnDecodeThread(), michael@0: "Should be on main, decode, or state machine thread."); michael@0: michael@0: return static_cast(mCurrentFrameTime) / static_cast(USECS_PER_S); michael@0: } michael@0: michael@0: int64_t MediaDecoderStateMachine::GetDuration() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (mEndTime == -1 || mStartTime == -1) michael@0: return -1; michael@0: return mEndTime - mStartTime; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetDuration(int64_t aDuration) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(), michael@0: "Should be on main or decode thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (aDuration == -1) { michael@0: return; michael@0: } michael@0: michael@0: if (mStartTime != -1) { michael@0: mEndTime = mStartTime + aDuration; michael@0: } else { michael@0: mStartTime = 0; michael@0: mEndTime = aDuration; michael@0: } michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::UpdateEstimatedDuration(int64_t aDuration) michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: int64_t duration = GetDuration(); michael@0: if (aDuration != duration && michael@0: abs(aDuration - duration) > ESTIMATED_DURATION_FUZZ_FACTOR_USECS) { michael@0: SetDuration(aDuration); michael@0: nsCOMPtr event = michael@0: NS_NewRunnableMethod(mDecoder, &MediaDecoder::DurationChanged); michael@0: NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL); michael@0: } michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetMediaEndTime(int64_t aEndTime) michael@0: { michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread"); michael@0: 
AssertCurrentThreadInMonitor(); michael@0: michael@0: mEndTime = aEndTime; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetFragmentEndTime(int64_t aEndTime) michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: mFragmentEndTime = aEndTime < 0 ? aEndTime : aEndTime + mStartTime; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetTransportSeekable(bool aTransportSeekable) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(), michael@0: "Should be on main thread or the decoder thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: mTransportSeekable = aTransportSeekable; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetMediaSeekable(bool aMediaSeekable) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(), michael@0: "Should be on main thread or the decoder thread."); michael@0: michael@0: mMediaSeekable = aMediaSeekable; michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::IsDormantNeeded() michael@0: { michael@0: return mReader->IsDormantNeeded(); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetDormant(bool aDormant) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (!mReader) { michael@0: return; michael@0: } michael@0: michael@0: if (aDormant) { michael@0: ScheduleStateMachine(); michael@0: mState = DECODER_STATE_DORMANT; michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: } else if ((aDormant != true) && (mState == DECODER_STATE_DORMANT)) { michael@0: ScheduleStateMachine(); michael@0: mStartTime = 0; michael@0: mCurrentFrameTime = 0; michael@0: mState = DECODER_STATE_DECODING_METADATA; michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: } michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::Shutdown() michael@0: { michael@0: 
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");

  // Once we've entered the shutdown state here there's no going back.
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());

  // Change state before issuing shutdown request to threads so those
  // threads can start exiting cleanly during the Shutdown call.
  DECODER_LOG(PR_LOG_DEBUG, "Changed state to SHUTDOWN");
  ScheduleStateMachine();
  mState = DECODER_STATE_SHUTDOWN;
  mDecoder->GetReentrantMonitor().NotifyAll();
}

// Transitions into the DECODING state: resets the per-stream decoding flags
// and preroll state, then kicks off decode tasks. No-op if already decoding;
// may land directly in COMPLETED if both streams are already finished.
void MediaDecoderStateMachine::StartDecoding()
{
  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
               "Should be on state machine or decode thread.");
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  if (mState == DECODER_STATE_DECODING) {
    return;
  }
  mState = DECODER_STATE_DECODING;

  mDecodeStartTime = TimeStamp::Now();

  // Reset our "stream finished decoding" flags, so we try to decode all
  // streams that we have when we start decoding.
  mIsVideoDecoding = HasVideo() && !VideoQueue().IsFinished();
  mIsAudioDecoding = HasAudio() && !AudioQueue().IsFinished();

  // If both streams are already finished this flips us to COMPLETED.
  CheckIfDecodeComplete();
  if (mState == DECODER_STATE_COMPLETED) {
    return;
  }

  // Reset other state to pristine values before starting decode.
  mSkipToNextKeyFrame = false;
  mIsAudioPrerolling = true;
  mIsVideoPrerolling = true;

  // Ensure that we've got tasks enqueued to decode data if we need to.
  DispatchDecodeTasksIfNeeded();

  ScheduleStateMachine();
}

// Enters WAIT_FOR_RESOURCES: the reader is blocked on a scarce resource
// (e.g. a hardware decoder) and decoding is paused until it frees up.
void MediaDecoderStateMachine::StartWaitForResources()
{
  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
               "Should be on state machine or decode thread.");
  AssertCurrentThreadInMonitor();
  mState = DECODER_STATE_WAIT_FOR_RESOURCES;
}

// Called when the reader's resource availability may have changed; if the
// reader is no longer blocked, resumes by re-entering metadata decoding.
void MediaDecoderStateMachine::NotifyWaitingForResourcesStatusChanged()
{
  AssertCurrentThreadInMonitor();
  if (mState != DECODER_STATE_WAIT_FOR_RESOURCES ||
      mReader->IsWaitingMediaResources()) {
    return;
  }
  // The reader is no longer waiting for resources (say a hardware decoder),
  // we can now proceed to decode metadata.
  mState = DECODER_STATE_DECODING_METADATA;
  EnqueueDecodeMetadataTask();
}

// Main-thread play request. Exits BUFFERING immediately; otherwise relies on
// the state machine noticing the decoder's PLAYING state on its next run.
void MediaDecoderStateMachine::Play()
{
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  // When asked to play, switch to decoding state only if
  // we are currently buffering. In other cases, we'll start playing anyway
  // when the state machine notices the decoder's state change to PLAYING.
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  if (mState == DECODER_STATE_BUFFERING) {
    DECODER_LOG(PR_LOG_DEBUG, "Changed state from BUFFERING to DECODING");
    mState = DECODER_STATE_DECODING;
    mDecodeStartTime = TimeStamp::Now();
  }
  // Once we start playing, we don't want to minimize our prerolling, as we
  // assume the user is likely to want to keep playing in future.
michael@0: mMinimizePreroll = false; michael@0: ScheduleStateMachine(); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::ResetPlayback() michael@0: { michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); michael@0: mVideoFrameEndTime = -1; michael@0: mAudioStartTime = -1; michael@0: mAudioEndTime = -1; michael@0: mAudioCompleted = false; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer, michael@0: uint32_t aLength, michael@0: int64_t aOffset) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); michael@0: mReader->NotifyDataArrived(aBuffer, aLength, aOffset); michael@0: michael@0: // While playing an unseekable stream of unknown duration, mEndTime is michael@0: // updated (in AdvanceFrame()) as we play. But if data is being downloaded michael@0: // faster than played, mEndTime won't reflect the end of playable data michael@0: // since we haven't played the frame at the end of buffered data. So update michael@0: // mEndTime here as new data is downloaded to prevent such a lag. michael@0: dom::TimeRanges buffered; michael@0: if (mDecoder->IsInfinite() && michael@0: NS_SUCCEEDED(mDecoder->GetBuffered(&buffered))) michael@0: { michael@0: uint32_t length = 0; michael@0: buffered.GetLength(&length); michael@0: if (length) { michael@0: double end = 0; michael@0: buffered.End(length - 1, &end); michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: mEndTime = std::max(mEndTime, end * USECS_PER_S); michael@0: } michael@0: } michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::Seek(const SeekTarget& aTarget) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: michael@0: // We need to be able to seek both at a transport level and at a media level michael@0: // to seek. 
michael@0: if (!mMediaSeekable) { michael@0: return; michael@0: } michael@0: // MediaDecoder::mPlayState should be SEEKING while we seek, and michael@0: // in that case MediaDecoder shouldn't be calling us. michael@0: NS_ASSERTION(mState != DECODER_STATE_SEEKING, michael@0: "We shouldn't already be seeking"); michael@0: NS_ASSERTION(mState >= DECODER_STATE_DECODING, michael@0: "We should have loaded metadata"); michael@0: michael@0: // Bound the seek time to be inside the media range. michael@0: NS_ASSERTION(mStartTime != -1, "Should know start time by now"); michael@0: NS_ASSERTION(mEndTime != -1, "Should know end time by now"); michael@0: int64_t seekTime = aTarget.mTime + mStartTime; michael@0: seekTime = std::min(seekTime, mEndTime); michael@0: seekTime = std::max(mStartTime, seekTime); michael@0: NS_ASSERTION(seekTime >= mStartTime && seekTime <= mEndTime, michael@0: "Can only seek in range [0,duration]"); michael@0: mSeekTarget = SeekTarget(seekTime, aTarget.mType); michael@0: michael@0: mBasePosition = seekTime - mStartTime; michael@0: DECODER_LOG(PR_LOG_DEBUG, "Changed state to SEEKING (to %lld)", mSeekTarget.mTime); michael@0: mState = DECODER_STATE_SEEKING; michael@0: if (mDecoder->GetDecodedStream()) { michael@0: mDecoder->RecreateDecodedStream(seekTime - mStartTime); michael@0: } michael@0: ScheduleStateMachine(); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::StopAudioThread() michael@0: { michael@0: NS_ASSERTION(OnDecodeThread() || michael@0: OnStateMachineThread(), "Should be on decode thread or state machine thread"); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (mStopAudioThread) { michael@0: // Nothing to do, since the thread is already stopping michael@0: return; michael@0: } michael@0: michael@0: mStopAudioThread = true; michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: if (mAudioThread) { michael@0: DECODER_LOG(PR_LOG_DEBUG, "Shutdown audio thread"); michael@0: { michael@0: 
      // Drop the monitor while joining the audio thread to avoid deadlock
      // with the thread's own monitor use.
      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
      mAudioThread->Shutdown();
    }
    mAudioThread = nullptr;
    // Now that the audio thread is dead, try sending data to our MediaStream(s).
    // That may have been waiting for the audio thread to stop.
    SendStreamData();
  }
}

// Dispatches a CallDecodeMetadata task to the decode task queue; a no-op
// unless we're in the DECODING_METADATA state.
nsresult
MediaDecoderStateMachine::EnqueueDecodeMetadataTask()
{
  AssertCurrentThreadInMonitor();

  if (mState != DECODER_STATE_DECODING_METADATA) {
    return NS_OK;
  }
  nsresult rv = mDecodeTaskQueue->Dispatch(
    NS_NewRunnableMethod(this, &MediaDecoderStateMachine::CallDecodeMetadata));
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

// Ensures the reader is out of its idle state before decoding; called on
// the decode thread with the monitor held (dropped around SetReaderActive).
void
MediaDecoderStateMachine::EnsureActive()
{
  AssertCurrentThreadInMonitor();
  MOZ_ASSERT(OnDecodeThread());
  if (!mIsReaderIdle) {
    return;
  }
  mIsReaderIdle = false;
  {
    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
    SetReaderActive();
  }
}

// Decode-thread task: tells the reader it may release/idle its resources.
void
MediaDecoderStateMachine::SetReaderIdle()
{
#ifdef PR_LOGGING
  {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    DECODER_LOG(PR_LOG_DEBUG, "SetReaderIdle() audioQueue=%lld videoQueue=%lld",
                GetDecodedAudioDuration(),
                VideoQueue().Duration());
  }
#endif
  MOZ_ASSERT(OnDecodeThread());
  mReader->SetIdle();
}

// Decode-thread task: tells the reader to reacquire its decoding resources.
void
MediaDecoderStateMachine::SetReaderActive()
{
  DECODER_LOG(PR_LOG_DEBUG, "SetReaderActive()");
  MOZ_ASSERT(OnDecodeThread());
  mReader->SetActive();
}

// Central dispatcher: queues audio/video decode tasks when more decoded
// data is needed, and toggles the reader between idle and active depending
// on whether any decoding or playback is in progress.
void
MediaDecoderStateMachine::DispatchDecodeTasksIfNeeded()
{
  AssertCurrentThreadInMonitor();

  // NeedToDecodeAudio() can go from false to true while we hold the
  // monitor, but it can't go from true to false. This can happen because
  // NeedToDecodeAudio() takes into account the amount of decoded audio
  // that's been written to the AudioStream but not played yet. So if we
  // were calling NeedToDecodeAudio() twice and we thread-context switch
  // between the calls, audio can play, which can affect the return value
  // of NeedToDecodeAudio() giving inconsistent results. So we cache the
  // value returned by NeedToDecodeAudio(), and make decisions
  // based on the cached value. If NeedToDecodeAudio() has
  // returned false, and then subsequently returns true and we're not
  // playing, it will probably be OK since we don't need to consume data
  // anyway.

  const bool needToDecodeAudio = NeedToDecodeAudio();
  const bool needToDecodeVideo = NeedToDecodeVideo();

  // If we're in completed state, we should not need to decode anything else.
michael@0: MOZ_ASSERT(mState != DECODER_STATE_COMPLETED || michael@0: (!needToDecodeAudio && !needToDecodeVideo)); michael@0: michael@0: bool needIdle = !mDecoder->IsLogicallyPlaying() && michael@0: mState != DECODER_STATE_SEEKING && michael@0: !needToDecodeAudio && michael@0: !needToDecodeVideo && michael@0: !IsPlaying(); michael@0: michael@0: if (needToDecodeAudio) { michael@0: EnsureAudioDecodeTaskQueued(); michael@0: } michael@0: if (needToDecodeVideo) { michael@0: EnsureVideoDecodeTaskQueued(); michael@0: } michael@0: michael@0: if (mIsReaderIdle == needIdle) { michael@0: return; michael@0: } michael@0: mIsReaderIdle = needIdle; michael@0: RefPtr event; michael@0: if (mIsReaderIdle) { michael@0: event = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SetReaderIdle); michael@0: } else { michael@0: event = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SetReaderActive); michael@0: } michael@0: if (NS_FAILED(mDecodeTaskQueue->Dispatch(event.forget())) && michael@0: mState != DECODER_STATE_SHUTDOWN) { michael@0: NS_WARNING("Failed to dispatch event to set decoder idle state"); michael@0: } michael@0: } michael@0: michael@0: nsresult michael@0: MediaDecoderStateMachine::EnqueueDecodeSeekTask() michael@0: { michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine or decode thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (mState != DECODER_STATE_SEEKING) { michael@0: return NS_OK; michael@0: } michael@0: nsresult rv = mDecodeTaskQueue->Dispatch( michael@0: NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeSeek)); michael@0: NS_ENSURE_SUCCESS(rv, rv); michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: nsresult michael@0: MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: 
"Should be on state machine or decode thread."); michael@0: michael@0: if (NeedToDecodeAudio()) { michael@0: return EnsureAudioDecodeTaskQueued(); michael@0: } michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: nsresult michael@0: MediaDecoderStateMachine::EnsureAudioDecodeTaskQueued() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine or decode thread."); michael@0: michael@0: if (mState >= DECODER_STATE_COMPLETED) { michael@0: return NS_OK; michael@0: } michael@0: michael@0: MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA); michael@0: michael@0: if (mIsAudioDecoding && !mDispatchedAudioDecodeTask) { michael@0: nsresult rv = mDecodeTaskQueue->Dispatch( michael@0: NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeAudio)); michael@0: if (NS_SUCCEEDED(rv)) { michael@0: mDispatchedAudioDecodeTask = true; michael@0: } else { michael@0: NS_WARNING("Failed to dispatch task to decode audio"); michael@0: } michael@0: } michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: nsresult michael@0: MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine or decode thread."); michael@0: michael@0: if (NeedToDecodeVideo()) { michael@0: return EnsureVideoDecodeTaskQueued(); michael@0: } michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: nsresult michael@0: MediaDecoderStateMachine::EnsureVideoDecodeTaskQueued() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine or decode thread."); michael@0: michael@0: if (mState >= 
DECODER_STATE_COMPLETED) { michael@0: return NS_OK; michael@0: } michael@0: michael@0: MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA); michael@0: michael@0: if (mIsVideoDecoding && !mDispatchedVideoDecodeTask) { michael@0: nsresult rv = mDecodeTaskQueue->Dispatch( michael@0: NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeVideo)); michael@0: if (NS_SUCCEEDED(rv)) { michael@0: mDispatchedVideoDecodeTask = true; michael@0: } else { michael@0: NS_WARNING("Failed to dispatch task to decode video"); michael@0: } michael@0: } michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: nsresult michael@0: MediaDecoderStateMachine::StartAudioThread() michael@0: { michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine or decode thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: if (mAudioCaptured) { michael@0: NS_ASSERTION(mStopAudioThread, "mStopAudioThread must always be true if audio is captured"); michael@0: return NS_OK; michael@0: } michael@0: michael@0: mStopAudioThread = false; michael@0: if (HasAudio() && !mAudioThread) { michael@0: nsresult rv = NS_NewNamedThread("Media Audio", michael@0: getter_AddRefs(mAudioThread), michael@0: nullptr, michael@0: MEDIA_THREAD_STACK_SIZE); michael@0: if (NS_FAILED(rv)) { michael@0: DECODER_LOG(PR_LOG_WARNING, "Changed state to SHUTDOWN because failed to create audio thread"); michael@0: mState = DECODER_STATE_SHUTDOWN; michael@0: return rv; michael@0: } michael@0: michael@0: nsCOMPtr event = michael@0: NS_NewRunnableMethod(this, &MediaDecoderStateMachine::AudioLoop); michael@0: mAudioThread->Dispatch(event, NS_DISPATCH_NORMAL); michael@0: } michael@0: return NS_OK; michael@0: } michael@0: michael@0: int64_t MediaDecoderStateMachine::AudioDecodedUsecs() michael@0: { michael@0: NS_ASSERTION(HasAudio(), michael@0: "Should only call AudioDecodedUsecs() when we have audio"); michael@0: // The amount of audio we have decoded is the amount of 
audio data we've michael@0: // already decoded and pushed to the hardware, plus the amount of audio michael@0: // data waiting to be pushed to the hardware. michael@0: int64_t pushed = (mAudioEndTime != -1) ? (mAudioEndTime - GetMediaTime()) : 0; michael@0: return pushed + AudioQueue().Duration(); michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs) michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: // We consider ourselves low on decoded data if we're low on audio, michael@0: // provided we've not decoded to the end of the audio stream, or michael@0: // if we're low on video frames, provided michael@0: // we've not decoded to the end of the video stream. michael@0: return ((HasAudio() && michael@0: !AudioQueue().IsFinished() && michael@0: AudioDecodedUsecs() < aAudioUsecs) michael@0: || michael@0: (HasVideo() && michael@0: !VideoQueue().IsFinished() && michael@0: static_cast(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES)); michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::HasLowUndecodedData() michael@0: { michael@0: return HasLowUndecodedData(mLowDataThresholdUsecs); michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs) michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(mState > DECODER_STATE_DECODING_METADATA, michael@0: "Must have loaded metadata for GetBuffered() to work"); michael@0: michael@0: bool reliable; michael@0: double bytesPerSecond = mDecoder->ComputePlaybackRate(&reliable); michael@0: if (!reliable) { michael@0: // Default to assuming we have enough michael@0: return false; michael@0: } michael@0: michael@0: MediaResource* stream = mDecoder->GetResource(); michael@0: int64_t currentPos = stream->Tell(); michael@0: int64_t requiredPos = currentPos + int64_t((aUsecs/1000000.0)*bytesPerSecond); michael@0: int64_t length = stream->GetLength(); michael@0: if (length >= 0) { michael@0: requiredPos = 
std::min(requiredPos, length); michael@0: } michael@0: michael@0: return stream->GetCachedDataEnd(currentPos) < requiredPos; michael@0: } michael@0: michael@0: void michael@0: MediaDecoderStateMachine::DecodeError() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); michael@0: michael@0: // Change state to shutdown before sending error report to MediaDecoder michael@0: // and the HTMLMediaElement, so that our pipeline can start exiting michael@0: // cleanly during the sync dispatch below. michael@0: DECODER_LOG(PR_LOG_WARNING, "Decode error, changed state to SHUTDOWN"); michael@0: ScheduleStateMachine(); michael@0: mState = DECODER_STATE_SHUTDOWN; michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: michael@0: // Dispatch the event to call DecodeError synchronously. This ensures michael@0: // we're in shutdown state by the time we exit the decode thread. michael@0: // If we just moved to shutdown state here on the decode thread, we may michael@0: // cause the state machine to shutdown/free memory without closing its michael@0: // media stream properly, and we'll get callbacks from the media stream michael@0: // causing a crash. 
michael@0: { michael@0: nsCOMPtr event = michael@0: NS_NewRunnableMethod(mDecoder, &MediaDecoder::DecodeError); michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: NS_DispatchToMainThread(event, NS_DISPATCH_SYNC); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MediaDecoderStateMachine::CallDecodeMetadata() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: if (mState != DECODER_STATE_DECODING_METADATA) { michael@0: return; michael@0: } michael@0: if (NS_FAILED(DecodeMetadata())) { michael@0: DECODER_LOG(PR_LOG_WARNING, "Decode metadata failed, shutting down decoder"); michael@0: DecodeError(); michael@0: } michael@0: } michael@0: michael@0: nsresult MediaDecoderStateMachine::DecodeMetadata() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); michael@0: DECODER_LOG(PR_LOG_DEBUG, "Decoding Media Headers"); michael@0: if (mState != DECODER_STATE_DECODING_METADATA) { michael@0: return NS_ERROR_FAILURE; michael@0: } michael@0: EnsureActive(); michael@0: michael@0: nsresult res; michael@0: MediaInfo info; michael@0: MetadataTags* tags; michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: res = mReader->ReadMetadata(&info, &tags); michael@0: } michael@0: if (NS_SUCCEEDED(res) && michael@0: mState == DECODER_STATE_DECODING_METADATA && michael@0: mReader->IsWaitingMediaResources()) { michael@0: // change state to DECODER_STATE_WAIT_FOR_RESOURCES michael@0: StartWaitForResources(); michael@0: return NS_OK; michael@0: } michael@0: michael@0: mInfo = info; michael@0: michael@0: if (NS_FAILED(res) || (!info.HasValidMedia())) { michael@0: return NS_ERROR_FAILURE; michael@0: } michael@0: mDecoder->StartProgressUpdates(); michael@0: mGotDurationFromMetaData = (GetDuration() != -1); michael@0: michael@0: VideoData* videoData = FindStartTime(); michael@0: 
if (videoData) { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: RenderVideoFrame(videoData, TimeStamp::Now()); michael@0: } michael@0: michael@0: if (mState == DECODER_STATE_SHUTDOWN) { michael@0: return NS_ERROR_FAILURE; michael@0: } michael@0: michael@0: NS_ASSERTION(mStartTime != -1, "Must have start time"); michael@0: MOZ_ASSERT((!HasVideo() && !HasAudio()) || michael@0: !(mMediaSeekable && mTransportSeekable) || mEndTime != -1, michael@0: "Active seekable media should have end time"); michael@0: MOZ_ASSERT(!(mMediaSeekable && mTransportSeekable) || michael@0: GetDuration() != -1, "Seekable media should have duration"); michael@0: DECODER_LOG(PR_LOG_DEBUG, "Media goes from %lld to %lld (duration %lld) " michael@0: "transportSeekable=%d, mediaSeekable=%d", michael@0: mStartTime, mEndTime, GetDuration(), mTransportSeekable, mMediaSeekable); michael@0: michael@0: if (HasAudio() && !HasVideo()) { michael@0: // We're playing audio only. We don't need to worry about slow video michael@0: // decodes causing audio underruns, so don't buffer so much audio in michael@0: // order to reduce memory usage. michael@0: mAmpleAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR; michael@0: mLowAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR; michael@0: } michael@0: michael@0: // Inform the element that we've loaded the metadata and the first frame. 
michael@0: nsCOMPtr metadataLoadedEvent = michael@0: new AudioMetadataEventRunner(mDecoder, michael@0: mInfo.mAudio.mChannels, michael@0: mInfo.mAudio.mRate, michael@0: HasAudio(), michael@0: HasVideo(), michael@0: tags); michael@0: NS_DispatchToMainThread(metadataLoadedEvent, NS_DISPATCH_NORMAL); michael@0: michael@0: if (HasAudio()) { michael@0: RefPtr decodeTask( michael@0: NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded)); michael@0: AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue); michael@0: } michael@0: if (HasVideo()) { michael@0: RefPtr decodeTask( michael@0: NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded)); michael@0: VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue); michael@0: } michael@0: michael@0: if (mState == DECODER_STATE_DECODING_METADATA) { michael@0: DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING_METADATA to DECODING"); michael@0: StartDecoding(); michael@0: } michael@0: michael@0: // For very short media FindStartTime() can decode the entire media. michael@0: // So we need to check if this has occurred, else our decode pipeline won't michael@0: // run (since it doesn't need to) and we won't detect end of stream. 
michael@0: CheckIfDecodeComplete(); michael@0: michael@0: if ((mState == DECODER_STATE_DECODING || mState == DECODER_STATE_COMPLETED) && michael@0: mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING && michael@0: !IsPlaying()) michael@0: { michael@0: StartPlayback(); michael@0: } michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::DecodeSeek() michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); michael@0: if (mState != DECODER_STATE_SEEKING) { michael@0: return; michael@0: } michael@0: EnsureActive(); michael@0: michael@0: // During the seek, don't have a lock on the decoder state, michael@0: // otherwise long seek operations can block the main thread. michael@0: // The events dispatched to the main thread are SYNC calls. michael@0: // These calls are made outside of the decode monitor lock so michael@0: // it is safe for the main thread to makes calls that acquire michael@0: // the lock since it won't deadlock. We check the state when michael@0: // acquiring the lock again in case shutdown has occurred michael@0: // during the time when we didn't have the lock. michael@0: int64_t seekTime = mSeekTarget.mTime; michael@0: mDecoder->StopProgressUpdates(); michael@0: michael@0: bool currentTimeChanged = false; michael@0: const int64_t mediaTime = GetMediaTime(); michael@0: if (mediaTime != seekTime) { michael@0: currentTimeChanged = true; michael@0: // Stop playback now to ensure that while we're outside the monitor michael@0: // dispatching SeekingStarted, playback doesn't advance and mess with michael@0: // mCurrentFrameTime that we've setting to seekTime here. 
michael@0: StopPlayback(); michael@0: UpdatePlaybackPositionInternal(seekTime); michael@0: } michael@0: michael@0: // SeekingStarted will do a UpdateReadyStateForData which will michael@0: // inform the element and its users that we have no frames michael@0: // to display michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: nsCOMPtr startEvent = michael@0: NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStarted); michael@0: NS_DispatchToMainThread(startEvent, NS_DISPATCH_SYNC); michael@0: } michael@0: michael@0: int64_t newCurrentTime = seekTime; michael@0: if (currentTimeChanged) { michael@0: // The seek target is different than the current playback position, michael@0: // we'll need to seek the playback position, so shutdown our decode michael@0: // and audio threads. michael@0: StopAudioThread(); michael@0: ResetPlayback(); michael@0: nsresult res; michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: // Now perform the seek. We must not hold the state machine monitor michael@0: // while we seek, since the seek reads, which could block on I/O. michael@0: res = mReader->Seek(seekTime, michael@0: mStartTime, michael@0: mEndTime, michael@0: mediaTime); michael@0: michael@0: if (NS_SUCCEEDED(res) && mSeekTarget.mType == SeekTarget::Accurate) { michael@0: res = mReader->DecodeToTarget(seekTime); michael@0: } michael@0: } michael@0: michael@0: if (NS_SUCCEEDED(res)) { michael@0: int64_t nextSampleStartTime = 0; michael@0: VideoData* video = nullptr; michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: video = mReader->FindStartTime(nextSampleStartTime); michael@0: } michael@0: michael@0: // Setup timestamp state. 
michael@0: if (seekTime == mEndTime) { michael@0: newCurrentTime = mAudioStartTime = seekTime; michael@0: } else if (HasAudio()) { michael@0: AudioData* audio = AudioQueue().PeekFront(); michael@0: newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime; michael@0: } else { michael@0: newCurrentTime = video ? video->mTime : seekTime; michael@0: } michael@0: mPlayDuration = newCurrentTime - mStartTime; michael@0: michael@0: if (HasVideo()) { michael@0: if (video) { michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: RenderVideoFrame(video, TimeStamp::Now()); michael@0: } michael@0: nsCOMPtr event = michael@0: NS_NewRunnableMethod(mDecoder, &MediaDecoder::Invalidate); michael@0: NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL); michael@0: } michael@0: } michael@0: } else { michael@0: DecodeError(); michael@0: } michael@0: } michael@0: mDecoder->StartProgressUpdates(); michael@0: if (mState == DECODER_STATE_DECODING_METADATA || michael@0: mState == DECODER_STATE_DORMANT || michael@0: mState == DECODER_STATE_SHUTDOWN) { michael@0: return; michael@0: } michael@0: michael@0: // Change state to DECODING or COMPLETED now. SeekingStopped will michael@0: // call MediaDecoderStateMachine::Seek to reset our state to SEEKING michael@0: // if we need to seek again. michael@0: michael@0: nsCOMPtr stopEvent; michael@0: bool isLiveStream = mDecoder->GetResource()->GetLength() == -1; michael@0: if (GetMediaTime() == mEndTime && !isLiveStream) { michael@0: // Seeked to end of media, move to COMPLETED state. Note we don't do michael@0: // this if we're playing a live stream, since the end of media will advance michael@0: // once we download more data! 
michael@0: DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to COMPLETED", seekTime); michael@0: stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStoppedAtEnd); michael@0: // Explicitly set our state so we don't decode further, and so michael@0: // we report playback ended to the media element. michael@0: mState = DECODER_STATE_COMPLETED; michael@0: mIsAudioDecoding = false; michael@0: mIsVideoDecoding = false; michael@0: DispatchDecodeTasksIfNeeded(); michael@0: } else { michael@0: DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to DECODING", seekTime); michael@0: stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStopped); michael@0: StartDecoding(); michael@0: } michael@0: michael@0: if (newCurrentTime != mediaTime) { michael@0: UpdatePlaybackPositionInternal(newCurrentTime); michael@0: if (mDecoder->GetDecodedStream()) { michael@0: SetSyncPointForMediaStream(); michael@0: } michael@0: } michael@0: michael@0: // Try to decode another frame to detect if we're at the end... michael@0: DECODER_LOG(PR_LOG_DEBUG, "Seek completed, mCurrentFrameTime=%lld", mCurrentFrameTime); michael@0: michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: NS_DispatchToMainThread(stopEvent, NS_DISPATCH_SYNC); michael@0: } michael@0: michael@0: // Reset quick buffering status. This ensures that if we began the michael@0: // seek while quick-buffering, we won't bypass quick buffering mode michael@0: // if we need to buffer after the seek. michael@0: mQuickBuffering = false; michael@0: michael@0: ScheduleStateMachine(); michael@0: } michael@0: michael@0: // Runnable to dispose of the decoder and state machine on the main thread. 
michael@0: class nsDecoderDisposeEvent : public nsRunnable { michael@0: public: michael@0: nsDecoderDisposeEvent(already_AddRefed aDecoder, michael@0: already_AddRefed aStateMachine) michael@0: : mDecoder(aDecoder), mStateMachine(aStateMachine) {} michael@0: NS_IMETHOD Run() { michael@0: NS_ASSERTION(NS_IsMainThread(), "Must be on main thread."); michael@0: mStateMachine->ReleaseDecoder(); michael@0: mDecoder->ReleaseStateMachine(); michael@0: mStateMachine = nullptr; michael@0: mDecoder = nullptr; michael@0: return NS_OK; michael@0: } michael@0: private: michael@0: nsRefPtr mDecoder; michael@0: nsRefPtr mStateMachine; michael@0: }; michael@0: michael@0: // Runnable which dispatches an event to the main thread to dispose of the michael@0: // decoder and state machine. This runs on the state machine thread after michael@0: // the state machine has shutdown, and all events for that state machine have michael@0: // finished running. michael@0: class nsDispatchDisposeEvent : public nsRunnable { michael@0: public: michael@0: nsDispatchDisposeEvent(MediaDecoder* aDecoder, michael@0: MediaDecoderStateMachine* aStateMachine) michael@0: : mDecoder(aDecoder), mStateMachine(aStateMachine) {} michael@0: NS_IMETHOD Run() { michael@0: NS_DispatchToMainThread(new nsDecoderDisposeEvent(mDecoder.forget(), michael@0: mStateMachine.forget())); michael@0: return NS_OK; michael@0: } michael@0: private: michael@0: nsRefPtr mDecoder; michael@0: nsRefPtr mStateMachine; michael@0: }; michael@0: michael@0: nsresult MediaDecoderStateMachine::RunStateMachine() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: MediaResource* resource = mDecoder->GetResource(); michael@0: NS_ENSURE_TRUE(resource, NS_ERROR_NULL_POINTER); michael@0: michael@0: switch (mState) { michael@0: case DECODER_STATE_SHUTDOWN: { michael@0: if (IsPlaying()) { michael@0: StopPlayback(); michael@0: } michael@0: StopAudioThread(); michael@0: // If mAudioThread is non-null after StopAudioThread 
completes, we are michael@0: // running in a nested event loop waiting for Shutdown() on michael@0: // mAudioThread to complete. Return to the event loop and let it michael@0: // finish processing before continuing with shutdown. michael@0: if (mAudioThread) { michael@0: MOZ_ASSERT(mStopAudioThread); michael@0: return NS_OK; michael@0: } michael@0: michael@0: // The reader's listeners hold references to the state machine, michael@0: // creating a cycle which keeps the state machine and its shared michael@0: // thread pools alive. So break it here. michael@0: AudioQueue().ClearListeners(); michael@0: VideoQueue().ClearListeners(); michael@0: michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: // Wait for the thread decoding to exit. michael@0: mDecodeTaskQueue->Shutdown(); michael@0: mDecodeTaskQueue = nullptr; michael@0: mReader->ReleaseMediaResources(); michael@0: } michael@0: // Now that those threads are stopped, there's no possibility of michael@0: // mPendingWakeDecoder being needed again. Revoke it. michael@0: mPendingWakeDecoder = nullptr; michael@0: michael@0: MOZ_ASSERT(mState == DECODER_STATE_SHUTDOWN, michael@0: "How did we escape from the shutdown state?"); michael@0: // We must daisy-chain these events to destroy the decoder. We must michael@0: // destroy the decoder on the main thread, but we can't destroy the michael@0: // decoder while this thread holds the decoder monitor. We can't michael@0: // dispatch an event to the main thread to destroy the decoder from michael@0: // here, as the event may run before the dispatch returns, and we michael@0: // hold the decoder monitor here. We also want to guarantee that the michael@0: // state machine is destroyed on the main thread, and so the michael@0: // event runner running this function (which holds a reference to the michael@0: // state machine) needs to finish and be released in order to allow michael@0: // that. 
So we dispatch an event to run after this event runner has michael@0: // finished and released its monitor/references. That event then will michael@0: // dispatch an event to the main thread to release the decoder and michael@0: // state machine. michael@0: GetStateMachineThread()->Dispatch( michael@0: new nsDispatchDisposeEvent(mDecoder, this), NS_DISPATCH_NORMAL); michael@0: michael@0: mTimer->Cancel(); michael@0: mTimer = nullptr; michael@0: return NS_OK; michael@0: } michael@0: michael@0: case DECODER_STATE_DORMANT: { michael@0: if (IsPlaying()) { michael@0: StopPlayback(); michael@0: } michael@0: StopAudioThread(); michael@0: // Now that those threads are stopped, there's no possibility of michael@0: // mPendingWakeDecoder being needed again. Revoke it. michael@0: mPendingWakeDecoder = nullptr; michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: // Wait for the thread decoding, if any, to exit. michael@0: mDecodeTaskQueue->AwaitIdle(); michael@0: mReader->ReleaseMediaResources(); michael@0: } michael@0: return NS_OK; michael@0: } michael@0: michael@0: case DECODER_STATE_WAIT_FOR_RESOURCES: { michael@0: return NS_OK; michael@0: } michael@0: michael@0: case DECODER_STATE_DECODING_METADATA: { michael@0: // Ensure we have a decode thread to decode metadata. michael@0: return EnqueueDecodeMetadataTask(); michael@0: } michael@0: michael@0: case DECODER_STATE_DECODING: { michael@0: if (mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING && michael@0: IsPlaying()) michael@0: { michael@0: // We're playing, but the element/decoder is in paused state. Stop michael@0: // playing! michael@0: StopPlayback(); michael@0: } michael@0: michael@0: if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING && michael@0: !IsPlaying()) { michael@0: // We are playing, but the state machine does not know it yet. Tell it michael@0: // that it is, so that the clock can be properly queried. 
michael@0: StartPlayback(); michael@0: } michael@0: michael@0: AdvanceFrame(); michael@0: NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING || michael@0: IsStateMachineScheduled() || michael@0: mPlaybackRate == 0.0, "Must have timer scheduled"); michael@0: return NS_OK; michael@0: } michael@0: michael@0: case DECODER_STATE_BUFFERING: { michael@0: TimeStamp now = TimeStamp::Now(); michael@0: NS_ASSERTION(!mBufferingStart.IsNull(), "Must know buffering start time."); michael@0: michael@0: // We will remain in the buffering state if we've not decoded enough michael@0: // data to begin playback, or if we've not downloaded a reasonable michael@0: // amount of data inside our buffering time. michael@0: TimeDuration elapsed = now - mBufferingStart; michael@0: bool isLiveStream = resource->GetLength() == -1; michael@0: if ((isLiveStream || !mDecoder->CanPlayThrough()) && michael@0: elapsed < TimeDuration::FromSeconds(mBufferingWait * mPlaybackRate) && michael@0: (mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_USECS) michael@0: : HasLowUndecodedData(mBufferingWait * USECS_PER_S)) && michael@0: !mDecoder->IsDataCachedToEndOfResource() && michael@0: !resource->IsSuspended()) michael@0: { michael@0: DECODER_LOG(PR_LOG_DEBUG, "Buffering: wait %ds, timeout in %.3lfs %s", michael@0: mBufferingWait, mBufferingWait - elapsed.ToSeconds(), michael@0: (mQuickBuffering ? 
"(quick exit)" : "")); michael@0: ScheduleStateMachine(USECS_PER_S); michael@0: return NS_OK; michael@0: } else { michael@0: DECODER_LOG(PR_LOG_DEBUG, "Changed state from BUFFERING to DECODING"); michael@0: DECODER_LOG(PR_LOG_DEBUG, "Buffered for %.3lfs", (now - mBufferingStart).ToSeconds()); michael@0: StartDecoding(); michael@0: } michael@0: michael@0: // Notify to allow blocked decoder thread to continue michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: UpdateReadyState(); michael@0: if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING && michael@0: !IsPlaying()) michael@0: { michael@0: StartPlayback(); michael@0: } michael@0: NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled"); michael@0: return NS_OK; michael@0: } michael@0: michael@0: case DECODER_STATE_SEEKING: { michael@0: // Ensure we have a decode thread to perform the seek. michael@0: return EnqueueDecodeSeekTask(); michael@0: } michael@0: michael@0: case DECODER_STATE_COMPLETED: { michael@0: // Play the remaining media. We want to run AdvanceFrame() at least michael@0: // once to ensure the current playback position is advanced to the michael@0: // end of the media, and so that we update the readyState. michael@0: if (VideoQueue().GetSize() > 0 || michael@0: (HasAudio() && !mAudioCompleted) || michael@0: (mDecoder->GetDecodedStream() && !mDecoder->GetDecodedStream()->IsFinished())) michael@0: { michael@0: AdvanceFrame(); michael@0: NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING || michael@0: mPlaybackRate == 0 || michael@0: IsStateMachineScheduled(), michael@0: "Must have timer scheduled"); michael@0: return NS_OK; michael@0: } michael@0: michael@0: // StopPlayback in order to reset the IsPlaying() state so audio michael@0: // is restarted correctly. michael@0: StopPlayback(); michael@0: michael@0: if (mState != DECODER_STATE_COMPLETED) { michael@0: // While we're presenting a frame we can change state. 
Whatever changed michael@0: // our state should have scheduled another state machine run. michael@0: NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled"); michael@0: return NS_OK; michael@0: } michael@0: michael@0: StopAudioThread(); michael@0: // When we're decoding to a stream, the stream's main-thread finish signal michael@0: // will take care of calling MediaDecoder::PlaybackEnded. michael@0: if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING && michael@0: !mDecoder->GetDecodedStream()) { michael@0: int64_t videoTime = HasVideo() ? mVideoFrameEndTime : 0; michael@0: int64_t clockTime = std::max(mEndTime, std::max(videoTime, GetAudioClock())); michael@0: UpdatePlaybackPosition(clockTime); michael@0: michael@0: { michael@0: // Wait for the state change is completed in the main thread, michael@0: // otherwise we might see |mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING| michael@0: // in next loop and send |MediaDecoder::PlaybackEnded| again to trigger 'ended' michael@0: // event twice in the media element. 
michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: nsCOMPtr event = michael@0: NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackEnded); michael@0: NS_DispatchToMainThread(event, NS_DISPATCH_SYNC); michael@0: } michael@0: } michael@0: return NS_OK; michael@0: } michael@0: } michael@0: michael@0: return NS_OK; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::RenderVideoFrame(VideoData* aData, michael@0: TimeStamp aTarget) michael@0: { michael@0: NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), michael@0: "Should be on state machine or decode thread."); michael@0: mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn(); michael@0: michael@0: if (aData->mDuplicate) { michael@0: return; michael@0: } michael@0: michael@0: VERBOSE_LOG("playing video frame %lld", aData->mTime); michael@0: michael@0: VideoFrameContainer* container = mDecoder->GetVideoFrameContainer(); michael@0: if (container) { michael@0: container->SetCurrentFrame(ThebesIntSize(aData->mDisplay), aData->mImage, michael@0: aTarget); michael@0: } michael@0: } michael@0: michael@0: int64_t michael@0: MediaDecoderStateMachine::GetAudioClock() michael@0: { michael@0: // We must hold the decoder monitor while using the audio stream off the michael@0: // audio thread to ensure that it doesn't get destroyed on the audio thread michael@0: // while we're using it. michael@0: AssertCurrentThreadInMonitor(); michael@0: if (!HasAudio() || mAudioCaptured) michael@0: return -1; michael@0: if (!mAudioStream) { michael@0: // Audio thread hasn't played any data yet. michael@0: return mAudioStartTime; michael@0: } michael@0: int64_t t = mAudioStream->GetPosition(); michael@0: return (t == -1) ? 
-1 : t + mAudioStartTime; michael@0: } michael@0: michael@0: int64_t MediaDecoderStateMachine::GetVideoStreamPosition() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (!IsPlaying()) { michael@0: return mPlayDuration + mStartTime; michael@0: } michael@0: michael@0: // The playbackRate has been just been changed, reset the playstartTime. michael@0: if (mResetPlayStartTime) { michael@0: mPlayStartTime = TimeStamp::Now(); michael@0: mResetPlayStartTime = false; michael@0: } michael@0: michael@0: int64_t pos = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration; michael@0: pos -= mBasePosition; michael@0: NS_ASSERTION(pos >= 0, "Video stream position should be positive."); michael@0: return mBasePosition + pos * mPlaybackRate + mStartTime; michael@0: } michael@0: michael@0: int64_t MediaDecoderStateMachine::GetClock() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: // Determine the clock time. If we've got audio, and we've not reached michael@0: // the end of the audio, use the audio clock. However if we've finished michael@0: // audio, or don't have audio, use the system clock. If our output is being michael@0: // fed to a MediaStream, use that stream as the source of the clock. michael@0: int64_t clock_time = -1; michael@0: DecodedStreamData* stream = mDecoder->GetDecodedStream(); michael@0: if (!IsPlaying()) { michael@0: clock_time = mPlayDuration + mStartTime; michael@0: } else if (stream) { michael@0: clock_time = GetCurrentTimeViaMediaStreamSync(); michael@0: } else { michael@0: int64_t audio_time = GetAudioClock(); michael@0: if (HasAudio() && !mAudioCompleted && audio_time != -1) { michael@0: clock_time = audio_time; michael@0: // Resync against the audio clock, while we're trusting the michael@0: // audio clock. This ensures no "drift", particularly on Linux. 
michael@0: mPlayDuration = clock_time - mStartTime; michael@0: mPlayStartTime = TimeStamp::Now(); michael@0: } else { michael@0: // Audio is disabled on this system. Sync to the system clock. michael@0: clock_time = GetVideoStreamPosition(); michael@0: // Ensure the clock can never go backwards. michael@0: NS_ASSERTION(mCurrentFrameTime <= clock_time || mPlaybackRate <= 0, michael@0: "Clock should go forwards if the playback rate is > 0."); michael@0: } michael@0: } michael@0: return clock_time; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::AdvanceFrame() michael@0: { michael@0: NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(!HasAudio() || mAudioStartTime != -1, michael@0: "Should know audio start time if we have audio."); michael@0: michael@0: if (mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING) { michael@0: return; michael@0: } michael@0: michael@0: // If playbackRate is 0.0, we should stop the progress, but not be in paused michael@0: // state, per spec. michael@0: if (mPlaybackRate == 0.0) { michael@0: return; michael@0: } michael@0: michael@0: int64_t clock_time = GetClock(); michael@0: // Skip frames up to the frame at the playback position, and figure out michael@0: // the time remaining until it's time to display the next frame. 
michael@0: int64_t remainingTime = AUDIO_DURATION_USECS; michael@0: NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time."); michael@0: nsAutoPtr currentFrame; michael@0: #ifdef PR_LOGGING michael@0: int32_t droppedFrames = 0; michael@0: #endif michael@0: if (VideoQueue().GetSize() > 0) { michael@0: VideoData* frame = VideoQueue().PeekFront(); michael@0: while (mRealTime || clock_time >= frame->mTime) { michael@0: mVideoFrameEndTime = frame->GetEndTime(); michael@0: currentFrame = frame; michael@0: #ifdef PR_LOGGING michael@0: VERBOSE_LOG("discarding video frame %lld", frame->mTime); michael@0: if (droppedFrames++) { michael@0: VERBOSE_LOG("discarding video frame %lld (%d so far)", frame->mTime, droppedFrames-1); michael@0: } michael@0: #endif michael@0: VideoQueue().PopFront(); michael@0: // Notify the decode thread that the video queue's buffers may have michael@0: // free'd up space for more frames. michael@0: mDecoder->GetReentrantMonitor().NotifyAll(); michael@0: mDecoder->UpdatePlaybackOffset(frame->mOffset); michael@0: if (VideoQueue().GetSize() == 0) michael@0: break; michael@0: frame = VideoQueue().PeekFront(); michael@0: } michael@0: // Current frame has already been presented, wait until it's time to michael@0: // present the next frame. michael@0: if (frame && !currentFrame) { michael@0: int64_t now = IsPlaying() ? clock_time : mPlayDuration; michael@0: michael@0: remainingTime = frame->mTime - now; michael@0: } michael@0: } michael@0: michael@0: // Check to see if we don't have enough data to play up to the next frame. michael@0: // If we don't, switch to buffering mode. 
michael@0: MediaResource* resource = mDecoder->GetResource(); michael@0: if (mState == DECODER_STATE_DECODING && michael@0: mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING && michael@0: HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_USECS) && michael@0: !mDecoder->IsDataCachedToEndOfResource() && michael@0: !resource->IsSuspended()) { michael@0: if (JustExitedQuickBuffering() || HasLowUndecodedData()) { michael@0: if (currentFrame) { michael@0: VideoQueue().PushFront(currentFrame.forget()); michael@0: } michael@0: StartBuffering(); michael@0: // Don't go straight back to the state machine loop since that might michael@0: // cause us to start decoding again and we could flip-flop between michael@0: // decoding and quick-buffering. michael@0: ScheduleStateMachine(USECS_PER_S); michael@0: return; michael@0: } michael@0: } michael@0: michael@0: // We've got enough data to keep playing until at least the next frame. michael@0: // Start playing now if need be. michael@0: if (!IsPlaying() && ((mFragmentEndTime >= 0 && clock_time < mFragmentEndTime) || mFragmentEndTime < 0)) { michael@0: StartPlayback(); michael@0: } michael@0: michael@0: if (currentFrame) { michael@0: // Decode one frame and display it. michael@0: TimeStamp presTime = mPlayStartTime - UsecsToDuration(mPlayDuration) + michael@0: UsecsToDuration(currentFrame->mTime - mStartTime); michael@0: NS_ASSERTION(currentFrame->mTime >= mStartTime, "Should have positive frame time"); michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: // If we have video, we want to increment the clock in steps of the frame michael@0: // duration. michael@0: RenderVideoFrame(currentFrame, presTime); michael@0: } michael@0: // If we're no longer playing after dropping and reacquiring the lock, michael@0: // playback must've been stopped on the decode thread (by a seek, for michael@0: // example). In that case, the current frame is probably out of date. 
michael@0: if (!IsPlaying()) { michael@0: ScheduleStateMachine(); michael@0: return; michael@0: } michael@0: MediaDecoder::FrameStatistics& frameStats = mDecoder->GetFrameStatistics(); michael@0: frameStats.NotifyPresentedFrame(); michael@0: remainingTime = currentFrame->GetEndTime() - clock_time; michael@0: currentFrame = nullptr; michael@0: } michael@0: michael@0: // Cap the current time to the larger of the audio and video end time. michael@0: // This ensures that if we're running off the system clock, we don't michael@0: // advance the clock to after the media end time. michael@0: if (mVideoFrameEndTime != -1 || mAudioEndTime != -1) { michael@0: // These will be non -1 if we've displayed a video frame, or played an audio frame. michael@0: clock_time = std::min(clock_time, std::max(mVideoFrameEndTime, mAudioEndTime)); michael@0: if (clock_time > GetMediaTime()) { michael@0: // Only update the playback position if the clock time is greater michael@0: // than the previous playback position. The audio clock can michael@0: // sometimes report a time less than its previously reported in michael@0: // some situations, and we need to gracefully handle that. michael@0: UpdatePlaybackPosition(clock_time); michael@0: } michael@0: } michael@0: michael@0: // If the number of audio/video frames queued has changed, either by michael@0: // this function popping and playing a video frame, or by the audio michael@0: // thread popping and playing an audio frame, we may need to update our michael@0: // ready state. Post an update to do so. 
michael@0: UpdateReadyState(); michael@0: michael@0: ScheduleStateMachine(remainingTime); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::Wait(int64_t aUsecs) { michael@0: NS_ASSERTION(OnAudioThread(), "Only call on the audio thread"); michael@0: AssertCurrentThreadInMonitor(); michael@0: TimeStamp end = TimeStamp::Now() + UsecsToDuration(std::max(USECS_PER_MS, aUsecs)); michael@0: TimeStamp now; michael@0: while ((now = TimeStamp::Now()) < end && michael@0: mState != DECODER_STATE_SHUTDOWN && michael@0: mState != DECODER_STATE_SEEKING && michael@0: !mStopAudioThread && michael@0: IsPlaying()) michael@0: { michael@0: int64_t ms = static_cast(NS_round((end - now).ToSeconds() * 1000)); michael@0: if (ms == 0 || ms > UINT32_MAX) { michael@0: break; michael@0: } michael@0: mDecoder->GetReentrantMonitor().Wait(PR_MillisecondsToInterval(static_cast(ms))); michael@0: } michael@0: } michael@0: michael@0: VideoData* MediaDecoderStateMachine::FindStartTime() michael@0: { michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: int64_t startTime = 0; michael@0: mStartTime = 0; michael@0: VideoData* v = nullptr; michael@0: { michael@0: ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); michael@0: v = mReader->FindStartTime(startTime); michael@0: } michael@0: if (startTime != 0) { michael@0: mStartTime = startTime; michael@0: if (mGotDurationFromMetaData) { michael@0: NS_ASSERTION(mEndTime != -1, michael@0: "We should have mEndTime as supplied duration here"); michael@0: // We were specified a duration from a Content-Duration HTTP header. michael@0: // Adjust mEndTime so that mEndTime-mStartTime matches the specified michael@0: // duration. michael@0: mEndTime = mStartTime + mEndTime; michael@0: } michael@0: } michael@0: // Set the audio start time to be start of media. 
If this lies before the michael@0: // first actual audio frame we have, we'll inject silence during playback michael@0: // to ensure the audio starts at the correct time. michael@0: mAudioStartTime = mStartTime; michael@0: DECODER_LOG(PR_LOG_DEBUG, "Media start time is %lld", mStartTime); michael@0: return v; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::UpdateReadyState() { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: MediaDecoderOwner::NextFrameStatus nextFrameStatus = GetNextFrameStatus(); michael@0: if (nextFrameStatus == mLastFrameStatus) { michael@0: return; michael@0: } michael@0: mLastFrameStatus = nextFrameStatus; michael@0: michael@0: /* This is a bit tricky. MediaDecoder::UpdateReadyStateForData will run on michael@0: * the main thread and re-evaluate GetNextFrameStatus there, passing it to michael@0: * HTMLMediaElement::UpdateReadyStateForData. It doesn't use the value of michael@0: * GetNextFrameStatus we computed here, because what we're computing here michael@0: * could be stale by the time MediaDecoder::UpdateReadyStateForData runs. michael@0: * We only compute GetNextFrameStatus here to avoid posting runnables to the main michael@0: * thread unnecessarily. 
michael@0: */ michael@0: nsCOMPtr event; michael@0: event = NS_NewRunnableMethod(mDecoder, &MediaDecoder::UpdateReadyStateForData); michael@0: NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL); michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::JustExitedQuickBuffering() michael@0: { michael@0: return !mDecodeStartTime.IsNull() && michael@0: mQuickBuffering && michael@0: (TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromMicroseconds(QUICK_BUFFER_THRESHOLD_USECS); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::StartBuffering() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: michael@0: if (mState != DECODER_STATE_DECODING) { michael@0: // We only move into BUFFERING state if we're actually decoding. michael@0: // If we're currently doing something else, we don't need to buffer, michael@0: // and more importantly, we shouldn't overwrite mState to interrupt michael@0: // the current operation, as that could leave us in an inconsistent michael@0: // state! michael@0: return; michael@0: } michael@0: michael@0: if (IsPlaying()) { michael@0: StopPlayback(); michael@0: } michael@0: michael@0: TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime; michael@0: // Go into quick buffering mode provided we've not just left buffering using michael@0: // a "quick exit". This stops us flip-flopping between playing and buffering michael@0: // when the download speed is similar to the decode speed. michael@0: mQuickBuffering = michael@0: !JustExitedQuickBuffering() && michael@0: decodeDuration < UsecsToDuration(QUICK_BUFFER_THRESHOLD_USECS); michael@0: mBufferingStart = TimeStamp::Now(); michael@0: michael@0: // We need to tell the element that buffering has started. michael@0: // We can't just directly send an asynchronous runnable that michael@0: // eventually fires the "waiting" event. 
The problem is that michael@0: // there might be pending main-thread events, such as "data michael@0: // received" notifications, that mean we're not actually still michael@0: // buffering by the time this runnable executes. So instead michael@0: // we just trigger UpdateReadyStateForData; when it runs, it michael@0: // will check the current state and decide whether to tell michael@0: // the element we're buffering or not. michael@0: UpdateReadyState(); michael@0: mState = DECODER_STATE_BUFFERING; michael@0: DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING to BUFFERING, decoded for %.3lfs", michael@0: decodeDuration.ToSeconds()); michael@0: #ifdef PR_LOGGING michael@0: MediaDecoder::Statistics stats = mDecoder->GetStatistics(); michael@0: DECODER_LOG(PR_LOG_DEBUG, "Playback rate: %.1lfKB/s%s download rate: %.1lfKB/s%s", michael@0: stats.mPlaybackRate/1024, stats.mPlaybackRateReliable ? "" : " (unreliable)", michael@0: stats.mDownloadRate/1024, stats.mDownloadRateReliable ? "" : " (unreliable)"); michael@0: #endif michael@0: } michael@0: michael@0: nsresult MediaDecoderStateMachine::GetBuffered(dom::TimeRanges* aBuffered) { michael@0: MediaResource* resource = mDecoder->GetResource(); michael@0: NS_ENSURE_TRUE(resource, NS_ERROR_FAILURE); michael@0: resource->Pin(); michael@0: nsresult res = mReader->GetBuffered(aBuffered, mStartTime); michael@0: resource->Unpin(); michael@0: return res; michael@0: } michael@0: michael@0: nsresult MediaDecoderStateMachine::CallRunStateMachine() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread."); michael@0: michael@0: // If audio is being captured, stop the audio thread if it's running michael@0: if (mAudioCaptured) { michael@0: StopAudioThread(); michael@0: } michael@0: michael@0: MOZ_ASSERT(!mInRunningStateMachine, "State machine cycles must run in sequence!"); michael@0: mTimeout = TimeStamp(); michael@0: mInRunningStateMachine = 
true; michael@0: nsresult res = RunStateMachine(); michael@0: mInRunningStateMachine = false; michael@0: return res; michael@0: } michael@0: michael@0: nsresult MediaDecoderStateMachine::TimeoutExpired(int aTimerId) michael@0: { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: NS_ASSERTION(OnStateMachineThread(), "Must be on state machine thread"); michael@0: mTimer->Cancel(); michael@0: if (mTimerId == aTimerId) { michael@0: return CallRunStateMachine(); michael@0: } else { michael@0: return NS_OK; michael@0: } michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder() { michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: DispatchAudioDecodeTaskIfNeeded(); michael@0: DispatchVideoDecodeTaskIfNeeded(); michael@0: } michael@0: michael@0: class TimerEvent : public nsITimerCallback, public nsRunnable { michael@0: NS_DECL_THREADSAFE_ISUPPORTS michael@0: public: michael@0: TimerEvent(MediaDecoderStateMachine* aStateMachine, int aTimerId) michael@0: : mStateMachine(aStateMachine), mTimerId(aTimerId) {} michael@0: michael@0: NS_IMETHOD Run() MOZ_OVERRIDE { michael@0: return mStateMachine->TimeoutExpired(mTimerId); michael@0: } michael@0: michael@0: NS_IMETHOD Notify(nsITimer* aTimer) { michael@0: return mStateMachine->TimeoutExpired(mTimerId); michael@0: } michael@0: private: michael@0: const nsRefPtr mStateMachine; michael@0: int mTimerId; michael@0: }; michael@0: michael@0: NS_IMPL_ISUPPORTS(TimerEvent, nsITimerCallback, nsIRunnable); michael@0: michael@0: nsresult MediaDecoderStateMachine::ScheduleStateMachine(int64_t aUsecs) { michael@0: AssertCurrentThreadInMonitor(); michael@0: NS_ABORT_IF_FALSE(GetStateMachineThread(), michael@0: "Must have a state machine thread to schedule"); michael@0: michael@0: if (mState == DECODER_STATE_SHUTDOWN) { michael@0: return NS_ERROR_FAILURE; michael@0: } michael@0: aUsecs = std::max(aUsecs, 0); michael@0: 
michael@0: TimeStamp timeout = TimeStamp::Now() + UsecsToDuration(aUsecs); michael@0: if (!mTimeout.IsNull() && timeout >= mTimeout) { michael@0: // We've already scheduled a timer set to expire at or before this time, michael@0: // or have an event dispatched to run the state machine. michael@0: return NS_OK; michael@0: } michael@0: michael@0: uint32_t ms = static_cast((aUsecs / USECS_PER_MS) & 0xFFFFFFFF); michael@0: if (mRealTime && ms > 40) { michael@0: ms = 40; michael@0: } michael@0: michael@0: // Don't cancel the timer here for this function will be called from michael@0: // different threads. michael@0: michael@0: nsresult rv = NS_ERROR_FAILURE; michael@0: nsRefPtr event = new TimerEvent(this, mTimerId+1); michael@0: michael@0: if (ms == 0) { michael@0: // Dispatch a runnable to the state machine thread when delay is 0. michael@0: // It will has less latency than dispatching a runnable to the state michael@0: // machine thread which will then schedule a zero-delay timer. michael@0: rv = GetStateMachineThread()->Dispatch(event, NS_DISPATCH_NORMAL); michael@0: } else if (OnStateMachineThread()) { michael@0: rv = mTimer->InitWithCallback(event, ms, nsITimer::TYPE_ONE_SHOT); michael@0: } else { michael@0: MOZ_ASSERT(false, "non-zero delay timer should be only scheduled in state machine thread"); michael@0: } michael@0: michael@0: if (NS_SUCCEEDED(rv)) { michael@0: mTimeout = timeout; michael@0: ++mTimerId; michael@0: } else { michael@0: NS_WARNING("Failed to schedule state machine"); michael@0: } michael@0: michael@0: return rv; michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::OnDecodeThread() const michael@0: { michael@0: return mDecodeTaskQueue->IsCurrentThreadIn(); michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::OnStateMachineThread() const michael@0: { michael@0: bool rv = false; michael@0: mStateMachineThreadPool->IsOnCurrentThread(&rv); michael@0: return rv; michael@0: } michael@0: michael@0: nsIEventTarget* 
MediaDecoderStateMachine::GetStateMachineThread() michael@0: { michael@0: return mStateMachineThreadPool->GetEventTarget(); michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); michael@0: NS_ASSERTION(aPlaybackRate != 0, michael@0: "PlaybackRate == 0 should be handled before this function."); michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: michael@0: if (mPlaybackRate == aPlaybackRate) { michael@0: return; michael@0: } michael@0: michael@0: // Get position of the last time we changed the rate. michael@0: if (!HasAudio()) { michael@0: // mBasePosition is a position in the video stream, not an absolute time. michael@0: if (mState == DECODER_STATE_SEEKING) { michael@0: mBasePosition = mSeekTarget.mTime - mStartTime; michael@0: } else { michael@0: mBasePosition = GetVideoStreamPosition(); michael@0: } michael@0: mPlayDuration = mBasePosition; michael@0: mResetPlayStartTime = true; michael@0: mPlayStartTime = TimeStamp::Now(); michael@0: } michael@0: michael@0: mPlaybackRate = aPlaybackRate; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::SetPreservesPitch(bool aPreservesPitch) michael@0: { michael@0: NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); michael@0: ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); michael@0: michael@0: mPreservesPitch = aPreservesPitch; michael@0: } michael@0: michael@0: void michael@0: MediaDecoderStateMachine::SetMinimizePrerollUntilPlaybackStarts() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: mMinimizePreroll = true; michael@0: } michael@0: michael@0: bool MediaDecoderStateMachine::IsShutdown() michael@0: { michael@0: AssertCurrentThreadInMonitor(); michael@0: return GetState() == DECODER_STATE_SHUTDOWN; michael@0: } michael@0: michael@0: void MediaDecoderStateMachine::QueueMetadata(int64_t 
aPublishTime, michael@0: int aChannels, michael@0: int aRate, michael@0: bool aHasAudio, michael@0: bool aHasVideo, michael@0: MetadataTags* aTags) michael@0: { michael@0: NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); michael@0: AssertCurrentThreadInMonitor(); michael@0: TimedMetadata* metadata = new TimedMetadata; michael@0: metadata->mPublishTime = aPublishTime; michael@0: metadata->mChannels = aChannels; michael@0: metadata->mRate = aRate; michael@0: metadata->mHasAudio = aHasAudio; michael@0: metadata->mHasVideo = aHasVideo; michael@0: metadata->mTags = aTags; michael@0: mMetadataManager.QueueMetadata(metadata); michael@0: } michael@0: michael@0: } // namespace mozilla michael@0: michael@0: // avoid redefined macro in unified build michael@0: #undef DECODER_LOG michael@0: #undef VERBOSE_LOG